[r-cran-vgam] 35/63: Import Upstream version 0.9-3

Andreas Tille tille at debian.org
Tue Jan 24 13:54:34 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-vgam.

commit a536e4a5a01e18982d4dcebe4cb2791cb98c75d6
Author: Andreas Tille <tille at debian.org>
Date:   Tue Jan 24 14:16:58 2017 +0100

    Import Upstream version 0.9-3
---
 BUGS                                           |   20 +
 DESCRIPTION                                    |   23 +-
 DISCLAIMER                                     |    9 -
 MD5                                            |  936 ++---
 NAMESPACE                                      |  168 +-
 NEWS                                           |  332 +-
 R/Links.R                                      |   75 +-
 R/aamethods.q                                  |   64 +-
 R/bAIC.q                                       |  339 +-
 R/build.terms.vlm.q                            |  123 +-
 R/calibrate.q                                  |  570 +--
 R/cao.R                                        |  329 +-
 R/cao.fit.q                                    | 2261 ++++++-----
 R/coef.vlm.q                                   |    8 +-
 R/cqo.R                                        |  279 +-
 R/cqo.fit.q                                    | 1085 ++---
 R/deviance.vlm.q                               |   40 +-
 R/family.actuary.R                             |  118 +-
 R/family.aunivariate.R                         |  381 +-
 R/family.basics.R                              |  185 +-
 R/family.binomial.R                            |  494 ++-
 R/family.bivariate.R                           | 1478 +++++--
 R/family.categorical.R                         |  198 +-
 R/family.censored.R                            |  217 +-
 R/family.circular.R                            |   30 +-
 R/family.exp.R                                 |   58 +-
 R/family.extremes.R                            |  530 +--
 R/family.fishing.R                             |  125 -
 R/family.functions.R                           |   16 +-
 R/family.genetic.R                             |   26 +-
 R/family.glmgam.R                              |  481 +--
 R/family.loglin.R                              |   37 +-
 R/family.math.R                                |   88 +-
 R/family.mixture.R                             |   60 +-
 R/family.nonlinear.R                           |   34 +-
 R/family.normal.R                              | 1098 +++---
 R/family.others.R                              |   89 +-
 R/family.positive.R                            | 1304 +++---
 R/family.qreg.R                                |  550 +--
 R/family.quantal.R                             |   38 +-
 R/family.rcim.R                                |  494 ++-
 R/family.rcqo.R                                |  624 +--
 R/family.robust.R                              |   22 +-
 R/family.rrr.R                                 | 2052 +++++-----
 R/family.sur.R                                 |  192 +-
 R/family.survival.R                            |   51 +-
 R/family.ts.R                                  |   88 +-
 R/family.univariate.R                          | 1360 ++++---
 R/family.vglm.R                                |   34 +-
 R/family.zeroinf.R                             | 5021 ++++++++++++++++++------
 R/fittedvlm.R                                  |  104 +-
 R/formula.vlm.q                                |    2 +-
 R/links.q                                      |  499 +--
 R/logLik.vlm.q                                 |   86 +-
 R/lrwaldtest.R                                 |   30 +-
 R/model.matrix.vglm.q                          |  569 +--
 R/mux.q                                        |  111 +-
 R/nobs.R                                       |   28 +-
 R/plot.vglm.q                                  |  446 ++-
 R/predict.vgam.q                               |  200 +-
 R/predict.vglm.q                               |  121 +-
 R/predict.vlm.q                                |   47 +-
 R/print.vglm.q                                 |    4 +-
 R/print.vlm.q                                  |   12 +-
 R/qrrvglm.control.q                            |   64 +-
 R/qtplot.q                                     | 1061 +++--
 R/residuals.vlm.q                              |  322 +-
 R/rrvglm.R                                     |   78 +-
 R/rrvglm.control.q                             |  150 +-
 R/rrvglm.fit.q                                 |  630 +--
 R/s.vam.q                                      |  120 +-
 R/smart.R                                      |   86 +-
 R/summary.vgam.q                               |   41 +-
 R/summary.vglm.q                               |   48 +-
 R/summary.vlm.q                                |   89 +-
 R/uqo.R                                        |  916 -----
 R/vgam.R                                       |  200 +-
 R/vgam.control.q                               |  170 +-
 R/vgam.fit.q                                   |  147 +-
 R/vgam.match.q                                 |    9 +-
 R/vglm.R                                       |   48 +-
 R/vglm.control.q                               |  114 +-
 R/vglm.fit.q                                   |  290 +-
 R/vlm.R                                        |   33 +-
 R/vlm.wfit.q                                   |   56 +-
 R/vsmooth.spline.q                             |  104 +-
 build/vignette.rds                             |  Bin 0 -> 380 bytes
 data/Huggins89.t1.rda                          |  Bin 439 -> 441 bytes
 data/Huggins89table1.rda                       |  Bin 0 -> 443 bytes
 data/Perom.rda                                 |  Bin 431 -> 0 bytes
 data/V1.txt.gz                                 |  Bin 0 -> 65 bytes
 data/alclevels.rda                             |  Bin 549 -> 549 bytes
 data/alcoff.rda                                |  Bin 546 -> 546 bytes
 data/auuc.rda                                  |  Bin 243 -> 245 bytes
 data/backPain.rda                              |  Bin 480 -> 474 bytes
 data/beggs.rda                                 |  Bin 0 -> 196 bytes
 data/car.all.rda                               |  Bin 6987 -> 6969 bytes
 data/corbet.rda                                |  Bin 0 -> 237 bytes
 data/crashbc.rda                               |  Bin 373 -> 374 bytes
 data/crashf.rda                                |  Bin 339 -> 340 bytes
 data/crashi.rda                                |  Bin 489 -> 490 bytes
 data/crashmc.rda                               |  Bin 383 -> 339 bytes
 data/crashp.rda                                |  Bin 375 -> 375 bytes
 data/crashtr.rda                               |  Bin 360 -> 361 bytes
 data/deermice.rda                              |  Bin 0 -> 395 bytes
 data/finney44.rda                              |  Bin 207 -> 209 bytes
 data/hspider.rda                               |  Bin 1343 -> 1344 bytes
 data/leukemia.rda                              |  Bin 328 -> 329 bytes
 data/machinists.txt.gz                         |  Bin 0 -> 80 bytes
 data/marital.nz.rda                            |  Bin 10480 -> 10440 bytes
 data/mmt.rda                                   |  Bin 4205 -> 4238 bytes
 data/pneumo.rda                                |  Bin 266 -> 267 bytes
 data/prats.txt.gz                              |  Bin 0 -> 140 bytes
 data/prinia.rda                                |  Bin 0 -> 1229 bytes
 data/ruge.rda                                  |  Bin 255 -> 257 bytes
 data/toxop.rda                                 |  Bin 481 -> 473 bytes
 data/venice.rda                                |  Bin 971 -> 978 bytes
 data/venice90.rda                              |  Bin 8220 -> 8004 bytes
 data/wffc.indiv.rda                            |  Bin 2590 -> 0 bytes
 data/wffc.nc.rda                               |  Bin 4247 -> 0 bytes
 data/wffc.rda                                  |  Bin 10202 -> 0 bytes
 data/wffc.teams.rda                            |  Bin 540 -> 0 bytes
 inst/doc/categoricalVGAM.R                     |  186 +-
 inst/doc/categoricalVGAM.Rnw                   |  263 +-
 inst/doc/categoricalVGAM.pdf                   |  Bin 677107 -> 642552 bytes
 man/AA.Aa.aa.Rd                                |    2 +-
 man/AB.Ab.aB.ab.Rd                             |    4 +-
 man/AB.Ab.aB.ab2.Rd                            |    6 +-
 man/ABO.Rd                                     |    4 +-
 man/AICvlm.Rd                                  |   61 +-
 man/BICvlm.Rd                                  |  127 +
 man/Coef.qrrvglm-class.Rd                      |   22 +-
 man/Coef.qrrvglm.Rd                            |   27 +-
 man/Coef.vlm.Rd                                |    6 +-
 man/CommonVGAMffArguments.Rd                   |  108 +-
 man/DeLury.Rd                                  |  198 -
 man/G1G2G3.Rd                                  |    2 +-
 man/Huggins89.t1.Rd                            |  165 +-
 man/Inv.gaussian.Rd                            |    2 +-
 man/Links.Rd                                   |   42 +-
 man/Max.Rd                                     |   10 +-
 man/Opt.Rd                                     |    4 +-
 man/Pareto.Rd                                  |   31 +-
 man/{Qvar.Rd => QvarUC.Rd}                     |   47 +-
 man/{Rcam.Rd => Rcim.Rd}                       |    7 +-
 man/SUR.Rd                                     |   22 +-
 man/Tol.Rd                                     |    6 +-
 man/V1.Rd                                      |  103 +
 man/VGAM-package.Rd                            |   14 +-
 man/alaplace3.Rd                               |   19 +-
 man/alaplaceUC.Rd                              |   15 +-
 man/amh.Rd                                     |   13 +-
 man/amhUC.Rd                                   |   37 +-
 man/amlbinomial.Rd                             |    6 +-
 man/amlexponential.Rd                          |    6 +-
 man/amlnormal.Rd                               |   10 +-
 man/amlpoisson.Rd                              |    2 +-
 man/auxposbernoulli.t.Rd                       |  115 +
 man/beggs.Rd                                   |   88 +
 man/benfUC.Rd                                  |    5 +
 man/benini.Rd                                  |    2 +-
 man/beniniUC.Rd                                |   33 +-
 man/beta.ab.Rd                                 |    4 +-
 man/betaII.Rd                                  |    8 +-
 man/betabinomUC.Rd                             |   27 +-
 man/betabinomial.Rd                            |   11 +-
 man/betabinomial.ab.Rd                         |   33 +-
 man/betaff.Rd                                  |    6 +-
 man/betageomUC.Rd                              |   23 +-
 man/betageometric.Rd                           |   29 +-
 man/betanormUC.Rd                              |   43 +-
 man/betaprime.Rd                               |   28 +-
 man/biclaytoncop.Rd                            |  132 +
 man/biclaytoncopUC.Rd                          |  106 +
 man/{gumbelIbiv.Rd => bigumbelI.Rd}            |   12 +-
 man/bilogis4UC.Rd                              |   10 +-
 man/bilogistic4.Rd                             |   16 +-
 man/binom2.or.Rd                               |   38 +-
 man/binomialff.Rd                              |   13 +-
 man/binormal.Rd                                |   14 +-
 man/{pnorm2UC.Rd => binormalUC.Rd}             |   90 +-
 man/binormalcop.Rd                             |  135 +
 man/binormcopUC.Rd                             |   94 +
 man/bisa.Rd                                    |   23 +-
 man/bisaUC.Rd                                  |   13 +-
 man/bistudentt.Rd                              |  125 +
 man/bistudenttUC.Rd                            |  120 +
 man/bivgamma.mckay.Rd                          |   16 +-
 man/brat.Rd                                    |    8 +-
 man/bratUC.Rd                                  |   14 +-
 man/bratt.Rd                                   |   22 +-
 man/calibrate.Rd                               |    9 +-
 man/calibrate.qrrvglm.Rd                       |   23 +-
 man/calibrate.qrrvglm.control.Rd               |   47 +-
 man/cao.Rd                                     |   53 +-
 man/cao.control.Rd                             |  100 +-
 man/cardUC.Rd                                  |   41 +-
 man/cardioid.Rd                                |    4 +
 man/cauchit.Rd                                 |    6 +-
 man/cauchy.Rd                                  |    6 +-
 man/cdf.lmscreg.Rd                             |    2 +-
 man/{cennormal1.Rd => cennormal.Rd}            |   43 +-
 man/cenpoisson.Rd                              |   22 +-
 man/cgumbel.Rd                                 |   26 +-
 man/chinese.nz.Rd                              |   12 +-
 man/chisq.Rd                                   |    6 +-
 man/cloglog.Rd                                 |   28 +-
 man/{ccoef-methods.Rd => concoef-methods.Rd}   |   16 +-
 man/{ccoef.Rd => concoef.Rd}                   |   33 +-
 man/constraints.Rd                             |   18 +-
 man/corbet.Rd                                  |   57 +
 man/cqo.Rd                                     |  163 +-
 man/crashes.Rd                                 |   12 +-
 man/cumulative.Rd                              |   59 +-
 man/{Perom.Rd => deermice.Rd}                  |   44 +-
 man/deplot.lmscreg.Rd                          |    4 +-
 man/depvar.Rd                                  |   14 +-
 man/df.residual.Rd                             |   14 +-
 man/dirichlet.Rd                               |    4 +-
 man/dirmul.old.Rd                              |   14 +-
 man/dirmultinomial.Rd                          |    6 +-
 man/{dcennormal1.Rd => double.cennormal.Rd}    |   30 +-
 man/{dexpbinomial.Rd => double.expbinomial.Rd} |   34 +-
 man/eexpUC.Rd                                  |   20 +-
 man/enormUC.Rd                                 |   22 +-
 man/erf.Rd                                     |   19 +-
 man/erlang.Rd                                  |   18 +-
 man/eunifUC.Rd                                 |   20 +-
 man/expexp.Rd                                  |   13 +-
 man/expexp1.Rd                                 |   16 +-
 man/expgeometric.Rd                            |    8 +-
 man/expgeometricUC.Rd                          |   13 +-
 man/expint.Rd                                  |   94 +
 man/explink.Rd                                 |    6 +-
 man/{explogarithmicUC.Rd => explogUC.Rd}       |   16 +-
 man/{explogarithmic.Rd => explogff.Rd}         |   18 +-
 man/exponential.Rd                             |    6 +-
 man/exppoisson.Rd                              |   11 +-
 man/exppoissonUC.Rd                            |   13 +-
 man/felix.Rd                                   |    7 +-
 man/felixUC.Rd                                 |   13 +-
 man/fff.Rd                                     |    4 +-
 man/fgm.Rd                                     |   13 +-
 man/fgmUC.Rd                                   |   19 +-
 man/fill.Rd                                    |   22 +-
 man/fisherz.Rd                                 |    4 +-
 man/fittedvlm.Rd                               |   38 +-
 man/fnormUC.Rd                                 |   76 -
 man/foldnormUC.Rd                              |   92 +
 man/{fnormal1.Rd => foldnormal.Rd}             |   26 +-
 man/frank.Rd                                   |   14 +-
 man/frankUC.Rd                                 |   49 +-
 man/frechet.Rd                                 |    3 +
 man/frechetUC.Rd                               |    2 +-
 man/freund61.Rd                                |   28 +-
 man/fsqrt.Rd                                   |   18 +-
 man/gamma1.Rd                                  |    5 +-
 man/gamma2.Rd                                  |   13 +-
 man/gamma2.ab.Rd                               |   23 +-
 man/gammahyp.Rd                                |   25 +-
 man/garma.Rd                                   |    6 +-
 man/gaussianff.Rd                              |    6 +-
 man/genbetaII.Rd                               |    3 +-
 man/gengamma.Rd                                |    4 +
 man/gengammaUC.Rd                              |   11 +-
 man/genpoisson.Rd                              |    8 +-
 man/genrayleigh.Rd                             |    6 +-
 man/genrayleighUC.Rd                           |   12 +-
 man/geometric.Rd                               |    6 +-
 man/gev.Rd                                     |    2 +-
 man/golf.Rd                                    |    8 +-
 man/gompertzUC.Rd                              |   11 +-
 man/gpd.Rd                                     |    6 +-
 man/grain.us.Rd                                |    2 +-
 man/grc.Rd                                     |  137 +-
 man/gumbel.Rd                                  |    2 +-
 man/gumbelIIUC.Rd                              |    2 +-
 man/gumbelUC.Rd                                |    4 +-
 man/hatvalues.Rd                               |    4 +-
 man/hormone.Rd                                 |   22 +-
 man/hspider.Rd                                 |   51 +-
 man/huber.Rd                                   |   10 +-
 man/hunua.Rd                                   |    8 +-
 man/hyperg.Rd                                  |   31 +-
 man/hypersecant.Rd                             |   50 +-
 man/hzeta.Rd                                   |   22 +-
 man/hzetaUC.Rd                                 |   37 +-
 man/iam.Rd                                     |    8 +-
 man/identity.Rd                                |   18 +-
 man/inv.gaussianff.Rd                          |   19 +-
 man/invbinomial.Rd                             |    9 +-
 man/is.parallel.Rd                             |    2 +-
 man/kendall.tau.Rd                             |  119 +
 man/koenker.Rd                                 |   13 +-
 man/koenkerUC.Rd                               |   26 +-
 man/kumar.Rd                                   |    2 +-
 man/kumarUC.Rd                                 |    2 +-
 man/lambertW.Rd                                |    2 +
 man/laplace.Rd                                 |    5 +-
 man/laplaceUC.Rd                               |   33 +-
 man/latvar.Rd                                  |   17 +-
 man/leipnik.Rd                                 |   27 +-
 man/lerch.Rd                                   |    2 +-
 man/levy.Rd                                    |   22 +-
 man/lgammaUC.Rd                                |    3 +
 man/lgammaff.Rd                                |   18 +-
 man/lindUC.Rd                                  |   17 +-
 man/lindley.Rd                                 |    5 +
 man/lino.Rd                                    |    2 +-
 man/linoUC.Rd                                  |    2 +-
 man/lms.bcg.Rd                                 |    2 +-
 man/lms.bcn.Rd                                 |   17 +-
 man/lms.yjn.Rd                                 |    2 +-
 man/log1pexp.Rd                                |   66 +
 man/logF.Rd                                    |  110 +
 man/logF.UC.Rd                                 |   75 +
 man/logUC.Rd                                   |   23 +-
 man/logc.Rd                                    |    4 +-
 man/loge.Rd                                    |   28 +-
 man/logff.Rd                                   |   11 +-
 man/logistic.Rd                                |   16 +-
 man/logit.Rd                                   |   12 +-
 man/loglaplace.Rd                              |   30 +-
 man/loglinb2.Rd                                |   17 +-
 man/loglinb3.Rd                                |    4 +-
 man/loglog.Rd                                  |    6 +-
 man/lognormal.Rd                               |   41 +-
 man/lomaxUC.Rd                                 |    2 +-
 man/lrtest.Rd                                  |    2 +-
 man/lvplot.Rd                                  |    5 +-
 man/lvplot.qrrvglm.Rd                          |   45 +-
 man/lvplot.rrvglm.Rd                           |   29 +-
 man/machinists.Rd                              |   80 +
 man/makehamUC.Rd                               |    2 +-
 man/margeff.Rd                                 |   12 +-
 man/{mbinomial.Rd => matched.binomial.Rd}      |   20 +-
 man/maxwell.Rd                                 |    5 +-
 man/maxwellUC.Rd                               |    2 +-
 man/mccullagh89.Rd                             |    2 +-
 man/mix2exp.Rd                                 |    2 +-
 man/{mix2normal1.Rd => mix2normal.Rd}          |   26 +-
 man/mix2poisson.Rd                             |    6 +-
 man/mlogit.Rd                                  |   27 +-
 man/mmt.Rd                                     |   59 +
 man/model.framevlm.Rd                          |    6 +-
 man/model.matrixvlm.Rd                         |   23 +-
 man/moffset.Rd                                 |   29 +-
 man/morgenstern.Rd                             |    5 +-
 man/multinomial.Rd                             |   30 +-
 man/nakagami.Rd                                |    4 +-
 man/nakagamiUC.Rd                              |    7 +-
 man/nbcanlink.Rd                               |   17 +-
 man/nbolf.Rd                                   |    6 +-
 man/negbinomial.Rd                             |   24 +-
 man/negbinomial.size.Rd                        |   12 +-
 man/normal.vcm.Rd                              |  271 ++
 man/notdocumentedyet.Rd                        |   85 +-
 man/olym.Rd                                    |    5 +-
 man/ordpoisson.Rd                              |   10 +-
 man/paretoIV.Rd                                |   19 +-
 man/paretoIVUC.Rd                              |   16 +-
 man/{pareto1.Rd => paretoff.Rd}                |   85 +-
 man/perks.Rd                                   |    2 +-
 man/perksUC.Rd                                 |    4 +-
 man/persp.qrrvglm.Rd                           |   84 +-
 man/plackUC.Rd                                 |   17 +-
 man/plackett.Rd                                |    6 +-
 man/plotqrrvglm.Rd                             |    4 +-
 man/plotrcim0.Rd                               |   31 +-
 man/plotvgam.Rd                                |  154 +-
 man/plotvglm.Rd                                |  110 +
 man/{poissonp.Rd => poisson.points.Rd}         |   50 +-
 man/poisson.pointsUC.Rd                        |   88 +
 man/poissonff.Rd                               |    9 +-
 man/polf.Rd                                    |    6 +-
 man/polonoUC.Rd                                |   20 +-
 man/posbernUC.Rd                               |   57 +-
 man/posbernoulli.b.Rd                          |  230 +-
 man/posbernoulli.t.Rd                          |  267 +-
 man/posbernoulli.tb.Rd                         |  275 +-
 man/posbinomUC.Rd                              |    8 +-
 man/posbinomial.Rd                             |   74 +-
 man/posgeomUC.Rd                               |    4 +-
 man/posnegbinUC.Rd                             |   10 +-
 man/posnegbinomial.Rd                          |   38 +-
 man/posnormUC.Rd                               |    6 +-
 man/{posnormal1.Rd => posnormal.Rd}            |   16 +-
 man/pospoisUC.Rd                               |    4 +-
 man/pospoisson.Rd                              |    2 +-
 man/{powl.Rd => powerlink.Rd}                  |   29 +-
 man/prats.Rd                                   |   95 +
 man/predictqrrvglm.Rd                          |   19 +-
 man/predictvglm.Rd                             |   10 +-
 man/prentice74.Rd                              |    7 +-
 man/prinia.Rd                                  |  129 +
 man/probit.Rd                                  |    8 +-
 man/propodds.Rd                                |    8 +-
 man/prplot.Rd                                  |    2 +-
 man/qrrvglm.control.Rd                         |   40 +-
 man/qtplot.gumbel.Rd                           |    6 +-
 man/qtplot.lmscreg.Rd                          |    4 +-
 man/quasibinomialff.Rd                         |    2 +-
 man/quasipoissonff.Rd                          |    6 +-
 man/qvar.Rd                                    |  114 +
 man/rayleigh.Rd                                |   30 +-
 man/rayleighUC.Rd                              |    5 +-
 man/rcqo.Rd                                    |  133 +-
 man/recexp1.Rd                                 |    6 +-
 man/reciprocal.Rd                              |   27 +-
 man/{recnormal1.Rd => recnormal.Rd}            |   18 +-
 man/rhobit.Rd                                  |    4 +-
 man/riceUC.Rd                                  |   18 +-
 man/riceff.Rd                                  |   14 +-
 man/{rig.Rd => rigff.Rd}                       |   19 +-
 man/rlplot.egev.Rd                             |    8 +-
 man/rrar.Rd                                    |   18 +-
 man/rrvglm-class.Rd                            |    4 +-
 man/rrvglm.Rd                                  |   25 +-
 man/rrvglm.control.Rd                          |   31 +-
 man/rrvglm.optim.control.Rd                    |    8 +
 man/seq2binomial.Rd                            |   17 +-
 man/simplex.Rd                                 |    3 +
 man/simplexUC.Rd                               |   24 +-
 man/skellam.Rd                                 |   19 +-
 man/skellamUC.Rd                               |   24 +-
 man/{snormUC.Rd => skewnormUC.Rd}              |   56 +-
 man/{skewnormal1.Rd => skewnormal.Rd}          |   53 +-
 man/studentt.Rd                                |   39 +-
 man/tikuv.Rd                                   |    6 +-
 man/tikuvUC.Rd                                 |    6 +-
 man/tobit.Rd                                   |   48 +-
 man/tobitUC.Rd                                 |    4 +-
 man/toxop.Rd                                   |    3 +-
 man/triangle.Rd                                |   65 +-
 man/triangleUC.Rd                              |   13 +-
 man/trplot.Rd                                  |    4 +-
 man/trplot.qrrvglm.Rd                          |   16 +-
 man/{tparetoUC.Rd => truncparetoUC.Rd}         |   60 +-
 man/truncweibull.Rd                            |    2 +-
 man/undocumented-methods.Rd                    |   91 +-
 man/{normal1.Rd => uninormal.Rd}               |   70 +-
 man/uqo.Rd                                     |  322 --
 man/uqo.control.Rd                             |  303 --
 man/venice.Rd                                  |    2 +-
 man/vgam-class.Rd                              |    4 +-
 man/vgam.Rd                                    |    9 +-
 man/vglm-class.Rd                              |    2 +-
 man/vglm.Rd                                    |   20 +-
 man/vglm.control.Rd                            |   24 +-
 man/vonmises.Rd                                |   10 +-
 man/vsmooth.spline.Rd                          |   13 +-
 man/{wald.Rd => waldff.Rd}                     |   17 +-
 man/weibull.Rd                                 |    2 +-
 man/weightsvglm.Rd                             |   12 +-
 man/wffc.P2star.Rd                             |  102 -
 man/wffc.Rd                                    |  214 -
 man/wffc.indiv.Rd                              |   50 -
 man/wffc.nc.Rd                                 |   61 -
 man/wffc.teams.Rd                              |   40 -
 man/yeo.johnson.Rd                             |    6 +-
 man/yip88.Rd                                   |   14 +-
 man/zabinomUC.Rd                               |    2 +-
 man/zabinomial.Rd                              |   68 +-
 man/zageomUC.Rd                                |    2 +-
 man/zageometric.Rd                             |   68 +-
 man/zanegbinUC.Rd                              |    8 +-
 man/zanegbinomial.Rd                           |   85 +-
 man/zapoisUC.Rd                                |   10 +-
 man/zapoisson.Rd                               |   71 +-
 man/zetaff.Rd                                  |    4 +-
 man/zibinomUC.Rd                               |    6 +-
 man/zibinomial.Rd                              |   87 +-
 man/zigeomUC.Rd                                |    4 +-
 man/zigeometric.Rd                             |   77 +-
 man/zinegbinUC.Rd                              |    6 +-
 man/zinegbinomial.Rd                           |   65 +-
 man/zipebcom.Rd                                |    5 +-
 man/zipf.Rd                                    |    5 +
 man/zipfUC.Rd                                  |    4 +-
 man/zipoisUC.Rd                                |   14 +-
 man/zipoisson.Rd                               |  115 +-
 src/caqo3.c                                    | 3200 +++++++--------
 src/cqof.f                                     | 2306 +++++++++++
 src/ei.f                                       |  535 +++
 src/lms.f                                      |  190 +-
 src/rgam.f                                     |  623 +--
 src/rgam3.c                                    |  375 +-
 src/specfun3.c                                 |   93 +
 src/tyeepolygamma.f                            |  153 +
 src/vgam.f                                     | 1571 ++++----
 src/vgam3.c                                    |  578 +--
 src/vlinpack1.f                                |   41 +-
 src/vmux.f                                     |  496 +--
 src/vmux3.c                                    |    2 -
 src/zeta3.c                                    |  167 +-
 {inst/doc => vignettes}/categoricalVGAM.Rnw    |  263 +-
 {inst/doc => vignettes}/categoricalVGAMbib.bib |    0
 497 files changed, 32347 insertions(+), 21572 deletions(-)

diff --git a/BUGS b/BUGS
index f0aa161..70475f2 100755
--- a/BUGS
+++ b/BUGS
@@ -1,5 +1,25 @@
 Here is a list of known bugs.
 
+
+2013-11
+
+vgam() can only handle constraint matrices cmat, say, such that
+t(cmat) %*% cmat is diagonal.
+
+
+
+
+
+
+2013-07
+
+quasipoisson()'s scale parameter estimate does not handle
+prior weights correctly.
+
+
+
+
+
 2012-09
 
 
diff --git a/DESCRIPTION b/DESCRIPTION
index 5906366..6548b2a 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,23 +1,22 @@
 Package: VGAM
-Version: 0.9-1
-Date: 2013-04-27
+Version: 0.9-3
+Date: 2013-11-11
 Title: Vector Generalized Linear and Additive Models
 Author: Thomas W. Yee <t.yee at auckland.ac.nz>
 Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
-Depends: R (>= 2.15.1), splines, methods, stats, stats4
-Suggests: MASS
+Depends: R (>= 3.0.0), methods, splines, stats, stats4
+Suggests: VGAMdata, MASS
 Description: Vector generalized linear and additive models, and
-        associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
-        Reduced-Rank VGAMs). This package fits many models and
-        distribution by maximum likelihood estimation (MLE) or
-        penalized MLE. Also fits constrained ordination models in
-        ecology.
+    associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
+    Reduced-Rank VGAMs). This package fits many models and
+    distribution by maximum likelihood estimation (MLE) or
+    penalized MLE. Also fits constrained ordination models in
+    ecology.
 License: GPL-2
-Imports: methods, stats, stats4
 URL: http://www.stat.auckland.ac.nz/~yee/VGAM
 LazyLoad: yes
 LazyData: yes
-Packaged: 2013-04-27 04:22:15 UTC; tyee001
+Packaged: 2013-11-11 08:56:33 UTC; tyee001
 NeedsCompilation: yes
 Repository: CRAN
-Date/Publication: 2013-04-27 09:02:44
+Date/Publication: 2013-11-11 10:44:08
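[Editorial note, not part of the upstream patch] The Description field above summarises what the package fits (VGLMs, VGAMs and reduced-rank variants). A minimal sketch of such a fit, assuming VGAM 0.9-3 is installed and using the pneumo data set shipped with the package:

    # Proportional-odds (cumulative logit) VGLM for the pneumo data.
    library(VGAM)
    fit <- vglm(cbind(normal, mild, severe) ~ let,
                family = cumulative(parallel = TRUE), data = pneumo)
    coef(fit, matrix = TRUE)   # one column of coefficients per linear predictor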
diff --git a/DISCLAIMER b/DISCLAIMER
deleted file mode 100755
index c3c3f73..0000000
--- a/DISCLAIMER
+++ /dev/null
@@ -1,9 +0,0 @@
-The VGAM package for R is still in the development stage, meaning that
-new features are still being added and bugs are still being found on a
-regular basis. This product is available on a use-at-your-own-risk basis:
-the Author assumes no liability for loss or damage of any kind resulting
-from the use of this product. The code is distributed in the hope that
-it will be useful, but WITHOUT ANY WARRANTY; without even the implied
-warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-
diff --git a/MD5 b/MD5
index 62c0978..0627046 100644
--- a/MD5
+++ b/MD5
@@ -1,137 +1,138 @@
-f6c2eaaf925e53832fcb53239b4f5cc8 *BUGS
-6a7fba4cfaba8a30efbd9872bcdb525f *DESCRIPTION
-dd959d3a0cd680792122813a7d58d506 *DISCLAIMER
-2af0a233dbb327202c9855a893e5fe4f *NAMESPACE
-1a92d93a22cfa2999d96ddaf2b98f990 *NEWS
-d62d56dbac3a02af9d7c8b2bbcb39e8b *R/Links.R
-adf209eb2798923a1c63284e2f35a74f *R/aamethods.q
+8c5675aa567c646d89290b58a4cfaa9f *BUGS
+b910a842720c3379e3dd13c4f917c933 *DESCRIPTION
+e37447d4cea5fa0da1a71f245361a74e *NAMESPACE
+565401ecc939e282c87a3f98eabd6d39 *NEWS
+59420bff11b9834fd831e16ea40bb5de *R/Links.R
+b68412217ebf58e1fab01023685f2e28 *R/aamethods.q
 15d1737d6888f9bb905bb0d2d82a385b *R/add1.vglm.q
 1738067495adbcc1127a1f8f3cc7988a *R/attrassign.R
-832a8467cdb8625a3be4ae3d17f2b90f *R/bAIC.q
-5cdd8c0f82dea0e413e04bc2cff8c123 *R/build.terms.vlm.q
-b6860bb9ee446a7dd9b684c95aa5bc05 *R/calibrate.q
-6e439ff28115e3dee0999c1fb16549d8 *R/cao.R
-ccee03271e151a65bfdfe831b9f3e8b5 *R/cao.fit.q
-86abaa804bbae3663eba175e424cb507 *R/coef.vlm.q
-bfa85b0c6a4c0d8ef179e167e6fb6d93 *R/cqo.R
-fe11e7df7fc7466a1ad1ae2eb7302790 *R/cqo.fit.q
-abd0b60fa8407957c67d4392d7af26fe *R/deviance.vlm.q
+4671105abcdfd3f97ae5d90d175a900d *R/bAIC.q
+b3f7c41a22160b2be01388bb4729a2e4 *R/build.terms.vlm.q
+311795db1aa566d8ff417aee9c4df740 *R/calibrate.q
+0a1d46f67706361aa60b097432ea8fc2 *R/cao.R
+504faaa01a59e539ffd15f30f9ff5acd *R/cao.fit.q
+fb3f18427d53bac3ec8c617de30c85d8 *R/coef.vlm.q
+0e7cd8ad7b3eb7105d7a7c861f82aaaf *R/cqo.R
+aa2c06a5c9ca30e30759e18ad0e793f9 *R/cqo.fit.q
+cf576980ae24bf3f85a7020dbd256c21 *R/deviance.vlm.q
 a6b5f34153d4828e30d35addf7d6ba9f *R/effects.vglm.q
-c613675237da101f62d7b09a5a9022b9 *R/family.actuary.R
-ef98d702078dfe8028a9ca67293ff0e9 *R/family.aunivariate.R
-821b41568b72112b39e165bee64fff63 *R/family.basics.R
-cd6ac62efb3f8c85f9943b9e366ffcf6 *R/family.binomial.R
-dc12aa7a7020b9fcb98101ecbc151976 *R/family.bivariate.R
-f69cb7e860b7381e6876499575031e00 *R/family.categorical.R
-7b8a2ffd2480d2bc1686acd3975925bc *R/family.censored.R
-6e22b04d33eac0d296369deb9eb0df6d *R/family.circular.R
-635eb4cbaa3c7d3eb7aa589425520f91 *R/family.exp.R
-5221dacf55524f6604bc7f63a087f349 *R/family.extremes.R
-d3fb45972867409ec229acb7a053bee4 *R/family.fishing.R
-9826cf013ff6c981f11f32a06d26d3ab *R/family.functions.R
-7ee48c4f697f650bcd8ed13d50ff99de *R/family.genetic.R
-3c5e4f0c78262e274ac99bc697c0626b *R/family.glmgam.R
-4b271424d2b5c532da022b833fa091c7 *R/family.loglin.R
-ff91b689c8f0f97da4f15ce5a938a248 *R/family.math.R
-0aafeb41fdf7d02837c021b387f94b55 *R/family.mixture.R
-bc95bc6f29a8bbb163a03e800627c802 *R/family.nonlinear.R
-8f365a42782116a4049d78817ef26443 *R/family.normal.R
-71d2f8f47e7be7e42bc725383fe9b8b2 *R/family.others.R
-d4e9e1cdf543f7b59a67b9229aa4adc5 *R/family.positive.R
-cf30ede4751332d2e97a032812719180 *R/family.qreg.R
-b4a7110d940135f7372ae51f0a32070a *R/family.quantal.R
-c365f0b1c200d523c71b3fddffd31ef7 *R/family.rcim.R
-9160d6726da1309528dc856c44c75421 *R/family.rcqo.R
-2a7ba5edcb2a5e996431700f90cc5ca5 *R/family.robust.R
-f4c4f0abbc5708e66af94e0e873a590e *R/family.rrr.R
-089ae9a0fd12c18dcb10fde5fc394291 *R/family.sur.R
-1b6f4e240e52a537fc9855e5608344ca *R/family.survival.R
-e53b98453c106c2176b60a6e2241b08b *R/family.ts.R
-df658830892993fe457fc0146aaa2a3d *R/family.univariate.R
-11583197eff8498de3c6121ab66c707a *R/family.vglm.R
-d59a4ccad1536b81308ec42dffec9a2a *R/family.zeroinf.R
-daae5f4987b87f24e1dc0aa1c3602147 *R/fittedvlm.R
-4b557b8346c0b3634105f243ddfbf24a *R/formula.vlm.q
+5668902675c4e8818c973a8aca4adbd5 *R/family.actuary.R
+9d0e98376672881f6693a5a4c7af5b61 *R/family.aunivariate.R
+b49d01e3292f91ce879c7e7b1145fad4 *R/family.basics.R
+c1a10631b9328703644c6b22ff393d7e *R/family.binomial.R
+fe3f5130bbed41000e153186dd773c8a *R/family.bivariate.R
+1bb78a77f7ac54cc92004d0e9f162e9e *R/family.categorical.R
+3ae5afb6905d7b3c5ea35c706e445920 *R/family.censored.R
+ced3316d303a84675faad1940500beba *R/family.circular.R
+3b7b74a4ef27e54824a4af494c0c721f *R/family.exp.R
+4549f805202ec51f190ceea31f99396c *R/family.extremes.R
+84b6392d2e2ab0572c82e48289a36086 *R/family.functions.R
+f7ffc2d272c714de5312d5cb622ca252 *R/family.genetic.R
+57af159832a0ed6286695e496c8a3106 *R/family.glmgam.R
+a6b54c94a4b3a3194d6e2195389a5cbe *R/family.loglin.R
+196748235f416e4ea7bab4ba3e89c735 *R/family.math.R
+b1df44d9c95b0a181b5c04d608433473 *R/family.mixture.R
+1235bba7beac46448fec0eb4ae270247 *R/family.nonlinear.R
+ff0ae86bd8d96ed432db192282a36a19 *R/family.normal.R
+8af9bc39cfc06bc48b430e5dfa3e74bd *R/family.others.R
+004d050b5870d5be780d2bc9c432a8bc *R/family.positive.R
+222f359413083e8f5df24f5bcc72c326 *R/family.qreg.R
+2a7d98513a51f5f184162b42a3dfb0e4 *R/family.quantal.R
+37446b205d1c2511162d879f8896ac70 *R/family.rcim.R
+f9ed7587232d6df5e85bafa74ec287d9 *R/family.rcqo.R
+374b452b264858120b6fe4c96fb2f09a *R/family.robust.R
+0301dc8dd0500af5ae42fe1c2745bda3 *R/family.rrr.R
+14aa4be83182d3144b232b0bf092a9cb *R/family.sur.R
+a9db58da404d57dba9c49297e09f5de6 *R/family.survival.R
+89c9cd66745c1821d65a3b42b2450867 *R/family.ts.R
+13eabf023bb3d4adb8b953c655a91809 *R/family.univariate.R
+cda84d0b9c9b9febcbce42193cd4d86c *R/family.vglm.R
+1241fa6635376025a3ce09b463818256 *R/family.zeroinf.R
+bb94b04f5e868cba658a15690a98f92a *R/fittedvlm.R
+26ec5b1b01a6c0533d11223e46d48f5c *R/formula.vlm.q
 6ac6db14a8544716dd49fdc95cc06893 *R/generic.q
-104a446ef4f0f15e56f3342ca01b34a0 *R/links.q
-88359e6090cbf9b245bc49ac35805e1e *R/logLik.vlm.q
-a3ccdcdbfa8ca1a1881c8853a7eafd2f *R/lrwaldtest.R
-6f82978825337429b7c05d36c12ed68a *R/model.matrix.vglm.q
-1732357e0c3e1a2e278f364f005762bb *R/mux.q
-ea6f08f274acb629f4cedb9a46e0ec20 *R/nobs.R
-6414d0ff798fffb260139b4131c7808b *R/plot.vglm.q
-f87f2f2a142820503c24a9124a7f7fd4 *R/predict.vgam.q
-90b48a5c5096e443ef28634d1447e600 *R/predict.vglm.q
-a57f83121934ed29f45a6440def49bde *R/predict.vlm.q
-53a8b748527a8b5738121fefb84587fc *R/print.vglm.q
-e1d1e80faf5b350b32676e53a565263f *R/print.vlm.q
-0fa72053f84f1c878c451c1561a66e3a *R/qrrvglm.control.q
-7a85e29e0e6c86a808dbc67a5058a2f2 *R/qtplot.q
-512cf9358cb8da821891c5ef1e7ca4f0 *R/residuals.vlm.q
-d7b993156aea56e835e2af8d3df41cf6 *R/rrvglm.R
-42e7eec20c6ca8bbb76472b3f98f5536 *R/rrvglm.control.q
-470aa87f01b3f571a962465cd2064102 *R/rrvglm.fit.q
+ffb44fe3c44c388a0519b758d63308ce *R/links.q
+7ea40ac8830ca2f0a346a4244eceb228 *R/logLik.vlm.q
+14cc4a53774931fcc2bf130a5961b197 *R/lrwaldtest.R
+ea71f8066cb5ee1ed6615608e3d6d3b7 *R/model.matrix.vglm.q
+fcbd53c0bd1e21449e830a04f6be5b0e *R/mux.q
+22818bb0f90e52348f924f17a83c49ae *R/nobs.R
+bebc81a8fda1b69c9e27230ea163ce8f *R/plot.vglm.q
+6847d405f3a0341820301dabd53d8ab5 *R/predict.vgam.q
+781f33cd4e62a7c257952e9362438e6b *R/predict.vglm.q
+5028a2f7e1673086aa97cecd32cfe51c *R/predict.vlm.q
+bf13a3f20024f5e86dd6d38b06d9b4e4 *R/print.vglm.q
+940248a46a8416309969ac8c5dd40d1c *R/print.vlm.q
+040f26d21df40bc36d99249db0dfbfa3 *R/qrrvglm.control.q
+dcf8385e539b00336f686acbfb668783 *R/qtplot.q
+976febd2962ad51d6b1696c682ced15e *R/residuals.vlm.q
+9883705f0c2220c32b62e9e81751a0bb *R/rrvglm.R
+a3ce0cf9a60b25826617da994ca429f4 *R/rrvglm.control.q
+300acbfd67a6481d082202c3fd691520 *R/rrvglm.fit.q
 d0f49d2c6af83f79ce6894993a42b79d *R/s.q
-59971ce313b9d5d8117ee9be43049940 *R/s.vam.q
-17b2981fe5a5a8b6c8d5ff24e1110c4b *R/smart.R
+8e27b82927b85d6cd0e6d925854e2216 *R/s.vam.q
+176cba5df3fbe75d6fa0813d210d2cee *R/smart.R
 1bccef4ed724ae0a8d0f677c7d12c15d *R/step.vglm.q
-7fdc7139fbe351c53b7a5b64f782ada9 *R/summary.vgam.q
-d8ddb7543987a1d3088e6c264d253d85 *R/summary.vglm.q
-254a4caed282a79fe7bd72a6ac8a54e1 *R/summary.vlm.q
-9fd5ab4d09a51e27b81ed54d0ba98f84 *R/uqo.R
-58d011e757b69c50072aba6636d0459e *R/vgam.R
-c479ba9b1e9dfe567e2d02d667392c0e *R/vgam.control.q
-1bc56d80200a7c3e8974b6ebd3cddbd1 *R/vgam.fit.q
-fa4a8b03864d4c88623072fbc836ddbb *R/vgam.match.q
-a11d62d8e230f9d3f5d1169ffac27703 *R/vglm.R
-5e7d4ef7fbcd4a050378cac4480e6a1b *R/vglm.control.q
-8b749d824c552fa958f669f4461c0480 *R/vglm.fit.q
-df2d63117cb8b126e5f568d0c3c0b5f7 *R/vlm.R
-128626597c68cf1d6bfe46edce99017a *R/vlm.wfit.q
-a65b9ce0f4ca8924a4d612dceb7431a3 *R/vsmooth.spline.q
-1fd723ab36f7d8d06ea80e7f0695839b *data/Huggins89.t1.rda
-5d76a6219b59d73d8606351a4e838435 *data/Perom.rda
-9813abe80f1fd180438de1b64a494d23 *data/alclevels.rda
-dc1953bd5b84c6c769b3216b6c9bfe8e *data/alcoff.rda
-c69d92ac37883bcb93de5c689f617c6c *data/auuc.rda
-e597da31ffc931395065afd49d7e1171 *data/backPain.rda
+e01d1641a65f79de44e2cfee95a2f1e8 *R/summary.vgam.q
+d2aecdb1141d8c2486f2873a54bed7b1 *R/summary.vglm.q
+3c6b2ee4fc757b53078cc50a04aa9aa6 *R/summary.vlm.q
+a2f1a96aea82d912a0fc93f2499fe8a6 *R/vgam.R
+f374f7fae261b36a8c91939c5105b007 *R/vgam.control.q
+09feb272244683c3f0f0cc5c7b79dbf9 *R/vgam.fit.q
+c0d5062be47a6b446b4b4416e0a6f436 *R/vgam.match.q
+4c319e2a242c0aa7cca457b890c34e44 *R/vglm.R
+b54d7372f871e98fb7a81c846e4f7f18 *R/vglm.control.q
+c0ab3dbe4060941da72a0d46aec995b1 *R/vglm.fit.q
+9e44046b8fac44f5b19622a18fd817bb *R/vlm.R
+5a9f82ff56f366f908ef6282f03d5c9e *R/vlm.wfit.q
+e8b766779301274b0e5687ea9bda6039 *R/vsmooth.spline.q
+c6883c2e9dc163e099ef80d9dc60697e *build/vignette.rds
+b4cd6d82916eeed0e7ea66d88d47bfcb *data/Huggins89.t1.rda
+6d595282e66deeba6520b9e6036ab9a9 *data/Huggins89table1.rda
+d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
+bab76494dc8067695f3b634016765a65 *data/alclevels.rda
+c02f13a9cda10a0a0ff58a74ba7b7a84 *data/alcoff.rda
+12ded1bc1c4eb2c470c1667b520f032d *data/auuc.rda
+e762d480945696788738c174b84147c1 *data/backPain.rda
 4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
+3b8b6009d5fbce815d622c05678c496f *data/beggs.rda
 e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-ac781eb03705011daac38279dd8126d9 *data/car.all.rda
+52d2f9cab55848f2dbc0f469b9c0ef94 *data/car.all.rda
 b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
 4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
 3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-a1736f849c17c8def4126685c80a27c7 *data/crashbc.rda
-710992c846632d4bb836e0db7754577c *data/crashf.rda
-d5308a91f8bb3ada33dc46b322cbbf33 *data/crashi.rda
-109603b8ff2aed220209e950e92dcea2 *data/crashmc.rda
-a2bdcbc61dd121258d7d44f4eab13588 *data/crashp.rda
-071eb8e5c533bc745bf06a166365d2a1 *data/crashtr.rda
+da6d3150cb16a66be3063b773a476e3e *data/corbet.rda
+d906323b58926c6b77d1ec90bf94b29e *data/crashbc.rda
+52ee69e2dad45519e4c4b093b91faefb *data/crashf.rda
+c7d8935806f8efa80b49beb66f57c777 *data/crashi.rda
+a2acb5f23d3791ddcc2c47dc80c686cf *data/crashmc.rda
+327758f943701d91882b2f99f6214174 *data/crashp.rda
+f9ac084246f904d2522c66bf3bf74f9a *data/crashtr.rda
+592253084c8e27fd39293e71235ab6c7 *data/deermice.rda
 08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-b351998ad2ed6556fb83dafdbf2c0540 *data/finney44.rda
+b9969e0c972a3af06128eb2566b35c3e *data/finney44.rda
 3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
 bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
 9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-b003bfd39730aa0656fc38ac2c347caf *data/hspider.rda
+dfc26b76841c27a6c6fca69fb137f555 *data/hspider.rda
 dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-ebf3caea112403777897aa8b631ac27d *data/leukemia.rda
+a988992fe21c5ef19588440bc2e65fd5 *data/leukemia.rda
 aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
-75cf48caa6781de4a80496c31604b1ef *data/marital.nz.rda
-d692afa917e63fa7707495b25ae93bee *data/mmt.rda
+7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
+951cc829bda3b03ba29a3dca4a55e51d *data/marital.nz.rda
+9b957fa754a4289a4f46dd182289ac58 *data/mmt.rda
 56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
 fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
 3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-04b56fb5acddca81eb3916826a4c88a3 *data/pneumo.rda
-824247155f0456f146af38c8818314cf *data/ruge.rda
-d55951f9995a47976dcc28bd4c877a6a *data/toxop.rda
+e1a792d5a43fba44f13bd72fc3252c25 *data/pneumo.rda
+0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
+f3d44d37a6379352f8a73ec856b569ca *data/prinia.rda
+1189583668fac01318e26539ecdc52e2 *data/ruge.rda
+d4e79e6a83e94ce43ea81c00a5475427 *data/toxop.rda
 1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-3be014e1cf99d07b22ca4757d4e43408 *data/venice.rda
-db2bece75f2f401b842b47b210541ed8 *data/venice90.rda
+8fbebe25dcb4bd9ff9fe14e3604fef31 *data/venice.rda
+2210f364ad19eff32bba9423b4a593d2 *data/venice90.rda
 e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-ad7680ca4b2ee5cdcfdc6efd64734e2b *data/wffc.indiv.rda
-4d0e86344820512b6e9d661b62c8df22 *data/wffc.nc.rda
-f89fc57a32f5dc7b3ac764ccf9010050 *data/wffc.rda
-0e5d28602f173f25c3ae8ae8ca9ab6d7 *data/wffc.teams.rda
 81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
 532aba4ad4cac611141491a5bb886236 *demo/binom2.or.R
 a7db0d0c4cc964b01ddbe0cb74153304 *demo/cqo.R
@@ -140,443 +141,464 @@ d2c02ccaf4d548cc83b3148e55ff0fa3 *demo/lmsqreg.R
 a3d2728927fc5a3090f8f4ae9af19e1a *demo/vgam.R
 00eee385e1a5c716a6f37797c3b4bec5 *demo/zipoisson.R
 60616e1e78fe61c1fd4acdf0d3129747 *inst/CITATION
-fae24431ceffb7f1c6390d81307cda6e *inst/doc/categoricalVGAM.R
-b1a84a83b8fb788d31d509e17936b603 *inst/doc/categoricalVGAM.Rnw
-a844badb9c938f40a4f3d76f6b7cb9a7 *inst/doc/categoricalVGAM.pdf
-e4c5415e487f533b70695b17e40d97bc *inst/doc/categoricalVGAMbib.bib
-e77fe3e9c0a314c51ba4b36b8d56684b *man/AA.Aa.aa.Rd
-3d5d059af0e7d0c88fe059f8fed7e81e *man/AB.Ab.aB.ab.Rd
-038a23a3cfb521f14b4885e49bf0188d *man/AB.Ab.aB.ab2.Rd
-ccf14c227880ca872a7471cf5f7c94b5 *man/ABO.Rd
-37202536ea507b17bb8472e3fd1b78e4 *man/AICvlm.Rd
+2f7d9e53aaea653d70152d64eda3a81a *inst/doc/categoricalVGAM.R
+ab0ec8b49daf41071e1636bb52b71294 *inst/doc/categoricalVGAM.Rnw
+afa76725c38b56f1c3b2e9282ef4dcfe *inst/doc/categoricalVGAM.pdf
+387dc3b872f48144d7dc5fdabb9b15c2 *man/AA.Aa.aa.Rd
+b7384a3c4136b52b9d997d81fab15968 *man/AB.Ab.aB.ab.Rd
+a84233bee5f8105875949d2f47887852 *man/AB.Ab.aB.ab2.Rd
+c58d80b1cb01d6e058b25951e080a05f *man/ABO.Rd
+31d7e2bd48719e41df4b8a2fdabe7f13 *man/AICvlm.Rd
+27f7f9c75e026d1c833dcff3afc9c6c6 *man/BICvlm.Rd
 2dda55df0947c86b4614e2d722efb713 *man/Coef.Rd
-e2087f40465b8feca48d61fb1cecfc6c *man/Coef.qrrvglm-class.Rd
-956683c1b81f04580aa6546a85c7d20a *man/Coef.qrrvglm.Rd
+5e47a4ab6785fd816784baed909dc8c5 *man/Coef.qrrvglm-class.Rd
+563c43dfaca1676ce0a61f19bc485830 *man/Coef.qrrvglm.Rd
 9335dbbcdb81716ec556bf5bcf0be309 *man/Coef.rrvglm-class.Rd
 dd9202d518789994bd081f16a81631ef *man/Coef.rrvglm.Rd
-673fb7bdbda0010ee45586680a0275b1 *man/Coef.vlm.Rd
-9b60092b7d4f21ff458a0279096ef3bb *man/CommonVGAMffArguments.Rd
-06084db07bf8e6b2bc59dd0f40a23f8d *man/DeLury.Rd
-64b643dcd690b1eb601fcc70af495790 *man/G1G2G3.Rd
-fac93d02848bc713742065083217496a *man/Huggins89.t1.Rd
-f7bc9b5114ed94e014016aed05b8e7d3 *man/Inv.gaussian.Rd
-77388e0223539826ca69389d46f80550 *man/Links.Rd
+9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
+863e44c0199b67cbe16316108c576808 *man/CommonVGAMffArguments.Rd
+184a5e03d395c83b20d6e825e22d4759 *man/G1G2G3.Rd
+5d9fd8fec8bfa485686d8ccb002f4c94 *man/Huggins89.t1.Rd
+64ffee88303582877fe76eedb51f797e *man/Inv.gaussian.Rd
+05808209caa226d937f9edf4364db34a *man/Links.Rd
 0a95f8292850ef5b0fcf516400864c84 *man/MNSs.Rd
-45c9ca6851177b813be07e2446614721 *man/Max.Rd
-d11449e8d78b47fe2811767798a3966a *man/Opt.Rd
-f9fb54b978cba49b278630f9403dd73c *man/Pareto.Rd
-a8acd542dbd768859c06a2b6811c0a13 *man/Perom.Rd
-02bd50562a32ff0a21d887511d222364 *man/Qvar.Rd
-4273365f7ee730f68259e69fb65f7746 *man/Rcam.Rd
-e22155cf6e28945d43ed76d0d02e6746 *man/SUR.Rd
+a730679155e139e134b186e7852c1ef9 *man/Max.Rd
+76bbf26207744bec8c21ae1d71701071 *man/Opt.Rd
+624e0666b195bc9596e0869aa35823cc *man/Pareto.Rd
+39d9ad33246126697c462d454a1d190e *man/QvarUC.Rd
+bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
+becc3fe17f46d3c92c2b81da5499da83 *man/SUR.Rd
 2db32b22773df2628c8dbc168636c9f0 *man/SurvS4-class.Rd
 4f4e89cb6c8d7db676f3e5224d450271 *man/SurvS4.Rd
-1f34fdf36c631e984d2a9f28bf607b67 *man/Tol.Rd
-35fb38864c1e10a928af13a607e7b4b8 *man/VGAM-package.Rd
+40c8ffbfe412d1774e540a3e7ddf1f2f *man/Tol.Rd
+6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
+b07f7f8fb09dceb4cb770698fbcb3281 *man/VGAM-package.Rd
 41de97f0bacb4bedc36a589af710ff99 *man/acat.Rd
-d479795a5dfdd0949d86aa70fffc1140 *man/alaplace3.Rd
-8c0662467fc225892c1a1cde14e9fbf5 *man/alaplaceUC.Rd
-af06e5a2e0552a8ef63756f1c3bce00b *man/amh.Rd
-5e1012c84beb593f4558a9df064a3304 *man/amhUC.Rd
-c034aafa09900eda5767b557ae18e665 *man/amlbinomial.Rd
-cdb087cd9e65ef96ba2e848dee9e4eeb *man/amlexponential.Rd
-6cddfc975ac4418a3693fbf3c810d96d *man/amlnormal.Rd
-8c0315925316e09ad8847a5bc960d478 *man/amlpoisson.Rd
+97dc0fad7cb454bfa669ea6de9e564a1 *man/alaplace3.Rd
+573cdf092fc48b9b1c1f10e9af6b0fe5 *man/alaplaceUC.Rd
+98d2372de743a12819fb8b33f583ee0a *man/amh.Rd
+f10fff7b5d459f0325e70423488dde18 *man/amhUC.Rd
+3cd4ccbc6bdca02fc5e30e2455ee8719 *man/amlbinomial.Rd
+f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
+7cb04022bf8b6dadd34def3eb23fb776 *man/amlnormal.Rd
+33f35a4ec1b03b95a6ef9736ac771ec2 *man/amlpoisson.Rd
 9f1ddcb0af49daaec702a1284341d778 *man/auuc.Rd
+c8efe93df8799ff106b6784e1bf50597 *man/auxposbernoulli.t.Rd
 bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
-34b5510370a46ab522a754c731a437be *man/benfUC.Rd
-c1483ea97ab8115ef70f90bc0984ac6d *man/benini.Rd
-b3e26d0011014d3722b4ecb3675c4aea *man/beniniUC.Rd
-28d965f409597a6485f3141173f901a3 *man/beta.ab.Rd
-91deeb79a61f94c1af5d7ac251132821 *man/betaII.Rd
-72c00470a5c89c7ebfc9e695da9b07d4 *man/betabinomUC.Rd
-053b67bde772fc8d0e96b5b0ac5ebc6c *man/betabinomial.Rd
-504ee243a39c7173ac40841afe16339f *man/betabinomial.ab.Rd
-be38265c59ae5f15c757009310e14a92 *man/betaff.Rd
-da3fdbf88efd6225c08377a461e45c50 *man/betageomUC.Rd
-63ba9c485c5d5b4962fa8e215f4ee87e *man/betageometric.Rd
-aa6ee6bd6c48de8d03f18a80b836edae *man/betanormUC.Rd
-f568faafa4b67d1f0bf9ba07ddc4a7f3 *man/betaprime.Rd
-1cf45cc5335d55c0a46d1e7df469ce3d *man/bilogis4UC.Rd
-b81f6ad16bb834d3fde123062ba31ec8 *man/bilogistic4.Rd
-7e042a6903115d2eb77d0ef3a35cd8ab *man/binom2.or.Rd
+6ac5a3f07851ac3f7e19eaa977365e0f *man/beggs.Rd
+4f14c76e1d581aa54d7a523c105fb08a *man/benfUC.Rd
+2fc68a0b1d11b944430c4290f159b9bf *man/benini.Rd
+d970a382e22015a5542a5d2bbe289688 *man/beniniUC.Rd
+5eb9ae4be18386c3c2f8539609db5130 *man/beta.ab.Rd
+1f0cc4a87b011d3367a1049e8dad9a89 *man/betaII.Rd
+5219942fe925a1a83799b51f6d5655ce *man/betabinomUC.Rd
+cc4e4ca6ab31fd48735dbd44629f2209 *man/betabinomial.Rd
+1c27699e07dbf2e5c1d756692c6b6535 *man/betabinomial.ab.Rd
+08edf0cff53ce982a32e28bf9feae6cc *man/betaff.Rd
+1adc8d4b6eac60c0ef38de8459995797 *man/betageomUC.Rd
+0c114f8c3cbe786f1fd17b9898e59386 *man/betageometric.Rd
+151cdf70cb16f8095369b88093ba48c7 *man/betanormUC.Rd
+db809a59222bc951683a84c9f06e48ca *man/betaprime.Rd
+0ab04f3892c3b98eb2c914bf8043afb2 *man/biclaytoncop.Rd
+94e05525dff5548fadbcd6efad58b086 *man/biclaytoncopUC.Rd
+e79003557390777e6cb8ab38380c673d *man/bigumbelI.Rd
+adddf7bb27d9517288660180b4240058 *man/bilogis4UC.Rd
+01b65c61360678a60eb9ebb3c0758db6 *man/bilogistic4.Rd
+64e2272ebc4d5b8a5ed9d934943afd68 *man/binom2.or.Rd
 1f1a653e623b2abbb4662b16070019db *man/binom2.orUC.Rd
 a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
 c3f3f95519510e5a324c74369bc52a63 *man/binom2.rhoUC.Rd
-7dcb53c5b43d65f3837a65463e1f5612 *man/binomialff.Rd
-85bd227a0d4ae18c5511206758f982b3 *man/binormal.Rd
+2d3fb605e1605c90d0d6160ed9d7d67b *man/binomialff.Rd
+b46c088fd812d7f402a2d9757f022880 *man/binormal.Rd
+ca1a757bf6b2402617898d84abbc0c33 *man/binormalUC.Rd
+dbd8b9788f6f7cee598241a83c64726f *man/binormalcop.Rd
+9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
 bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-3c8ee4feffa56a6e15b24f0c502026c6 *man/bisa.Rd
-832abdebf1e3013d0421f5012efd3a7e *man/bisaUC.Rd
-59d5b0478df13fc8ca7c6650e70105ac *man/bivgamma.mckay.Rd
+00a210fc4a1bf5bf21f6da4f63dad66d *man/bisa.Rd
+8104993144f45c1fbe49da814cb05a41 *man/bisaUC.Rd
+18ab34ad46a2437bf0bcc89957164418 *man/bistudentt.Rd
+a55c8615d46c010bdd9d61ee81f1041a *man/bistudenttUC.Rd
+a8639663ca91af2d81b888cb59fc37ae *man/bivgamma.mckay.Rd
 81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
 44f06f92ed85ef1cf5e447ffed182989 *man/borel.tanner.Rd
 4e692566eefaedf275e8693ea2f6efbe *man/bortUC.Rd
-7bc3641f9f81a4eb77a304103e5f1dcc *man/brat.Rd
-0eaf999500ce9554156f37acbfe1e01a *man/bratUC.Rd
-b4c37774de88cd2f3f8f5e89ced2b491 *man/bratt.Rd
+b727c9787c7fcfe1e3dc19f92f6a4cb1 *man/brat.Rd
+4b158e93b6c981f016ed121e987c50b7 *man/bratUC.Rd
+5ee1485749d235a2d1aa1be8849accc7 *man/bratt.Rd
 f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
-8ecd34f0a725bf795101738a60bbb401 *man/calibrate.Rd
-483b5be2dbbd2d6281d08e730e0e607d *man/calibrate.qrrvglm.Rd
-6b6e9dd2da2d784fefb5144eb0e02818 *man/calibrate.qrrvglm.control.Rd
-ef9e501f27ab7c71b817615b21405bfd *man/cao.Rd
-e8c2f9b88e37763580bf77f68b0e8fc8 *man/cao.control.Rd
-e4b532eb5880648443b6fc60b31fbc36 *man/cardUC.Rd
-7aea0f32a547bc26d3dfaf65aab3a8b7 *man/cardioid.Rd
-288036a65bb6f386d29a99dd40e91a32 *man/cauchit.Rd
-81d694e2aea915b2d8ed6c406f517baa *man/cauchy.Rd
-2ab80616c05e7aebdcf769c35316eab1 *man/ccoef-methods.Rd
-35499ce13b26395bc61c5931d202cf24 *man/ccoef.Rd
-5985b55cbfe98a8a7d2b4de3fe3265bf *man/cdf.lmscreg.Rd
-bd25f55e6466226cb79f74482f793a3f *man/cennormal1.Rd
-d2156c3ff1e1ecaa38eaa4bbfe3649c0 *man/cenpoisson.Rd
+b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
+f1b9a1c35838eceaf41c61e06164f9da *man/calibrate.qrrvglm.Rd
+bde0c3c0dcbbd53b6a415ae3b73d2b9c *man/calibrate.qrrvglm.control.Rd
+6283590c76e5dcf1aff1e0a1314d970b *man/cao.Rd
+e270445a52d0e9e00086d4f437e2078b *man/cao.control.Rd
+af70e01bb01bebbc1d06e309d8ec6ba5 *man/cardUC.Rd
+3d662c3707b6b1e9d8dea58850a94f2d *man/cardioid.Rd
+bfe6f5beb1de5e92cbf788afff8c4022 *man/cauchit.Rd
+b4447d89801713c6c067c1d14dce2c25 *man/cauchy.Rd
+9035d92ae411d748c08d35086d5d3be1 *man/cdf.lmscreg.Rd
+94b38b26a83a96d6ab67911eaaaa8954 *man/cennormal.Rd
+4f6fb991110be4815314754a4ed4432d *man/cenpoisson.Rd
 a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
-b6cb82fa50d35036cd635f8b1a1a4ec4 *man/cgumbel.Rd
+b986ad79160959105d5a22a6d110504b *man/cgumbel.Rd
 1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
-8b159dce27c0461aa7ce49eda949f697 *man/chinese.nz.Rd
-d58b97e7b28882f689a67019139cef86 *man/chisq.Rd
+df235f073a2db2ac0bb1530b33b87896 *man/chinese.nz.Rd
+c82dda43b26d7b0e0009ed38b76ba381 *man/chisq.Rd
 8ecbb478efcf4b0184a994182b5b2b94 *man/clo.Rd
-2ebe24734ed0652482c35da374b660db *man/cloglog.Rd
+d3e192aff657835843eed8b6cb3c5fe2 *man/cloglog.Rd
 1aa6ee888bb532eef1f232c9f6a02b5d *man/coalminers.Rd
-9250590d8aae1e18953bbc50cbc922d8 *man/constraints.Rd
-7564384537e0ed18e6dcac3e0df5b32a *man/cqo.Rd
-2f595bffa2e5d997ae33fd6ca7e3f22c *man/crashes.Rd
+8a8b05c233949dd6095d4d11ff31326a *man/concoef-methods.Rd
+647dabdfe18b69f1ef4a7d08b3b5a625 *man/concoef.Rd
+cb0e57f8d57e64cd0698f6acfe494adb *man/constraints.Rd
+523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
+074850576b28ecd9d41090805c0bc9d6 *man/cqo.Rd
+8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
 e591cff73505c3e967aea2aa47a4dddf *man/cratio.Rd
-51843053ae7e7f2535986ba9fa8707e8 *man/cumulative.Rd
+f6b0e6e3ea8064c9556a773963d737ca *man/cumulative.Rd
 c909335c97a9ae26916016dfcc31b804 *man/dagum.Rd
 97868e30408a4a35750f9692f5e87b68 *man/dagumUC.Rd
-e04b86db7152a2d8633c16f07e389357 *man/dcennormal1.Rd
-fa3351f5e58b37cd7c452ee0a991d76d *man/deplot.lmscreg.Rd
-8c45fa4b18d6cfd8fec96f8071cef604 *man/depvar.Rd
-c4b52569e78545a35752e1368c2c16df *man/dexpbinomial.Rd
-6c6f8430f3c65c7ba3ce883eb2c9ad7f *man/df.residual.Rd
-d21eb844e77835fb1d6ae46a2b112a97 *man/dirichlet.Rd
-825897c6d06a47e9ac809bd2251cdb68 *man/dirmul.old.Rd
-77a420a5a6ec80e1af4ed8074d516766 *man/dirmultinomial.Rd
-844efd17a8d861d7cd173c64f1c8173f *man/eexpUC.Rd
-d512d29748153b09903ac96efa50a8d4 *man/enormUC.Rd
+8fa6a29bde444a45be31b3d8979afc00 *man/deermice.Rd
+dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
+af4a340811c4458baf556a340192208b *man/depvar.Rd
+b9edd4abba2c0772b342f440536d50d4 *man/df.residual.Rd
+87b5592713a2367b4559c84944307614 *man/dirichlet.Rd
+07eb43ee6da403b89b19ff55406ab130 *man/dirmul.old.Rd
+d5c9bd16cfcf80f66fa1af56977c236d *man/dirmultinomial.Rd
+6e6523b060c3e982a559bf73fb42a924 *man/double.cennormal.Rd
+0f57c4635e0faf9485cf7e208098ce66 *man/double.expbinomial.Rd
+f8f3e5bb715d519d3c99cc94c81bae93 *man/eexpUC.Rd
+8271f348d0cfbd2765ae2e86c745ba2a *man/enormUC.Rd
 72492c419917c144ffadc656ee56a63b *man/enzyme.Rd
-a29f442ce60d8ac8185738242b4f49ce *man/erf.Rd
-159ea23d4b4c5e3d473abf5c7f7db841 *man/erlang.Rd
-55dad4e8509a4d3522f6c06f53093803 *man/eunifUC.Rd
-607d45ed7e4eaebf6cac40c14a57eda0 *man/expexp.Rd
-f5c104469adfcf4d21cb4c8c525c0850 *man/expexp1.Rd
-391ec14ac5da161f67cb01f91bf474cd *man/expgeometric.Rd
-bba52379a93d8f2e909b579215811554 *man/expgeometricUC.Rd
-99739438b960428c5c03a25d654942e8 *man/explink.Rd
-2fbb7566f2c74baa4051e3ce849c1909 *man/explogarithmic.Rd
-347d45279f0e72bc8c2dab25ace2f28c *man/explogarithmicUC.Rd
-ac3f81c0c335c8b74b12507e1398edc0 *man/exponential.Rd
-bbd414bfb50f4be140ac6b66b29694cd *man/exppoisson.Rd
-8e5ff25491af9631e681241ed305bf94 *man/exppoissonUC.Rd
-2cb7a7ffba4a046d1205295d75d23a18 *man/felix.Rd
-0bfa97ff4d9eead46aa1a822e2c231c7 *man/felixUC.Rd
-77038da711286677c94066f9326b2a20 *man/fff.Rd
-b85c54aaade0e94059fcdfd760c23cbd *man/fgm.Rd
-0c4744ec66aa44b14f5c3dd2d79856a1 *man/fgmUC.Rd
-725193beb8ca3f28903db56ec6d50767 *man/fill.Rd
+b733cc1da9bd902ea8903b9a53cf9bba *man/erf.Rd
+0ca2068324f7898df1516fe8081c45bd *man/erlang.Rd
+3f633760a4767aae2fd8ce930befa08b *man/eunifUC.Rd
+00704fa05f7f6fcd1166a886c0a56b72 *man/expexp.Rd
+996fe6f72ef5c7097c4677153ddfce4e *man/expexp1.Rd
+779c6a5aff218b1b3daf8bd86bcd671e *man/expgeometric.Rd
+f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
+93cc460d2fd8c787aa6feaf5347f1685 *man/expint.Rd
+59e10a79028eef76da5bdc868e6bb38e *man/explink.Rd
+89ce96662b931aa17182192618085ed0 *man/explogUC.Rd
+f2c881a921ae32035e8d41699faa7969 *man/explogff.Rd
+756267192c82e69270f3b6b44e7e7c59 *man/exponential.Rd
+8ba1a5f581e370f49e5b91e12f90e42e *man/exppoisson.Rd
+2bfab14d29e3df39995627cfed355e85 *man/exppoissonUC.Rd
+e3f13f0719fe68193454ccf6949ff5cc *man/felix.Rd
+a971e1d3759018a41544d7976de1f254 *man/felixUC.Rd
+e55a6b3e93a04e0a68d8c363595cb222 *man/fff.Rd
+78e0fe6433266ad454e0169d178aef36 *man/fgm.Rd
+bd384422574aff8eb42ba7bd55634a2e *man/fgmUC.Rd
+f935c7559c9ddbf1266b19543023e0a9 *man/fill.Rd
 b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
-6bb9c425367a0154d70bb5baa702b826 *man/fisherz.Rd
+5fd279ebc2d6ec3df74557cdca6940c0 *man/fisherz.Rd
 464a5be86b451beaef25e096cff36273 *man/fisk.Rd
 8215ca60f756bf8f9f2e3b404741fbd7 *man/fiskUC.Rd
-81d03e605f6e9bfc48c612dd6369b51e *man/fittedvlm.Rd
-e3ffaf55fb9c925685d1259eedc4fd3b *man/fnormUC.Rd
-a449dd872d994d44bb6f7986249f8784 *man/fnormal1.Rd
-80974c2814d703c1c1d4eab536f656a2 *man/frank.Rd
-e6d4221fd51756a2881065dfc303edef *man/frankUC.Rd
-d08c0b1aaf965520260ac15ad66a8d9f *man/frechet.Rd
-0e54e074f0de1b996e1f38fee8d1f844 *man/frechetUC.Rd
-3f27614050eac4ca6b793df27105cdbc *man/freund61.Rd
-2b392459d756beb1213250d266c90076 *man/fsqrt.Rd
-97b73c666866f4daa6e5be208fb7fee3 *man/gamma1.Rd
-5edcb17bbf9d4e0f7a6f96ed709b5ed1 *man/gamma2.Rd
-c0e3957aaf1b96e0a35a2ea95c023fc3 *man/gamma2.ab.Rd
-4aeaf1f465f97afa3305a6ed9dcb049f *man/gammahyp.Rd
-40973d8617d8769e4cf70b17d9b19846 *man/garma.Rd
-3013563566e6982b6e1b939e48cf9c6e *man/gaussianff.Rd
-df1c376b3ca400ad967513a8f3b1da44 *man/genbetaII.Rd
-ac349c9adadfadb8cc9a574409c22956 *man/gengamma.Rd
-bd63e15c3ac9ad8a8213d4cdc8bb3440 *man/gengammaUC.Rd
-c572a5a90988743fd046d5332bef6497 *man/genpoisson.Rd
-b1c3656df6f641f918c4e5bbd4fb239f *man/genrayleigh.Rd
-c31e093e7b6e5a4a7959ba6404b85a23 *man/genrayleighUC.Rd
-ad1646249e1de561bdd9fe261057a97c *man/geometric.Rd
+514c750201a82629ecfd0c5daf5cc9c7 *man/fittedvlm.Rd
+cd73efab4c3e718d1a77a603eb5e341c *man/foldnormUC.Rd
+c9b39250cd464b9f9f8b45abe36b4ee6 *man/foldnormal.Rd
+a039a64693a75336ca125a2f30703a38 *man/frank.Rd
+f96df0cd8d773d5152f39cf2fb12608c *man/frankUC.Rd
+4e9a55fb4be11f3464b81ab678b40d45 *man/frechet.Rd
+7d3ee6f153a63e96ec770dfedbd13204 *man/frechetUC.Rd
+6d7242b05f9006cb2a7343356d734b08 *man/freund61.Rd
+4b7619368e2cc01107743d21b7fd67fc *man/fsqrt.Rd
+da2e9cdccb5b9abe30cbe5cde43d41f7 *man/gamma1.Rd
+dabc3bedd5b206aba441d7ea23a86c4b *man/gamma2.Rd
+44261a600c3189b10f9f2e61c16ad2df *man/gamma2.ab.Rd
+5f57ead6a37803c347e759fd12fb5c96 *man/gammahyp.Rd
+68181a9850e3d8d4cd52ea5c8c45f369 *man/garma.Rd
+2907a13f1f68692ce6e621131fa0d35e *man/gaussianff.Rd
+48fa44983f28bf53d520128d8ead6954 *man/genbetaII.Rd
+68cd2f025132585e1180bf71be281b5a *man/gengamma.Rd
+795f7e16b356cea3db6294b6ed430b91 *man/gengammaUC.Rd
+fba0014f17bf3bc38466ca8ca0e952ff *man/genpoisson.Rd
+ca65498360cbe30840cfa4c9d931fb3b *man/genrayleigh.Rd
+5193d3fe8ab3e3a790247fd93a2c513c *man/genrayleighUC.Rd
+8f48d9859354c4542c32367ee99103c7 *man/geometric.Rd
 78b7d9455f1eaa4572ff54427d77935f *man/get.smart.Rd
 14a7e2eca6a27884e1673bd908df11e1 *man/get.smart.prediction.Rd
-a7cc3a8b2ab30458538d2f36279135aa *man/gev.Rd
+0e3d3c898babad94713a55cb5472243d *man/gev.Rd
 838c81d8d6c94f4f3ae49df0b25d1cfa *man/gevUC.Rd
 f87241a6011f5f5a49921a1842a177ed *man/gew.Rd
-e85bfce5bc1b53316766a1edea3f707c *man/golf.Rd
+711704243b30d0270d3ac2a51e2768a8 *man/golf.Rd
 5cc8c0cabb839b34f4f37de4b57f4428 *man/gompertz.Rd
-3affd7c0ae94702950fb738253059a68 *man/gompertzUC.Rd
-81d287969447618149d22113fa118d40 *man/gpd.Rd
+a521f6b84e19a2acd6080cdd01a538a3 *man/gompertzUC.Rd
+33a1c86c4103534119b18dfa226dd7ea *man/gpd.Rd
 54b49cf2e3ba865dc7c9297948036d9a *man/gpdUC.Rd
-3f3f9b4cb1bd341a9c4c063594516611 *man/grain.us.Rd
-21550b13a293b7e3062daf1fba963c09 *man/grc.Rd
-3ffdad5594e4eec6062097a5c7c974e7 *man/gumbel.Rd
+7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
+c8484625df61017b76ba14d9aa4759f5 *man/grc.Rd
+ebdc9bf4ecc9db057793acbf7c7b4283 *man/gumbel.Rd
 a6df41a1cc82c1744cad46ba89a5b161 *man/gumbelII.Rd
-2127127ee0e62bb2cefe05462bee7c39 *man/gumbelIIUC.Rd
-1f202bf7be31c71a9d9982b7ef477cc9 *man/gumbelIbiv.Rd
-977ee282217151a6c5b83867eab32573 *man/gumbelUC.Rd
+09d6b3c902029eeda151ea7408436746 *man/gumbelIIUC.Rd
+6e8fe2f3bce2e1f173f97fcd5f25d38d *man/gumbelUC.Rd
 fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
-c1a9370d3c80cd92d9510442da0ff940 *man/hatvalues.Rd
-bed7fbc305bb784fb723242146e2ac9a *man/hormone.Rd
-57a5f4c37dd40a74161489df6759fcd4 *man/hspider.Rd
-b9ed0e8079f4e57429b4647193c5cbc5 *man/huber.Rd
+d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
+f13c76795259bf8c257751f6bfc82995 *man/hormone.Rd
+8ef9d44522eaef45b284b7f98794d48b *man/hspider.Rd
+59409b2ff67e8099de04beb52371ad2e *man/huber.Rd
 ea67b113e21bbe6197fff2358cb47179 *man/huberUC.Rd
-b330f328e4d6f0db4928a92b30611267 *man/hunua.Rd
-cd473192d2153433bee1530bce881972 *man/hyperg.Rd
-34ba5a500d1e9395c1e6761334232c0e *man/hypersecant.Rd
-63751a4f55b918aad163a53994a01a07 *man/hzeta.Rd
-c3ca61cb9f3d309e8a08dd528de7d994 *man/hzetaUC.Rd
-1e31e772997c2b18bc113d77e1e0e176 *man/iam.Rd
-f4dd596dc646925e2c68c9679c799472 *man/identity.Rd
-3f07920de00eeb5766f5fbf545e792f5 *man/inv.gaussianff.Rd
-77d16112e2aed1f927ca1d0f4cee0a18 *man/invbinomial.Rd
+d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
+592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
+dafa920729c2512ac9fab7550f1dc2ee *man/hypersecant.Rd
+70e0d9e1f05930b5845f5ccb465c9dd0 *man/hzeta.Rd
+77b69beb073dddc46ab505277604d36c *man/hzetaUC.Rd
+dbcb8ac1c022d2a71cb8692cdf684cfc *man/iam.Rd
+941e6c172212119e9f189b447fe89b1c *man/identity.Rd
+6df749d9e38dcc7c3e9bc1ffc9e60dcf *man/inv.gaussianff.Rd
+798b9114bd27dc81b06409ecb5098ccb *man/invbinomial.Rd
 ceafec1c5c64f77d3bf0e39bee2b0277 *man/invlomax.Rd
 93c76dca757056d75f7978328608cce8 *man/invlomaxUC.Rd
 5aeacd9294068b2ea86d1f7269c56965 *man/invparalogistic.Rd
 d5b78c1484a4756f09a7f109c753626d *man/invparalogisticUC.Rd
-f70dc86e1c466a9dd45efa98a5445fc8 *man/is.parallel.Rd
+6c4bcbe8b24a63891d991d8983365008 *man/is.parallel.Rd
 a286dd7874899803d31aa0a72aad64f2 *man/is.smart.Rd
 1b33dcd08e9f444146fb7fe03a425add *man/is.zero.Rd
-30a15dcaa326928e71982bc7306a79cf *man/koenker.Rd
-50dded53a59735a07217074d8228393f *man/koenkerUC.Rd
-0d9800aa2eb316c662b36593ac2c74a6 *man/kumar.Rd
-8756e8c50075f92aeede56aedff7d2c7 *man/kumarUC.Rd
-7b2e3a9a2fae362f36bea1ab5539e6f9 *man/lambertW.Rd
-0c7294d5f5b568a23c2634a86a07f62b *man/laplace.Rd
-7310aca7179d6f31d9e0da64944e8328 *man/laplaceUC.Rd
-2aa7fa15b90a2e05cb9c261b192040fb *man/latvar.Rd
-a75f79d7fcb3ce0380768c06fbbf0e4c *man/leipnik.Rd
-c93045a9f05888a4675ba3d48e70e7e7 *man/lerch.Rd
+5cf973ee22fcfd1442e61458a9d91ce9 *man/kendall.tau.Rd
+690c801050d201570745a4f635ed2df0 *man/koenker.Rd
+47bca557052f9620a8bfb73e48801b95 *man/koenkerUC.Rd
+a97c9e81bf9b2ba86208a7ab334d4275 *man/kumar.Rd
+2e07c2e87f84e59aac2c1d4d6d7a3789 *man/kumarUC.Rd
+decbd103cc5311735e70d906d170c742 *man/lambertW.Rd
+3f61e79d47f859c3afc0003262113196 *man/laplace.Rd
+1e0d24321650e214570c5ee3b703a261 *man/laplaceUC.Rd
+16b21ecf83bb8fce76079502877b2fbd *man/latvar.Rd
+5345c003b2bc82b5844cb339cb609f05 *man/leipnik.Rd
+2e88465ad75446bbbccf208661193a8c *man/lerch.Rd
 8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
-13b2cc3332ac9559d5d47790a8e206e1 *man/levy.Rd
-5a35593723af5ff2e544345d4e6b868b *man/lgammaUC.Rd
-42d40282918efa270ed17f4bd3eb86a6 *man/lgammaff.Rd
-fd33ebb21f7ab741392b8c15ec54f5e4 *man/lindUC.Rd
-7ca83cec8ecb2fd661ca66bba89dc411 *man/lindley.Rd
-59375533957aa583acf12b0b44b0d718 *man/lino.Rd
-9c786943dcad40f95f4dddd3ff0f37db *man/linoUC.Rd
+465842cdf41dc2bbac523bf563060123 *man/levy.Rd
+0c6b5e56369b01507cef3729eac6290c *man/lgammaUC.Rd
+5eea7fa51644d3179618b0a0d82fa1a6 *man/lgammaff.Rd
+9e95d91d1a94e459178a738700a16499 *man/lindUC.Rd
+c3a54373cf8bd8ab360ea514a2aae05b *man/lindley.Rd
+6f035793e3afef2ae6977c22b6f69681 *man/lino.Rd
+8a4a3a1cc12bdb111c6de98ec1c45e9f *man/linoUC.Rd
 b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
-fc9016da8aeb1d1bb210ef7274f9da3d *man/lms.bcg.Rd
-111314b39e384cb6a87307d87cad309a *man/lms.bcn.Rd
-6e2e5248c45084fbcb0090b86f7f3f46 *man/lms.yjn.Rd
-0d35403673c679344da32f978a2331b2 *man/logUC.Rd
-f0502f0505925ca9d48e6e3994f278a0 *man/logc.Rd
-d962e7f739d3d752e48ceeb9d5f256c9 *man/loge.Rd
-2be2b998e9b4d3d32e72f2c9e0662273 *man/logff.Rd
-14c728f5bfd8968fc74390f1cb95dc44 *man/logistic.Rd
-8d40cf7f3736ad9219312e228348711c *man/logit.Rd
+d567e6c1a92069a8e976eab44ffd12a6 *man/lms.bcg.Rd
+0ef70d825afc1a45b5768eaca3bd88d1 *man/lms.bcn.Rd
+1547fe696e826d09308dd9dd98e7d913 *man/lms.yjn.Rd
+20824c03fc9d40f749ca42d60805124d *man/log1pexp.Rd
+5b95c393c4c558bf6b33afbbc614f370 *man/logF.Rd
+770d2f1b0efbbdd35656567250ebe941 *man/logF.UC.Rd
+22d6d79d0a45641f9b48b84e6e0c22a0 *man/logUC.Rd
+e956c4aae749e9034b7cf7fdf8661a64 *man/logc.Rd
+8c871e5697ed43662cd313fc777c2bcd *man/loge.Rd
+7b13f286faa6848dceee780f8c1ca670 *man/logff.Rd
+9e5b5e84d9fa6d0fd5661882e0465ac0 *man/logistic.Rd
+c1c9415c6f05f8e8d3e6aee71f7ea967 *man/logit.Rd
 1f63716471926cf3baae3150c94beb74 *man/loglapUC.Rd
-a570e779c1f0741c4196a0982fdeddb1 *man/loglaplace.Rd
-9217cff35cff9e9e1394d54a30a20ddb *man/loglinb2.Rd
-480a45fd3cf55ef81365ecdb397e8fe2 *man/loglinb3.Rd
-f1c11784dff391acf166a8986d434354 *man/loglog.Rd
-4c6053656b2fe0276fbe1a99b0174238 *man/lognormal.Rd
+33ee8ead6d8e9c900690ee966a056ea1 *man/loglaplace.Rd
+49d5183ac04d29b5427b9159fa101dc3 *man/loglinb2.Rd
+a569b31d918209e8b54a62e8594a3268 *man/loglinb3.Rd
+f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
+a6cf3e7329e66d0780f0318f4b53355f *man/lognormal.Rd
 e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
 5ce7aa8f16e81795577cc553d40a1e9c *man/lomax.Rd
-9281fd7fad7d154a35ae0534cf4d2e3b *man/lomaxUC.Rd
+c551ab73c0874a6bdcd66a569897b050 *man/lomaxUC.Rd
 950443559c152cc441b4b08dd5c7e12e *man/lqnorm.Rd
-3f48084e64cd4663677fc8df8e4ecf3d *man/lrtest.Rd
-c066460c787fa701788c400e56edbf80 *man/lvplot.Rd
-8b3ee5b0b1b1ec9659882b0d75a786bc *man/lvplot.qrrvglm.Rd
-30f7cce914cf36078392189f12c0670e *man/lvplot.rrvglm.Rd
+fafc126c62f806baebf8dcf1b3adac17 *man/lrtest.Rd
+f0a38f0b82c1525dcd51687a2f2768c1 *man/lvplot.Rd
+19a0bb0240f8f5bdb5e1729806a4a82c *man/lvplot.qrrvglm.Rd
+0e27d1c327ebf057f1eef3e243accc47 *man/lvplot.rrvglm.Rd
+c5760c3960748f906230ded119478271 *man/machinists.Rd
 6fab686982d148f43e04ca4674dd14cf *man/makeham.Rd
-f459ac6b3f9453e0fb6cf4dfce393b64 *man/makehamUC.Rd
-a836cdea396e90233979a1065e9aa401 *man/margeff.Rd
+c01957cac49ff8e3444d143901efab18 *man/makehamUC.Rd
+c35da03ffb7149d76ddbbf6747964c70 *man/margeff.Rd
 b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
-eae8c8d703abffa56be56cc88743822c *man/maxwell.Rd
-1fc207ea724c1fb681dc0805733571ba *man/maxwellUC.Rd
-ad6f24fe862c9936ea99033ba89d4fcf *man/mbinomial.Rd
-d0ba1cb515890aa57df222840a8ba7d4 *man/mccullagh89.Rd
+ce0b52f5d9275e79be867d5e472155bf *man/matched.binomial.Rd
+3e71e728ec202a03204fd81f6411c881 *man/maxwell.Rd
+0d3df98163de7b80cc3c600a791792c7 *man/maxwellUC.Rd
+1912321bc56a20e92697651612a5c185 *man/mccullagh89.Rd
 4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
 3b5d203389f18b3847122d3a78152f21 *man/micmen.Rd
-49ed6c8e6d160b323f1f2acd75d5daec *man/mix2exp.Rd
-2a272b10b746642a9ee5bbc6cbfc9511 *man/mix2normal1.Rd
-908970d91303cee973dba82825fabd4b *man/mix2poisson.Rd
-6cc2c2af7e4107aebccbe4809d649033 *man/mlogit.Rd
-e41c539196b04b87d33595a73efef01d *man/model.framevlm.Rd
-73bc45aa0257f78953611c9fb6daba39 *man/model.matrixvlm.Rd
-85d73b769924c10e91065f87bf237fb7 *man/moffset.Rd
-7184b188c705a6e326e454f859e76f1b *man/morgenstern.Rd
-21bb447049798227c4080791cb1157b3 *man/multinomial.Rd
-0ef36351d132ce1f91580c5f71237f39 *man/nakagami.Rd
-c69bfdd1afbf8ea05b2d37c27f2b097b *man/nakagamiUC.Rd
-498f65c2a4248ef79d9d8ceaef534069 *man/nbcanlink.Rd
-cf0351ecf6456216e465895afff76ad7 *man/nbolf.Rd
-5f085d3658315ecf2f70d91b422d1baa *man/negbinomial.Rd
-0b6168d2b3d79f02d51dc1f185ad7d35 *man/negbinomial.size.Rd
-70653b46108e5e99fcc5b23b7fe97dda *man/normal1.Rd
-29a2a7258f41ef47450d2de1c261ae87 *man/notdocumentedyet.Rd
-dd58e372f599256d80973bc07c85597b *man/olym.Rd
-0c48bfcd8e3d21e919b3c0f55fd4d8e2 *man/ordpoisson.Rd
+fa9997a45317c4c489bbeb2dad5a4624 *man/mix2exp.Rd
+4f8db594bdcf9cd9e794054ca3e4bd95 *man/mix2normal.Rd
+8cce6252ede9ce339d092542422af715 *man/mix2poisson.Rd
+3916d708fd16ec091b1b85e6eb8ef7fd *man/mlogit.Rd
+a8e171aca3ff63d12fdfd97587a81734 *man/mmt.Rd
+0ba11a09fea865c9841a25948bb4d381 *man/model.framevlm.Rd
+3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
+199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
+a3a01c06a13da4347808bf8c711c6408 *man/morgenstern.Rd
+4cf5efc13bf771e48e16d9fca9634ed7 *man/multinomial.Rd
+01967c3669be9b38d543aa9b4927a4ad *man/nakagami.Rd
+dab44218c0733703c125d8741f07bb80 *man/nakagamiUC.Rd
+9149385b6c2733a76575b094267b9a8f *man/nbcanlink.Rd
+798f2e547a94356359c3d50a57ccef17 *man/nbolf.Rd
+ff125ef7b5a04871d130ab037315e68b *man/negbinomial.Rd
+9f830407df57526fcdbff0044cf54609 *man/negbinomial.size.Rd
+484e10fe67dc21bf6626de4e3d0f941f *man/normal.vcm.Rd
+d1c18da23bbd768038a150e09d7d2920 *man/notdocumentedyet.Rd
+d361e050435d7a4e64474487ecfd782c *man/olym.Rd
+858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
 025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
 24a97e3b9709df47d079f4e2665f497b *man/paralogistic.Rd
 2fc2cf7200b0f4409471aa2e584168a3 *man/paralogisticUC.Rd
-85ba1e6e60fa44f0f79e789bab5616d3 *man/pareto1.Rd
-7d6736ddbbfb94188b43ee784cba88a7 *man/paretoIV.Rd
-00859ab21f1eb0d605d68c2ad78c771c *man/paretoIVUC.Rd
-96c9b961001987506c9e736f053ac2d6 *man/perks.Rd
-e03cf5b8c36eb729c3f9ab0f1520d505 *man/perksUC.Rd
-e3241c34fea9817fe468c92eaeb8ca65 *man/persp.qrrvglm.Rd
+725a5efd172a9dda442a25b138ee2486 *man/paretoIV.Rd
+d0228dcb5ba3bd2a99272100a401c989 *man/paretoIVUC.Rd
+4cdffa085b488c20e9f622b5b211cc25 *man/paretoff.Rd
+f2d5a9aea90051e6675c2701bc58300d *man/perks.Rd
+a3658af3f9766a5ce0dfc20aebdf3186 *man/perksUC.Rd
+e539c4c35e170d0216201a6a002ab5b5 *man/persp.qrrvglm.Rd
 a38168dd57b4be503cf47732714e441b *man/pgamma.deriv.Rd
 8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
-b6d928375ee9738785be7ec7fa66d277 *man/plackUC.Rd
-06966c021b6214237508543c52109d57 *man/plackett.Rd
+8ca9de18625c08de9d4acfa8001c7ca3 *man/plackUC.Rd
+bcda813e9efa01ebeff0c6db8fec5b2b *man/plackett.Rd
 791d04a5c3a3bc514bf0ed1fc639f8ab *man/plotdeplot.lmscreg.Rd
-e6eaf56a6f7b23ede6cbd92dbce502ed *man/plotqrrvglm.Rd
+d4e06a919cfb634b706b8fbb159c2409 *man/plotqrrvglm.Rd
 958dcd119ee66e5d5318c4cf19f024f8 *man/plotqtplot.lmscreg.Rd
-45ee1e3b4fe0a2577f5ea8732f1db0f8 *man/plotrcim0.Rd
-613de2bdef6aabc49d265fd1f9ee3648 *man/plotvgam.Rd
+51e5a6c384fa6d24af24bbea491b0323 *man/plotrcim0.Rd
+8ab7d1738b0a3ad0a1d727d9ae158ef1 *man/plotvgam.Rd
 72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
+652ff39d9d0920ed5e728c8ed5fcc013 *man/plotvglm.Rd
 bbe8bffd4bcfa945d9573d135bb543f3 *man/pneumo.Rd
-9f2d37ecfc67140980a2870d0101f743 *man/pnorm2UC.Rd
-8a2b05c37dc154659b9783eea0c5808b *man/poissonff.Rd
-dab0255f3b6f88ca8362af2570311a2e *man/poissonp.Rd
-fe262a77e1fef4fd1c795b198d040bda *man/polf.Rd
-2b1a116706ced6399a4248853e001d89 *man/polonoUC.Rd
-43997b2ec625ae0093dc7485034085bc *man/posbernUC.Rd
-00637f43cacf2b2fe91af295fe378a66 *man/posbernoulli.b.Rd
-f05048b373dfce9317ddbabb088ef0f1 *man/posbernoulli.t.Rd
-392ccdfd3c141d7654aa10bac5136d04 *man/posbernoulli.tb.Rd
-8953a5a5559f58d0ebbabb3b0e50ba99 *man/posbinomUC.Rd
-cfdbefc16cb1001c3027fedd64e65f66 *man/posbinomial.Rd
-6ec345e5d20c36bdde7b7d09c9b71893 *man/posgeomUC.Rd
-d14c926ed9841f43e6ace38ca9a7529f *man/posnegbinUC.Rd
-ac1f3ebc8db196c11356963d4f82d509 *man/posnegbinomial.Rd
-4d39085d9df2a816cce2efdc10af0825 *man/posnormUC.Rd
-7b1ca086982454d5cedb01496c8c8cdd *man/posnormal1.Rd
-bfa5a34fbeeca1ee107e2fc332f1ec1a *man/pospoisUC.Rd
-c33e0546ca2429e1a4bcb9a56ef992e7 *man/pospoisson.Rd
-2fdf20b0d607f422c2b01ea15f271652 *man/powl.Rd
-f5ca83cbbe57ce6a7e98a0318ddc6aac *man/predictqrrvglm.Rd
-ee617c9486f9db20894440ae145b1cf9 *man/predictvglm.Rd
-f1cf2e37dcc09fba04770ecb055cf646 *man/prentice74.Rd
-f26232b73e5f0c2f323d019ba9e46ada *man/probit.Rd
-811cfe4a15b3b140c48d930e3172a195 *man/propodds.Rd
-ccdfc3f7df34475385a243eae0ab5877 *man/prplot.Rd
+f8662b37b320d93d8ed7a3cb34bff24d *man/poisson.points.Rd
+f3822f18acf678b31e8e22bcc749af59 *man/poisson.pointsUC.Rd
+8521dea982d091e7e1693649fa6a00dd *man/poissonff.Rd
+c0578de27756a8b6912b7940f2de96e5 *man/polf.Rd
+696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
+037cd4e84fd5a18732c784b71f3bb649 *man/posbernUC.Rd
+abc93018c379e95040c4f6e21b1526f8 *man/posbernoulli.b.Rd
+3d9d9160e337b4f988bb9254a705c939 *man/posbernoulli.t.Rd
+cb4ac0ab64c6d3a49650e655ae90db11 *man/posbernoulli.tb.Rd
+7657ea90758d0a212597516214ece468 *man/posbinomUC.Rd
+5897f14bdd81eb3e3ad39c4a91b8bc3f *man/posbinomial.Rd
+bda3e76da50396ae5dcf4f318ec7cfb4 *man/posgeomUC.Rd
+a0e8c366513c21a4d84651b7abff9033 *man/posnegbinUC.Rd
+ed57599c85659a342ec65848f9848806 *man/posnegbinomial.Rd
+c593f447dea64e4e0395ec3629efe430 *man/posnormUC.Rd
+ed1743d16c3327574349781fe338bcc7 *man/posnormal.Rd
+137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
+91a5bc77597943b0c6d75fbf3658d1d6 *man/pospoisson.Rd
+f35d86d08cb2181e69403304101af4e7 *man/powerlink.Rd
+af139c6afa9ed0d34045609975dca53f *man/prats.Rd
+9ca451a2f4a9739a455beafc898d0c42 *man/predictqrrvglm.Rd
+6a2efe9d46b7c686e320469698f9c1c7 *man/predictvglm.Rd
+db3e0667302072ff2451204e735a1952 *man/prentice74.Rd
+37a3381ed799347600ffec8e42e20e3c *man/prinia.Rd
+d1b88140c378a21755511fb4a6ae6bce *man/probit.Rd
+d5531627b3f539ed90d41705b46a72ed *man/propodds.Rd
+dcb96910494ff4a7a8fee201557e5aa6 *man/prplot.Rd
 de570e252375d7052edaa7fb175f67eb *man/put.smart.Rd
-9e2f7efa937bc97a63800de0afe9455c *man/qrrvglm.control.Rd
-ddfc6463c5266b7dd79c7a7e9d3f8f6c *man/qtplot.gumbel.Rd
-7894f8d45225244008021bd30565ea32 *man/qtplot.lmscreg.Rd
-eb986116765a0a7229e0988a343f1b6b *man/quasibinomialff.Rd
-c2efda0141a3df852b775baa18af0c7a *man/quasipoissonff.Rd
-67da92796b1e1d1f8866fee2c8cf4954 *man/rayleigh.Rd
-02bfbc64253593edfa891a19f33acd89 *man/rayleighUC.Rd
-ff8c88be946408af6bf1b0931033ee4d *man/rcqo.Rd
+ea988b454e438711d9e7c9cb46e69925 *man/qrrvglm.control.Rd
+0b4cf628cd3e15b0668ae4ddae4d3ee6 *man/qtplot.gumbel.Rd
+0cbead5889047f9a0a73d9c753ef955e *man/qtplot.lmscreg.Rd
+2d496ded26329ff563f7d838c1f6a2cd *man/quasibinomialff.Rd
+3453476890320828ffadfdf9a7171fcd *man/quasipoissonff.Rd
+6691fe12d23149a7c28a75a62230b2d2 *man/qvar.Rd
+2b0e10b17a1b309556eb8431149cba54 *man/rayleigh.Rd
+8d466449dd9801fe604e7c8e86f8f966 *man/rayleighUC.Rd
+ac35007b1dc2625a01d199b8c7f49503 *man/rcqo.Rd
 1d9601bd76b8c0cddcf567b144b5ef89 *man/rdiric.Rd
-385bd032acb1f2925c49a7748dcb8631 *man/recexp1.Rd
-2af6888fb0758a9fdaf45fc72f844724 *man/reciprocal.Rd
-d3f671ea06066c9bee61317ace112d66 *man/recnormal1.Rd
-9389504a7c7716cb9b183322290b504e *man/rhobit.Rd
-b70c93ab6124de167a4ccab2f8fc2221 *man/riceUC.Rd
-7471692a618c57fe5f5137deadaef4f7 *man/riceff.Rd
-5cfc734589e404f286ce8cda342344bd *man/rig.Rd
-258a5e119f601399b04d4dc51ce2e4ef *man/rlplot.egev.Rd
-801fbf593c190957e8abd87b1a5bbbdf *man/rrar.Rd
-ae184e5777e6d580e7200434a99744e2 *man/rrvglm-class.Rd
-8ba13aec3e907579d7009e2f648daefc *man/rrvglm.Rd
-df2e65a3466384528c48da00a8dd7293 *man/rrvglm.control.Rd
-493070deddef6815cdd2de211f3a65db *man/rrvglm.optim.control.Rd
+e0d655cedebcefe5aa661cf4036b09d6 *man/recexp1.Rd
+49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
+42d285fa089073e392d9c62dd14cf6c0 *man/recnormal.Rd
+a56ddce8598af2320fdadb94c42a9b24 *man/rhobit.Rd
+a35866590c58ec9deb162a0d900bd1f5 *man/riceUC.Rd
+eb9e85df130898be3aac04e146eb0a9c *man/riceff.Rd
+f3302a07fa476f2870dce74a90b1cb77 *man/rigff.Rd
+a0001cabc868c3d8f82232283338499f *man/rlplot.egev.Rd
+3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
+b766f22051e15e2bdf9394746c04e072 *man/rrvglm-class.Rd
+e39a026bc1841c5efacc1a95ca0d788a *man/rrvglm.Rd
+7aa0211643b3345131b2704c71b4404c *man/rrvglm.control.Rd
+59d8b84425a1ce32215e150773386617 *man/rrvglm.optim.control.Rd
 ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
 850477e7023b0617c4dd9bf177881736 *man/s.Rd
-3e48779e7f6cb3965b6b97a3cc6c840c *man/seq2binomial.Rd
+c5192ce3ce27b4a32088eae60a9647b1 *man/seq2binomial.Rd
 71367fe3b494a45c98f9a96e1fd791e0 *man/setup.smart.Rd
-22fd8f8f7a559acaecfbca2c6dbe5818 *man/simplex.Rd
-7cdf80a6cdb171d1f6f9ae200422b159 *man/simplexUC.Rd
+29b795708516a3cc440b5ddb50425004 *man/simplex.Rd
+f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
 4d13e6cf2248dde66a69216540cd2e87 *man/sinmad.Rd
 754b3dbc268f1df1bf8f675da6a2ebf8 *man/sinmadUC.Rd
-8555a29368f14ba2a2ead5344f4ae716 *man/skellam.Rd
-4cdec195b127858706897733934dffc4 *man/skellamUC.Rd
-094fd596b913d88f9941bb26396d4b72 *man/skewnormal1.Rd
+8b7bce1ea79b811ba476d4fab9866530 *man/skellam.Rd
+44b30a23c850108381d7b76c575377eb *man/skellamUC.Rd
+9f648ab47f83275ed69c036a6bd650de *man/skewnormUC.Rd
+d7b412af782b794441e67dbc9848a6a0 *man/skewnormal.Rd
 0c30d059794a31ec06e43da1590496cc *man/slash.Rd
 9d45778b7f284934351777b4b9686c50 *man/slashUC.Rd
 1ed10e28c013e2e08ac5f053b2454714 *man/smart.expression.Rd
 163cdb3e4a225aceee82e2d19488d56e *man/smart.mode.is.Rd
 2b68a9e20182e8892bb7be344e58e997 *man/smartpred.Rd
-d48e1e2fa242ba626e652480e84b0a43 *man/snormUC.Rd
 3849f780d823a1a0aa67bb65ac35510e *man/sratio.Rd
-3fb3e5774481ff1af1ab3dd012fd37c0 *man/studentt.Rd
-2228be8da02861f85cd2bf77d409333f *man/tikuv.Rd
-c0a24f0780ee14e1aadcf261ccf2d80b *man/tikuvUC.Rd
-caedfadbe16b9c5e83dc81c74ba4e20d *man/tobit.Rd
-95db69c0da2ceff7fcb86d6893a861c9 *man/tobitUC.Rd
-5e27256f78d67206249604fee70af378 *man/toxop.Rd
-dd9c86342f896f1b28763fe16a615910 *man/tparetoUC.Rd
-39423c1ea32c5ba0d4286b815ad2712d *man/triangle.Rd
-a262cd49e16acd6fb583cb2aa0fc5a94 *man/triangleUC.Rd
-304a7f28494e6f4a3f6e6bb42d02671f *man/trplot.Rd
-df89cf9f2a94441eaf3d8d625dc992eb *man/trplot.qrrvglm.Rd
-5ddf60a47daa1dde214b91ca9dd7df6d *man/truncweibull.Rd
+db9ae337a136caa28914e5eeea6ae0a6 *man/studentt.Rd
+d033dcd015105ccb015ce263c55adf62 *man/tikuv.Rd
+dc0ae67e1d293040bf2d088e9bd4945b *man/tikuvUC.Rd
+ce3b83e22114d609561ad80353be4f18 *man/tobit.Rd
+53d34225f637b8e3cf35242dc436078b *man/tobitUC.Rd
+8abb5eb3e6670f618e91babde7b396f2 *man/toxop.Rd
+507517bbd56e61a591f43ffff0e1f018 *man/triangle.Rd
+b35739c390fd5566b8851cd070b09492 *man/triangleUC.Rd
+0911220727ac754cb2e370b72c1ba88b *man/trplot.Rd
+c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
+d77a2419400b9ae1059949803b8a1dd2 *man/truncparetoUC.Rd
+686b7a6a1920375d43a8071a22f86e48 *man/truncweibull.Rd
 50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-0f938e4ad276b59e46cabc77a2f8e79f *man/undocumented-methods.Rd
-89ca278b0ede1400678b3525f178aa03 *man/uqo.Rd
-f63e291da13f8a3c89a60e7b174ccd67 *man/uqo.control.Rd
-9ffc09b8e1bca4fe6e4c298e4537adbd *man/venice.Rd
-5d0f6c9e067bd6e7d44891427c0b47ff *man/vgam-class.Rd
-bb56e57215c669e19712b2f3a583172a *man/vgam.Rd
+4ca96b163b2b9c931a39a07f25d040a3 *man/undocumented-methods.Rd
+7e0c6b38a66c0b4feca9420717e08f37 *man/uninormal.Rd
+f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
+ecf0058b783f675c77a3ca1e5ab1a90a *man/vgam-class.Rd
+129c86ebc03ed114be4d836b984fb5e2 *man/vgam.Rd
 c059eb2c3a2c325bd3b9498abe0a5d46 *man/vgam.control.Rd
-3901a430c138688b96027a1c8a96c4fd *man/vglm-class.Rd
-cf27a581829d8d7081e55ebffb0dfecf *man/vglm.Rd
-e9971e040dd16e21b4f4445dcf288faf *man/vglm.control.Rd
+12fd658b80fd45359d027177e43cb3a1 *man/vglm-class.Rd
+f1e74e2eca2768bdd9c0c132c2393266 *man/vglm.Rd
+e1716bf18db44df404475959a6a122a5 *man/vglm.control.Rd
 a8508ebb5ce0d2fed90d3e9e1d081455 *man/vglmff-class.Rd
-9d43253faca810a9baa7f654ac7792b3 *man/vonmises.Rd
-77f9be156a1a59c429db0e480eff0f37 *man/vsmooth.spline.Rd
+b577458c794529beca42f1af8531172e *man/vonmises.Rd
+7787a423c41dec21ed7c4440288ef9b7 *man/vsmooth.spline.Rd
 c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
-e4d3a522ebb0edad3f9f8261d8f40d93 *man/wald.Rd
-b3e006846209fa329deadfc18aab6c9d *man/weibull.Rd
-e3068604e1a1986a32e83c891782a70a *man/weightsvglm.Rd
-a361b06e43268acba1a3ec3f81fd65cd *man/wffc.P2star.Rd
-c4bad409f04a155d39f12f93d489849f *man/wffc.Rd
-48a51ab0fa73a56e7206d44760639788 *man/wffc.indiv.Rd
-a0b29acd25cad083c4bc7ccfa491885e *man/wffc.nc.Rd
-2cf0ef83f7ff09796fbb1f357ac6da61 *man/wffc.teams.Rd
+6b73eaac72f0b349d162f4480684577a *man/waldff.Rd
+dbcdaf4674e14f3e2de11c0fc937a38a *man/weibull.Rd
+0c5747e9524dd180f278210ab769e535 *man/weightsvglm.Rd
 655258cff21a67e1549b204ff3d451a5 *man/wrapup.smart.Rd
-bcb9181a6ca8398fefd44de6552a8938 *man/yeo.johnson.Rd
-e3116eb4708dc7d3a6afdb76e3705284 *man/yip88.Rd
+622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
+d99d82fda3a3f6ab855da4281771753f *man/yip88.Rd
 21a90fbde0228b4e74bba93b50300b54 *man/yulesimon.Rd
 a6128b966f2d5d6df5f36b11bc2c3607 *man/yulesimonUC.Rd
-702b59c0ff9a17b02e63efbe7451ef34 *man/zabinomUC.Rd
-2a4b6a8e46e7fdcc896c4a291d5c2e81 *man/zabinomial.Rd
-7fdb1e52df331edbf0e234b7f455a9e0 *man/zageomUC.Rd
-91a61e2550e8daa836931fcdf23dd8d9 *man/zageometric.Rd
-cbc82d4435bdb4bcf8d8c4a2d5a9e483 *man/zanegbinUC.Rd
-a214209935a1a86d8129d38fe37cc05c *man/zanegbinomial.Rd
-ce015717ce27f27018754d67e3316957 *man/zapoisUC.Rd
-035de7769a8dabd54be20e64592e0bd4 *man/zapoisson.Rd
+b7f37725c67ca8dc93fe767086bfb8e5 *man/zabinomUC.Rd
+6b1e7a5dcefe1eedbf2e7b05989866ce *man/zabinomial.Rd
+7d16a3a8e022ae6d78e55a77e4855241 *man/zageomUC.Rd
+94a6ea72fac92e624106a82761f6c8f9 *man/zageometric.Rd
+ab85bfd95caf0171ec7c515309d7c74f *man/zanegbinUC.Rd
+107900993ddcc02efe5d47b2463d5ddd *man/zanegbinomial.Rd
+14b74bf11cf34181db7919c2f2727d52 *man/zapoisUC.Rd
+1fa7c1787639131cb6d157afdd69a3e0 *man/zapoisson.Rd
 61cce538df41d42d6e5daf8f37635527 *man/zero.Rd
 7985338d08e88fa23cce9cc0a09724b6 *man/zeta.Rd
 e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
-86813485832ea3097bccb17a30752861 *man/zetaff.Rd
-2dcc3a027d670144db7a96b4ccf48949 *man/zibinomUC.Rd
-e012ae5e25cc15fdfba42f127bedf773 *man/zibinomial.Rd
-eac0a99dd131fe06d3ed428eb3f4c515 *man/zigeomUC.Rd
-9ea946fdd3d0189c4d634cfb48dd1f06 *man/zigeometric.Rd
-5a3c5dfb9a9340b0cbd930e1c3c30ad0 *man/zinegbinUC.Rd
-243a21fd3b1684694bfae65502ad9c2e *man/zinegbinomial.Rd
-89d598976784c12c45db5af25d1bc66f *man/zipebcom.Rd
-e8e65cb1b0a3b7ae3bfb81222966024d *man/zipf.Rd
-15d3e6361ff82acece70960b06e13d1b *man/zipfUC.Rd
-e06712314cd3b09f403cfd0aea0b4b31 *man/zipoisUC.Rd
-ccbd33a607fe455f79a9d3248234ac35 *man/zipoisson.Rd
-4aaf5efcfbcf1bdf32b13f632ac3ed0f *src/caqo3.c
+aa30ce673db2dd8df0fe47d43305e6c2 *man/zetaff.Rd
+2a95549db11962cd7175b63cd62fd850 *man/zibinomUC.Rd
+82966f9946785f1c1208ac604b796cde *man/zibinomial.Rd
+cf47526db95bc439da054ac97d2da36f *man/zigeomUC.Rd
+228eb0e6ce7d0cafeb4fa171a858d08a *man/zigeometric.Rd
+b4d704d064746b54f31f7d3d5c7e71c8 *man/zinegbinUC.Rd
+be4a96e387bdaee0622eab5af4547ec5 *man/zinegbinomial.Rd
+aafae05baaa222dc5f54b30553e30caf *man/zipebcom.Rd
+de0330d5599faa509ea5038ab38a7eb2 *man/zipf.Rd
+e83338d61c48dfc56179cf190ec05006 *man/zipfUC.Rd
+0b8c923247c77bffa3dc24440e5d8bae *man/zipoisUC.Rd
+ff3ba2c8f8ad4fb8bd0105e21f944396 *man/zipoisson.Rd
+f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
+ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
+8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
 77ed63cecc681dfebc94a028d0cfc996 *src/fgam.f
 f8fe99dcda865eceb06b66f4976f4bf2 *src/gautr.c
 dc1ca5b4e9a67b6d48c25e7107112d9c *src/lerchphi.c
-9dd33afbac4653b7d8bdbd2794b9c262 *src/lms.f
+c54afdee58cf86ecaf1072c492b49001 *src/lms.f
 9cfd5e51c2dba024afc28b0fffaece4a *src/muxr.c
-1f51508edc95c9a11a4443d19ef759af *src/rgam.f
-ef267a93286cc6c6464fd50192ec0702 *src/rgam3.c
+65ef45ba4e422c33db9848bb549ea93c *src/rgam.f
+bd461ca234f78bf7313a986ad9cdcd4b *src/rgam3.c
+6aee7dc8f242ea6e9446ade5b7edeee5 *src/specfun3.c
+4814bb73b4c3eedc7507ad99511c7dc5 *src/tyeepolygamma.f
 10939d9fb380d54da716a835d37fdf75 *src/tyeepolygamma3.c
 79cf39f1d83f25e29a6c56d344ea8d76 *src/vcall2.f
 83c304cbbe3f0a9bfbe7ab5aa0eefd4e *src/vdigami.f
 3e145d8721d17dbd0e642508c2de1472 *src/veigen.f
-cc72ffc1acb79e253cc97fbe2608e9ed *src/vgam.f
-5d87230c617938f7ed3e71123c30a160 *src/vgam3.c
-f910910e33c21855f63634e4e9a99903 *src/vlinpack1.f
+3046b06e0ff0de2724a8c1d57d2f21c7 *src/vgam.f
+b6bf432138f1f11cef21ef473cac82d9 *src/vgam3.c
+bbb4ca20dcf50cd985b411b9a65b68f2 *src/vlinpack1.f
 80c0a0f512ae74ecbed144c5f115fb16 *src/vlinpack2.f
 e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
-9e424b144361fdaa0d8573729df1d442 *src/vmux.f
-0317d171d3fa308b4e19e2c386341945 *src/vmux3.c
-d5c3783cc318a8e1c0b7aafcf5849dee *src/zeta3.c
+4510a716373a71c3f2da66ddb4d39267 *src/vmux.f
+df3bc03743117dbb36e3a74f6ccb9d21 *src/vmux3.c
+b19585d2495c46800b0c95f347fe89f9 *src/zeta3.c
+ab0ec8b49daf41071e1636bb52b71294 *vignettes/categoricalVGAM.Rnw
+e4c5415e487f533b70695b17e40d97bc *vignettes/categoricalVGAMbib.bib
diff --git a/NAMESPACE b/NAMESPACE
index 75c0eb1..4f3c56b 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -7,6 +7,35 @@
 useDynLib(VGAM)
 
 
+export(family.name.vlm)
+export(family.name.vglmff)
+exportMethods(family.name)
+export(logLik.qrrvglm)
+
+
+
+importMethodsFrom("methods")
+
+
+
+importFrom("stats4", BIC)
+exportMethods(BIC)
+export(BICvlm)
+export(check.omit.constant)
+export(I.col)
+
+
+
+export(dbiclaytoncop, rbiclaytoncop, biclaytoncop)
+
+
+export(bistudentt, dbistudentt)
+export(dbinormcop, pbinormcop, rbinormcop, binormalcop)
+export(kendall.tau)
+
+export(expint, expexpint, expint.E1)
+
+
 export(pgamma.deriv, pgamma.deriv.unscaled, truncweibull)
 
 export(binom2.rho.ss)
@@ -43,8 +72,8 @@ importMethodsFrom("stats4")
 
 
 
-importFrom(stats4, AIC, coef, summary, plot, logLik, vcov)
-exportMethods(AIC, coef, summary, plot, logLik, vcov)
+importFrom("stats4", AIC, coef, summary, plot, logLik, vcov)
+exportMethods(AIC, AICc, coef, summary, plot, logLik, vcov)
 
 
 export(npred, npred.vlm)
@@ -81,10 +110,10 @@ export(Confint.rrnb, Confint.nb1)
 export(vcovrrvglm)
 
 
-export(posbernoulli.b, posbernoulli.t, posbernoulli.tb, aux.posbernoulli)
+export(posbernoulli.b, posbernoulli.t, posbernoulli.tb,
+       aux.posbernoulli.t)
 export(N.hat.posbernoulli)
 export(dposbern, rposbern)
-export(posbern.aux)
 
 
 
@@ -113,7 +142,7 @@ variable.namesvlm
 export(expgeometric, dexpgeom, pexpgeom, qexpgeom, rexpgeom,
 genrayleigh, dgenray, pgenray, qgenray, rgenray,
 exppoisson, dexppois, pexppois, qexppois, rexppois,
-explogarithmic, dexplog, pexplog, qexplog, rexplog)
+explogff, dexplog, pexplog, qexplog, rexplog)
 
 
 
@@ -121,7 +150,7 @@ explogarithmic, dexplog, pexplog, qexplog, rexplog)
 export(Rcim, plotrcim0,
 rcim, summaryrcim)
 export(moffset)
-export(plotqvar, Qvar)
+export(plotqvar, Qvar, qvar)
 export(depvar, depvar.vlm)
 
 
@@ -138,7 +167,6 @@ export(
 d2theta.deta2, Deviance.categorical.data.vgam, 
 lm2qrrvlm.model.matrix,
 m2avglm, 
-dotFortran, dotC,
 dimm)
 
 
@@ -151,8 +179,10 @@ put.smart)
 
 
 
-export(
-dbinorm, binormal)
+export(dbinorm, pbinorm, rbinorm, binormal)
+
+
+export(pnorm2, dnorm2)
 
 
 
@@ -165,52 +195,52 @@ export(iam,
 fill, fill1, fill2, fill3,
 abbott,
 amh, damh, pamh, ramh, 
-bivgamma.mckay,
+bigamma.mckay,
 freund61,
 frechet2, dfrechet, pfrechet, qfrechet, rfrechet,
-frank, dfrank, pfrank, rfrank, 
+bifrankcop, dbifrankcop, pbifrankcop, rbifrankcop, 
 plackett, dplack, pplack, rplack, 
 benini, dbenini, pbenini, qbenini, rbenini, 
 maxwell, dmaxwell, pmaxwell, qmaxwell, rmaxwell,
 morgenstern,
-dfgm, pfgm, rfgm, fgm,
-gumbelIbiv,
-erf, erfc, lerch, lambertW,
-tpareto1, dtpareto, qtpareto, rtpareto, ptpareto,
-pareto1, dpareto, qpareto, rpareto, ppareto,
+fgm, dfgm, pfgm, rfgm,
+bigumbelI,
+erf, erfc, lerch, lambertW, log1pexp,
+truncpareto, dtruncpareto, qtruncpareto, rtruncpareto, ptruncpareto,
+paretoff, dpareto, qpareto, rpareto, ppareto,
 paretoIV, dparetoIV, qparetoIV, rparetoIV, pparetoIV,
 paretoIII, dparetoIII, qparetoIII, rparetoIII, pparetoIII,
 paretoII, dparetoII, qparetoII, rparetoII, pparetoII,
 dparetoI, qparetoI, rparetoI, pparetoI,
 cgumbel, egumbel, gumbel,
 dgumbel, pgumbel, qgumbel, rgumbel, 
-fnormal1, dfnorm, pfnorm, qfnorm, rfnorm,
-cennormal1, dcennormal1,
-recnormal1, recexp1,
+foldnormal, dfoldnorm, pfoldnorm, qfoldnorm, rfoldnorm,
+cennormal1,
+cennormal, double.cennormal,
+recnormal, recnormal.control, recexp1,
 cenrayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh, 
 drice, rrice, riceff,
 dskellam, rskellam, skellam,
-inv.gaussianff, dinv.gaussian, pinv.gaussian, rinv.gaussian, wald,
+inv.gaussianff, dinv.gaussian, pinv.gaussian, rinv.gaussian, waldff,
 expexp1, expexp)
 
 
 
 
 export(A1A2A3, a2m, AAaa.nohw,
-AICvlm, AICvgam, AICrrvglm, 
-AICqrrvglm, # AICvglm, 
+AICvlm, AICvgam, AICrrvglm, AICqrrvglm, # AICvglm, 
 anova.vgam,
 anova.vglm, 
 bisa, dbisa, pbisa, qbisa, rbisa,
 betabinomial.ab, betabinomial,
-dexpbinomial,
+double.expbinomial,
 dbetabinom,    pbetabinom,    rbetabinom,
 dbetabinom.ab, pbetabinom.ab, rbetabinom.ab,
 biplot.qrrvglm,
 dbort, rbort, borel.tanner,
 care.exp,
 cauchy, cauchy1,
-ccoef.cao, ccoef.Coef.cao, ccoef.Coef.qrrvglm, ccoef.qrrvglm,
+concoef.cao, concoef.Coef.cao, concoef.Coef.qrrvglm, concoef.qrrvglm,
 cdf, cdf.lms.bcg, cdf.lms.bcn,
 cdf.lms.yjn, cdf.vglm, 
 Coef.cao, Coefficients,
@@ -220,18 +250,18 @@ coefvsmooth.spline, coefvsmooth.spline.fit,
 constraints, constraints.vlm, 
 deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
 deplot.lms.yjn, deplot.lms.yjn2, deplot.vglm, 
-deviance.uqo, deviance.vglm, deviance.vlm,
+deviance.vglm, deviance.vlm,
 df.residual_vlm,
 dirmultinomial, dirmul.old,
-dnorm2,
 dtheta.deta)
 
 
-export(cloglog,cauchit,elogit,explink,fisherz,logc,loge,logit,
-       logoff,nreciprocal,
+export(cloglog,cauchit,elogit,explink,fisherz,logc,loge,logneg,logit,
+       logoff,negreciprocal,
        probit,reciprocal,rhobit,
        golf,polf,nbolf,nbolf2,Cut)
-export(ordpoisson, poissonp)
+export(ordpoisson)
+export(poisson.points, dpois.points)
 
 
 
@@ -239,7 +269,7 @@ export(ordpoisson, poissonp)
 export(m2adefault, 
 erlang,
 dfelix, felix,
-fitted.values.uqo, fittedvlm, fittedvsmooth.spline, fsqrt,
+fittedvlm, fittedvsmooth.spline, fsqrt,
 formulavlm, formulaNA.VGAM,
 garma, gaussianff,
 hypersecant, hypersecant.1, 
@@ -253,8 +283,8 @@ lms.bcg, lms.bcn, lms.yjn, lms.yjn2,
 lqnorm,
 dbilogis4, pbilogis4, rbilogis4, bilogistic4,
 logistic1, logistic2,
-logLik.vlm, lv.cao,
-latvar.Coef.qrrvglm, latvar.qrrvglm,
+logLik.vlm,
+latvar.cao, latvar.Coef.qrrvglm, latvar.rrvglm, latvar.qrrvglm,
 lvplot.cao,
 Rank, Rank.rrvglm, Rank.qrrvglm, Rank.cao,
 Max.Coef.qrrvglm, Max.qrrvglm,
@@ -264,7 +294,7 @@ model.matrixvlm,
 model.framevlm,
 nakagami, dnaka, pnaka, qnaka, rnaka,
 namesof,
-nlminbcontrol, nloge,
+nlminbcontrol, negloge,
 Opt.Coef.qrrvglm, Opt.qrrvglm, persp.cao)
 
 
@@ -274,19 +304,20 @@ export( micmen )
 export( plot.cao,
 plotpreplotvgam,
 plotvglm, plotvlm,
-plotvsmooth.spline, pnorm2, powl,
+plotvsmooth.spline, powerlink,
 predict.cao, predictcao,
 predictors, predictors.vglm,
-predictqrrvglm, predict.rrvglm, predict.uqo, predict.vgam,
+predictqrrvglm, predict.rrvglm,
+predict.vgam,
 predictvglm, predict.vlm, predictvsmooth.spline,
 predictvsmooth.spline.fit,
   show.Coef.cao,
   show.Coef.qrrvglm, show.Coef.rrvglm, show.rrvglm,
   show.summary.cao, show.summary.qrrvglm,
-  show.summary.rrvglm, show.summary.uqo,
+  show.summary.rrvglm,
   show.summary.vgam,
   show.summary.vglm,
-  show.summary.vlm, show.uqo,
+  show.summary.vlm,
   show.vanova,
 show.vgam, show.vglm, show.vlm,
  show.vglmff,
@@ -300,26 +331,25 @@ explot.lms.bcn,
 rlplot,
 rlplot.egev, rlplot.gev,
 rlplot.vextremes, rlplot.vglm,
-rlplot, rlplot.vglm, rrar.control,
-rrvglm.control.Gaussian)
+rlplot, rlplot.vglm, rrar.control)
 
 
 export(
 SurvS4, is.SurvS4, as.character.SurvS4,
 show.SurvS4,
 simple.exponential, simple.poisson,
-mbinomial,
+matched.binomial,
 seq2binomial, size.binomial,
 stdze1, stdze2,
 summary.cao, summary.grc,
   summary.qrrvglm,
-summary.rrvglm, summary.uqo,
+summary.rrvglm,
 summaryvgam, summaryvglm, summaryvlm,
 s.vam, terms.vlm, 
-theta2eta, Tol.Coef.qrrvglm, Tol.Coef.uqo, Tol.qrrvglm, Tol.uqo,
+theta2eta, Tol.Coef.qrrvglm, Tol.qrrvglm,
 triangle, dtriangle, ptriangle, qtriangle, rtriangle, 
   vcovvlm,
-vglm.fit,
+vglm.fit, vgam.fit,
 vglm.garma.control, vglm.multinomial.control,
 vglm.multinomial.deviance.control, vglm.vcategorical.control,
 vlm, vlm.control,
@@ -433,7 +463,7 @@ bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
 calibrate, cao.control,
 cao,
 cdf.lmscreg, cgo, chisq, clo, 
-ccoef,
+ccoef, concoef,
 Coef, Coef.qrrvglm, Coef.rrvglm, Coef.vlm,
 predictqrrvglm,
 cratio, cumulative, propodds, prplot, prplot.control)
@@ -458,15 +488,17 @@ geometric, truncgeometric,
 dlino, plino, qlino, rlino, lino, 
 grc,
 dhzeta, phzeta, qhzeta, rhzeta, hzeta, 
-nidentity, identity,
+negidentity, identity,
 prentice74,
 amlnormal, amlbinomial, amlexponential, amlpoisson, Wr1, Wr2,
 dkumar, pkumar, qkumar, rkumar, kumar,
 dyules, pyules, ryules, yulesimon, 
 logff, dlog, plog, rlog,
+logF, dlogF,
 loglinb2, loglinb3,
-loglog, lognormal3, lvplot.qrrvglm,
-lvplot, lvplot.rrvglm, lv, latvar, Max, MNSs,
+loglog, lognormal3,
+lvplot.qrrvglm, lvplot.rrvglm,
+Max, MNSs,
 dmultinomial, multinomial, margeff)
 
 
@@ -489,7 +521,7 @@ export(
 meplot, meplot.default, meplot.vlm,
 guplot, guplot.default, guplot.vlm,
 negbinomial, negbinomial.size, polya,
-normal1, SUR,
+uninormal, SUR, normal.vcm,
 nbcanlink,
 tobit, dtobit, ptobit, qtobit, rtobit,
 Opt, 
@@ -500,9 +532,9 @@ poissonff,
 dposbinom, pposbinom, qposbinom, rposbinom, posbinomial,
 dposgeom, pposgeom, qposgeom, rposgeom, # posgeometric,
 dposnegbin, pposnegbin, qposnegbin, rposnegbin, posnegbinomial,
-dposnorm, pposnorm, qposnorm, rposnorm, posnormal1,
+dposnorm, pposnorm, qposnorm, rposnorm, posnormal, posnormal.control,
 dpospois, ppospois, qpospois, rpospois, pospoisson,
-qtplot.lmscreg, quasibinomialff, quasipoissonff, rdiric, rig,
+qtplot.lmscreg, quasibinomialff, quasipoissonff, rdiric, rigff,
 rrar, rrvglm.control,
 rrvglm.optim.control)
 
@@ -517,30 +549,25 @@ trplot,
 rcqo,
 cqo,
 qrrvglm.control,
-uqo.control, uqo,
 vgam.control, vgam, vglm.control, vglm,
 vsmooth.spline,
 weibull, yip88,
-dzanegbin, pzanegbin, qzanegbin, rzanegbin, zanegbinomial,
-dzabinom, pzabinom, qzabinom, rzabinom, zabinomial,
-dzapois, pzapois, qzapois, rzapois, zapoisson,
-dzibinom, pzibinom, qzibinom, rzibinom, zibinomial,
-dzinegbin, pzinegbin, qzinegbin, rzinegbin, zinegbinomial,
-dzigeom, pzigeom, qzigeom, rzigeom, zigeometric,
-dzageom, pzageom, qzageom, rzageom, zageometric,
-dzipois, pzipois, qzipois, rzipois,
-zipoisson, zipoissonff,
-mix2exp, mix2normal1, mix2poisson,
-mix2exp.control, mix2normal1.control, mix2poisson.control,
-skewnormal1, dsnorm, rsnorm,
+dzabinom, pzabinom, qzabinom, rzabinom, zabinomial, zabinomialff,
+dzageom, pzageom, qzageom, rzageom, zageometric, zageometricff,
+dzanegbin, pzanegbin, qzanegbin, rzanegbin, zanegbinomial, zanegbinomialff,
+dzapois, pzapois, qzapois, rzapois, zapoisson, zapoissonff,
+dzibinom, pzibinom, qzibinom, rzibinom, zibinomial, zibinomialff,
+dzigeom, pzigeom, qzigeom, rzigeom, zigeometric, zigeometricff,
+dzinegbin, pzinegbin, qzinegbin, rzinegbin, zinegbinomial, zinegbinomialff,
+dzipois, pzipois, qzipois, rzipois, zipoisson, zipoissonff,
+mix2exp, mix2normal, mix2poisson,
+mix2exp.control, mix2normal.control, mix2poisson.control,
+skewnormal, dskewnorm, rskewnorm,
 tikuv, dtikuv, ptikuv, qtikuv, rtikuv)
 
 
 
 
-export(DeLury,
-       wffc.P1, wffc.P1star, wffc.P2, wffc.P2star, wffc.P3, wffc.P3star
-)
 
 
 
@@ -549,11 +576,11 @@ export(DeLury,
 
 exportClasses(vglmff, vlm, vglm, vgam,
 rrvglm, qrrvglm, grc,  rcim, 
-vlmsmall, uqo, cao,
+vlmsmall, cao,
 summary.vgam, summary.vglm, summary.vlm,
 summary.qrrvglm,
 summary.cao, summary.rrvglm, 
-Coef.rrvglm, Coef.uqo, Coef.qrrvglm, Coef.cao,
+Coef.rrvglm, Coef.qrrvglm, Coef.cao,
 vcov.qrrvglm,
 vsmooth.spline.fit, vsmooth.spline)
 
@@ -580,11 +607,12 @@ model.matrix,
 summary,
 coef,
 AIC,
+AICc,
 plot,
 logLik,
 vcov,
 deviance,
-calibrate, cdf, ccoef, df.residual,
+calibrate, cdf, df.residual,
 lv, latvar, Max, Opt, Tol,
 biplot, deplot, lvplot, qtplot, rlplot, meplot,
 trplot, vplot,
@@ -593,6 +621,6 @@ weights,
 persp)
 
 
- exportMethods(AIC, coef, summary, plot, logLik, vcov)
+ exportMethods(AIC, AICc, coef, summary, plot, logLik, vcov)
 
 
diff --git a/NEWS b/NEWS
index ac59fa3..8d16b2c 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,176 @@
 
 
 
+                CHANGES IN VGAM VERSION 0.9-3
+
+NEW FEATURES
+
+    o   New argument:
+        posbinomial(omit.constant = FALSE), set to TRUE if
+        comparing M_0/M_h models with M_b/M_t/M_tb/M_bh/M_th/M_tbh.
+    o   rcim() works with family = multinomial, in conjunction with
+        the arguments M and cindex, which must be specified.
+        rcim() also has additional arguments and new defaults.
+    o   New arguments:
+        positive Bernoulli functions have 'p.small' and 'no.warning'
+        arguments.
+    o   AICc() is new; a brief usage sketch follows this list.
+    o   family.name() generic is new.
+    o   New data sets: prinia.
+    o   logLik() methods function for "qrrvglm" objects is new.
+        AIC() methods function for "qrrvglm" objects is corrected.
+        AIC() methods function for "cao" objects is new.
+
+
+BUG FIXES and CHANGES
+
+    o   vgam() with nontrivial constraints gives incorrect
+        predict(vgam.object) and fitted(vgam.object) results.
+        This has not been fixed yet, but a fix is planned soon.
+        Thanks to Zachary Kurtz for picking this up.
+    o   Argument 'which.lp' changed to 'which.linpred'.
+        Argument 'which.eta' changed to 'which.linpred'.
+        Argument 'lapred.index' changed to 'linpred.index'.
+        Argument 'whichSpecies' changed to 'which.species'.
+        Argument 'plot.it' changed to 'show.plot'.
+        Argument 'intervalWidth' in plotqvar() changed to 'interval.width'.
+    o   Decommissioned VGAM family functions: cennormal1().
+    o   posbinomial() returns @extra$N.hat and @extra$SE.N.hat
+        if the number of trials is constant across observations.
+    o   calibrate() restored to working order.
+    o   Argument names changed:
+        'szero' renamed to 'str0' throughout,
+        'allowable.length' renamed to 'length.arg' in is.Numeric().
+    o   Function uqo() has been withdrawn. Reasons:
+        (i)  it needs to be rewritten in C, but unfortunately I am
+             too busy at present;
+        (ii) it is a very difficult optimization problem, probably too
+             difficult to solve efficiently in general.
+    o   Arguments in rcqo() have changed.
+    o   Data set Perom withdrawn, but deermice remains.
+    o   Argument 'zero' in binom2.or() had a bug.
+
+
+
+                CHANGES IN VGAM VERSION 0.9-2
+
+NEW FEATURES
+
+    o   New family functions: logF(d), biclaytoncop(dr), binormalcop(dp),
+        bistudentt(d), and a basic normal.vcm(),
+        zabinomialff(), zageometricff(), zanegbinomialff(), zapoissonff(),
+        zibinomialff(), zigeometricff(), zinegbinomialff().
+    o   cao.control() has a new argument 'suppress.warnings = TRUE',
+        which suppresses warnings (esp. lack of convergence) by default.
+    o   The convergence criterion now takes the sample size into
+        account, to some extent. This should prevent premature
+        convergence for very large data sets.
+    o   New functions: dpois.points(), log1pexp(), expint(), expexpint(),
+        expint.E1(), dbinorm(), rbinorm(), kendall.tau(), qvar().
+        Also, depvar(type = c("lm", "lm2")) has a 'type' argument.
+        Also, aux.posbernoulli.t() is new.
+    o   New link functions: logneg().
+    o   New data sets: beggs, corbet, deermice, machinists, prats, V1.
+    o   Argument 'form2' added to vgam(), so vgam.fit() has been modified too.
+    o   posbernoulli.tb() now appears to be correct, and works for any
+        number of sampling occasions. Also, posbernoulli.[b,t,tb]() have
+        more argument choices.
+    o   BIC() is now available; it is based on
+        AIC(..., k = log(nobs(object))), but users should use it with care.
+        Also, AICvlm() has a 'corrected = FALSE' argument.
+    o   fittedvlm() now has a 'type.fitted' argument that allows
+        different fitted values to be computed from a vglm()/vgam()
+        object. Several family functions such as zi*()
+        [e.g., zipoisson()] and za*() [e.g., zapoisson()]
+        have a 'type.fitted' argument that matches it.
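+        A minimal usage sketch of 'type.fitted' (an editor's
+        illustration, not part of the upstream NEWS; the response y,
+        covariate x, and data frame zdata are hypothetical, and the
+        permissible 'type.fitted' values are those documented by each
+        family function):
+            zfit <- vglm(y ~ x, family = zipoisson, data = zdata)
+            head(fitted(zfit))                        # default fitted values
+            head(fitted(zfit, type.fitted = "mean"))  # mean of the distribution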
+
+
+BUG FIXES and CHANGES
+
+    o   Default arguments have changed, esp. wrt 'zero' for:
+        zibinomial(), zinegbinomial().
+    o   cao() used to crash due to memory problems and segmentation faults.
+    o   Syntax such as parallel = TRUE ~ 1 is now supported. Hence argument
+        'apply.parint' has been removed.
+    o   posbernoulli.b() has a new and superior parameterization, and is faster.
+    o   Printed output when trace = TRUE has been improved, especially
+        for large data sets.
+    o   For ordination methods "lv" has been generally replaced by
+        "latvar".
+        "latvar()" is supported, "lv()" will become fully
+        deprecated soon.  But "lvplot()" is retained.
+        Also, this applies to most argument names and list
+        component names returned, e.g.,
+        OLD NAME:                 NEW NAME:
+        isdlv                     isd.latvar
+        varlvI                    varI.latvar
+        lvOrder                   latvar.order
+        OptimumOrder              Optimum.order
+        maxfitted                 max.fitted
+        SD.Ainit                  sd.Ainit
+        SD.Cinit                  sd.Cinit
+        SD.sitescores             sd.sitescores
+    o   For ordination methods "ccoef" has been generally replaced by
+        "concoef". This applies to most methods functions.
+        Attributes have changed too, from "ccoefficients"
+        to "con.coefficients".
+    o   VGAM now suggests \pkg{VGAMdata}.
+    o   Renamed VGAM family functions:
+        OLD NAME:                 NEW NAME:
+        bivgamma.mckay()          bigamma.mckay()
+        cennormal1()              cennormal()
+        dcennormal1()             double.cennormal()
+        dexpbinomial()            double.expbinomial()
+        explogarithmic()          explogff()
+        frank()                   bifrankcop(dpr)
+        [dpr]frank()              [dpr]bifrankcop()
+        fnormal1()                foldnormal()
+        [dpqr]fnorm()             [dpqr]foldnorm()
+        gumbelIbiv()              bigumbelI()
+        mbinomial()               matched.binomial()
+        mix2normal1()             mix2normal()
+        mix2normal1.control()     mix2normal.control()
+        nidentity()               negidentity()
+        normal1()                 uninormal()
+        nloge()                   negloge()
+        pnorm2()                  pbinorm(dpr)
+        pareto1()                 paretoff()
+        poissonp()                poisson.points()
+        powl()                    powerlink()
+        recnormal1(d)             recnormal()
+        rig()                     rigff()
+        skewnormal1()             skewnormal()
+        [dr]snorm()               [dr]skewnorm()
+        tpareto1()                truncpareto()
+        wald()                    waldff()
+    o   Decommissioned functions:
+        OLD                       NEW
+        dnorm2()                  dbinorm()
+        pnorm2()                  pbinorm()
+    o   Renamed internal functions:
+        OLD                       NEW
+        lv.cao()                  latvar.cao()
+    o   Renamed arguments:
+        OLD                       NEW
+        equalsd                   eq.sd
+    o   Internally, variable identifiers with "_" have been replaced
+        by a ".", e.g., X_vlm becomes X.vlm.
+        Saved component names follow this change too, e.g.,
+        @extra$ncols_X_lm becomes @extra$ncols.X.lm.
+    o   Improved:
+        fgm() has its explicit EIM programmed in.
+    o   summary() applied to a "rcim0" or "rcim" object now works.
+    o   Family functions which have changed: zigeometric().
+    o   Slotname "rss" changed to "res.ss".
+    o   zinegbinomial()@weight continues to use Fisher scoring until
+        the random variates are not all zeros or all nonzeros.
+    o   loglinb2(zero = 3) and loglinb3(zero = 4:6) are defaults
+        now (used to be zero = NULL).
+    o   Data sets moved to \pkg{VGAMdata}: wffc, wffc.nc, etc.
+    o   stats::print.anova() no longer called directly by lrtest().
+
+
+
                 CHANGES IN VGAM VERSION 0.9-1
 
 NEW FEATURES
@@ -380,7 +550,7 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    Objects of class "vglmff" have a "infos" slot to give
+    o   Objects of class "vglmff" have a "infos" slot to give
         information about the family.
     o   New functions: lambertW(), rcam(), wffc.P3(),
         wffc.P3star(), confint_rrnb(), confint_nb1().
@@ -522,15 +692,15 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    New functions: margeff() for marginal effects of a vglm()
+    o   New functions: margeff() for marginal effects of a vglm()
         "multinomial" or "cumulative" model.
-    o    Almost all VGAM family functions now have a "loglikelihood"
+    o   Almost all VGAM family functions now have a "loglikelihood"
         slot that incorporates any constants in the density function.
         Hence the fitted likelihood may differ by a constant from
         previous results.
         In particular, models such as multinomial(), cumulative() 
         and binom2.or() have this new feature.
-    o    vglm() now has a modified 'xij' argument which implements
+    o   vglm() now has a modified 'xij' argument which implements
         eta-specific covariates. Usage now involves the form2' argument,
         and the 'xij' argument does not interfere with constraint matrices.
     Documentation is supplied on the VGAM website, in particular,
@@ -545,7 +715,7 @@ NEW FEATURES
     o   testf90.f90 has been renamed to testf90.f95. This may decrease
         the incidences of compiler problems on some platforms (f95 seems
         more popular than f90).
-    o    For cqo() objects: AIC(), resid() have been written.
+    o   For cqo() objects: AIC(), resid() have been written.
     o   Improved functions: negbinomial() default initial values are
         more robust to outliers in the response, betabinomial() and
         betabin.ab() have better initialization and "loglikelihood"
@@ -568,9 +738,9 @@ BUG FIXES
     o   VGAM interferes much less in regard to generic functions
         such as predict(), fitted(), resid(), wrt other packages and
         also including base's lm(), glm(), etc.
-    o    AIC() method for rrvglm() objects was wrong (did not account
+    o   AIC() method for rrvglm() objects was wrong (did not account
         for argument 'Structural.zero').
-    o    dzibinom(log=TRUE) was wrong.
+    o   dzibinom(log=TRUE) was wrong.
 
 
 
@@ -944,13 +1114,13 @@ NEW FEATURES
 
 BUG FIXES
 
-    o    Family functions lognormal(), lognormal3() now include the
+    o   Family functions lognormal(), lognormal3() now include the
         1/sqrt(2*pi) constant in @loglikelihood because of its use of
         dnorm(..., log=TRUE) and dlnorm(..., log=TRUE).
-    o    [dpqr]lognormal() withdrawn as they exist in R already.
-    o    Documentation for betaff() contained mistakes.
-    o    summary() of a betabin.ab() object used to fail.
-    o    The assign statement has been removed from some FORTRAN code.
+    o   [dpqr]lognormal() withdrawn as they exist in R already.
+    o   Documentation for betaff() contained mistakes.
+    o   summary() of a betabin.ab() object used to fail.
+    o   The assign statement has been removed from some FORTRAN code.
 
 
 
@@ -1134,12 +1304,12 @@ NEW FEATURES
 
 NEW FEATURES
 
-    o    model.frame() and model.matrix() are roughly working for
+    o   model.frame() and model.matrix() are roughly working for
         objects that inherit from "vlm"s, e.g., "vglm" objects.
         Both of these methods functions accept a "data"
         argument etc.
         Also, for these, smart prediction works.
-    o    A methods function for the generic function weights() has
+    o   A methods function for the generic function weights() has
         been written for VGLM objects. It returns
         either the prior or working weights.
 
@@ -1162,39 +1332,39 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    New functions: cao() for "constrained additive ordination", 
+    o   New functions: cao() for "constrained additive ordination", 
         and uqo() for "unconstrained quadratic ordination".
         Both of these are unfinished but will hopefully be
         completed in the forseeable future. 
-    o    The function cgo() has been renamed to cqo(). Ouch!
+    o   The function cgo() has been renamed to cqo(). Ouch!
         CQO stands for "constrained quadratic ordination", and is 
         better than the old name cgo(), for 
         canonical Gaussian ordination.
-    o    The inverse() link function has been renamed to reciprocal().
-    o    More documentation: loglinb2() and loglinb3().
-    o    zipbipp() renamed to zapoisson(), where "za" stand for
+    o   The inverse() link function has been renamed to reciprocal().
+    o   More documentation: loglinb2() and loglinb3().
+    o   zipbipp() renamed to zapoisson(), where "za" stands for
         "zero-altered". This is more in line with the literature.
         New families: zanegbin.mu, positive.negbin.mu.
         New random variates: rposnegbin.mu, rpospois.
-    o    negbin.mu() works now for cgo(). The subsequent methods
+    o   negbin.mu() works now for cgo(). The subsequent methods
         functions have been adapted to work on it too.
         However, negbin.mu() is not recommended because maximum
         likelihood estimation of the index parameter is fraught
         numerically. It is better to use quasipoissonff().
-    o    cgo() now uses the function .Init.Poisson.CGO() to obtain
+    o   cgo() now uses the function .Init.Poisson.CGO() to obtain
         initial values for the canonical coefficients, C.
         The argument Use.Init.Poisson.CGO in qrrvglm.control()
         now controls this feature.
-    o    Lazy loading has been enabled for the VGAM package.
-    o    Name spaces has been introduced into the VGAM package.
-         The consequencies of this might be far reaching for
+    o   Lazy loading has been enabled for the VGAM package.
+    o   Name spaces have been introduced into the VGAM package.
+        The consequences of this might be far-reaching for
         code heavily based on the internals of the VGAM package.
-    o    The application of name spaces means "ff" can be dropped
+    o   The application of name spaces means "ff" can be dropped
         from certain family functions. In particular, poisson() can
         be used instead of poissonff(), and binomial() instead
         of binomialff().  Ditto for quasipoissonff() and
         quasibinomialff().
-    o    names.of() changed to namesof(). Many other function names
+    o   names.of() changed to namesof(). Many other function names
         have been changed, particularly those of the S3 classes
         such as coef. something, e.g., coef.vlm to coefvlm.
         In general, S3 methods functions such as print.summary.vlm have
@@ -1225,24 +1395,24 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    zipbipp() and zipoissonX() are new alternatives to yip88(). 
+    o   zipbipp() and zipoissonX() are new alternatives to yip88(). 
         They fit a zero-inflated Poisson distribution.
         Both can handle covariates for both parameters (p0 or 
         phi, and lambda).
-         zipbipp() is recommended over the others. 
+        zipbipp() is recommended over the others. 
         zipoissonX() is experimental at this stage
         and should be used with caution.
         rpospois() is new.
-    o    More documentation: rhobit and binom2.rho.
-    o    binom2.or() now has lp1 and lp2 arguments, which allow 
+    o   More documentation: rhobit and binom2.rho.
+    o   binom2.or() now has lp1 and lp2 arguments, which allow 
         a different link function for each of the two marginal
         probabilities (a short usage sketch follows at the end of this list).
-    o    bratt() is a new family function. It fits the Bradley Terry
+    o   bratt() is a new family function. It fits the Bradley Terry
         model with ties.
-    o    flush.console() is used if it exists. This will make
+    o   flush.console() is used if it exists. This will make
         the Windows version nicer for large data sets and when
         trace=TRUE is used. 
-    o    wweights() extracts the working weights of an object.
+    o   wweights() extracts the working weights of an object.
         Used to be called vweights().
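
         A short sketch of the binom2.or() item above (the link choices,
         the data frame 'mydat' and its four-column count response are
         assumptions, not shown in this patch):

             fit <- vglm(cbind(y00, y01, y10, y11) ~ x2,
                         binom2.or(lp1 = "probit", lp2 = "cloglog"),
                         data = mydat)
             coef(fit, matrix = TRUE)  # one column per linear predictor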
 
 
@@ -1267,7 +1437,7 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    demo files now are avaible for VGAM. These include lmsqreg,
+    o   demo files are now available for VGAM. These include lmsqreg,
         distributions, and cgo. More will be added later.
 
 
@@ -1283,11 +1453,11 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    cgo(..., FastAlgorithm=TRUE) has been fined-tuned to give
+    o   cgo(..., FastAlgorithm=TRUE) has been fine-tuned to give
         greater speed and accuracy.
-    o    lms.yjn() uses FORTRAN code to implement the Gauss-Legendre
+    o   lms.yjn() uses FORTRAN code to implement the Gauss-Legendre
         algorithm. This results in greater accuracy.
-    o    More documentation, especially for family functions for
+    o   More documentation, especially for family functions for
         extreme values modelling.
 
 
@@ -1302,9 +1472,9 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    persp.qrrvglm() has been written to apply persp() to
+    o   persp.qrrvglm() has been written to apply persp() to
         a rank-2 CGO model.
-    o    cgo(..., FastAlgorithm=TRUE) now has a logical argument
+    o   cgo(..., FastAlgorithm=TRUE) now has a logical argument
         GradientFunction, which if TRUE (default), computes the
         derivatives by using finite-difference approximations.
         The default will cause the speed to generally increase.
@@ -1320,23 +1490,23 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    trplot() is a new generic function, and for objects of
+    o   trplot() is a new generic function, and for objects of
         class "qrrvglm" (a cgo() object), it produces a trajectory plot
         for species. 
-    o    vcov.qrrvglm() now computes standard errors and returns the 
+    o   vcov.qrrvglm() now computes standard errors and returns the 
         variance-covariance matrix for rank-1 QRR-VGLMs.
-    o    A new fast algorithm is implemented for cgo(..., FastAlgorithm=TRUE)
+    o   A new fast algorithm is implemented for cgo(..., FastAlgorithm=TRUE)
         which only works under Windows. It is a new undocumented algorithm.
-    o    New family functions: lognormal(), lognormal3(), weibull().
-    o    New family functions: genbetaII(), betaII(), sinmad(), dagum(), 
+    o   New family functions: lognormal(), lognormal3(), weibull().
+    o   New family functions: genbetaII(), betaII(), sinmad(), dagum(), 
         lomax(), invlomax(), fisk(), invparalogistic(), paralogistic().
         Additionally, d*, r*, p* and q* forms of the
         density/random-generation etc.  functions for all of these
         except for betaII and genbetaII.
-    o    New link function for (0,1) parameters: tanl() for tan link.
+    o   New link function for (0,1) parameters: tanl() for tan link.
         It has a heavier tail and corresponds to a Cauchy distribution
         (cf. probit for normal); a short sketch follows after this list.
-    o    New family function: brat() for the Bradley Terry model 
+    o   New family function: brat() for the Bradley Terry model 
         (intercept model only).
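
         A minimal sketch of the tan link mentioned above (tanl() itself
         is not shown in this patch; the expressions below are the standard
         Cauchy quantile function and its inverse, to which the entry says
         the link corresponds):

             p   <- c(0.1, 0.5, 0.9)
             eta <- tan(pi * (p - 0.5))   # heavier-tailed than probit
             0.5 + atan(eta) / pi         # back to (0, 1)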
 
 
@@ -1345,10 +1515,10 @@ NEW FEATURES
 
 NEW FEATURES
 
-    o    I've changed deplot.lmscreg() so that the "at" argument is now
+    o   I've changed deplot.lmscreg() so that the "at" argument is now
         "y.arg", and the density is returned with name "density" instead
         of "y". That is, "at" is now "y", and "y" is now "density".
-    o    lvplot.rrvglm() and biplot.rrvglm() have been merged and are now
+    o   lvplot.rrvglm() and biplot.rrvglm() have been merged and are now
         equivalent.
 
 
@@ -1362,12 +1532,12 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    Updated to work under R 1.8.1 
-    o    logLik() and AIC() methods functions supported for many VGAM objects.
-    o    lms.bcn.control(), lms.bcg.control(), lms.yjn.control() now 
+    o   Updated to work under R 1.8.1 
+    o   logLik() and AIC() methods functions supported for many VGAM objects.
+    o   lms.bcn.control(), lms.bcg.control(), lms.yjn.control() now 
         have trace=TRUE because monitoring LMS quantile regression models
         is a good idea.
-    o    lms.bcn(), lms.bcg(), lms.yjn() now improved.
+    o   lms.bcn(), lms.bcg(), lms.yjn() now improved.
 
 
 
@@ -1376,21 +1546,21 @@ NEW FEATURES
 BUG FIXES
 
     o   biplot.rrvglm() had an internal bug with @C.
-    o    Runs under R 1.8.0 now, having a fix with "qr" slot.
-    o    etastart, coefstart, mustart arguments were not functional in vgam().
-    o    vchol() did not replace the correct elements; sometimes the index
+    o   Runs under R 1.8.0 now, having a fix for the "qr" slot.
+    o   etastart, coefstart, mustart arguments were not functional in vgam().
+    o   vchol() did not replace the correct elements; sometimes the index
         was out of subscript range.
-    o    residuals.vlm() tried to evaluate a deviance slot in a "vglmff" object
+    o   residuals.vlm() tried to evaluate a deviance slot in a "vglmff" object
         even when it was empty.
-    o    Documentation links to functions in other packages now work.
+    o   Documentation links to functions in other packages now work.
 
 NEW FEATURES
 
-    o    lvplot.qrrvglm() has been renamed biplot.qrrvglm(). 
+    o   lvplot.qrrvglm() has been renamed biplot.qrrvglm(). 
         Argument Equal.tolerances changed to EqualTolerances.
         Argument Circular changed to ITolerances.
         rrvglm.control() now split into qrrvglm.control() and itself.
-    o    cgo() now performs canonical Gaussian ordination.
+    o   cgo() now performs canonical Gaussian ordination.
 
 
 
@@ -1403,14 +1573,14 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    gco() is now an easier interface for fitting Gaussian canonical
+    o   gco() is now an easier interface for fitting Gaussian canonical
         ordination models. gco(...) is essentially rrvglm(..., Quadratic=TRUE).
-    o    Documentation for deplot.lmscreg(), qtplot.lmscreg(), cdf.lmscreg()
+    o   Documentation for deplot.lmscreg(), qtplot.lmscreg(), cdf.lmscreg()
         and related functions. Also for positive.poisson(),
         positive.binomial() and yip88(). 
-    o    lvplot.qrrvglm() improved to handle non-diagonal tolerance matrices,
+    o   lvplot.qrrvglm() improved to handle non-diagonal tolerance matrices,
         and a new Rotate option is available for QRR-VGLMs.
-    o    By default, QRR-VGLMs now have the constraint that the latent
+    o   By default, QRR-VGLMs now have the constraint that the latent
         variables are uncorrelated and have unit variances, i.e., 
         their variance-covariance matrix is diag(Rank).
         Also, the Crow1positive argument allows ordinations to be reflected
@@ -1429,19 +1599,19 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    install.packages("VGAM", CRAN="http://www.stat.auckland.ac.nz/~yee") 
+    o   install.packages("VGAM", CRAN="http://www.stat.auckland.ac.nz/~yee") 
         now works for PC and Linux/Unix, i.e., the distribution of the
         VGAM package allows for this type of download.
-    o    poissonff(), quasipoissonff(), binomialff() and 
+    o   poissonff(), quasipoissonff(), binomialff() and 
         quasibinomialff() now handle multiple dispersion parameters when
         mv=TRUE and onedpar=FALSE.
-    o    Generic function predictx(), with methods function for "qrrvglm"
+    o   Generic function predictx(), with methods function for "qrrvglm"
         objects. This solves (with limited functionality) the calibration
         problem.
-    o    predict.qrrvglm() and predict.rrvglm() written (but don't work 100%)
-    o    Coef.rrvglm() now returns an S4 object, which can be printed nicely. 
-    o    summary.qrrvglm() has been improved.
-    o    Documentation for poissonff(), quasipoissonff(), binomialff() and 
+    o   predict.qrrvglm() and predict.rrvglm() written (but don't work 100%)
+    o   Coef.rrvglm() now returns an S4 object, which can be printed nicely. 
+    o   summary.qrrvglm() has been improved.
+    o   Documentation for poissonff(), quasipoissonff(), binomialff() and 
         quasibinomialff(). 
 
 
@@ -1456,7 +1626,7 @@ NEW FEATURES
 
     o    Documentation for lms.bcn(), lms.bcg(), lms.yjn(), and bmi.
          Additionally, the overall documentation has been improved
-        throughout.
+         throughout.
     o    print.Coef.qrrvglm prints the contents of Coef(qrrvglm.object) 
          in a nicer format. It uses S4 features.
 
@@ -1487,8 +1657,8 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    levy() added, plus grc() documentation.
-    o    constraints added to binomialff() and poissonff() since 
+    o   levy() added, plus grc() documentation.
+    o   constraints added to binomialff() and poissonff() since 
         they both handle multivariate responses. 
 
 
@@ -1502,8 +1672,8 @@ BUG FIXES
 
 NEW FEATURES
 
-    o    logff() added.
-    o    The undocumented backchat facility now works for Splus 6.x.
+    o   logff() added.
+    o   The undocumented backchat facility now works for Splus 6.x.
         This should increase the efficiency of vglm() in particular.
         Thanks to Insightful and Dr J. Chambers for helping to get it
         going under the S4 engine. 
@@ -1521,7 +1691,7 @@ BUG FIXES
 NEW FEATURES
 
     o    .Rd documentation included for vglm(), vgam(), rrvglm(), and
-        associated control and class functions, plus smart prediction.
+         associated control and class functions, plus smart prediction.
 
 
 
@@ -1530,9 +1700,9 @@ NEW FEATURES
 NEW FEATURES
 
     o    rrvglm() now has a Quadratic argument to implement the class of 
-    Quadratic Reduced-rank VGLMs, which gives maximum likelihood
-        solutions to Gaussian canonical ordination problems.
-    Documentation is in rrvglm.pdf 
+         Quadratic Reduced-rank VGLMs, which gives maximum likelihood
+         solutions to Gaussian canonical ordination problems.
+         Documentation is in rrvglm.pdf 
 
 
 
@@ -1541,9 +1711,9 @@ NEW FEATURES
 NEW FEATURES
 
     o    vglm() now has an xij argument which implements eta-specific covariates.
-    Documentation is supplied on the VGAM website. 
+         Documentation is supplied on the VGAM website. 
     o    grc() has been written for Goodman's RC association model for a 
-    contingency table. Documentation is in rrvglm.pdf 
+         contingency table. Documentation is in rrvglm.pdf 
 
 
 
diff --git a/R/Links.R b/R/Links.R
index cb3c275..f0ab8ab 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -18,7 +18,7 @@
  dtheta.deta <-
   function(theta,
            link = "identity",
-           earg = list(theta = theta, # Needed
+           earg = list(theta = theta,  # Needed
                        inverse = FALSE,
                        deriv = 1,
                        short = TRUE,
@@ -32,9 +32,9 @@
     warning("apparent conflict in name of link function")
   }
 
-  earg[["theta"]] <- theta # New data
+  earg[["theta"]] <- theta  # New data
 
-  earg[["deriv"]] <- 1 # New
+  earg[["deriv"]] <- 1  # New
 
 
   do.call(what = function.name, args = earg)
@@ -47,7 +47,7 @@
  d2theta.deta2 <- 
   function(theta,
            link = "identity",
-           earg = list(theta = theta, # Needed
+           earg = list(theta = theta,  # Needed
                        inverse = FALSE,
                        deriv = 2,
                        short = TRUE,
@@ -60,9 +60,9 @@
   if (length(function.name2) && function.name != function.name2)
     warning("apparent conflict in name of link function in D2theta.deta2")
 
-  earg[["theta"]] <- theta # New data
+  earg[["theta"]] <- theta  # New data
 
-  earg[["deriv"]] <- 2 # New
+  earg[["deriv"]] <- 2  # New
 
   do.call(what = function.name, args = earg)
 }
@@ -80,7 +80,7 @@
   if (length(function.name2) && function.name != function.name2)
     warning("apparent conflict in name of link function")
 
-  earg[["theta"]] <- theta # New data
+  earg[["theta"]] <- theta  # New data
 
   do.call(what = function.name, args = earg)
 }
@@ -89,7 +89,7 @@
 
 
  eta2theta <-
-  function(theta, # This is really eta.
+  function(theta,  # This is really eta.
            link = "identity",
            earg = list(theta = NULL)) {
 
@@ -120,7 +120,7 @@
     stop("length(earg) == 0 not allowed")
 
 
-  if (llink == 1) { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+  if (llink == 1) {  # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
 
 
     if (is.list(earg[[1]]))
@@ -128,14 +128,14 @@
 
     function.name  <- link
 
-    function.name2 <- attr(earg, "function.name") # May be, e.g., NULL
+    function.name2 <- attr(earg, "function.name")  # May be, e.g., NULL
     if (length(function.name2) && function.name != function.name2)
       warning("apparent conflict in name of link function")
 
 
-    earg[["theta"]] <- theta # New data
+    earg[["theta"]] <- theta  # New data
 
-    earg[["inverse"]] <- TRUE # New
+    earg[["inverse"]] <- TRUE  # New
 
     return(do.call(what = function.name, args = earg))
   }
@@ -148,21 +148,21 @@
 
 
 
-   if (!is.matrix(theta) &&
-       length(theta) == length(earg))
-     theta <- rbind(theta)
+ if (!is.matrix(theta) &&
+     length(theta) == length(earg))
+   theta <- rbind(theta)
 
 
-    ans <- NULL
-    for(iii in 1:llink) {
-        use.earg <- earg[[iii]]
-        use.earg[["inverse"]] <- TRUE # New
-        use.earg[["theta"]] <- theta[, iii] # New
-        use.function.name <- link[iii]
+  ans <- NULL
+  for (iii in 1:llink) {
+    use.earg <- earg[[iii]]
+    use.earg[["inverse"]] <- TRUE  # New
+    use.earg[["theta"]] <- theta[, iii]  # New
+    use.function.name <- link[iii]
 
-        ans <- cbind(ans, do.call(what = use.function.name,
-                                  args = use.earg))
-      }
+    ans <- cbind(ans, do.call(what = use.function.name,
+                              args = use.earg))
+  }
 
   if (length(orig.earg) == ncol(ans) &&
       length(names(orig.earg)) > 0 &&
@@ -175,6 +175,31 @@
 
 
 
+
+
+ namesof <- function(theta,
+                     link = "identity",
+                     earg = list(tag = tag, short = short),
+                     tag = FALSE,
+                     short = TRUE) {
+
+  funname.only <- strsplit(as.character(link), "(", fixed = TRUE)
+  funname.only <- (funname.only[[1]])[1]
+  link <- funname.only
+
+  earg[["theta"]] <- as.character(theta)
+
+  earg[["tag"]] <- tag
+  earg[["short"]] <- short
+
+
+  do.call(link, args = earg)
+}
+
+
+
+
+if (FALSE)
  namesof <- function(theta,
                      link = "identity",
                      earg = list(tag = tag, short = short),
@@ -204,7 +229,7 @@ link2list <- function(link
   big.list <- as.list(as.function(get(fun.name)))
 
 
-  big.list[[length(big.list)]] <- NULL # Kill the body of code
+  big.list[[length(big.list)]] <- NULL  # Kill the body of code
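
An illustrative round trip through the link helpers above (a sketch only:
it assumes the usual VGAM pairing of theta2eta() with eta2theta() and the
default 'earg' values):

    theta <- c(0.25, 0.50, 0.75)
    eta   <- theta2eta(theta, link = "logit")    # parameter -> predictor scale
    eta2theta(eta, link = "logit")               # predictor -> back to (0, 1)
    namesof("prob", link = "logit", tag = TRUE)  # e.g. "logit(prob)"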
 
 
 
diff --git a/R/aamethods.q b/R/aamethods.q
index da6810f..14612ee 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -9,11 +9,11 @@
 
 
 
-is.Numeric <- function(x, allowable.length = Inf,
+is.Numeric <- function(x, length.arg = Inf,
                        integer.valued = FALSE, positive = FALSE)
     if (all(is.numeric(x)) && all(is.finite(x)) &&
-    (if (is.finite(allowable.length))
-       length(x) == allowable.length else TRUE) &&
+    (if (is.finite(length.arg))
+       length(x) == length.arg else TRUE) &&
     (if (integer.valued) all(x == round(x)) else TRUE) &&
     (if (positive) all(x>0) else TRUE)) TRUE else FALSE
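
An illustrative use of the renamed 'length.arg' argument of is.Numeric()
(the behaviour follows directly from the definition above):

    is.Numeric(3, length.arg = 1, integer.valued = TRUE, positive = TRUE)  # TRUE
    is.Numeric(c(1, NA), length.arg = 2)    # FALSE: NA is not finite
    is.Numeric(2.5, integer.valued = TRUE)  # FALSE: not integer-valued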
 
@@ -24,17 +24,11 @@ VGAMenv <- new.env()
 
 
 
-.onLoad <- function(lib, pkg)
-  require(methods)  # 25/1/05
  
  
  
-if (!any(search() == "package:methods"))
-    library(methods)
 
 
-if (!any(search() == "package:splines"))
-    require(splines)
 
 
 
@@ -56,7 +50,7 @@ setClass("vglmff", representation(
       "deviance"     = "function",
       "fini"         = "expression",
       "first"        = "expression",
-      "infos"        = "function", # Added 20101203
+      "infos"        = "function",  # Added 20101203
       "initialize"   = "expression",
       "last"         = "expression",
       "linkfun"      = "function",
@@ -67,21 +61,22 @@ setClass("vglmff", representation(
       "summary.dispersion"  = "logical",
       "vfamily"      = "character",
       "deriv"        = "expression",
-      "weight"       = "expression"), #  "call"
+      "weight"       = "expression"),  #  "call"
 prototype = .VGAM.prototype.list)
 
 
-valid.vglmff = function(object) {
-    compulsory = c("initialize", "weight", "deriv", "linkinv")
-    for(ii in compulsory) {
-        if (!length(slot(object, ii)))
-            stop("slot ", ii, " is empty")
-    }
+valid.vglmff <- function(object) {
+  compulsory <- c("initialize", "weight", "deriv", "linkinv")
+  for (ii in compulsory) {
+    if (!length(slot(object, ii)))
+        stop("slot ", ii, " is empty")
+  }
 
-    if (length(as.list(object@linkinv)) != 3)
-        stop("wrong number of arguments in object@linkinv")
+  if (length(as.list(object@linkinv)) != 3)
+    stop("wrong number of arguments in object@linkinv")
 }
 
+
 if (FALSE) 
     setValidity("vglmff", valid.vglmff)
 
@@ -94,6 +89,7 @@ if (FALSE)
 
 
 
+
 show.vglmff <- function(object) {
  f <- object@vfamily
   if (is.null(f))
@@ -106,7 +102,7 @@ show.vglmff <- function(object) {
     cat("Informal classes:", paste(f, collapse = ", "), "\n")
   cat("\n")
 
-  for(ii in 1:length(nn))
+  for (ii in 1:length(nn))
     cat(nn[ii])
   cat("\n")
 
@@ -135,6 +131,10 @@ setMethod("show", "vglmff",
 
 
 
+
+
+
+
 setClass("vlmsmall", representation(
       "call"         = "call",
       "coefficients" = "numeric",
@@ -167,7 +167,7 @@ setClass("vlm", representation(
       "qr"           = "list",
       "R"            = "matrix",
       "rank"         = "integer",
-      "rss"          = "numeric",
+      "res.ss"       = "numeric",
       "smart.prediction" = "list",
       "terms"        = "list",
       "Xm2"          = "matrix",
@@ -187,7 +187,7 @@ setClass("vglm", representation(
 
 
 setClass("vgam", representation(
-      "Bspline"             = "list", # each [[i]] is a "vsmooth.spline.fit"
+      "Bspline"             = "list",  # each [[i]] is a "vsmooth.spline.fit"
       "nl.chisq"            = "numeric",
       "nl.df"               = "numeric",
       "spar"                = "numeric",
@@ -311,7 +311,7 @@ new("vglm", "extra"=from@extra,
 "R"=from@R,
 "rank"=from@rank,
 "residuals"=from@residuals,
- "rss"=from@rss,
+ "res.ss"=from@res.ss,
 "smart.prediction"=from@smart.prediction,
 "terms"=from@terms,
 "weights"=from@weights,
@@ -383,7 +383,7 @@ if (!isGeneric("vcov"))
 
 
 setClass("uqo", representation(
-      "lv"               = "matrix",
+      "latvar"           = "matrix",
       "extra"            = "list",
       "family"           = "vglmff",
       "iter"             = "numeric",
@@ -400,9 +400,19 @@ setGeneric("lvplot", function(object, ...) standardGeneric("lvplot"),
            package = "VGAM")
 
 
-if (!isGeneric("ccoef"))
-    setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"),
-           package = "VGAM")
+
+ if (!isGeneric("ccoef"))
+    setGeneric("ccoef", function(object, ...) {
+    .Deprecated("concoef")
+
+    standardGeneric("ccoef")
+    })
+
+ if (!isGeneric("concoef"))
+    setGeneric("concoef", function(object, ...) {
+    standardGeneric("concoef")
+    })
+
 
 
 
diff --git a/R/bAIC.q b/R/bAIC.q
index 2021cfd..a4d6037 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -9,7 +9,6 @@
 
 
 
-if (TRUE) {
 
 
 
@@ -20,138 +19,318 @@ if (!isGeneric("AIC"))
 
 
 
-AICvlm = function(object, ..., k = 2) {
-    estdisp = object@misc$estimated.dispersion
-    no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
-        length(object@misc$dispersion) else 0
-    -2 * logLik.vlm(object, ...) + k * (length(coefvlm(object)) + no.dpar)
+
+
+check.omit.constant <- function(object) {
+
+
+
+  if (is.logical(object@misc$needto.omit.constant) &&
+       object@misc$needto.omit.constant &&
+      !object@misc$omit.constant)
+    warning("Probably 'omit.constant = TRUE' should have been set. ",
+            "See the family function '",
+            object@family@vfamily[1],
+            "' help file.")
+
 }
 
 
-AICvgam = function(object, ..., k = 2) {
-    estdisp = object@misc$estimated.dispersion
-    no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
-        length(object@misc$dispersion) else 0 
-    nldf = if (is.Numeric(object@nl.df)) sum(object@nl.df) else 0
-    -2 * logLik.vlm(object, ...) +
-    k * (length(coefvlm(object)) + no.dpar + nldf)
+
+
+AICvlm <- function(object, ..., 
+                   corrected = FALSE,
+                   k = 2) {
+  estdisp <- object@misc$estimated.dispersion
+
+
+  check.omit.constant(object)
+
+
+  no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
+    length(object@misc$dispersion) else 0
+
+  tot.par <- length(coefvlm(object)) + no.dpar
+  ans <- (-2) * logLik.vlm(object, ...) + k * tot.par
+
+  if (corrected) {
+    ans <- ans + k * tot.par * (tot.par + 1) / (
+           nobs(object) - tot.par - 1)
+  }
+  ans
 }
 
 
-AICrrvglm = function(object, ..., k = 2) {
-    estdisp = object@misc$estimated.dispersion
-    no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
-        length(object@misc$dispersion) else 0 
-    szero = object@control$szero
-    MMM = object@misc$M
-    Rank = object@control$Rank
-    elts.tildeA = (MMM - Rank - length(szero)) * Rank
-    -2 * logLik.vlm(object, ...) +
-    k * (length(coefvlm(object)) + no.dpar + elts.tildeA)
+
+
+AICvgam <- function(object, ...,
+                    k = 2) {
+
+  estdisp <- object@misc$estimated.dispersion
+
+
+  check.omit.constant(object)
+
+
+  no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
+             length(object@misc$dispersion) else 0 
+  nldf <- if (is.Numeric(object@nl.df)) sum(object@nl.df) else 0
+
+  -2 * logLik.vlm(object, ...) +
+  k * (length(coefvlm(object)) + no.dpar + nldf)
+
 }
 
 
-AICqrrvglm = function(object, ..., k = 2) {
-
-    estdisp = object@misc$estimated.dispersion
-    no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
-        length(object@misc$dispersion) else 0 
-    szero = object@control$szero
-    MMM = object@misc$M
-    Rank = object@control$Rank
-    elts.tildeA = (MMM - Rank - length(szero)) * Rank
-
-    EqualTolerances = object@control$EqualTolerances
-    ITolerances = object@control$ITolerances
-    if (!(length(EqualTolerances) == 1 && is.logical(EqualTolerances)))
-      stop("could not determine whether the fitted object used an ",
-           "equal-tolerances assumption based on ",
-           "argument 'EqualTolerances'")
-    if (!(length(ITolerances) == 1 && is.logical(ITolerances)))
-      stop("could not determine whether the fitted object used an ",
-           "equal-tolerances assumption based on argument 'ITolerances'")
-    NOS = if (length(object@y)) ncol(object@y) else MMM
-    MSratio = MMM / NOS  # First value is g(mean) = quadratic form in l
-    if (round(MSratio) != MSratio) stop("'MSratio' is not an integer")
-    elts.D = ifelse(ITolerances || EqualTolerances, 1, NOS) * Rank*(Rank+1)/2
-
-    deviance(object, ...) +
-    k * (length(coefvlm(object)) + no.dpar + elts.tildeA + elts.D)
+
+
+AICrrvglm <- function(object, ...,
+                      k = 2) {
+
+
+  check.omit.constant(object)
+
+
+  estdisp <- object@misc$estimated.dispersion
+  no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
+    length(object@misc$dispersion) else 0 
+  str0 <- object@control$str0
+  MMM <- object@misc$M
+  Rank <- object@control$Rank
+  elts.tildeA <- (MMM - Rank - length(str0)) * Rank
+
+
+
+  -2 * logLik.vlm(object, ...) +
+  k * (length(coefvlm(object)) + no.dpar + elts.tildeA)
 }
 
 
 
 
+AICqrrvglm <- function(object, ...,
+                       k = 2) {
+
+
+  check.omit.constant(object)
+
+
+  estdisp <- object@misc$estimated.dispersion
+  no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
+             length(object@misc$dispersion) else 0 
+  str0 <- object@control$str0
+  MMM <- object@misc$M
+  Rank <- object@control$Rank
+  elts.tildeA <- (MMM - Rank - length(str0)) * Rank
+
+
+
+
+  EqualTolerances <- object@control$EqualTolerances
+  ITolerances <- object@control$ITolerances
+  if (!(length(EqualTolerances) == 1 && is.logical(EqualTolerances)))
+    stop("could not determine whether the fitted object used an ",
+         "equal-tolerances assumption based on ",
+         "argument 'EqualTolerances'")
+  if (!(length(ITolerances) == 1 && is.logical(ITolerances)))
+    stop("could not determine whether the fitted object used an ",
+         "equal-tolerances assumption based on argument 'ITolerances'")
+
+
+  NOS <- if (length(object@y)) ncol(object@y) else MMM
+  MSratio <- MMM / NOS  # First value is g(mean) = quadratic form in l
+  if (round(MSratio) != MSratio)
+    stop("variable 'MSratio' is not an integer")
+  elts.D <- ifelse(ITolerances || EqualTolerances, 1, NOS) *
+            Rank * (Rank + 1) / 2
+
+
+
+
+
+
+
+  loglik.try <- logLik.qrrvglm(object, ...)
+  if (!is.numeric(loglik.try))
+    warning("cannot compute the log-likelihood of 'object'. ",
+            "Returning NULL")
+
+
+
+
+  elts.B1 <- length(object@extra$B1)
+  elts.C  <- length(object@extra$Cmat)
+  num.params <- elts.B1 + elts.tildeA  + elts.D + elts.C
+
+
+  if (is.numeric(loglik.try)) {
+    (-2) * loglik.try     + k * num.params
+  } else {
+
+    NULL
+  }
+}
+
+
+
+
+
+ 
+ AICcao    <- function(object, ...,
+                       k = 2) {
+
+
+  check.omit.constant(object)
+
+
+  estdisp <- object@misc$estimated.dispersion
+  no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
+             length(object@misc$dispersion) else 0 
+  str0 <- object@control$str0
+  MMM <- object@misc$M
+  Rank <- object@control$Rank
+
+
+
+
+  NOS <- if (length(object@y)) ncol(object@y) else MMM
+  MSratio <- MMM / NOS  # First value is g(mean) = quadratic form in l
+  if (round(MSratio) != MSratio)
+    stop("variable 'MSratio' is not an integer")
+
+
+
+
+  loglik.try <- logLik(object, ...)
+  if (!is.numeric(loglik.try))
+    warning("cannot compute the log-likelihood of 'object'. ",
+            "Returning NULL")
+
+
+
+
+  elts.B1     <- length(object@extra$B1)  # 0 since a NULL
+  elts.C      <- length(object@extra$Cmat)
+  elts.df1.nl <-    sum(object@extra$df1.nl)
+
+  num.params <- elts.B1 + elts.C + (
+                2 * length(object@extra$df1.nl) + elts.df1.nl) -
+                (Rank + length(str0)) * Rank
+
+
+  if (is.numeric(loglik.try)) {
+    (-2) * loglik.try     + k * num.params
+  } else {
+
+    NULL
+  }
+}
+
+
+
+
+
 setMethod("AIC", "vlm",
          function(object, ..., k = 2)
-         AICvlm(object, ..., k = k))
+           AICvlm(object, ..., k = k))
 
 setMethod("AIC", "vglm",
          function(object, ..., k = 2)
-         AICvlm(object, ..., k = k))
+           AICvlm(object, ..., k = k))
 
 setMethod("AIC", "vgam",
          function(object, ..., k = 2)
-         AICvgam(object, ..., k = k))
+          AICvgam(object, ..., k = k))
 
 setMethod("AIC", "rrvglm",
-         function(object, ..., k = 2)
-         AICrrvglm(object, ..., k = k))
+           function(object, ..., k = 2)
+          AICrrvglm(object, ..., k = k))
 
 setMethod("AIC", "qrrvglm",
+            function(object, ..., k = 2)
+          AICqrrvglm(object, ..., k = k))
+
+
+setMethod("AIC", "cao",
+          function(object, ..., k = 2)
+            AICcao(object, ..., k = k))
+
+
+
+
+if (!isGeneric("AICc"))
+  setGeneric("AICc", function(object, ..., k = 2)
+             standardGeneric("AICc"),
+             package = "VGAM")
+
+
+setMethod("AICc", "vlm",
          function(object, ..., k = 2)
-         AICqrrvglm(object, ..., k = k))
-}
+         AICvlm(object, ..., corrected = TRUE, k = k))
 
+setMethod("AICc", "vglm",
+         function(object, ..., k = 2)
+         AICvlm(object, ..., corrected = TRUE, k = k))
 
 
 
 
 
 
-if (FALSE) {
 
 
 
-AICvglm = function(object, ..., k = 2) {
-    crit = logLik.vlm(object, ...)
-    -2 * crit + k * length(coef(object))
-}
 
 
 
 
 
-AICrrvglm = function(object, ..., k = 2) {
-  stop("not working yet")
-  crit = logLik.vlm(object)
-  sign = -2
-  if (!length(crit) || !is.numeric(crit)) {
-      crit = deviance(object)
-      sign = 1
-  }
-  if (!length(crit) || !is.numeric(crit))
-    stop("cannot get at the deviance or loglikelihood of the object")
 
-  sign * crit + 2 * (length(coef(object)) +
-  object@control$rank * (object@misc$M - object@control$rank))
+
+
+if (!isGeneric("BIC"))
+  setGeneric("BIC", function(object, ..., k = log(nobs(object)))
+             standardGeneric("BIC"),
+             package = "VGAM")
+
+
+BICvlm <- function(object, ..., k = log(nobs(object))) {
+  AICvlm(object, ..., k = k)
 }
 
 
 
+setMethod("BIC", "vlm",
+          function(object, ..., k = log(nobs(object)))
+            BICvlm(object, ..., k = k))
+
+setMethod("BIC", "vglm",
+          function(object, ..., k = log(nobs(object)))
+            BICvlm(object, ..., k = k))
+
+setMethod("BIC", "vgam",
+          function(object, ..., k = log(nobs(object)))
+           AICvgam(object, ..., k = k))
+
+setMethod("BIC", "rrvglm",
+           function(object, ..., k = log(nobs(object)))
+          AICrrvglm(object, ..., k = k))
+
+setMethod("BIC", "qrrvglm",
+            function(object, ..., k = log(nobs(object)))
+          AICqrrvglm(object, ..., k = k))
+
+
+setMethod("BIC", "cao",
+          function(object, ..., k = log(nobs(object)))
+            AICcao(object, ..., k = k))
+
 
-setMethod("AIC", signature(object = "vglm"),
-           function(object, ..., k = 2)
-           AICvglm(object, ..., k = k))
 
 
 
-setMethod("AIC", signature(object = "rrvglm"),
-           function(object, ..., k = 2)
-           AICrrvglm(object, ..., k = k))
 
 
-}
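
An illustrative use of the criteria wired up above, for a hypothetical
fitted vglm() object called 'fit'; here n = nobs(fit) and p is the number
of estimated parameters (including any estimated dispersion parameters):

    AIC(fit)   # -2 * logLik(fit) + 2 * p
    AICc(fit)  # AIC(fit) + 2 * p * (p + 1) / (n - p - 1)
    BIC(fit)   # same as AIC(fit) but with penalty k = log(n)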
 
 
 
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 45212cd..df6dcd4 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -4,21 +4,21 @@
 
 
 
-    if (!isGeneric("terms"))
-        setGeneric("terms", function(x, ...) standardGeneric("terms"))
+if (!isGeneric("terms"))
+  setGeneric("terms", function(x, ...) standardGeneric("terms"))
 
 
 
 
 
-terms.vlm = function(x, ...) {
-    v = x@terms
-    if (!length(v))
-        stop("terms slot is empty")
-    v = v$terms
-    if (!length(v))
-        stop("no terms component")
-    v
+terms.vlm <- function(x, ...) {
+  v <- x@terms
+  if (!length(v))
+    stop("terms slot is empty")
+  v <- v$terms
+  if (!length(v))
+    stop("no terms component")
+  v
 }
 
 
@@ -28,63 +28,66 @@ setMethod("terms", "vlm", function(x, ...) terms.vlm(x, ...))
 
 
 
-Build.terms.vlm = function(x, coefs, cov = NULL, assign, collapse = TRUE, M,
-                           dimname=NULL, coefmat = NULL) {
+Build.terms.vlm <-
+  function(x, coefs, cov = NULL, assign, collapse = TRUE, M,
+           dimname = NULL, coefmat = NULL) {
 
 
-    cov.true = !is.null(cov)
-    if (collapse) {
-        fit = matrix(x %*% coefs, ncol=M, byrow=TRUE)
-        dimnames(fit) = dimname
-        if (M==1)
-            fit = c(fit)
-        if (cov.true) {
-            var = ((x %*% cov) * x) %*% rep(1, length(coefs))
-            list(fitted.values = fit, se.fit = if (M==1) c(sqrt(var)) else 
-                 matrix(sqrt(var), ncol=M, byrow=TRUE, dimnames=dimname))
-        } else {
-            fit
-        }
+  cov.true <- !is.null(cov)
+  if (collapse) {
+    fit <- matrix(x %*% coefs, ncol = M, byrow = TRUE)
+    dimnames(fit) <- dimname
+    if (M == 1)
+      fit <- c(fit)
+    if (cov.true) {
+      var <- ((x %*% cov) * x) %*% rep(1, length(coefs))
+      list(fitted.values = fit,
+           se.fit = if (M == 1) c(sqrt(var)) else
+                    matrix(sqrt(var), ncol = M,
+                           byrow = TRUE, dimnames = dimname))
     } else {
+      fit
+    }
+  } else {
 
-        constant = attr(x, "constant")
-        if (!is.null(constant)) {
-            constant = as.vector( t(coefmat) %*% constant )
-        }
+    constant <- attr(x, "constant")
+    if (!is.null(constant)) {
+      constant <- as.vector( t(coefmat) %*% constant )
+    }
     
-        if (missing(assign))
-            assign = attr(x, "assign")
-        if (is.null(assign))
-            stop("Need an 'assign' list")
-        fit = array(0, c(nrow(x), length(assign)),
-                    list(dimnames(x)[[1]], names(assign)))
+    if (missing(assign))
+      assign <- attr(x, "assign")
+    if (is.null(assign))
+      stop("Need an 'assign' list")
+    fit <- array(0, c(nrow(x), length(assign)),
+                 list(dimnames(x)[[1]], names(assign)))
+    if (cov.true)
+      se <- fit
+    TL <- sapply(assign, length)
+    simple <- TL == 1
+    complex <- TL > 1
+    if (any(simple)) {
+      asss <- unlist(assign[simple])
+      ones <- rep(1, nrow(x))
+      fit[, simple] <- x[, asss] * outer(ones, coefs[asss])
+      if (cov.true)
+        se[, simple] <- abs(x[, asss]) * outer(ones, sqrt(diag(cov))[asss])
+    }
+    if (any(complex)) {
+      assign <- assign[complex]
+      for (term in names(assign)) {
+        TT <- assign[[term]]
+        xt <- x[, TT]
+        fit[, term] <- xt %*% coefs[TT]
         if (cov.true)
-            se = fit
-        TL = sapply(assign, length)
-        simple = TL == 1
-        complex = TL > 1
-        if (any(simple)) {
-            asss = unlist(assign[simple])
-            ones = rep(1, nrow(x))
-            fit[, simple] = x[, asss] * outer(ones, coefs[asss])
-            if (cov.true)
-                se[,simple] = abs(x[,asss]) * outer(ones, sqrt(diag(cov))[asss])
-        }
-        if (any(complex)) {
-            assign = assign[complex]
-            for(term in names(assign)) {
-                TT = assign[[term]]
-                xt = x[, TT]
-                fit[, term] = xt %*% coefs[TT]
-                if (cov.true)
-                  se[, term] = sqrt(drop(((xt %*% cov[TT, TT]) * xt) %*%
-                               rep(1, length(TT))))
-            }
-        }
-        attr(fit, "constant") = constant
-    
-        if (cov.true) list(fitted.values = fit, se.fit = se) else fit
+          se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) * xt) %*%
+                                    rep(1, length(TT))))
+      }
     }
+    attr(fit, "constant") <- constant
+    
+    if (cov.true) list(fitted.values = fit, se.fit = se) else fit
+  }
 }
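
An illustrative call of the accessor defined above, for a hypothetical
fitted vglm() object called 'fit':

    terms(fit)  # the "terms" object stored in fit@terms$terms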
 
 
diff --git a/R/calibrate.q b/R/calibrate.q
index 22723a0..e9dd31b 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -9,289 +9,341 @@
 
 
 
-calibrate.qrrvglm.control = function(object,
-        trace=FALSE,  # passed into optim()
-        Method.optim="BFGS",   # passed into optim(method=Method)
-        gridSize = if (Rank==1) 9 else 5,
-        varlvI = FALSE, ...) {
-
-    Rank = object@control$Rank
-    EqualTolerances = object@control$EqualTolerances
-    if (!is.Numeric(gridSize, positive = TRUE,
-                    integer.valued = TRUE, allowable.length = 1))
-      stop("bad input for 'gridSize'")
-    if (gridSize < 2)
-      stop("'gridSize' must be >= 2")
-
-    list(# maxit=Maxit.optim,   # Note the name change
-         trace=as.numeric(trace)[1],
-         Method.optim=Method.optim,
-         gridSize=gridSize,
-         varlvI = as.logical(varlvI)[1])
-}
 
 
-if(!isGeneric("calibrate"))
-    setGeneric("calibrate",
-               function(object, ...) standardGeneric("calibrate"))
 
 
-calibrate.qrrvglm = function(object, 
-                             newdata = NULL,
-                        type = c("lv","predictors","response","vcov","all3or4"),
-                             initial.vals = NULL, ...) {
 
-    Quadratic = if (is.logical(object@control$Quadratic))
-                object@control$Quadratic else FALSE  # T if CQO, F if CAO
 
-    if (!length(newdata)) {
-        if (!length(object@y)) stop("no newdata") else
-        newdata = data.frame(object@y)
-    }
 
-    if (mode(type) != "character" && mode(type) != "name")
-        type <- as.character(substitute(type))
-    type <- match.arg(type, c("lv","predictors","response","vcov","all3or4"))[1]
-
-    if (!Quadratic && type=="vcov")
-        stop("cannot have 'type=\"vcov\"' when object is a \"cao\" object")
-
-    if (is.vector(newdata))
-        newdata = rbind(newdata)
-    if (!is.matrix(newdata))
-        newdata = as.matrix(newdata)
-    newdata = newdata[,object@misc$ynames,drop=FALSE]
-
-    obfunct = slot(object@family, object@misc$criterion) # Objective function
-    minimize.obfunct = if (Quadratic) object@control$min.criterion else
-        TRUE  # Logical; TRUE for CAO objects because deviance is minimized
-    if (!is.logical(minimize.obfunct)) 
-        stop("object@control$min.criterion is not a logical")
-    optim.control = calibrate.qrrvglm.control(object=object, ...) # For cao too
-
-    if ((Rank <- object@control$Rank) > 2)
-        stop("currently can only handle Rank=1 and 2")
-    Coefobject = if (Quadratic) {
-        Coef(object, varlvI=optim.control$varlvI)
-    } else {
-        Coef(object)
-    }
-    if (!length(initial.vals)) {
-        L = apply(Coefobject@lv, 2, min)
-        U = apply(Coefobject@lv, 2, max)
-        initial.vals = if (Rank==1)
-            cbind(seq(L, U, length=optim.control$gridSize)) else
-            expand.grid(seq(L[1], U[1], length=optim.control$gridSize),
-                        seq(L[2], U[2], length=optim.control$gridSize))
-    }
-    ok = length(object@control$colx1.index)==1 &&
-         names(object@control$colx1.index) == "(Intercept)"
-    if (!ok) stop("The x1 vector must be an intercept only")
-
-    nn = nrow(newdata)
-    BestOFpar = NULL   # It may be more efficient not to append 
-    BestOFvalues = NULL   # Best OF objective function values
-    for(i1 in 1:nn) {
-        if (optim.control$trace)
-            cat("\nOptimizing for observation", i1, "-----------------\n")
-        OFvalues = OFpar = NULL   # OF means objective function
-        for(ii in 1:nrow(initial.vals)) {
-            if (optim.control$trace) {
-                cat("Starting from grid-point", ii, ":")
-                flush.console()
-            }
-            ans = if (is.R()) {
-                if (Quadratic)
-                optim(par=initial.vals[ii,],
-                      fn=.my.calib.objfunction.qrrvglm,
-                      method=optim.control$Method.optim,  # "BFGS", or "CG" or ...
-                      control = c(fnscale=ifelse(minimize.obfunct,1,-1),
-                                optim.control),
-                      y=newdata[i1,],
-                      extra=object@extra,
-                      objfun=obfunct,
-                      Coefs=Coefobject,
-                      misc.list = object@misc,
-                      everything = FALSE,
-                      mu.function = slot(object@family, "inverse")) else
-                optim(par=initial.vals[ii,],
-                      fn=.my.calib.objfunction.cao,
-                      method=optim.control$Method.optim,  # "BFGS", or "CG" or ...
-                      control = c(fnscale=ifelse(minimize.obfunct,1,-1),
-                                optim.control),
-                      y=newdata[i1,],
-                      extra=object@extra,
-                      objfun=obfunct,
-                      object=object,
-                      Coefs=Coefobject,
-                      misc.list = object@misc,
-                      everything = FALSE,
-                      mu.function = slot(object@family, "inverse"))
-            } else 
-                stop("not implemented in S-PLUS yet")
-
-            if (optim.control$trace) {
-                if (ans$convergence == 0)
-                    cat("Successful convergence\n") else 
-                    cat("Unsuccessful convergence\n")
-                    flush.console()
-            }
-            if (ans$convergence == 0) {
-                OFvalues = c(OFvalues, ans$value)
-                OFpar = rbind(OFpar, ans$par)
-            }
+
+
+
+calibrate.qrrvglm.control <-
+  function(object,
+           trace = FALSE,  # passed into optim()
+           Method.optim = "BFGS",   # passed into optim(method = Method)
+           gridSize = if (Rank == 1) 9 else 5,
+         varI.latvar = FALSE, ...) {
+
+  Rank <- object@control$Rank
+  EqualTolerances <- object@control$EqualTolerances
+  if (!is.Numeric(gridSize, positive = TRUE,
+                  integer.valued = TRUE, length.arg = 1))
+    stop("bad input for 'gridSize'")
+  if (gridSize < 2)
+    stop("'gridSize' must be >= 2")
+
+  list(
+       trace = as.numeric(trace)[1],
+       Method.optim = Method.optim,
+       gridSize = gridSize,
+       varI.latvar = as.logical(varI.latvar)[1])
+}
+
+
+
+
+if (!isGeneric("calibrate"))
+    setGeneric("calibrate",
+               function(object, ...) standardGeneric("calibrate"))
+
+
+ 
+ 
+ 
+calibrate.qrrvglm <-
+  function(object, 
+           newdata = NULL,
+           type = c("latvar", "predictors", "response", "vcov", "all3or4"),
+           initial.vals = NULL, ...) {
+
+
+  Quadratic <- if (is.logical(object@control$Quadratic))
+               object@control$Quadratic else FALSE  # T if CQO, F if CAO
+
+  if (!length(newdata)) {
+    if (!length(object@y))
+      stop("no newdata") else
+      newdata <- data.frame(object at y)
+  }
+
+  if (mode(type) != "character" && mode(type) != "name")
+    type <- as.character(substitute(type))
+  type <- match.arg(type, c("latvar", "predictors",
+                            "response", "vcov", "all3or4"))[1]
+
+  if (!Quadratic && type == "vcov")
+    stop("cannot have 'type=\"vcov\"' when object is ",
+         "a \"cao\" object")
+
+  if (is.vector(newdata))
+    newdata <- rbind(newdata)
+  if (!is.matrix(newdata))
+    newdata <- as.matrix(newdata)
+  newdata <- newdata[, object@misc$ynames, drop = FALSE]
+
+  obfunct <- slot(object@family, object@misc$criterion)
+  minimize.obfunct <-
+    if (Quadratic) object@control$min.criterion else
+    TRUE  # Logical; TRUE for CAO objects because deviance is minimized
+  if (!is.logical(minimize.obfunct)) 
+    stop("object@control$min.criterion is not a logical")
+  optim.control <- calibrate.qrrvglm.control(object = object, ...)
+
+  use.optim.control <- optim.control
+  use.optim.control$Method.optim <-
+  use.optim.control$gridSize <-
+  use.optim.control$varI.latvar <- NULL
+
+
+  if ((Rank <- object@control$Rank) > 2)
+    stop("currently can only handle Rank = 1 and 2")
+  Coefobject <- if (Quadratic) {
+    Coef(object, varI.latvar = optim.control$varI.latvar)
+  } else {
+    Coef(object)
+  }
+  if (!length(initial.vals)) {
+    L <- apply(Coefobject@latvar, 2, min)
+    U <- apply(Coefobject@latvar, 2, max)
+    initial.vals <- if (Rank == 1)
+        cbind(seq(L, U, length = optim.control$gridSize)) else
+        expand.grid(seq(L[1], U[1], length = optim.control$gridSize),
+                    seq(L[2], U[2], length = optim.control$gridSize))
+  }
+  okay <- length(object@control$colx1.index) == 1 &&
+           names(object@control$colx1.index) == "(Intercept)"
+  if (!okay)
+    stop("The x1 vector must be an intercept only")
+
+  nn <- nrow(newdata)
+  BestOFpar <- NULL   # It may be more efficient not to append 
+  BestOFvalues <- NULL   # Best OF objective function values
+  for (i1 in 1:nn) {
+    if (optim.control$trace)
+      cat("\nOptimizing for observation", i1, "-----------------\n")
+    OFvalues <- OFpar <- NULL   # OF means objective function
+    for (ii in 1:nrow(initial.vals)) {
+      if (optim.control$trace) {
+        cat("Starting from grid-point", ii, ":")
+        flush.console()
+      }
+      ans <- if (Quadratic)
+        optim(par = initial.vals[ii, ],
+              fn = .my.calib.objfunction.qrrvglm,
+              method = optim.control$Method.optim,  # "BFGS" or "CG" or...
+              control = c(fnscale = ifelse(minimize.obfunct, 1, -1),
+                          use.optim.control),
+              y = newdata[i1, ],
+              extra = object@extra,
+              objfun = obfunct,
+              Coefs = Coefobject,
+              misc.list = object@misc,
+              everything = FALSE,
+              mu.function = slot(object@family, "linkinv")) else
+        optim(par = initial.vals[ii, ],
+              fn = .my.calib.objfunction.cao,
+              method = optim.control$Method.optim,  # "BFGS" or "CG" or...
+              control = c(fnscale = ifelse(minimize.obfunct, 1, -1),
+                          use.optim.control),
+              y = newdata[i1, ],
+              extra = object@extra,
+                objfun = obfunct,
+                object = object,
+                Coefs = Coefobject,
+                misc.list = object@misc,
+                everything = FALSE,
+                mu.function = slot(object@family, "linkinv"))
+
+        if (optim.control$trace) {
+          if (ans$convergence == 0)
+            cat("Successful convergence\n") else 
+            cat("Unsuccessful convergence\n")
+          flush.console()
         }
-        if (length(OFpar)) {
-            index = if (minimize.obfunct)
-                    (1:nrow(OFpar))[OFvalues==min(OFvalues)] else
-                    (1:nrow(OFpar))[OFvalues==max(OFvalues)]
-            if (length(index) > 1) {
-                warning(paste("multiple solutions found for observation ", i1,
-                              ". Choosing one randomly.", sep=""))
-                index = sample(index, size=1)
-            } else if (length(index) == 0)
-                stop("length(index) is zero")
-            BestOFpar = rbind(BestOFpar, OFpar[index,])
-            BestOFvalues = c(BestOFvalues, OFvalues[index])
-        } else {
-            BestOFpar = rbind(BestOFpar, rep(as.numeric(NA), len=Rank))
-            BestOFvalues = c(BestOFvalues, NA)
+        if (ans$convergence == 0) {
+          OFvalues <- c(OFvalues, ans$value)
+          OFpar <- rbind(OFpar, ans$par)
         }
     }
-
-    pretty = function(BestOFpar, newdata, Rank) {
-        if (Rank==1) {
-            BestOFpar = c(BestOFpar) 
-            names(BestOFpar) = dimnames(newdata)[[1]]
-        } else
-            dimnames(BestOFpar) = list(dimnames(newdata)[[1]],
-                if (Rank==1) "lv" else paste("lv", 1:Rank, sep=""))
-        BestOFpar
-    }
-
-    if (type=="lv") {
-        BestOFpar = pretty(BestOFpar, newdata, Rank)
-        attr(BestOFpar,"objectiveFunction")=pretty(BestOFvalues,newdata,Rank=1)
-        BestOFpar
+    if (length(OFpar)) {
+        index <- if (minimize.obfunct)
+            (1:nrow(OFpar))[OFvalues == min(OFvalues)] else
+            (1:nrow(OFpar))[OFvalues == max(OFvalues)]
+     if (length(index) > 1) {
+         warning(paste("multiple solutions found for observation ", i1,
+                       ". Choosing one randomly.", sep = ""))
+         index <- sample(index, size = 1)
+     } else if (length(index) == 0)
+        stop("length(index) is zero")
+      BestOFpar <- rbind(BestOFpar, OFpar[index, ])
+      BestOFvalues <- c(BestOFvalues, OFvalues[index])
     } else {
-        etaValues = muValues = NULL   #
-        if (Quadratic)
-            vcValues = array(0, c(Rank,Rank,nn))
-        for(i1 in 1:nn) {
-            ans = if (Quadratic) .my.calib.objfunction.qrrvglm(BestOFpar[i1, ],
-                          y=newdata[i1,],
-                          extra=object@extra,
-                          objfun=obfunct,
-                          Coefs=Coefobject,
-                          misc.list = object@misc,
-                          everything = TRUE,
-                          mu.function = slot(object@family, "inverse")) else
-                  .my.calib.objfunction.cao(BestOFpar[i1, ],
-                          y=newdata[i1,],
-                          extra=object@extra,
-                          objfun=obfunct,
-                          object=object,
-                          Coefs=Coefobject,
-                          misc.list = object@misc,
-                          everything = TRUE,
-                          mu.function = slot(object@family, "inverse"))
-            muValues = rbind(muValues, matrix(ans$mu, nrow=1))
-            etaValues = rbind(etaValues, matrix(ans$eta, nrow=1))
-            if (Quadratic)
-                vcValues[,,i1] = ans$vcmat  # Can be NULL for "cao" objects
-        }
-        if (type=="response") {
-             dimnames(muValues) = dimnames(newdata)
-             muValues
-        } else if (type=="predictors") {
-             dimnames(etaValues) = list(dimnames(newdata)[[1]],
-                                        dimnames(object at predictors)[[2]])
-             etaValues
-        } else if (type=="vcov") {
-             if (Quadratic)
-             dimnames(vcValues) = list(as.character(1:Rank), 
-                                       as.character(1:Rank),
-                                       dimnames(newdata)[[1]])
-             vcValues
-        } else if (type=="all3or4") {
-             if (Quadratic)
-             dimnames(vcValues) = list(as.character(1:Rank), 
-                                       as.character(1:Rank),
-                                       dimnames(newdata)[[1]])
-             dimnames(muValues) = dimnames(newdata)
-             dimnames(etaValues) = list(dimnames(newdata)[[1]],
-                                        dimnames(object@predictors)[[2]])
-             BestOFpar = pretty(BestOFpar, newdata, Rank)
-             attr(BestOFpar,"objectiveFunction") =
-                  pretty(BestOFvalues,newdata,Rank=1)
-             list(lv=BestOFpar,
-                  predictors=etaValues,
-                  response=muValues,
-                  vcov = if(Quadratic) vcValues else NULL)
-        } else stop("type not matched")
+      BestOFpar <- rbind(BestOFpar, rep(as.numeric(NA), len = Rank))
+      BestOFvalues <- c(BestOFvalues, NA)
     }
-}
-       
-.my.calib.objfunction.qrrvglm = function(bnu, y, extra = NULL,
-                        objfun, Coefs,
-                        misc.list,
-                        everything=TRUE,
-                        mu.function) {
-
-    bnumat = cbind(bnu)
-    Rank = length(bnu)
-    eta = cbind(c(Coefs@B1)) + Coefs@A %*% bnumat  # bix1 = intercept only
-    M = misc.list$M
-    for(s in 1:M) {
-        temp = Coefs@D[,,s,drop=FALSE]
-        dim(temp) = dim(temp)[1:2]  # c(Rank, Rank)
-        eta[s,1] = eta[s,1] + t(bnumat) %*% temp %*% bnumat
+  }
+
+  pretty <- function(BestOFpar, newdata, Rank) {
+    if (Rank == 1) {
+      BestOFpar <- c(BestOFpar) 
+      names(BestOFpar) <- dimnames(newdata)[[1]]
+    } else
+      dimnames(BestOFpar) <-
+        list(dimnames(newdata)[[1]],
+             if (Rank == 1) "latvar" else
+                            paste("latvar", 1:Rank, sep = ""))
+    BestOFpar
+  }
+
+  if (type == "latvar") {
+    BestOFpar <- pretty(BestOFpar, newdata, Rank)
+    attr(BestOFpar,"objectiveFunction") <-
+      pretty(BestOFvalues,
+             newdata, Rank = 1)
+    BestOFpar
+  } else {
+    etaValues <- muValues <- NULL   #
+    if (Quadratic)
+      vcValues <- array(0, c(Rank, Rank, nn))
+    for (i1 in 1:nn) {
+      ans <- if (Quadratic)
+               .my.calib.objfunction.qrrvglm(BestOFpar[i1, ],
+                    y = newdata[i1, ],
+                    extra = object@extra,
+                    objfun = obfunct,
+                    Coefs = Coefobject,
+                    misc.list = object@misc,
+                    everything = TRUE,
+                    mu.function = slot(object@family, "linkinv")) else
+            .my.calib.objfunction.cao(BestOFpar[i1, ],
+                    y = newdata[i1, ],
+                    extra = object@extra,
+                    objfun = obfunct,
+                    object = object,
+                    Coefs = Coefobject,
+                    misc.list = object@misc,
+                    everything = TRUE,
+                    mu.function = slot(object@family, "linkinv"))
+      muValues <- rbind(muValues, matrix(ans$mu, nrow = 1))
+      etaValues <- rbind(etaValues, matrix(ans$eta, nrow = 1))
+      if (Quadratic)
+        vcValues[,,i1] <- ans$vcmat  # Can be NULL for "cao" objects
     }
-    eta = matrix(eta, 1, M, byrow=TRUE)
-    mu = rbind(mu.function(eta, extra))  # Make sure it has one row 
-    value = objfun(mu=mu, y=y,
-                   w=1,  # ignore prior.weights on the object
-                   residuals=FALSE, eta=eta, extra=extra)
-    if (everything) {
-        vcmat = matrix(0, Rank, Rank)
-        for(s in 1:M) {
-            vec1 = cbind(Coefs@A[s,]) + 2 *
-                   matrix(Coefs@D[,,s], Rank, Rank) %*% bnumat
-            vcmat = vcmat + mu[1,s] * vec1 %*% t(vec1)
-        }
-        vcmat = solve(vcmat)
-    } else vcmat = NULL
-    if (everything) list(eta=eta, mu=mu, value=value, vcmat=vcmat) else value
+    if (type == "response") {
+       dimnames(muValues) <- dimnames(newdata)
+       muValues
+    } else if (type == "predictors") {
+       dimnames(etaValues) <- list(dimnames(newdata)[[1]],
+                                   dimnames(object@predictors)[[2]])
+       etaValues
+    } else if (type == "vcov") {
+       if (Quadratic)
+         dimnames(vcValues) <- list(as.character(1:Rank), 
+                                    as.character(1:Rank),
+                                    dimnames(newdata)[[1]])
+       vcValues
+    } else if (type == "all3or4") {
+       if (Quadratic)
+         dimnames(vcValues) <- list(as.character(1:Rank), 
+                                    as.character(1:Rank),
+                                    dimnames(newdata)[[1]])
+       dimnames(muValues) <- dimnames(newdata)
+       dimnames(etaValues) <- list(dimnames(newdata)[[1]],
+                                   dimnames(object@predictors)[[2]])
+       BestOFpar <- pretty(BestOFpar, newdata, Rank)
+       attr(BestOFpar,"objectiveFunction") <-
+            pretty(BestOFvalues,newdata,Rank = 1)
+       list(latvar     = BestOFpar,
+            predictors = etaValues,
+            response   = muValues,
+            vcov       = if (Quadratic) vcValues else NULL)
+    } else stop("argument 'type' not matched")
+  }
 }
 
 
 
-.my.calib.objfunction.cao = function(bnu, y, extra = NULL,
-                        objfun, object, Coefs,
-                        misc.list,
-                        everything=TRUE,
-                        mu.function) {
-    Rank = length(bnu)
-    NOS = Coefs@NOS 
-    eta = matrix(as.numeric(NA), 1, NOS)
-    for(j in 1:NOS) {
-        eta[1,j] = predictcao(object, grid=bnu, sppno=j, 
-                              Rank=Rank, deriv=0)$yvals
+
+ 
+
+
+.my.calib.objfunction.qrrvglm <-
+  function(bnu, y, extra = NULL,
+           objfun, Coefs,
+           misc.list,
+           everything = TRUE,
+           mu.function) {
+
+  bnumat <- cbind(bnu)
+  Rank <- length(bnu)
+  eta <- cbind(c(Coefs@B1)) + Coefs@A %*% bnumat  # bix1 = intercept only
+  M <- misc.list$M
+  for (s in 1:M) {
+    temp <- Coefs@D[, , s, drop = FALSE]
+    dim(temp) <- dim(temp)[1:2]  # c(Rank, Rank)
+    eta[s, 1] <- eta[s, 1] + t(bnumat) %*% temp %*% bnumat
+  }
+  eta <- matrix(eta, 1, M, byrow = TRUE)
+  mu <- rbind(mu.function(eta, extra))  # Make sure it has one row 
+  value <- objfun(mu = mu, y = y,
+                 w = 1,  # ignore prior.weights on the object
+                 residuals = FALSE, eta = eta, extra = extra)
+  if (everything) {
+    vcmat <- matrix(0, Rank, Rank)
+    for (s in 1:M) {
+      vec1 <- cbind(Coefs@A[s, ]) +
+              2 * matrix(Coefs@D[, , s], Rank, Rank) %*% bnumat
+      vcmat <- vcmat + mu[1,s] * vec1 %*% t(vec1)
     }
-    mu = rbind(mu.function(eta, extra))  # Make sure it has one row 
-    value = objfun(mu=mu, y=y,
-                   w=1,  # ignore prior.weights on the object
-                   residuals=FALSE, eta=eta, extra=extra)
-    vcmat = NULL  # No theory as of yet to compute the vcmat
-    if (everything) list(eta=eta, mu=mu, value=value, vcmat=vcmat) else value
+    vcmat <- solve(vcmat)
+  } else {
+    vcmat <- NULL
+  }
+  if (everything)
+    list(eta = eta,
+         mu = mu,
+         value = value,
+         vcmat = vcmat) else
+    value
 }
 
 
-setMethod("calibrate", "qrrvglm", function(object, ...)
-          calibrate.qrrvglm(object, ...))
 
+ 
+
+.my.calib.objfunction.cao <-
+  function(bnu, y, extra = NULL,
+           objfun, object, Coefs,
+           misc.list,
+           everything=TRUE,
+           mu.function) {
+    Rank <- length(bnu)
+    NOS <- Coefs@NOS 
+    eta <- matrix(as.numeric(NA), 1, NOS)
+    for (jlocal in 1:NOS) {
+      eta[1, jlocal] <- predictcao(object, grid = bnu, sppno = jlocal,
+                                   Rank = Rank, deriv = 0)$yvals
+    }
+    mu <- rbind(mu.function(eta, extra))  # Make sure it has one row 
+    value <- objfun(mu = mu, y = y,
+                   w = 1,  # ignore prior.weights on the object
+                   residuals = FALSE, eta = eta, extra = extra)
+    vcmat <- NULL  # No theory as of yet to compute the vcmat
+  if (everything)
+    list(eta = eta,
+         mu = mu,
+         value = value,
+         vcmat = vcmat) else
+    value
+}
 
 
 
 
+setMethod("calibrate", "qrrvglm", function(object, ...)
+          calibrate.qrrvglm(object, ...))
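
A minimal usage sketch of the calibrate() method registered above; this is
illustrative only and not part of the upstream sources. It assumes the VGAM
package is attached and uses the hspider data shipped with VGAM; the object
names p1 and nd2 are invented here.

library(VGAM)
data(hspider)
set.seed(123)
# Fit a rank-1 CQO (an object of class "qrrvglm") to four species' counts.
p1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
            WaterCon + BareSand + FallTwig + CoveMoss,
          family = poissonff, data = hspider, Rank = 1)
# newdata holds the species counts only; calibrate() back-estimates the
# latent variable (site score) row by row.
nd2 <- hspider[1:2, c("Pardlugu", "Pardmont", "Pardnigr", "Pardpull")]
calibrate(p1, newdata = nd2, type = "all3or4")
# Returns a list with components latvar, predictors, response and vcov.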
+
+
 
diff --git a/R/cao.R b/R/cao.R
index 370a83b..82c699a 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -10,170 +10,173 @@
 
 
 cao  <- function(formula,
-                 family, data=list(), 
-                 weights=NULL, subset=NULL, na.action=na.fail,
-                 etastart=NULL, mustart=NULL, coefstart=NULL,
-                 control=cao.control(...), 
-                 offset=NULL, 
-                 method="cao.fit",
-                 model=FALSE, x.arg=TRUE, y.arg=TRUE,
-                 contrasts=NULL, 
-                 constraints=NULL,
-                 extra=NULL, 
-                 qr.arg=FALSE, smart=TRUE, ...)
-{
-    dataname <- as.character(substitute(data))  # "list" if no data=
-    function.name <- "cao"
-
-    ocall <- match.call()
-
-    if (smart) 
-        setup.smart("write")
-
-    mt <- terms(formula, data = data)
-    if (missing(data)) 
-        data <- environment(formula)
-
-    mf <- match.call(expand.dots = FALSE)
-    mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
-        mf$control <-
-        mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
-    mf$coefstart <- mf$etastart <- mf$... <- NULL
-    mf$smart <- NULL
-    mf$drop.unused.levels <- TRUE 
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame()) 
-    if (method == "model.frame")
-        return(mf)
-    na.act <- attr(mf, "na.action")
-
-    xvars <- as.character(attr(mt, "variables"))[-1]
-    if ((yvar <- attr(mt, "response")) > 0)
-        xvars <- xvars[-yvar]
-    xlev <- if (length(xvars) > 0) {
-        xlev <- lapply(mf[xvars], levels)
-        xlev[!sapply(xlev, is.null)]
+                 family, data = list(), 
+                 weights = NULL, subset = NULL, na.action = na.fail,
+                 etastart = NULL, mustart = NULL, coefstart = NULL,
+                 control = cao.control(...), 
+                 offset = NULL, 
+                 method = "cao.fit",
+                 model = FALSE, x.arg = TRUE, y.arg = TRUE,
+                 contrasts = NULL, 
+                 constraints = NULL,
+                 extra = NULL, 
+                 qr.arg = FALSE, smart = TRUE, ...) {
+  dataname <- as.character(substitute(data))  # "list" if no data=
+  function.name <- "cao"
+
+  ocall <- match.call()
+
+  if (smart) 
+    setup.smart("write")
+
+  mt <- terms(formula, data = data)
+  if (missing(data)) 
+    data <- environment(formula)
+
+  mf <- match.call(expand.dots = FALSE)
+  mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
+    mf$control <-
+    mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
+  mf$coefstart <- mf$etastart <- mf$... <- NULL
+  mf$smart <- NULL
+  mf$drop.unused.levels <- TRUE 
+  mf[[1]] <- as.name("model.frame")
+  mf <- eval(mf, parent.frame()) 
+  if (method == "model.frame")
+    return(mf)
+  na.act <- attr(mf, "na.action")
+
+  xvars <- as.character(attr(mt, "variables"))[-1]
+  if ((yvar <- attr(mt, "response")) > 0)
+    xvars <- xvars[-yvar]
+  xlev <- if (length(xvars) > 0) {
+    xlev <- lapply(mf[xvars], levels)
+    xlev[!sapply(xlev, is.null)]
+  }
+
+  y <- model.response(mf, "numeric")  # model.extract(mf, "response")
+  x <- model.matrix(mt, mf, contrasts)
+  attr(x, "assign") <- attrassigndefault(x, mt)
+  offset <- model.offset(mf)
+  if (is.null(offset)) 
+    offset <- 0 # yyy ???
+  w <- model.weights(mf)
+  if (!length(w))
+    w <- rep(1, nrow(mf))
+  else if (ncol(as.matrix(w)) == 1 && any(w < 0))
+    stop("negative weights not allowed")
+
+  if (is.character(family))
+    family <- get(family)
+  if (is.function(family))
+    family <- family()
+  if (!inherits(family, "vglmff")) {
+    stop("'family = ", family, "' is not a VGAM family function")
+  }
+
+  eval(vcontrol.expression)
+
+  if (!is.null(family@first))
+    eval(family@first)
+
+
+  cao.fitter <- get(method)
+
+
+  deviance.Bestof <- rep(as.numeric(NA), len = control$Bestof)
+  for (tries in 1:control$Bestof) {
+    if (control$trace && (control$Bestof > 1)) {
+      cat(paste("\n========================= Fitting model",
+          tries, "=========================\n"))
+      if (exists("flush.console"))
+        flush.console()
     }
-
-    y <- model.response(mf, "numeric") # model.extract(mf, "response")
-    x <- model.matrix(mt, mf, contrasts)
-    attr(x, "assign") = attrassigndefault(x, mt)
-    offset <- model.offset(mf)
-    if (is.null(offset)) 
-        offset <- 0 # yyy ???
-    w <- model.weights(mf)
-    if (!length(w))
-        w <- rep(1, nrow(mf))
-    else if (ncol(as.matrix(w))==1 && any(w < 0))
-        stop("negative weights not allowed")
-
-    if (is.character(family))
-        family <- get(family)
-    if (is.function(family))
-        family <- family()
-    if (!inherits(family, "vglmff")) {
-        stop("'family=", family, "' is not a VGAM family function")
-    }
-
-    eval(vcontrol.expression)
-
-    if (!is.null(family@first))
-        eval(family@first)
-
-
-    cao.fitter <- get(method)
-
-
-    deviance.Bestof = rep(as.numeric(NA), len = control$Bestof)
-    for(tries in 1:control$Bestof) {
-         if (control$trace && (control$Bestof>1)) {
-             cat(paste("\n========================= Fitting model",
-                 tries, "=========================\n"))
-             if (exists("flush.console"))
-                flush.console()
-         }
-         onefit <- cao.fitter(x=x, y=y, w=w, offset=offset,
-                   etastart=etastart, mustart=mustart, coefstart=coefstart,
-                   family=family,
-                   control=control,
-                   constraints=constraints,
-                   criterion=control$criterion,
-                   extra=extra,
-                   qr.arg = qr.arg,
-                   Terms=mt, function.name=function.name, ...)
-        deviance.Bestof[tries] = onefit$crit.list$deviance
-       if (tries == 1 ||
-           min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
-            fit = onefit
-    }
-    fit$misc$deviance.Bestof = deviance.Bestof
-
-    fit$misc$dataname <- dataname
-
-    if (smart) {
-        fit$smart.prediction <- get.smart.prediction()
-        wrapup.smart()
-    }
-
-    answer <-
-    new("cao",
-      "assign"       = attr(x, "assign"),
-      "Bspline"      = fit$Bspline,
-      "call"         = ocall,
-      "coefficients" = fit$coefficients,
-      "criterion"    = fit$crit.list,
-      "family"       = fit$family,
-      "misc"         = fit$misc,
-      "model"        = if (model) mf else data.frame(),
-      "residuals"    = as.matrix(fit$wresiduals),
-      "smart.prediction" = as.list(fit$smart.prediction),
-      "terms"        = list(terms=mt))
-
-    if (!smart) answer@smart.prediction <- list(smart.arg=FALSE)
-
-    if (qr.arg) {
-        class(fit$qr) = "list"
-        slot(answer, "qr") = fit$qr
-    }
-    if (length(attr(x, "contrasts")))
-        slot(answer, "contrasts") = attr(x, "contrasts")
-    if (length(fit$fitted.values))
-        slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
-    slot(answer, "na.action") = if (length(na.act)) list(na.act) else list()
-    if (length(offset))
-        slot(answer, "offset") = as.matrix(offset)
-    if (length(fit$weights))
-        slot(answer, "weights") = as.matrix(fit$weights)
-    if (x.arg)
-        slot(answer, "x") = fit$x # The 'small' design matrix
-    if (length(xlev))
-        slot(answer, "xlevels") = xlev
-    if (y.arg)
-        slot(answer, "y") = as.matrix(fit$y)
-
-
-    slot(answer, "control") = fit$control
-    slot(answer, "extra") = if (length(fit$extra)) {
-        if (is.list(fit$extra)) fit$extra else {
-            warning("'extra' is not a list, therefore ",
-                    "placing 'extra' into a list")
-            list(fit$extra)
-        }
-    } else list() # R-1.5.0
-
-    slot(answer, "iter") = fit$iter
-    fit$predictors = as.matrix(fit$predictors)  # Must be a matrix 
-    dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
-                                    fit$misc$predictors.names)
-    slot(answer, "predictors") = fit$predictors
-    if (length(fit$prior.weights))
-        slot(answer, "prior.weights") = as.matrix(fit$prior.weights)
-
-
-
-
-
-    answer
+    onefit <-
+      cao.fitter(x = x, y = y, w = w, offset = offset,
+                 etastart = etastart, mustart = mustart,
+                 coefstart = coefstart,
+                 family = family,
+                 control = control,
+                 constraints = constraints,
+                 criterion = control$criterion,
+                 extra = extra,
+                 qr.arg = qr.arg,
+                 Terms = mt, function.name = function.name, ...)
+    deviance.Bestof[tries] <- onefit$crit.list$deviance
+    if (tries == 1 ||
+        min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+      fit <- onefit
+  }
+  fit$misc$deviance.Bestof <- deviance.Bestof
+
+  fit$misc$dataname <- dataname
+
+  if (smart) {
+    fit$smart.prediction <- get.smart.prediction()
+    wrapup.smart()
+  }
+
+  answer <-
+  new("cao",
+    "assign"       = attr(x, "assign"),
+    "Bspline"      = fit$Bspline,
+    "call"         = ocall,
+    "coefficients" = fit$coefficients,
+    "criterion"    = fit$crit.list,
+    "family"       = fit$family,
+    "misc"         = fit$misc,
+    "model"        = if (model) mf else data.frame(),
+    "residuals"    = as.matrix(fit$wresiduals),
+    "smart.prediction" = as.list(fit$smart.prediction),
+    "terms"        = list(terms = mt))
+
+  if (!smart)
+    answer@smart.prediction <- list(smart.arg = FALSE)
+
+  if (qr.arg) {
+    class(fit$qr) <- "list"
+    slot(answer, "qr") <- fit$qr
+  }
+  if (length(attr(x, "contrasts")))
+    slot(answer, "contrasts") <- attr(x, "contrasts")
+  if (length(fit$fitted.values))
+    slot(answer, "fitted.values") <- as.matrix(fit$fitted.values)
+  slot(answer, "na.action") <-
+    if (length(na.act)) list(na.act) else list()
+  if (length(offset))
+    slot(answer, "offset") <- as.matrix(offset)
+  if (length(fit$weights))
+    slot(answer, "weights") <- as.matrix(fit$weights)
+  if (x.arg)
+    slot(answer, "x") <- fit$x  # The 'small' design matrix
+  if (length(xlev))
+    slot(answer, "xlevels") <- xlev
+  if (y.arg)
+    slot(answer, "y") <- as.matrix(fit$y)
+
+
+  slot(answer, "control") <- fit$control
+  slot(answer, "extra") <- if (length(fit$extra)) {
+      if (is.list(fit$extra)) fit$extra else {
+          warning("'extra' is not a list, therefore ",
+                  "placing 'extra' into a list")
+          list(fit$extra)
+      }
+  } else list()  # R-1.5.0
+
+  slot(answer, "iter") <- fit$iter
+  fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix 
+  dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
+                                   fit$misc$predictors.names)
+  slot(answer, "predictors") <- fit$predictors
+  if (length(fit$prior.weights))
+    slot(answer, "prior.weights") <- as.matrix(fit$prior.weights)
+
+
+
+
+
+  answer
 }
 attr(cao, "smart") <- TRUE
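
Similarly, a hedged sketch of a cao() call exercising the front end above;
again this is not part of the upstream sources. It assumes VGAM is attached
and uses its hspider data, and ap1 is an invented name.

library(VGAM)
data(hspider)
set.seed(123)
# Extra arguments in '...' (for example Bestof and df1.nl) are collected by
# cao.control(); the fit with the smallest deviance over the Bestof refits
# is the one retained.
ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
             WaterCon + BareSand + FallTwig + CoveMoss,
           family = poissonff, data = hspider, Rank = 1,
           df1.nl = 2.5, Bestof = 2, trace = FALSE)
ap1@misc$deviance.Bestof  # deviance reached by each of the Bestof refits
Coef(ap1)                 # constrained coefficients and nonlinear dfs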
 
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 9486db9..0b4fea6 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -21,10 +21,11 @@ cao.fit <-
   maxitl <- fv <- NULL
 
 
+  eff.n <- nrow(x)  # + sum(abs(w[1:nrow(x)]))
 
   specialCM <- NULL
   post <- list()
-  check.rank <- TRUE # 
+  check.rank <- TRUE
   nonparametric <- TRUE
   optim.maxit <- control$optim.maxit
   save.weight <- control$save.weight
@@ -34,9 +35,9 @@ cao.fit <-
   n <- dim(x)[1]
 
 
-  copy_X_vlm <- FALSE    # May be overwritten in @initialize
+  copy.X.vlm <- FALSE  # May be overwritten in @initialize
 
-  X_vlm_save <- NULL
+  X.vlm.save <- NULL
 
   intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
   y.names <- predictors.names <- NULL # May be overwritten in @initialize
@@ -56,11 +57,11 @@ cao.fit <-
                     "binomialff" = 1, "quasipoissonff" = 0,
                     "quasibinomialff" = 0, "negbinomial" = 3,
                     "gamma2" = 5, "gaussianff" = 8,
-            0)  # stop("cannot fit this model using fast algorithm")
+                    0)  # stop("cannot fit this model using fast algorithm")
   if (!modelno)
     stop("the family function does not work with cao()")
   if (modelno == 1)
-    modelno <- get("modelno", envir = VGAM:::VGAMenv)
+    modelno <- get("modelno", envir = VGAMenv)
 
   eval(rrr.init.expression)
 
@@ -92,7 +93,7 @@ cao.fit <-
     eval(family@constraints)
 
 
-  special.matrix <- matrix(-34956.125, M, M)    # An unlikely used matrix
+  special.matrix <- matrix(-34956.125, M, M)  # An unlikely used matrix
   just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
   findex <- trivial.constraints(just.testing, special.matrix)
   tc1 <- trivial.constraints(constraints)
@@ -104,7 +105,7 @@ cao.fit <-
   dx2 <- dimnames(x)[[2]]
   if (sum(findex)) {
     asx <- attr(x, "assign")
-    for(ii in names(findex))
+    for (ii in names(findex))
       if (findex[ii]) {
         names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
         colx1.index <- c(colx1.index, asx[[ii]])
@@ -115,7 +116,8 @@ cao.fit <-
   colx2.index <- 1:ncol(x)
   names(colx2.index) <- dx2
   colx2.index <- colx2.index[-colx1.index]
-  p1 <- length(colx1.index); p2 <- length(colx2.index)
+  p1 <- length(colx1.index)
+  p2 <- length(colx2.index)
   rrcontrol$colx2.index <- control$colx2.index <- colx2.index
 
 
@@ -124,11 +126,11 @@ cao.fit <-
             matrix(rrcontrol$Cinit, p2, Rank)
           } else {
             if (!rrcontrol$Use.Init.Poisson.QO) {
-              matrix(rnorm(p2*Rank, sd = rrcontrol$SD.Cinit), p2, Rank)
+              matrix(rnorm(p2 * Rank, sd = rrcontrol$sd.Cinit), p2, Rank)
             } else {
                 .Init.Poisson.QO(ymat = as.matrix(y),
-                          X1 = x[,colx1.index, drop = FALSE],
-                          X2 = x[,colx2.index, drop = FALSE],
+                          X1 = x[, colx1.index, drop = FALSE],
+                          X2 = x[, colx2.index, drop = FALSE],
                           Rank = rrcontrol$Rank, trace = rrcontrol$trace,
                           max.ncol.etamat = rrcontrol$Etamat.colmax,
                           Crow1positive = rrcontrol$Crow1positive,
@@ -139,22 +141,24 @@ cao.fit <-
           }
 
 
-  rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
+  rrcontrol$Cinit <- control$Cinit <- Cmat  # Good for valt()
 
   Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
 
   nice31 <- checkCMCO(Blist, control = control, modelno = modelno)
-  if (nice31 != 1) stop("not nice")
+  if (nice31 != 1)
+    stop("not nice")
 
   ncolBlist <- unlist(lapply(Blist, ncol))
-  lv.mat <- x[, colx2.index, drop = FALSE] %*% Cmat 
+  latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat 
 
 
   rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
 
   Nice21 <- length(names.colx1.index) == 1 &&
-           names.colx1.index == "(Intercept)"
-  if (!Nice21) stop("'noRRR = ~ 1' is supported only, without constraints")
+            names.colx1.index == "(Intercept)"
+  if (!Nice21)
+    stop("'noRRR = ~ 1' is supported only, without constraints")
   NOS <- ifelse(modelno %in% c(3, 5), M/2, M)
   p1star. <- if (Nice21) ifelse(modelno %in% c(3, 5), 2, 1) else M
   p2star. <- if (Nice21) Rank else stop("not Nice21")
@@ -162,10 +166,13 @@ cao.fit <-
   nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
   lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
 
-  othint <- c(Rank, control$EqualTol, pstar. ,
-                 dim2wz = 1, inited = 0, # w(,dimw) cols
-          modelno, maxitl = control$maxitl, actnits = 0, twice = 0, p1star. ,
-          p2star. , Nice21, lenbeta, controlITolerances = 0, control$trace,
+  othint <-
+        c(Rank, control$EqualTol, pstar. ,
+                 dim2wz = 1, inited = 0,  # w(, dimw) cols
+          modelno, maxitl = control$maxitl,
+          actnits = 0, twice = 0, p1star. ,
+          p2star. , Nice21, lenbeta,
+          controlITolerances = 0, control$trace,
           p1, p2 = p2, imethod = control$imethod, bchat = 0)
   othdbl <- c(small = control$SmallNo, fseps = control$epsilon,
               .Machine$double.eps,
@@ -173,7 +180,7 @@ cao.fit <-
               iShape = rep(control$iShape, len = NOS),
               resss = 0, bfeps = control$bf.epsilon, hstep = 0.1)
 
-  for(iter in 1:optim.maxit) {
+  for (iter in 1:optim.maxit) {
     if (control$trace) {
       cat("\nIteration", iter, "\n")
       flush.console()
@@ -182,9 +189,11 @@ cao.fit <-
       conjgrad <- optim(par = c(Cmat), fn = callcaoc,
                    gr = if (control$GradientFunction) calldcaoc else NULL,
                    method = "BFGS",
-                   control=list(fnscale = 1, trace = as.integer(control$trace),
-                                maxit = control$Maxit.optim, REPORT = 10),
-                   etamat = eta, xmat = x, ymat = y, # as.matrix(y), 
+                   control = list(fnscale = 1,
+                                  trace = as.integer(control$trace),
+                                  maxit = control$Maxit.optim,
+                                  REPORT = 10),
+                   etamat = eta, xmat = x, ymat = y,  # as.matrix(y), 
                    wvec = w, modelno = modelno,
                    Control = control,
                    Nice21 = Nice21,
@@ -194,11 +203,11 @@ cao.fit <-
                    alldump = FALSE)
 
 
-      Cmat <- matrix(conjgrad$par, p2, Rank) # old becoz of scale(cmatrix)
+      Cmat <- matrix(conjgrad$par, p2, Rank)  # old becoz of scale(cmatrix)
 
-   #    Cmat <- Cmat %*% Ut  # Normalized
 
-      if (converged <- (conjgrad$convergence == 0)) break
+    if (converged <- (conjgrad$convergence == 0))
+      break
   }
 
   if (!converged) {
@@ -209,18 +218,18 @@ cao.fit <-
     }
   } else {
   }
-  Cmat <- crow1C(Cmat, control$Crow1positive) # Make sure signs are right
+  Cmat <- crow1C(Cmat, control$Crow1positive)  # Make sure signs are right
 
   flush.console()
-  temp9 <- 
-  callcaoc(cmatrix = Cmat,
-           etamat = eta, xmat = x, ymat = y, wvec = w, modelno = modelno,
-           Control = control,
-           Nice21 = Nice21,
-           p1star. = p1star. , p2star. = p2star. ,
-           n = n, M = M, 
-           othint = othint, othdbl = othdbl,
-           alldump = TRUE)
+  temp9 <- callcaoc(cmatrix = Cmat,
+                    etamat = eta, xmat = x, ymat = y,
+                    wvec = w, modelno = modelno,
+                    Control = control,
+                    Nice21 = Nice21,
+                    p1star. = p1star. , p2star. = p2star. ,
+                    n = n, M = M, 
+                    othint = othint, othdbl = othdbl,
+                    alldump = TRUE)
   if (!is.list(extra))
     extra <- list()
   extra$Cmat <- temp9$Cmat
@@ -276,7 +285,7 @@ cao.fit <-
   fit <- list(
               fitted.values = mu,
               Cmatrix = Cmat,
-              terms = Terms) # terms: This used to be done in vglm() 
+              terms = Terms)  # terms: This used to be done in vglm() 
 
 
 
@@ -330,7 +339,7 @@ cao.control <- function(Rank = 1,
           Crow1positive = TRUE,
           epsilon = 1.0e-05,
           Etamat.colmax = 10,
-          GradientFunction = FALSE,  # For now 24/12/04
+          GradientFunction = FALSE,  # For now 20041224
           iKvector = 0.1,
           iShape = 0.1,
           noRRR = ~ 1,
@@ -339,19 +348,20 @@ cao.control <- function(Rank = 1,
           Use.Init.Poisson.QO = TRUE,
 
           Bestof = if (length(Cinit)) 1 else 10,
-          maxitl = 10,   # was 40 prior to 20100420
+          maxitl = 10,  # was 40 prior to 20100420
           imethod = 1,
           bf.epsilon = 1.0e-7,
           bf.maxit = 10,  # was 40 prior to 20100420
           Maxit.optim = 250,
           optim.maxit = 20,
-          SD.sitescores = 1.0,
-          SD.Cinit = 0.02,
+          sd.sitescores = 1.0,
+          sd.Cinit = 0.02,
+          suppress.warnings = TRUE,
           trace = TRUE,
-          df1.nl = 2.5, # About 1.5--2.5 gives the flexibility of a quadratic
-          df2.nl = 2.5, # About 1.5--2.5 gives the flexibility of a quadratic
-          spar1 = 0,    # 0 means df1.nl is used
-          spar2 = 0,    # 0 means df2.nl is used
+          df1.nl = 2.5,  # About 1.5--2.5 gives the flexibility of a quadratic
+          df2.nl = 2.5,  # About 1.5--2.5 gives the flexibility of a quadratic
+          spar1 = 0,  # 0 means df1.nl is used
+          spar2 = 0,  # 0 means df2.nl is used
           ...) {
 
 
@@ -365,89 +375,93 @@ cao.control <- function(Rank = 1,
 
 
   if (!is.Numeric(iShape, positive = TRUE))
-    stop("bad input for 'iShape'")
+    stop("bad input for argument 'iShape'")
   if (!is.Numeric(iKvector, positive = TRUE))
-    stop("bad input for 'iKvector'")
-  if (!is.Numeric(imethod, positive = TRUE, allowable.length = 1,
+    stop("bad input for argument 'iKvector'")
+  if (!is.Numeric(imethod, positive = TRUE, length.arg = 1,
                   integer.valued = TRUE))
-    stop("bad input for 'imethod'")
+    stop("bad input for argument 'imethod'")
 
-  if (criterion != "deviance") stop("'criterion' must be 'deviance'")
+  if (criterion != "deviance")
+    stop("'criterion' must be 'deviance'")
   if (GradientFunction)
-    stop("14/1/05; GradientFunction = TRUE not working yet")
+    stop("20050114; GradientFunction = TRUE not working yet")
 
   se.fit <- as.logical(FALSE)
-  if (se.fit) stop("se.fit = FALSE handled only")
+  if (se.fit)
+    stop("se.fit = FALSE handled only")
 
   if (length(Cinit) && !is.Numeric(Cinit))
-    stop("Bad input for 'Cinit'")
-  if (!is.Numeric(Bestof, allowable.length = 1,
+    stop("Bad input for argument 'Cinit'")
+  if (!is.Numeric(Bestof, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
-    stop("Bad input for 'Bestof'")
-  if (!is.Numeric(maxitl, allowable.length = 1,
+    stop("Bad input for argument 'Bestof'")
+  if (!is.Numeric(maxitl, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
-    stop("Bad input for 'maxitl'")
-  if (!is.Numeric(bf.epsilon, allowable.length = 1,
+    stop("Bad input for argument 'maxitl'")
+  if (!is.Numeric(bf.epsilon, length.arg = 1,
                   positive = TRUE))
-    stop("Bad input for 'bf.epsilon'")
+    stop("Bad input for argument 'bf.epsilon'")
   if (!is.Numeric(bf.maxit, integer.valued = TRUE,
-                  positive = TRUE, allowable.length = 1))
-    stop("Bad input for 'bf.maxit'")
+                  positive = TRUE, length.arg = 1))
+    stop("Bad input for argument 'bf.maxit'")
+
   if (!is.Numeric(Etamat.colmax, positive = TRUE,
-                  allowable.length = 1) ||
+                  length.arg = 1) ||
       Etamat.colmax < Rank)
-    stop("bad input for 'Etamat.colmax'")
+    stop("bad input for argument 'Etamat.colmax'")
+
   if (!is.Numeric(Maxit.optim, integer.valued = TRUE,
-                  positive = TRUE, allowable.length = 1))
-    stop("Bad input for 'Maxit.optim'")
-  if (!is.Numeric(optim.maxit, allowable.length = 1,
+                  positive = TRUE, length.arg = 1))
+    stop("Bad input for argument 'Maxit.optim'")
+  if (!is.Numeric(optim.maxit, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
-    stop("Bad input for 'optim.maxit'")
-  if (!is.Numeric(SD.sitescores, allowable.length = 1,
+    stop("Bad input for argument 'optim.maxit'")
+  if (!is.Numeric(sd.sitescores, length.arg = 1,
                   positive = TRUE))
-    stop("Bad input for 'SD.sitescores'")
-  if (!is.Numeric(SD.Cinit, allowable.length = 1,
+    stop("Bad input for argument 'sd.sitescores'")
+  if (!is.Numeric(sd.Cinit, length.arg = 1,
                   positive = TRUE))
-    stop("Bad input for 'SD.Cinit'")
+    stop("Bad input for argument 'sd.Cinit'")
   if (!is.Numeric(df1.nl) || any(df1.nl < 0))
-    stop("Bad input for 'df1.nl'")
+    stop("Bad input for argument 'df1.nl'")
   if (any(df1.nl >= 0 & df1.nl < 0.05)) {
     warning("'df1.nl' values between 0 and 0.05 converted to 0.05")
     df1.nl[df1.nl < 0.05] <- 0.05
   }
   if (!is.Numeric(df2.nl) || any(df2.nl < 0))
-    stop("Bad input for 'df2.nl'")
+    stop("Bad input for argument 'df2.nl'")
   if (any(df2.nl >= 0 & df2.nl < 0.05)) {
     warning("'df2.nl' values between 0 and 0.05 converted to 0.05")
-    df2.nl[df2.nl < 0.05] = 0.05
+    df2.nl[df2.nl < 0.05] <- 0.05
   }
   if (!is.Numeric(spar1) || any(spar1 < 0))
-    stop("Bad input for 'spar1'")
+    stop("Bad input for argument 'spar1'")
   if (!is.Numeric(spar2) || any(spar2 < 0))
-    stop("Bad input for 'spar2'")
-  if (!is.Numeric(epsilon, positive = TRUE, allowable.length = 1))
-    stop("Bad input for 'epsilon'")
+    stop("Bad input for argument 'spar2'")
+  if (!is.Numeric(epsilon, positive = TRUE, length.arg = 1))
+    stop("Bad input for argument 'epsilon'")
 
-  if (!is.Numeric(SmallNo, positive = TRUE, allowable.length = 1))
-    stop("Bad input for 'SmallNo'")
+  if (!is.Numeric(SmallNo, positive = TRUE, length.arg = 1))
+    stop("Bad input for argument 'SmallNo'")
   if ((SmallNo < .Machine$double.eps) ||
       (SmallNo > .0001))
     stop("'SmallNo' is out of range") 
 
     ans <- list(
-     Corner = FALSE, # A constant, not a control parameter; unneeded?
-     EqualTolerances = FALSE, # A constant, not a control parameter; needed
-     ITolerances = FALSE, # A constant, not a control parameter; unneeded?
-     Quadratic = FALSE, # A constant, not a control parameter; unneeded?
+     Corner = FALSE,  # A constant, not a control parameter; unneeded?
+     EqualTolerances = FALSE,  # A constant, not a control parameter; needed
+     ITolerances = FALSE,  # A constant, not a control parameter; unneeded?
+     Quadratic = FALSE,  # A constant, not a control parameter; unneeded?
         all.knots = as.logical(all.knots)[1],
         Bestof = Bestof,
         Cinit = Cinit,
-        ConstrainedO = TRUE, # A constant, not a control parameter
+        ConstrainedO = TRUE,  # A constant, not a control parameter
         criterion = criterion,
         Crow1positive = as.logical(rep(Crow1positive, len = Rank)),
         epsilon = epsilon,
         Etamat.colmax = Etamat.colmax,
-        FastAlgorithm = TRUE, # A constant, not a control parameter
+        FastAlgorithm = TRUE,  # A constant, not a control parameter
         GradientFunction = as.logical(GradientFunction),
         maxitl = maxitl,
         bf.epsilon = bf.epsilon,
@@ -457,10 +471,11 @@ cao.control <- function(Rank = 1,
         optim.maxit = optim.maxit,
         noRRR = noRRR,
         Rank = Rank,
-        SD.sitescores = SD.sitescores,
-        SD.Cinit = SD.Cinit,
-        se.fit = se.fit, # If TRUE, then would need storage for S QR fits
+        sd.sitescores = sd.sitescores,
+        sd.Cinit = sd.Cinit,
+        se.fit = se.fit,  # If TRUE, then would need storage for S QR fits
         SmallNo = SmallNo,
+        suppress.warnings = as.logical(suppress.warnings),
         trace = as.integer(trace),
         Use.Init.Poisson.QO = Use.Init.Poisson.QO,
         iKvector = as.numeric(iKvector),
@@ -478,15 +493,15 @@ cao.control <- function(Rank = 1,
 
 
 create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
-  if (!is.Numeric(p1, allowable.length = 1,
+  if (!is.Numeric(p1, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
-    stop("bad input for 'p1'")
+    stop("bad input for argument 'p1'")
   Blist. <- vector("list", p1 + Rank)
-  for(rr in 1:(p1+Rank))
+  for (rr in 1:(p1+Rank))
     Blist.[[rr]] <- diag(M)
   names(Blist.) <- if (p1 == 1) c("(Intercept)", names(which)) else stop()
   if (MSratio == 2) {
-    for(r in 1:Rank) 
+    for (r in 1:Rank) 
       Blist.[[p1+r]] <- eijfun(1, M)
   }
   Blist.
@@ -511,238 +526,260 @@ callcaoc <- function(cmatrix,
   p2 <- length(control$colx2.index)
   yn <- dimnames(ymat)[[2]]
   if (length(yn) != ncol(ymat))
-    stop("the column names of 'ymat' must be given")
-  queue <- qbig <- Rank # 19/10/05; number of smooths per species
-  NOS <- if (modelno %in% c(3, 5)) M/2 else M
-    df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
-    spar1  <- procVec(control$spar1,  yn = yn , Default = control$SPAR1)
-    df2.nl <- procVec(control$df2.nl, yn = yn , Default = control$DF2)
-    spar2  <- procVec(control$spar2,  yn = yn , Default = control$SPAR2)
-    if (any(c(length(spar1), length(spar2), length(df1.nl),
-              length(df2.nl)) != NOS))
-      stop("wrong length in at least one of ",
-           "'df1.nl', 'df2.nl', 'spar1', 'spar2'")
-
-    cmatrix <- matrix(cmatrix, p2, Rank) # crow1C() needs a matrix as input
-    cmatrix <- crow1C(cmatrix, crow1positive = control$Crow1positive)
-    numat <- xmat[,control$colx2.index, drop = FALSE] %*% cmatrix
-    evnu <- eigen(var(numat))
-    temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
-            evnu$vector %*% evnu$value^(-0.5)
-    cmatrix <- cmatrix %*% temp7
-    cmatrix <- crow1C(cmatrix, crow1positive =control$Crow1positive)
-    numat <- xmat[,control$colx2.index, drop = FALSE] %*% cmatrix
-
-
-    dim(numat) <- c(n, Rank)
-    mynames5 <- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
-    nu1mat <- cbind("(Intercept)" = 1, lv = numat)
-    dimnames(nu1mat) <- list(dimnames(xmat)[[1]], c("(Intercept)", mynames5))
-
-    temp.smooth.frame <- vector("list", p1+Rank) # Temporary makeshift frame
-    names(temp.smooth.frame) <- c(names(control$colx1.index), mynames5)
-    for(uu in 1:(p1+Rank)) {
-        temp.smooth.frame[[uu]] <- nu1mat[,uu]
-    }
-    temp.smooth.frame <- data.frame(temp.smooth.frame)
-    for(uu in 1:Rank) {
-        attr(temp.smooth.frame[,uu+p1], "spar") <- 0  # this value unused
-        attr(temp.smooth.frame[,uu+p1], "df") <- 4    # this value unused
-    }
-
-    pstar.  <- p1star.  + p2star.   # = Mdot + Rank
-    nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
-    lenbeta <- pstar. * ifelse(Nice21, NOS, 1) # Holds the linear coeffs
-
-    inited <- if (exists(".VGAM.CAO.etamat", envir=VGAM:::VGAMenv)) 1 else 0
-    usethiseta <- if (inited == 1)
-        getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else t(etamat)
-
-    if (any(is.na(usethiseta))) {
-        usethiseta <- t(etamat)  # So that dim(usethiseta) == c(M,n)
-        rmfromVGAMenv("etamat", prefix = ".VGAM.CAO.")
-    }
-
-    usethisbeta <- if (inited == 2)
-        getfromVGAMenv("beta", prefix = ".VGAM.CAO.") else double(lenbeta)
-    othint[5] <- inited   # Refine initialization within C
-    pstar <- NOS * pstar. 
-    bnumat <- if (Nice21) matrix(0, nstar, pstar.) else
-             stop("code not written here")
-
-    M. <- MSratio <- M / NOS     # 1 or 2 usually
-    which <- p1 + (1:Rank) # These columns are smoothed
-    nwhich <- names(which) <- mynames5
-
-    origBlist <- Blist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
-                                    which = which, p1 = p1) # For 1 species only
-    ncolBlist. <- unlist(lapply(Blist. , ncol))
-    smooth.frame <- s.vam(x = nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
-                         which = which,
-                         smooth.frame = temp.smooth.frame,
-                         bf.maxit = control$bf.maxit,
-                         bf.epsilon = control$bf.epsilon,
-                         trace = FALSE, se.fit = control$se.fit,
-                         X_vlm_save = bnumat, Blist = Blist. ,
-                         ncolBlist = ncolBlist. ,
-                         M =  M. , qbig = NULL, Umat = NULL, # NULL ==> unneeded
-                         all.knots = control$all.knots, nk = NULL,
-                         sf.only = TRUE)
-
-    ldk <- 3 * max(ncolBlist.[nwhich]) + 1   # 11/7/02
-
-    dimw. <- M.   # Smoothing one spp. at a time
-    dim1U. <- M.
-    wz. <- matrix(0, n, dimw. )
-    if (names(Blist.)[1] != "(Intercept)") stop("something wrong here")
-    Blist.[[1]] <- NULL
+    stop("the column names of argument 'ymat' must be given")
 
-    trivc <- rep(2 - M. , len = queue) # All of queue smooths are basic smooths
-    ncbvec <- ncolBlist.[nwhich]
-    ncolb <- max(ncbvec)
+  queue <- qbig <- Rank  # 20051019; number of smooths per species
 
-    qbig. <- NOS * qbig    # == NOS * Rank; holds all the smooths
-    if (!all.equal(as.vector(ncbvec), rep(1, len = queue)))
-        stop("'ncbvec' not right---should be a queue-vector of ones")
-    pbig <- pstar. #
+  NOS <- if (modelno %in% c(3, 5)) M/2 else M
+  df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
+  spar1  <- procVec(control$spar1,  yn = yn , Default = control$SPAR1)
+  df2.nl <- procVec(control$df2.nl, yn = yn , Default = control$DF2)
+  spar2  <- procVec(control$spar2,  yn = yn , Default = control$SPAR2)
+  if (any(c(length(spar1), length(spar2), length(df1.nl),
+            length(df2.nl)) != NOS))
+    stop("wrong length in at least one of arguments ",
+         "'df1.nl', 'df2.nl', 'spar1', 'spar2'")
+
+  cmatrix <- matrix(cmatrix, p2, Rank)  # crow1C() needs a matrix as input
+  cmatrix <- crow1C(cmatrix, crow1positive = control$Crow1positive)
+  numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+  evnu <- eigen(var(numat))
+  temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+           evnu$vector %*% evnu$value^(-0.5)
+  cmatrix <- cmatrix %*% temp7
+  cmatrix <- crow1C(cmatrix, crow1positive = control$Crow1positive)
+  numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+
+
+  dim(numat) <- c(n, Rank)
+  mynames5 <- if (Rank == 1) "latvar" else paste("latvar", 1:Rank, sep = "")
+  nu1mat <- cbind("(Intercept)" = 1, latvar = numat)
+  dimnames(nu1mat) <- list(dimnames(xmat)[[1]],
+                           c("(Intercept)", mynames5))
+
+  temp.smooth.frame <- vector("list", p1+Rank)  # Temporary makeshift frame
+  names(temp.smooth.frame) <- c(names(control$colx1.index), mynames5)
+  for (uu in 1:(p1+Rank)) {
+    temp.smooth.frame[[uu]] <- nu1mat[, uu]
+  }
+  temp.smooth.frame <- data.frame(temp.smooth.frame)
+  for (uu in 1:Rank) {
+    attr(temp.smooth.frame[,uu+p1], "spar") <- 0  # this value unused
+    attr(temp.smooth.frame[,uu+p1], "df") <- 4    # this value unused
+  }
 
+  pstar. <- p1star. + p2star.  # = Mdot + Rank
+  nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
+  lenbeta <- pstar. * ifelse(Nice21, NOS, 1)  # Holds the linear coeffs
 
-    contr.sp <- list(low = -1.5,## low = 0.      was default till R 1.3.x
-                     high = 1.5,
-                     tol = 1e-4,## tol = 0.001   was default till R 1.3.x
-                     eps = 2e-8,## eps = 0.00244 was default till R 1.3.x
-                     maxit = 500 )
+  inited <- if (exists(".VGAM.CAO.etamat", envir = VGAMenv)) 1 else 0
+  usethiseta <-
+      if (inited == 1)
+        getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else
+        t(etamat)
 
-  if (FALSE)
-    contr.sp <- list(low = -1.5,## low = 0.      was default till R 1.3.x
-                     high = 1.5,
-                     tol = 0.001,     # was default till R 1.3.x
-                     eps = 0.00244,   # was default till R 1.3.x
-                     maxit = 500 )
+  if (any(is.na(usethiseta))) {
+    usethiseta <- t(etamat)  # So that dim(usethiseta) == c(M,n)
+    rmfromVGAMenv("etamat", prefix = ".VGAM.CAO.")
+  }
 
-    npetc <- c(n = nrow(nu1mat), p. = ncol(nu1mat), q = length(which),
-                  se.fit = control$se.fit, 0,
-        control$bf.maxit, qrank = 0, M = M. , nbig = nstar, pbig = pbig,
-        qbig = qbig, dim2wz = dimw. , dim1U = dim1U. , ierror = 0, ldk=ldk,
-        contr.sp$maxit, iinfo = 0)
+  usethisbeta <- if (inited == 2)
+                 getfromVGAMenv("beta", prefix = ".VGAM.CAO.") else
+                 double(lenbeta)
+  othint[5] <- inited   # Refine initialization within C
+  pstar <- NOS * pstar. 
+  bnumat <- if (Nice21) matrix(0, nstar, pstar.) else
+                        stop("code not written here")
+
+  M. <- MSratio <- M / NOS  # 1 or 2 usually
+  which <- p1 + (1:Rank)  # These columns are smoothed
+  nwhich <- names(which) <- mynames5
+
+  origBlist <-
+  Blist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
+                       which = which, p1 = p1)  # For 1 species only
+  ncolBlist. <- unlist(lapply(Blist. , ncol))
+  smooth.frame <- s.vam(x = nu1mat, zedd = NULL,
+                        wz = NULL, smomat = NULL,
+                        which = which,
+                        smooth.frame = temp.smooth.frame,
+                        bf.maxit = control$bf.maxit,
+                        bf.epsilon = control$bf.epsilon,
+                        trace = FALSE, se.fit = control$se.fit,
+                        X.vlm.save = bnumat, Blist = Blist. ,
+                        ncolBlist = ncolBlist. ,
+                        M =  M. ,
+                        qbig = NULL, Umat = NULL,  # NULL ==> unneeded
+                        all.knots = control$all.knots, nk = NULL,
+                        sf.only = TRUE)
+
+  ldk <- 3 * max(ncolBlist.[nwhich]) + 1   # 11/7/02
+
+  dimw. <- M.   # Smoothing one spp. at a time
+  dim1U. <- M.
+  wz. <- matrix(0, n, dimw. )
+  if (names(Blist.)[1] != "(Intercept)")
+    stop("something wrong here")
+  Blist.[[1]] <- NULL
+
+  trivc <- rep(2 - M. , len = queue)
+  ncbvec <- ncolBlist.[nwhich]
+  ncolb <- max(ncbvec)
+
+  qbig. <- NOS * qbig    # == NOS * Rank; holds all the smooths
+  if (!all.equal(as.vector(ncbvec), rep(1, len = queue)))
+    stop("'ncbvec' not right---should be a queue-vector of ones")
+  pbig <- pstar. #
+
+
+
+  contr.sp <- list(low = -1.5,  ## low = 0.      was default till R 1.3.x
+                   high = 1.5,
+                   tol = 1e-4,  ## tol = 0.001   was default till R 1.3.x
+                   eps = 2e-8,  ## eps = 0.00244 was default till R 1.3.x
+                   maxit = 500 )
+
+
+  npetc <-
+    c(n = nrow(nu1mat), p. = ncol(nu1mat), q = length(which),
+        se.fit = control$se.fit, 0,
+      control$bf.maxit, qrank = 0, M = M. , nbig = nstar, pbig = pbig,
+      qbig = qbig, dim2wz = dimw. , dim1U = dim1U. ,
+        ierror = 0, ldk = ldk,
+      contr.sp$maxit, iinfo = 0)
 
 
 
 
-    if (Rank == 2) {
-        smopar <- (c(spar1, spar2))[interleave.VGAM(4*NOS, M = 2)]
-        dofvec <- (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4*NOS, M = 2)]
-        lamvec <- 0 * dofvec
-        stop("20100414; havent got Rank = 2 going yet")
-    } else {
-        smopar <- c(spar1, spar2)
-        dofvec <- c(df1.nl, df2.nl) + 1.0
-        lamvec <- 0 * dofvec
-    }
+  if (Rank == 2) {
+    smopar <- (c(spar1, spar2))[interleave.VGAM(4 * NOS, M = 2)]
+    dofvec <- (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4 * NOS, M = 2)]
+    lamvec <- 0 * dofvec
+    stop("20100414; havent got Rank = 2 going yet")
+  } else {
+    smopar <- c(spar1, spar2)
+    dofvec <- c(df1.nl, df2.nl) + 1.0
+    lamvec <- 0 * dofvec
+  }
 
-    ans1 <- dotC(name = "vcao6",
-     numat = as.double(numat), ymat = as.double(ymat), wvec = as.double(wvec),
-     etamat = as.double(usethiseta), fv = double(NOS*n), zedd = double(n*M),
-     wz = double(n*M), U = double(M*n), # bnumat = as.double(bnumat),
-     qr = double(nstar*pstar.), qraux = double(pstar.), qpivot = integer(pstar.),
-     n = as.integer(n), M = as.integer(M), NOS = as.integer(NOS),
-         nstar = as.integer(nstar), dim1U = as.integer( M ), # for U, not U. 
-     errcode = integer(1), othint = as.integer(othint),
-     deviance = double(1 + NOS),  # NOS more elts added 20100413
-     beta = as.double(usethisbeta),
-     othdbl = as.double(othdbl),
-         npetc = as.integer(npetc), M. = as.integer( M. ),
-     dofvec = as.double(dofvec),
-     lamvec = as.double(lamvec),
-     smopar = as.double(smopar),
-         match = as.integer(smooth.frame$o), as.integer(smooth.frame$nef), 
-         which = as.integer(which),
-         smomat = as.double(matrix(0, n, qbig. )),
-         nu1mat = as.double(nu1mat),
-     blist = as.double(unlist( Blist. )),
-     as.integer(ncbvec), 
-         smap = as.integer(1:(Rank+1)), # 
-         trivc = as.integer(trivc),
-     levmat = as.double(matrix(0, n, qbig. )),
-         bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
-         xknots = as.double(unlist(smooth.frame$knots)),
-     bindex = as.integer(smooth.frame$bindex),
-         nknots = as.integer(smooth.frame$nknots),
-         kindex = as.integer(smooth.frame$kindex))
+  ans1 <- .C("vcao6",
+  numat = as.double(numat), ymat = as.double(ymat), wvec = as.double(wvec),
+  etamat = as.double(usethiseta), fv = double(NOS*n), zedd = double(n*M),
+  wz = double(n*M), U = double(M*n),  # bnumat = as.double(bnumat),
+  qr = double(nstar*pstar.), qraux = double(pstar.),
+    qpivot = integer(pstar.),
+  n = as.integer(n), M = as.integer(M), NOS = as.integer(NOS),
+      nstar = as.integer(nstar), dim1U = as.integer( M ),  # for U, not U. 
+  errcode = integer(1), othint = as.integer(othint),
+  deviance = double(1 + NOS),  # NOS more elts added 20100413
+  beta = as.double(usethisbeta),
+  othdbl = as.double(othdbl),
+      npetc = as.integer(npetc), M. = as.integer( M. ),
+  dofvec = as.double(dofvec),
+  lamvec = as.double(lamvec),
+  smopar = as.double(smopar),
+      match = as.integer(smooth.frame$matcho),
+      as.integer(smooth.frame$nef),
+      which = as.integer(which),
+      smomat = as.double(matrix(0, n, qbig. )),
+      nu1mat = as.double(nu1mat),
+  blist = as.double(unlist( Blist. )),
+  as.integer(ncbvec), 
+      smap = as.integer(1:(Rank+1)),  # 
+      trivc = as.integer(trivc),
+
+
+
+
+  levmat = double(NOS * sum(smooth.frame$neffec * ncbvec)),
+
+
+
+      bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
+      xknots = as.double(unlist(smooth.frame$knots)),
+  bindex = as.integer(smooth.frame$bindex),
+  lindex = as.integer(smooth.frame$lindex),
+      nknots = as.integer(smooth.frame$nknots),
+      kindex = as.integer(smooth.frame$kindex), PACKAGE = "VGAM")
 flush.console()
 
 
     if (ans1$errcode == 0) {
-        assign2VGAMenv(c("etamat", "beta"), ans1, prefix = ".VGAM.CAO.")
-        assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAM:::VGAMenv)
+      assign2VGAMenv(c("etamat", "beta"), ans1, prefix = ".VGAM.CAO.")
+      assign(".VGAM.CAO.cmatrix", matrix(cmatrix, p2, Rank),
+             envir = VGAMenv)
     } else {
+      if (!control$suppress.warnings) {
         cat("warning in callcaoc: error code  = ", ans1$errcode, "\n")
-        cat("warning in callcaoc: npetc[14]  = ", ans1$npetc[14], "\n")
+        cat("warning in callcaoc: npetc[14]   = ", ans1$npetc[14], "\n")
         flush.console()
-        rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
+      }
+      rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
     }
 
-    returnans <- if (alldump) {
-        bindex <- ans1$bindex
-        ncolBlist <- ncbvec
-        Bspline2 <- vector("list", NOS)
-        names(Bspline2) <- dimnames(ymat)[[2]]
-        Bspline <- vector("list", length(nwhich))
-        names(Bspline) <- nwhich
-        ind9 <- 0   # moving index
-        for(sppno in 1:NOS) {
-            for(ii in 1:length(nwhich)) {
-              ind7 <- (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
-              ans <- ans1$bcoeff[ind9+ind7]
-              ans <- matrix(ans, ncol=ncolBlist[nwhich[ii]])
-              Bspline[[ii]] = new(Class = "vsmooth.spline.fit",
-                    "Bcoefficients" = ans,
-                    "xmax"          = smooth.frame$xmax[ii],
-                    "xmin"          = smooth.frame$xmin[ii],
-                    "knots"         = as.vector(smooth.frame$knots[[ii]]))
-            }
-            ind9 <- ind9 + smooth.frame$bindex[length(nwhich)+1]-1
-            Bspline2[[sppno]] <- Bspline
+  returnans <- if (alldump) {
+      bindex <- ans1$bindex
+      ncolBlist <- ncbvec
+      Bspline2 <- vector("list", NOS)
+      names(Bspline2) <- dimnames(ymat)[[2]]
+      Bspline <- vector("list", length(nwhich))
+      names(Bspline) <- nwhich
+      ind9 <- 0   # moving index
+      for (sppno in 1:NOS) {
+        for (ii in 1:length(nwhich)) {
+          ind7 <- (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
+          ans <- ans1$bcoeff[ind9+ind7]
+          ans <- matrix(ans, ncol = ncolBlist[nwhich[ii]])
+          Bspline[[ii]] <-
+            new(Class = "vsmooth.spline.fit",
+                "Bcoefficients" = ans,
+                "xmax"          = smooth.frame$xmax[ii],
+                "xmin"          = smooth.frame$xmin[ii],
+                "knots"         = as.vector(smooth.frame$knots[[ii]]))
         }
+        ind9 <- ind9 + smooth.frame$bindex[length(nwhich)+1] - 1
+        Bspline2[[sppno]] <- Bspline
+      }
 
-        qrank <- npetc[7]  # Assume all species have the same qrank value
-        dim(ans1$etamat) <- c(M,n)    # was c(n,M) prior to 22/8/06
+      qrank <- npetc[7]  # Assume all species have the same qrank value
+      dim(ans1$etamat) <- c(M, n)  # was c(n, M) prior to 20060822
 
 
 
-        df1.nl  <- ans1$dofvec[1:NOS] - 1.0
-        lambda1 <- ans1$lamvec[1:NOS]
-        spar1   <- ans1$smopar[1:NOS]
-        if (Rank == 2) {
- stop("20100414; this isnt working yet")
-             df2.nl  <- ans1$dofvec[NOS + (1:NOS)] - 1.0
-             lambda2 <- ans1$lamvec[NOS + (1:NOS)]
-             spar2   <- ans1$smopar[NOS + (1:NOS)]
-        }
+      df1.nl  <- ans1$dofvec[1:NOS] - 1.0
+      lambda1 <- ans1$lamvec[1:NOS]
+      spar1   <- ans1$smopar[1:NOS]
+      if (Rank == 2) {
+ stop("20100414; this is not working yet")
+        df2.nl  <- ans1$dofvec[NOS + (1:NOS)] - 1.0
+        lambda2 <- ans1$lamvec[NOS + (1:NOS)]
+        spar2   <- ans1$smopar[NOS + (1:NOS)]
+      }
 
-        list(deviance = ans1$deviance[1],
-             alldeviance = ans1$deviance[-1],
-             bcoefficients = ans1$bcoefficients,
-             bindex = ans1$bindex,
-             Bspline = Bspline2,
-             Cmat = matrix(cmatrix, p2, Rank, dimnames=list(
-                           names(control$colx2.index), mynames5)),
-             coefficients = ans1$beta,
-             df1.nl = df1.nl,
-             df2.nl = if (Rank == 2) df2.nl else NULL,
-             df.residual = n*M - qrank - sum(ans1$df - 1),
-             fitted = ans1$fv,  # NOS x n
-             kindex = ans1$kindex,
-             lambda1 = lambda1,
-             lambda2 = if (Rank == 2) lambda2 else NULL,
-             predictors = matrix(ans1$etamat, n, M, byrow = TRUE),
-             wresiduals = ans1$zedd - t(ans1$etamat),   # n x M
-             spar1 = spar1,
-             spar2 = if (Rank == 2) spar2 else NULL)
-    } else
-        ans1$deviance[1]
-    flush.console()
-    returnans
+      list(deviance = ans1$deviance[1],
+           alldeviance = ans1$deviance[-1],
+           bcoefficients = ans1$bcoefficients,
+           bindex = ans1$bindex,
+           Bspline = Bspline2,
+           Cmat = matrix(cmatrix, p2, Rank, dimnames = list(
+                         names(control$colx2.index), mynames5)),
+           coefficients = ans1$beta,
+           df1.nl = df1.nl,
+           df2.nl = if (Rank == 2) df2.nl else NULL,
+           df.residual = n*M - qrank - sum(ans1$df - 1),
+           fitted = ans1$fv,  # NOS x n
+           kindex = ans1$kindex,
+           lambda1 = lambda1,
+           lambda2 = if (Rank == 2) lambda2 else NULL,
+           predictors = matrix(ans1$etamat, n, M, byrow = TRUE),
+           wresiduals = ans1$zedd - t(ans1$etamat),  # n x M
+           spar1 = spar1,
+           spar2 = if (Rank == 2) spar2 else NULL)
+    } else {
+      ans1$deviance[1]
+    }
+  flush.console()
+  returnans
 }
 
 
@@ -763,7 +800,6 @@ calldcaoc <- function(cmatrix,
 
 
 
-
   U <- NULL
 
 
@@ -782,68 +818,74 @@ calldcaoc <- function(cmatrix,
 
   cmatrix <- scale(cmatrix)
 
-  xmat2 <- xmat[,control$colx2.index, drop = FALSE]   #ccc
+  xmat2 <- xmat[, control$colx2.index, drop = FALSE]   #ccc
   numat <- xmat2 %*% matrix(cmatrix, p2, Rank)
   dim(numat) <- c(nrow(xmat), Rank)
-  temp.smooth.frame <- vector("list", 1+Rank) # Temporary makeshift frame
-  mynames5 <- if (Rank == 1) "lv" else paste("lv",1:Rank,sep = "")
+  temp.smooth.frame <- vector("list", 1+Rank)  # Temporary makeshift frame
+  mynames5 <- if (Rank == 1) "latvar" else paste("latvar", 1:Rank, sep = "")
   names(temp.smooth.frame) <- c("(Intercept)", mynames5)
-  temp.smooth.frame[[1]] <- rep(1, len=n)
-  for(uu in 1:Rank) {
-      temp.smooth.frame[[uu+1]] <- numat[,uu]
+  temp.smooth.frame[[1]] <- rep(1, len = n)
+  for (uu in 1:Rank) {
+    temp.smooth.frame[[uu+1]] <- numat[, uu]
   }
   temp.smooth.frame <- data.frame(temp.smooth.frame)
-  for(uu in 1:Rank) {
-    attr(temp.smooth.frame[,uu+1], "spar") <- 0 # any old value
-    attr(temp.smooth.frame[,uu+1], "df") <- 4 # any old value
+  for (uu in 1:Rank) {
+    attr(temp.smooth.frame[,uu+1], "spar") <- 0  # any old value
+    attr(temp.smooth.frame[,uu+1], "df") <- 4    # any old value
   }
-    pstar.  <- p1star.  + p2star. 
-    nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
-    NOS <- ifelse(modelno %in% c(3, 5), M / 2, M)
-    lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
-
-    if (TRUE) {
-        inited <- if (exists(".VGAM.CAO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
-        usethiseta <- if (inited == 1) get(".VGAM.CAO.etamat",
-            envir = VGAM:::VGAMenv) else t(etamat)
-    }
-    usethisbeta <- if (inited == 2) get(".VGAM.CAO.beta",
-        envir = VGAM:::VGAMenv) else double(lenbeta)
+  pstar.  <- p1star.  + p2star. 
+  nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
+  NOS <- ifelse(modelno %in% c(3, 5), M / 2, M)
+  lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
+
+  if (TRUE) {
+    inited <- if (exists(".VGAM.CAO.etamat", envir = VGAMenv))
+              1 else 0
+    usethiseta <- if (inited == 1)
+                  get(".VGAM.CAO.etamat", envir = VGAMenv) else
+                  t(etamat)
+  }
+  usethisbeta <- if (inited == 2)
+                 get(".VGAM.CAO.beta", envir = VGAMenv) else
+                 double(lenbeta)
 
 
 
 
 
- pstar <- NOS * pstar. 
-    bnumat <- if (Nice21) matrix(0,nstar,pstar) else stop("need 'Nice21'")
+  pstar <- NOS * pstar. 
+  bnumat <- if (Nice21)
+            matrix(0, nstar, pstar) else stop("need 'Nice21'")
 
-    M. <- MSratio <- M / NOS # 1 or 2 usually
+  M. <- MSratio <- M / NOS  # 1 or 2 usually
 
 
-    p1 <- 1
+  p1 <- 1
 
-    which <- p1 + (1:Rank)   # The first 1 is the intercept term
-    nwhich <- names(which) <- mynames5
+  which <- p1 + (1:Rank)  # The first 1 is the intercept term
+  nwhich <- names(which) <- mynames5
 
-    origBlist <- Blist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
-                                    which = which, p1 = p1) # For 1 species
-    ncolBlist. <- unlist(lapply(Blist. , ncol))
-    nu1mat <- cbind("(Intercept)" = 1, lv = numat)
-    dimnames(nu1mat) <- list(dimnames(xmat)[[1]], c("(Intercept)","lv"))
+  origBlist <- Blist. <-
+    create.cms(Rank = Rank, M = M., MSratio = MSratio,
+               which = which, p1 = p1)  # For 1 species
+  ncolBlist. <- unlist(lapply(Blist. , ncol))
+    nu1mat <- cbind("(Intercept)" = 1, latvar = numat)
+    dimnames(nu1mat) <- list(dimnames(xmat)[[1]],
+                             c("(Intercept)", "latvar"))
 
-    smooth.frame <- s.vam(x = nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
-                    which = which,
-                    smooth.frame = temp.smooth.frame,
-                    bf.maxit = control$bf.maxit,
-                    bf.epsilon = control$bf.epsilon,
-                    trace = FALSE, se.fit = control$se.fit,
-                    X_vlm_save = bnumat, Blist = Blist.,
-                    ncolBlist = ncolBlist. ,
-                    M = M. , qbig = NULL,
+    smooth.frame <- s.vam(x = nu1mat, zedd = NULL, wz = NULL,
+                          smomat = NULL, which = which,
+                          smooth.frame = temp.smooth.frame,
+                          bf.maxit = control$bf.maxit,
+                          bf.epsilon = control$bf.epsilon,
+                          trace = FALSE, se.fit = control$se.fit,
+                          X.vlm.save = bnumat, Blist = Blist.,
+                          ncolBlist = ncolBlist. ,
+                          M = M. , qbig = NULL,
 
-                    Umat = U, # NULL value ==> not needed
-                    all.knots = control$all.knots, nk = NULL,
-                    sf.only = TRUE)
+                          Umat = U,  # NULL value ==> not needed
+                          all.knots = control$all.knots, nk = NULL,
+                          sf.only = TRUE)
 
     ldk <- 4 * max(ncolBlist.[nwhich])   # was M;     # Prior to 11/7/02
     ldk <- 3 * max(ncolBlist.[nwhich]) + 1   # 11/7/02
@@ -859,12 +901,12 @@ calldcaoc <- function(cmatrix,
 
 
 
-    queue <- qbig <- Rank # 19/10/05; number of smooths per species
+    queue <- qbig <- Rank  # 20051019; number of smooths per species
 
 
 
     Blist.[[1]] <- NULL
-    trivc <- rep(2 - M. , len = queue) # All of queue smooths are basic smooths
+    trivc <- rep(2 - M. , len = queue)
     ncbvec <- ncolBlist.[nwhich]
     ncolb <- max(ncbvec)
 
@@ -872,61 +914,60 @@ calldcaoc <- function(cmatrix,
     qbig. <- NOS * qbig    # == NOS * Rank
     pbig <- pstar. # Not sure
     if (FALSE) {
-        df1.nl <- rep(control$df1.nl, len = NOS)  # This is used
-        df2.nl <- rep(control$df2.nl, len = NOS)  # This is used
-        spar1  <- rep(control$spar1,  len = NOS)  # This is used
-        spar2  <- rep(control$spar2,  len = NOS)  # This is used
+      df1.nl <- rep(control$df1.nl, len = NOS)  # This is used
+      df2.nl <- rep(control$df2.nl, len = NOS)  # This is used
+      spar1  <- rep(control$spar1,  len = NOS)  # This is used
+      spar2  <- rep(control$spar2,  len = NOS)  # This is used
     } else {
-        # This is used
-        df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
-        df2.nl <- df1.nl  # 20100417; stopgap
-        spar1  <- procVec(control$spar1,  yn = yn , Default = control$SPAR1)
-        spar2  <- spar1  # 20100417; stopgap
-        dofvec <- c(df1.nl, df2.nl)
-        lamvec <- 0 * dofvec
-        smopar <- c(spar1, spar2)
+      df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
+      df2.nl <- df1.nl  # 20100417; stopgap
+      spar1  <- procVec(control$spar1,  yn = yn , Default = control$SPAR1)
+      spar2  <- spar1  # 20100417; stopgap
+      dofvec <- c(df1.nl, df2.nl)
+      lamvec <- 0 * dofvec
+      smopar <- c(spar1, spar2)
     }
 
 
 
 
 
-    contr.sp <- list(low = -1.5,## low = 0.      was default till R 1.3.x
+    contr.sp <- list(low = -1.5,  ## low = 0.      was default till R 1.3.x
                      high = 1.5,
-                     tol = 1e-4,## tol = 0.001   was default till R 1.3.x
-                     eps = 2e-8,## eps = 0.00244 was default till R 1.3.x
+                     tol = 1e-4,  ## tol = 0.001   was default till R 1.3.x
+                     eps = 2e-8,  ## eps = 0.00244 was default till R 1.3.x
                      maxit = 500 )
 
-  if (FALSE)
-    contr.sp <- list(low = -1.5,## low = 0.      was default till R 1.3.x
-                     high = 1.5,
-                     tol = 0.001,     # was default till R 1.3.x
-                     eps = 0.00244,   # was default till R 1.3.x
-                     maxit = 500 )
 
 
 warning("20100405; this is old:")
-    npetc <- c(n = n, p = 1+Rank, length(which), se.fit = control$se.fit, 0,
+    npetc <-
+      c(n = n, p = 1+Rank, length(which), se.fit = control$se.fit, 0,
         maxitl = control$maxitl, qrank = 0, M =  M. , n.M = n* M. ,
-            pbig = sum( ncolBlist.),
-        qbig = qbig, dimw =  dimw. , dim1U =  dim1U. , ierror = 0, ldk = ldk)
+          pbig = sum( ncolBlist.),
+        qbig = qbig, dimw =  dimw. , dim1U =  dim1U. ,
+          ierror = 0, ldk = ldk)
 
 warning("20100405; this is new:")
     npetc <- c(n = nrow(nu1mat), p.  = ncol(nu1mat),
-                  q = length(which),
-                  se.fit = control$se.fit, 0,
-        control$bf.maxit, qrank = 0, M =  M. , nbig = nstar, pbig = pbig,
-        qbig = qbig, dim2wz =  dimw. , dim1U =  dim1U. , ierror = 0, ldk = ldk,
-        contr.sp$maxit, iinfo = 0)
+               q = length(which),
+               se.fit = control$se.fit, 0,
+    control$bf.maxit, qrank = 0, M =  M. , nbig = nstar, pbig = pbig,
+    qbig = qbig, dim2wz =  dimw. , dim1U =  dim1U. , ierror = 0, ldk = ldk,
+    contr.sp$maxit, iinfo = 0)
 
     flush.console()
 
-    ans1 <- 
-    dotC(name = if (Nice21) "vdcao6" else stop("need 'Nice21'"),
+    if (!Nice21)
+      stop("need 'Nice21'")
+
+    ans1 <- .C("vdcao6",
     numat = as.double(numat), as.double(ymat), as.double(wvec),
-    etamat = as.double(usethiseta), fv = double(NOS*n), zedd = double(n*M),
-    wz = double(n*M), U = double(M*n), # bnumat = as.double(bnumat),
-    qr = double(nstar*pstar.), qraux = double(pstar.), qpivot = integer(pstar.),
+    etamat = as.double(usethiseta), fv = double(NOS*n),
+      zedd = double(n*M),
+    wz = double(n*M), U = double(M*n),  # bnumat = as.double(bnumat),
+    qr = double(nstar*pstar.), qraux = double(pstar.),
+      qpivot = integer(pstar.),
     as.integer(n), as.integer(M), NOS = as.integer(NOS),
         as.integer(nstar), dim1U = as.integer(M),
     errcode = integer(1), othint = as.integer(othint),
@@ -934,84 +975,94 @@ warning("20100405; this is new:")
     othdbl = as.double(othdbl),
     as.double(xmat2),
     cmat = as.double(cmatrix),
-    p2 = as.integer(p2), deriv = double(p2*Rank),
+    p2 = as.integer(p2), deriv = double(p2 * Rank),
     betasave = double(lenbeta), 
     npetc = as.integer(npetc), M. = as.integer( M. ),
     dofvec = as.double(dofvec + 1.0),
     lamvec = as.double(0 * dofvec),
     smopar = as.double(smopar),
-    match = as.integer(smooth.frame$o), as.integer(smooth.frame$nef), 
+    match = as.integer(smooth.frame$matcho),
+    as.integer(smooth.frame$nef), 
     as.integer(which),
     smomat = as.double(matrix(0, n, qbig. )),
         nu1mat = as.double(nu1mat),
     as.double(unlist( Blist. )),
     as.integer(ncbvec), smap = as.integer(1:(Rank+1)),
     trivc = as.integer(trivc),
-    levmat = as.double(matrix(0, n, qbig. )),
-    bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
+
+
+
+
+  levmat = double(NOS * sum(smooth.frame$neffec * ncbvec)),
+
+
+
+    bcoefficients = double(NOS * sum(smooth.frame$nknots * ncbvec)),
     xknots = as.double(unlist(smooth.frame$knots)),
     bindex = as.integer(smooth.frame$bindex),
+    lindex = as.integer(smooth.frame$lindex),
     nknots = as.integer(smooth.frame$nknots),
-    kindex = as.integer(smooth.frame$kindex))
+    kindex = as.integer(smooth.frame$kindex), PACKAGE = "VGAM")
         flush.console()
 
-         assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAM:::VGAMenv)
-         assign(".VGAM.CAO.z", ans1$zedd, envir = VGAM:::VGAMenv) # z; minus any offset
-         assign(".VGAM.CAO.U", ans1$U, envir = VGAM:::VGAMenv)  # U
+         assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAMenv)
+         assign(".VGAM.CAO.z", ans1$zedd, envir = VGAMenv)
+         assign(".VGAM.CAO.U", ans1$U, envir = VGAMenv)  # U
        if (ans1$errcode == 0) {
        } else {
-           cat("warning in calldcaoc: error code  = ", ans1$errcode, "\n")
-           flush.console()
+         cat("warning in calldcaoc: error code  = ", ans1$errcode, "\n")
+         flush.console()
        }
 
-    returnans <- if (alldump) {
-        bindex <- ans1$bindex
-        ncolBlist <- ncbvec
-        Bspline2 <- vector("list", NOS)
-        names(Bspline2) <- dimnames(ymat)[[2]]
-        Bspline <- vector("list", length(nwhich))
-        names(Bspline) <- nwhich
-        ind9 <- 0   # moving index
-        for(jay in 1:NOS) {
-            for(ii in 1:length(nwhich)) {
-                ind9 <- ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
-                ans <- ans1$bcoeff[ind9]
-                ans <- matrix(ans, ncol = ncolBlist[nwhich[ii]])
-                Bspline[[ii]] <- new(Class = "vsmooth.spline.fit",
-                    "Bcoefficients" = ans,
-                    "xmax"          = smooth.frame$xmax[ii],
-                    "xmin"          = smooth.frame$xmin[ii],
-                    "knots"         = as.vector(smooth.frame$knots[[ii]]))
-            }
-            Bspline2[[jay]] <- Bspline
-        }
-
-        qrank <- npetc[7]  # Assume all species have the same qrank value
-        dim(ans1$etamat) <- c(M,n)   # bug: was c(n,M) prior to 22/8/06
-        list(deviance    = ans1$deviance[1],
-             alldeviance = ans1$deviance[-1],
-             bcoefficients = ans1$bcoefficients,
-             bindex = ans1$bindex,
-             Bspline = Bspline2,
-             Cmat=matrix(cmatrix, p2, Rank, dimnames=list(
-                         names(control$colx2.index), mynames5)),
-             coefficients=ans1$beta,
-             df1.nl = ans1$dofvec[1:NOS] - 1,
-             df2.nl = if (Rank == 2) ans1$dofvec[2*(1:NOS) - 1] - 1 else NULL,
-             lambda1 = ans1$lambda[1:NOS],
-             lambda2 = if (Rank == 2) ans1$lambda[2*(1:NOS) - 1] else NULL,
-             df.residual = n*M - qrank - sum(ans1$df - 1),
-             fitted=ans1$fv,
-             kindex = ans1$kindex,
-             predictors=matrix(ans1$etamat, n, M, byrow = TRUE),
-             wresiduals = ans1$zedd - t(ans1$etamat),   # n x M
-             spar1 = ans1$smopar[1:NOS],
-             spar2 = if (Rank == 2) ans1$smopar[2*(1:NOS) - 1] else NULL)
-    } else {
-        ans1$deriv
+  returnans <- if (alldump) {
+    bindex <- ans1$bindex
+    ncolBlist <- ncbvec
+    Bspline2 <- vector("list", NOS)
+    names(Bspline2) <- dimnames(ymat)[[2]]
+    Bspline <- vector("list", length(nwhich))
+    names(Bspline) <- nwhich
+    ind9 <- 0   # moving index
+    for (jay in 1:NOS) {
+      for (ii in 1:length(nwhich)) {
+        ind9 <- ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
+        ans <- ans1$bcoeff[ind9]
+        ans <- matrix(ans, ncol = ncolBlist[nwhich[ii]])
+        Bspline[[ii]] <-
+          new(Class = "vsmooth.spline.fit",
+              "Bcoefficients" = ans,
+              "xmax"          = smooth.frame$xmax[ii],
+              "xmin"          = smooth.frame$xmin[ii],
+              "knots"         = as.vector(smooth.frame$knots[[ii]]))
+      }
+      Bspline2[[jay]] <- Bspline
     }
-    flush.console()
-    returnans 
+
+    qrank <- npetc[7]  # Assume all species have the same qrank value
+    dim(ans1$etamat) <- c(M, n)  # bug: was c(n, M) prior to 20060822
+    list(deviance    = ans1$deviance[1],
+         alldeviance = ans1$deviance[-1],
+         bcoefficients = ans1$bcoefficients,
+         bindex = ans1$bindex,
+         Bspline = Bspline2,
+         Cmat = matrix(cmatrix, p2, Rank, dimnames = list(
+                     names(control$colx2.index), mynames5)),
+         coefficients = ans1$beta,
+         df1.nl = ans1$dofvec[1:NOS] - 1,
+         df2.nl = if (Rank == 2) ans1$dofvec[2 * (1:NOS) - 1] - 1 else NULL,
+         lambda1 = ans1$lambda[1:NOS],
+         lambda2 = if (Rank == 2) ans1$lambda[2 * (1:NOS) - 1] else NULL,
+         df.residual = n * M - qrank - sum(ans1$df - 1),
+         fitted = ans1$fv,
+         kindex = ans1$kindex,
+         predictors = matrix(ans1$etamat, n, M, byrow = TRUE),
+         wresiduals = ans1$zedd - t(ans1$etamat),  # n x M
+         spar1 = ans1$smopar[1:NOS],
+         spar2 = if (Rank == 2) ans1$smopar[2 * (1:NOS) - 1] else NULL)
+  } else {
+    ans1$deriv
+  }
+  flush.console()
+  returnans 
 }
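
For orientation, a minimal sketch of the kind of user-level cao() fit that
eventually drives the internal routine above, and which the accessor
sketches further below reuse.  The hspider data frame, the poissonff
family and the tuning values are illustrative assumptions taken from the
package documentation, not anything prescribed by this changeset:

    library(VGAM)
    data(hspider)
    hspider[, 1:6] <- scale(hspider[, 1:6])  # standardize environmental variables
    set.seed(123)  # cao() starts from random canonical coefficients
    fit <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
                 WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
               family = poissonff, data = hspider,
               Rank = 1, Bestof = 3, df1.nl = 2.5)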
 
 
@@ -1027,51 +1078,58 @@ setClass(Class = "Coef.cao", representation(
       "df2.nl"       = "numeric",
       "dispersion"   = "numeric",
       "eta2"         = "matrix",
-      "lv"           = "matrix",
-      "lvOrder"      = "matrix",
+      "latvar"       = "matrix",
+      "latvar.order" = "matrix",
       "M"            = "numeric",
       "Maximum"      = "numeric",
       "NOS"          = "numeric",
       "Optimum"      = "matrix",
-      "OptimumOrder" = "matrix",
+      "Optimum.order"= "matrix",
       "Rank"         = "numeric",
       "spar1"        = "numeric",
       "spar2"        = "numeric"))
 
 
+
+
+
+
+
 Coef.cao <- function(object,
-    epsOptimum = 0.00001, # determines how accurately Optimum is estimated
-    gridlen = 40, # Number of points on the grid (one level at a time)
-    maxgriditer = 10, # Maximum number of iterations allowed for grid search
-    smallno = 0.05,
-    ...) {
+    epsOptimum = 0.00001,  # Determines how accurately Optimum is estimated
+    gridlen = 40,      # Number of points on the grid (one level at a time)
+    maxgriditer = 10,  # Maximum number of iters allowed for grid search
+    smallno = 0.05, ...) {
 
-  if (!is.Numeric(epsOptimum, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(epsOptimum, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'epsOptimum'")
   if (!is.Numeric(gridlen, positive = TRUE, integer.valued = TRUE) ||
       gridlen < 5)
     stop("bad input for argument 'gridlen'")
   if (!is.Numeric(maxgriditer, positive = TRUE,
-                  allowable.length = 1, integer.valued = TRUE) ||
+                  length.arg = 1, integer.valued = TRUE) ||
       maxgriditer < 3)
     stop("bad input for argument 'maxgriditer'")
   if (!is.logical(ConstrainedO <- object at control$ConstrainedO))
     stop("cannot determine whether the model is constrained or not")
-  if (!is.Numeric(smallno, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(smallno, positive = TRUE, length.arg = 1) ||
      smallno > 0.5 || smallno < 0.0001)
     stop("bad input for argument 'smallno'")
 
+
   ocontrol <- object at control
   if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2") 
-  gridlen <- rep(gridlen, length=Rank)
+  gridlen <- rep(gridlen, length = Rank)
   M <- if (any(slotNames(object) == "predictors") &&
-         is.matrix(object at predictors)) ncol(object at predictors) else
-         object at misc$M
+           is.matrix(object at predictors))
+       ncol(object at predictors) else
+       object at misc$M
   NOS <- if (length(object at y)) ncol(object at y) else M
-    MSratio <- M / NOS # 1 or 2; First value is g(mean)=quadratic form in lv
+    MSratio <- M / NOS  # 1 or 2; First value is g(mean)=quadratic form in latvar
     nice21 <- (length(ocontrol$colx1.index) == 1) &&
-             (names(ocontrol$colx1.index) == "(Intercept)")
-    if (!nice21) stop("Can only handle 'noRRR = ~ 1'")
+              (names(ocontrol$colx1.index) == "(Intercept)")
+    if (!nice21)
+      stop("Can only handle 'noRRR = ~ 1'")
 
     p1 <- length(ocontrol$colx1.index)
     p2 <- length(ocontrol$colx2.index)
@@ -1083,193 +1141,199 @@ Coef.cao <- function(object,
     lp.names <- object at misc$predictors.names
     if (!length(lp.names)) lp.names <- NULL 
 
-    lv.names <- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
-    Cmat <- object at extra$Cmat   # p2 x Rank (provided maxitl > 1)
+    latvar.names <-
+      if (Rank == 1) "latvar" else paste("latvar", 1:Rank, sep = "")
+    Cmat <- object at extra$Cmat  # p2 x Rank (provided maxitl > 1)
     if (ConstrainedO)
-        dimnames(Cmat) <- list(names(ocontrol$colx2.index), lv.names)
-    lv.mat <- if (ConstrainedO) {
-        object at x[,ocontrol$colx2.index, drop = FALSE] %*% Cmat 
+      dimnames(Cmat) <- list(names(ocontrol$colx2.index), latvar.names)
+    latvar.mat <- if (ConstrainedO) {
+      object at x[, ocontrol$colx2.index, drop = FALSE] %*% Cmat 
     } else {
-        object at lv
+      object at latvar
     }
 
     optimum <- matrix(as.numeric(NA), Rank, NOS,
-                     dimnames = list(lv.names, ynames))
-    extents <- apply(lv.mat, 2, range)  # 2 by R
+                      dimnames = list(latvar.names, ynames))
+    extents <- apply(latvar.mat, 2, range)  # 2 by R
 
-    maximum <- rep(as.numeric(NA), len=NOS)
+    maximum <- rep(as.numeric(NA), len = NOS)
 
-    whichSpecies <- 1:NOS  # Do it for all species
+    which.species <- 1:NOS  # Do it for all species
     if (Rank == 1) {
-        gridd <- cbind(seq(extents[1,1], extents[2,1], len=gridlen))
+      gridd <- cbind(seq(extents[1,1], extents[2,1], len = gridlen))
     } else {
-        gridd <- expand.grid(seq(extents[1,1], extents[2,1], len=gridlen[1]),
-                             seq(extents[1,2], extents[2,2], len=gridlen[2]))
-        eta2matrix <- matrix(0, NOS, 1)
+      gridd <- expand.grid(seq(extents[1,1], extents[2,1], len = gridlen[1]),
+                           seq(extents[1,2], extents[2,2], len = gridlen[2]))
+      eta2matrix <- matrix(0, NOS, 1)
     }
     gridd.orig <- gridd
-    # if (Rank == 2) then this is for initial values
-    for(sppno in 1:length(whichSpecies)) {
-        gridd <- gridd.orig 
-        gridres1 <- gridd[2,1] - gridd[1,1]
-        gridres2 <- if (Rank == 2) gridd[2,2] - gridd[1,2] else 0
-        griditer <- 1
-
-        thisSpecies <- whichSpecies[sppno]
-        indexSpecies <- if (is.character(whichSpecies))
-            match(whichSpecies[sppno], ynames) else whichSpecies[sppno]
-
-        if (is.na(indexSpecies))
-            stop("mismatch found in 'whichSpecies'")
-
-        while(griditer == 1 ||
-              ((griditer <= maxgriditer) &&
-              ((gridres1 > epsOptimum) ||
-               (gridres2 > epsOptimum)))) {
-            temp <- predictcao(object, grid=gridd, sppno=thisSpecies,
-                              Rank=Rank, deriv = 0, MSratio=MSratio)
-            yvals <- temp$yvals  # gridlen-vector
-            xvals <- temp$xvals  # gridlen x Rank; gridd
-            if (length(temp$eta2)) eta2matrix[sppno,1] <- temp$eta2
-
-            nnn <- length(yvals)
-            index <- (1:nnn)[yvals == max(yvals)]
-            if (length(index) != 1) warning("could not find a single maximum")
-            if (Rank == 2) {
-                initvalue <- rep(xvals[index,], length=Rank) # for optim()
-                # Make sure initvalue is in the interior
-                if (abs(initvalue[1] - extents[1,1]) < smallno)
-                    initvalue[1] <- extents[1,1] + smallno
-                if (abs(initvalue[1] - extents[2,1]) < smallno)
-                    initvalue[1] <- extents[2,1] - smallno
-                if (abs(initvalue[2] - extents[1,2]) < smallno)
-                    initvalue[2] <- extents[1,2] + smallno
-                if (abs(initvalue[2] - extents[2,2]) < smallno)
-                    initvalue[2] <- extents[2,2] - smallno
-                break
-            }
-            if (index == 1 || index == nnn) {
-                maximum[sppno] <- optimum[1,sppno] <- NA
-                gridres1 <- epsOptimum + 1 # equivalent to a break
-                break          # just in case
-            } else {
-                maximum[sppno] <- yvals[index] # on the eta scale
-                optimum[1,sppno] <- xvals[index,1]
-                gridd[,1] <- seq(
-                    max(extents[1,1], optimum[1,sppno]-gridres1),
-                    min(extents[2,1], optimum[1,sppno]+gridres1),
-                    len=gridlen)
-                gridres1 <- gridd[2,1] - gridd[1,1]
-                griditer <- griditer + 1
-            }
-        } # of while 
-
+    for (sppno in 1:length(which.species)) {
+      gridd <- gridd.orig 
+      gridres1 <- gridd[2, 1] - gridd[1, 1]
+      gridres2 <- if (Rank == 2) gridd[2, 2] - gridd[1, 2] else 0
+      griditer <- 1
+
+      thisSpecies <- which.species[sppno]
+      indexSpecies <- if (is.character(which.species))
+          match(which.species[sppno], ynames) else which.species[sppno]
+
+      if (is.na(indexSpecies))
+        stop("mismatch found in 'which.species'")
+
+      while(griditer == 1 ||
+            ((griditer <= maxgriditer) &&
+            ((gridres1 > epsOptimum) ||
+             (gridres2 > epsOptimum)))) {
+        temp <- predictcao(object, grid = gridd, sppno = thisSpecies,
+                           Rank = Rank, deriv = 0, MSratio = MSratio)
+        yvals <- temp$yvals  # gridlen-vector
+        xvals <- temp$xvals  # gridlen x Rank; gridd
+        if (length(temp$eta2)) eta2matrix[sppno, 1] <- temp$eta2
+
+        nnn <- length(yvals)
+        index <- (1:nnn)[yvals == max(yvals)]
+        if (length(index) != 1)
+          warning("could not find a single maximum")
         if (Rank == 2) {
-          # Rank = 2, so use optim(). The above was to get initial values.
-            myfun <- function(x, object, sppno, Rank = 1, deriv = 0, MSratio = 1) {
-                # x is a 2-vector
-                x <- matrix(x, 1, length(x))
-                temp <- predictcao(object, grid = x, sppno = sppno,
-                                  Rank = Rank, deriv = deriv, MSratio = MSratio)
-                temp$yval
-            }
-            answer <- optim(initvalue, myfun, gr = NULL, method = "L-BFGS-B",
-                           lower = extents[1,], upper = extents[2,],
-                           control = list(fnscale = -1),  # maximize!
-                           object = object, sppno = sppno, Rank = Rank,
-                           deriv = 0, MSratio = MSratio)
-            # Check to see if the soln is @ boundary. If not, assign it.
-            for(rindex in 1:Rank)
-              if (abs(answer$par[rindex] - extents[1,rindex]) > smallno &&
-                 abs(answer$par[rindex] - extents[2,rindex]) > smallno) {
-                  optimum[rindex,sppno] <- answer$par[rindex]
-                  maximum[sppno] <- answer$value
-              }
-        } # end of Rank = 2
-    } # end of sppno 
+          initvalue <- rep(xvals[index,], length = Rank)  # for optim()
+          if (abs(initvalue[1] - extents[1, 1]) < smallno)
+            initvalue[1] <- extents[1, 1] + smallno
+          if (abs(initvalue[1] - extents[2, 1]) < smallno)
+            initvalue[1] <- extents[2, 1] - smallno
+          if (abs(initvalue[2] - extents[1, 2]) < smallno)
+            initvalue[2] <- extents[1, 2] + smallno
+          if (abs(initvalue[2] - extents[2, 2]) < smallno)
+            initvalue[2] <- extents[2, 2] - smallno
+          break
+        }
+        if (index == 1 || index == nnn) {
+          maximum[sppno] <- optimum[1,sppno] <- NA
+          gridres1 <- epsOptimum + 1  # equivalent to a break
+          break  # just in case
+        } else {
+          maximum[sppno] <- yvals[index]  # on the eta scale
+          optimum[1,sppno] <- xvals[index,1]
+          gridd[,1] <- seq(
+                  max(extents[1, 1], optimum[1,sppno] - gridres1),
+                  min(extents[2, 1], optimum[1,sppno] + gridres1),
+                  len = gridlen)
+          gridres1 <- gridd[2, 1] - gridd[1, 1]
+          griditer <- griditer + 1
+        }
+      } # of while 
+
+      if (Rank == 2) {
+        myfun <- function(x, object, sppno, Rank = 1,
+                          deriv = 0, MSratio = 1) {
+          x <- matrix(x, 1, length(x))
+          temp <- predictcao(object, grid = x, sppno = sppno,
+                             Rank = Rank, deriv = deriv, MSratio = MSratio)
+          temp$yval
+        }
+        answer <- optim(initvalue, myfun, gr = NULL, method = "L-BFGS-B",
+                        lower = extents[1, ], upper = extents[2, ],
+                        control = list(fnscale = -1),  # maximize!
+                        object = object, sppno = sppno, Rank = Rank,
+                        deriv = 0, MSratio = MSratio)
+        for (rindex in 1:Rank)
+          if (abs(answer$par[rindex] - extents[1, rindex]) > smallno &&
+              abs(answer$par[rindex] - extents[2, rindex]) > smallno) {
+            optimum[rindex,sppno] <- answer$par[rindex]
+            maximum[sppno] <- answer$value
+          }
+      }  # end of Rank = 2
+    }  # end of sppno 
     myetamat <- rbind(maximum)
-    if (MSratio == 2) myetamat <- kronecker(myetamat, matrix(1:0, 1, 2))
+    if (MSratio == 2)
+      myetamat <- kronecker(myetamat, matrix(1:0, 1, 2))
     maximum <- object at family@linkinv(eta = myetamat, extra = object at extra)
     maximum <- c(maximum)  # Convert from matrix to vector 
     names(maximum) <- ynames
 
     ans <- new(Class = "Coef.cao",
-              Bspline = object at Bspline,
-              Constrained=ConstrainedO,
-              df1.nl = object at extra$df1.nl,
-              lv = lv.mat,
-              lvOrder = lv.mat,
-              Maximum = maximum,
-              M = M,
-              NOS = NOS, 
-              Optimum=optimum, 
-              OptimumOrder=optimum, 
-              Rank = Rank,
-              spar1 = object at extra$spar1)
-    if (ConstrainedO) {ans at C = Cmat} else {Cmat = NULL}
+               Bspline = object at Bspline,
+               Constrained = ConstrainedO,
+               df1.nl = object at extra$df1.nl,
+               latvar = latvar.mat,
+               latvar.order = latvar.mat,
+               Maximum = maximum,
+               M = M,
+               NOS = NOS, 
+               Optimum = optimum, 
+               Optimum.order = optimum, 
+               Rank = Rank,
+               spar1 = object at extra$spar1)
+    if (ConstrainedO) {
+      ans at C <- Cmat
+    } else {
+      Cmat <- NULL
+    }
     if (Rank == 2) {
-        dimnames(eta2matrix) <- list(
-            object at misc$predictors.names[c(FALSE,TRUE)], " ")
-        ans at eta2 <- eta2matrix
-        ans at df2.nl <- object at extra$df2.nl 
-        ans at spar2  <- object at extra$spar2
+      dimnames(eta2matrix) <-
+        list(object at misc$predictors.names[c(FALSE, TRUE)], " ")
+      ans at eta2 <- eta2matrix
+      ans at df2.nl <- object at extra$df2.nl 
+      ans at spar2  <- object at extra$spar2
     }
 
-    for(rindex in 1:Rank) {
-        ans at OptimumOrder[rindex,] <- order(ans at Optimum[rindex,])
-        ans at lvOrder[,rindex] <- order(ans at lv[,rindex])
+    for (rindex in 1:Rank) {
+      ans at Optimum.order[rindex, ] <- order(ans at Optimum[rindex, ])
+      ans at latvar.order[, rindex]  <- order(ans at latvar[, rindex])
     }
 
-    if (length(object at misc$estimated.dispersion) &&
-       object at misc$estimated.dispersion) {
-        p <- length(object at coefficients)
-        n <- object at misc$n
-        M <- object at misc$M
-        NOS <- if (length(object at y)) ncol(object at y) else M
-        pstar <- p + length(Cmat) # Adjustment 
-        adjusted.dispersion <- object at misc$dispersion *
-                              (n*M - p) / (n*M - pstar)
-        ans at dispersion <- adjusted.dispersion 
-    }
-    if (MSratio == 2) {
-        lcoef <- object at coefficients
-        temp <- lcoef[((1:NOS)-1)*(2+Rank)+2]
-        names(temp) <- object at misc$predictors.names[2*(1:NOS)]
-        ans at dispersion <- temp
-    }
-    dimnames(ans at Optimum) <- list(lv.names, ynames)
-    ans 
+  if (length(object at misc$estimated.dispersion) &&
+      object at misc$estimated.dispersion) {
+    p <- length(object at coefficients)
+    n <- object at misc$n
+    M <- object at misc$M
+    NOS <- if (length(object at y)) ncol(object at y) else M
+    pstar <- p + length(Cmat)  # Adjustment 
+    adjusted.dispersion <- object at misc$dispersion *
+                           (n * M - p) / (n * M - pstar)
+    ans at dispersion <- adjusted.dispersion 
+  }
+  if (MSratio == 2) {
+    lcoef <- object at coefficients
+    temp <- lcoef[((1:NOS)-1) * (2+Rank)+2]
+    names(temp) <- object at misc$predictors.names[2 * (1:NOS)]
+    ans at dispersion <- temp
+  }
+  dimnames(ans at Optimum) <- list(latvar.names, ynames)
+  ans 
 }
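
A brief sketch of the extractor defined above in use, continuing the
illustrative fit from the earlier sketch ('fit' is an assumed object name,
not part of the changeset):

    cfit <- Coef(fit)  # dispatches to Coef.cao(): grid search, plus optim() when Rank = 2
    cfit               # printed via show.Coef.cao() below (C matrix, optima, maxima, df)
    concoef(fit)       # the constrained (canonical) coefficient matrix C on its own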
 
 
-show.Coef.cao <-
-  function(object, digits = max(2, options()$digits-2), ...) {
-    Rank <- object at Rank
-    NOS <- object at NOS
-    M <- object at M
 
-    Maximum <- if (length(object at Maximum))
-              cbind(Maximum = object at Maximum) else NULL
-    optmat <- cbind(t(object at Optimum))
-    dimnames(optmat) <- list(dimnames(optmat)[[1]],
-        if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep = ".")
-        else "Optimum")
+show.Coef.cao <- function(object,
+                          digits = max(2, options()$digits-2), ...) {
+  Rank <- object at Rank
+  NOS <- object at NOS
+  M <- object at M
 
-    if ( object at Constrained ) {
-        cat("\nC matrix (constrained/canonical coefficients)\n")
-        print(object at C, digits = digits, ...)
-    }
-    cat("\nOptima and maxima\n")
-    print(cbind(Optimum = optmat,
-                Maximum), digits = max(1, digits-1))
-    cat("\nNonlinear degrees of freedom\n")
-    if (Rank == 1) {
-        print(cbind(df1.nl = object at df1.nl), digits = max(2, digits-1), ...)
-    } else {
-        print(cbind(df1.nl = object at df1.nl,
-                    df2.nl = object at df2.nl), digits = max(2, digits-1), ...)
-    }
-    invisible(object)
+  Maximum <- if (length(object at Maximum))
+             cbind(Maximum = object at Maximum) else NULL
+  optmat <- cbind(t(object at Optimum))
+  dimnames(optmat) <- list(dimnames(optmat)[[1]],
+                           if (Rank > 1)
+                           paste("Optimum",
+                                 dimnames(optmat)[[2]], sep = ".") else
+                                 "Optimum")
+
+  if ( object at Constrained ) {
+    cat("\nC matrix (constrained/canonical coefficients)\n")
+    print(object at C, digits = digits, ...)
+  }
+  cat("\nOptima and maxima\n")
+  print(cbind(Optimum = optmat,
+              Maximum), digits = max(1, digits-1))
+  cat("\nNonlinear degrees of freedom\n")
+  if (Rank == 1) {
+    print(cbind(df1.nl = object at df1.nl), digits = max(2, digits-1), ...)
+  } else {
+    print(cbind(df1.nl = object at df1.nl,
+                df2.nl = object at df2.nl), digits = max(2, digits-1), ...)
+  }
+  invisible(object)
 }
 
 
@@ -1277,7 +1341,7 @@ show.Coef.cao <-
 
 
 setMethod("show", "Coef.cao", function(object)
-    show.Coef.cao(object))
+  show.Coef.cao(object))
 
 
 
@@ -1292,122 +1356,131 @@ setMethod("Coef", "cao", function(object, ...) Coef.cao(object, ...))
 
 
 lvplot.cao <- function(object,
-          add = FALSE, plot.it = TRUE, rugplot = TRUE, y = FALSE, 
+          add = FALSE, show.plot = TRUE, rugplot = TRUE, y = FALSE, 
           type = c("fitted.values", "predictors"),
-          xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
-          ylab = if (Rank == 1) switch(type, predictors = "Predictors", 
+          xlab = paste("Latent Variable",
+                       if (Rank == 1) "" else " 1", sep = ""),
+          ylab = if (Rank == 1) switch(type, predictors = "Predictors",
               fitted.values = "Fitted values") else "Latent Variable 2",
-          pcex=par()$cex, pcol=par()$col, pch=par()$pch, 
-          llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
+          pcex = par()$cex, pcol = par()$col, pch = par()$pch, 
+          llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
           label.arg= FALSE, adj.arg=-0.5, 
-          sites= FALSE, spch = NULL, scol=par()$col, scex=par()$cex,
-          sfont=par()$font,
-          whichSpecies = NULL,
+          sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
+          sfont = par()$font,
+          which.species = NULL,
           check.ok = TRUE, ...) {
     type <- match.arg(type, c("fitted.values", "predictors"))[1]
 
     if ((Rank <- object at control$Rank) > 2)
-        stop("can only handle 'Rank' = 1 or 2 models")
+      stop("can only handle 'Rank' = 1 or 2 models")
     M <- if (any(slotNames(object) == "predictors") &&
-           is.matrix(object at predictors)) ncol(object at predictors) else
-           object at misc$M
+             is.matrix(object at predictors))
+         ncol(object at predictors) else
+         object at misc$M
     NOS <- ncol(object at y)
-    MSratio <- M / NOS  # First value is g(mean) = quadratic form in lv
+    MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
     n <- object at misc$n
     colx2.index <- object at control$colx2.index
     cx1i <- object at control$colx1.index
-    if (!length(whichSpecies)) whichSpecies <- 1:NOS
+    if (!length(which.species))
+      which.species <- 1:NOS
     if (check.ok)
-    if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
-        stop("latent variable plots allowable only for 'noRRR = ~ 1' models")
+      if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
+          stop("latent variable plots allowable only ",
+               "for 'noRRR = ~ 1' models")
 
     Coeflist <- Coef(object)
     Cmat <- Coeflist at C
-    lvmat <- Coeflist at lv # n x Rank 
+    latvarmat <- Coeflist at latvar  # n x Rank 
+
+    if (!show.plot)
+      return(latvarmat)
 
-    if (!plot.it) return(lvmat)
+    r.curves <- slot(object, type)
 
-    r.curves <- slot(object, type) # n times (M or S) (\boldeta or \boldmu) 
     if (MSratio != 1 && type == "predictors")
-        stop("can only plot the predictors if M == S")
-    MorS <- ncol(r.curves) # Actually, here, the value is S always.
+      stop("can only plot the predictors if M == S")
+    MorS <- ncol(r.curves)  # Actually, here, the value is S always.
     if (!add) {
-        if (Rank == 1) {
-            matplot(lvmat,
-                    if ( y && type == "fitted.values")
-                        object at y[,whichSpecies, drop = FALSE] else
-                        r.curves[,whichSpecies, drop = FALSE],
-                    type = "n", xlab = xlab, ylab=ylab, ...)
-        } else { # Rank == 2
-            matplot(c(Coeflist at Optimum[1,whichSpecies], lvmat[,1]),
-                    c(Coeflist at Optimum[2,whichSpecies], lvmat[,2]),
-                    type = "n", xlab = xlab, ylab=ylab, ...)
-        }
+      if (Rank == 1) {
+        matplot(latvarmat,
+                if ( y && type == "fitted.values")
+                    object at y[, which.species, drop = FALSE] else
+                    r.curves[, which.species, drop = FALSE],
+                type = "n", xlab = xlab, ylab = ylab, ...)
+      } else { # Rank == 2
+        matplot(c(Coeflist at Optimum[1, which.species], latvarmat[, 1]),
+                c(Coeflist at Optimum[2, which.species], latvarmat[, 2]),
+                type = "n", xlab = xlab, ylab = ylab, ...)
+      }
     }
 
 
-    pch  <- rep(pch,  leng = length(whichSpecies))
-    pcol <- rep(pcol, leng = length(whichSpecies))
-    pcex <- rep(pcex, leng = length(whichSpecies))
-    llty <- rep(llty, leng = length(whichSpecies))
-    lcol <- rep(lcol, leng = length(whichSpecies))
-    llwd <- rep(llwd, leng = length(whichSpecies))
-    adj.arg <- rep(adj.arg, leng = length(whichSpecies))
+    pch  <- rep(pch,  length = length(which.species))
+    pcol <- rep(pcol, length = length(which.species))
+    pcex <- rep(pcex, length = length(which.species))
+    llty <- rep(llty, length = length(which.species))
+    lcol <- rep(lcol, length = length(which.species))
+    llwd <- rep(llwd, length = length(which.species))
+    adj.arg <- rep(adj.arg, length = length(which.species))
 
     sppnames <- if (type == "predictors") dimnames(r.curves)[[2]] else
-        dimnames(object at y)[[2]]
+                                          dimnames(object at y)[[2]]
     if (Rank == 1) {
-        for(sppno in 1:length(whichSpecies)) {
-            thisSpecies <- whichSpecies[sppno]
-            indexSpecies <- if (is.character(whichSpecies))
-               match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
-            if (is.na(indexSpecies))
-                stop("mismatch found in 'whichSpecies'")
-            xx <- lvmat 
-            yy <- r.curves[,indexSpecies]
-            o <- sort.list(xx)
-            xx <- xx[ o ]
-            yy <- yy[ o ]
-            lines(xx, yy, col=lcol[sppno], lwd=llwd[sppno], lty=llty[sppno])
-            if ( y && type == "fitted.values") {
-                ypts <- object at y
-                if (ncol(as.matrix(ypts)) == ncol(r.curves))
-                    points(xx, ypts[o,sppno], col=pcol[sppno],
-                           cex=pcex[sppno], pch=pch[sppno])
-            } 
-        } 
-        if (rugplot) rug(xx) 
-    } else {
-        if (sites) {
-            text(lvmat[,1], lvmat[,2], adj = 0.5,
-              labels = if (is.null(spch)) dimnames(lvmat)[[1]] else 
-              rep(spch, length=nrow(lvmat)), col=scol, cex=scex, font=sfont)
-        }
-        for(sppno in 1:length(whichSpecies)) {
-            thisSpecies <- whichSpecies[sppno]
-            indexSpecies <- if (is.character(whichSpecies))
-                 match(whichSpecies[sppno], sppnames) else
-                 whichSpecies[sppno]
-            if (is.na(indexSpecies))
-                stop("mismatch found in 'whichSpecies'")
-            points(Coeflist at Optimum[1,indexSpecies],
-                   Coeflist at Optimum[2,indexSpecies],
-                   col=pcol[sppno], cex=pcex[sppno], pch=pch[sppno])
+      for (sppno in 1:length(which.species)) {
+        thisSpecies <- which.species[sppno]
+        indexSpecies <- if (is.character(which.species))
+           match(which.species[sppno], sppnames) else which.species[sppno]
+        if (is.na(indexSpecies))
+          stop("mismatch found in 'which.species'")
+        xx <- latvarmat 
+        yy <- r.curves[, indexSpecies]
+        ooo <- sort.list(xx)
+        xx <- xx[ooo]
+        yy <- yy[ooo]
+        lines(xx, yy, col = lcol[sppno],
+              lwd = llwd[sppno], lty = llty[sppno])
+        if (y && type == "fitted.values") {
+          ypts <- object at y
+          if (ncol(as.matrix(ypts)) == ncol(r.curves))
+            points(xx, ypts[ooo, sppno], col = pcol[sppno],
+                   cex = pcex[sppno], pch = pch[sppno])
         }
-        if (label.arg) {
-            for(sppno in 1:length(whichSpecies)) {
-                thisSpecies <- whichSpecies[sppno]
-                indexSpecies <- if (is.character(whichSpecies))
-                   match(whichSpecies[sppno], sppnames) else
-                         whichSpecies[sppno]
-                text(Coeflist at Optimum[1,indexSpecies],
-                     Coeflist at Optimum[2,indexSpecies],
-                     labels=(dimnames(Coeflist at Optimum)[[2]])[indexSpecies],
-                     adj=adj.arg[sppno], col=pcol[sppno], cex=pcex[sppno])
-            }
+      }
+      if (rugplot) rug(xx)
+    } else {
+      if (sites) {
+        text(latvarmat[,1], latvarmat[,2], adj = 0.5,
+             labels = if (is.null(spch)) dimnames(latvarmat)[[1]] else
+             rep(spch, length = nrow(latvarmat)),
+             col = scol, cex = scex, font = sfont)
+      }
+      for (sppno in 1:length(which.species)) {
+          thisSpecies <- which.species[sppno]
+          indexSpecies <- if (is.character(which.species))
+               match(which.species[sppno], sppnames) else
+               which.species[sppno]
+          if (is.na(indexSpecies))
+            stop("mismatch found in 'which.species'")
+          points(Coeflist at Optimum[1, indexSpecies],
+                 Coeflist at Optimum[2, indexSpecies],
+                 col = pcol[sppno], cex = pcex[sppno], pch = pch[sppno])
+      }
+      if (label.arg) {
+        for (sppno in 1:length(which.species)) {
+          thisSpecies <- which.species[sppno]
+          indexSpecies <- if (is.character(which.species))
+             match(which.species[sppno], sppnames) else
+                   which.species[sppno]
+          text(Coeflist at Optimum[1, indexSpecies],
+               Coeflist at Optimum[2, indexSpecies],
+               labels = (dimnames(Coeflist at Optimum)[[2]])[indexSpecies],
+               adj = adj.arg[sppno], col = pcol[sppno],
+               cex = pcex[sppno])
         }
+      }
     }
-    invisible(lvmat)
+    invisible(latvarmat)
 }
 
 
@@ -1420,125 +1493,131 @@ setMethod("lvplot", "cao",
 predict.cao <- function (object, newdata = NULL,
                          type = c("link", "response", "terms"), 
                          deriv = 0, ...) {
-    type <- match.arg(type, c("link", "response", "terms"))[1]
-    if (type != "link" && deriv != 0)
-        stop("Setting deriv = <positive integer> requires type='link'")
-    na.act <- object at na.action
-    object at na.action <- list()
-    ocontrol <- object at control
-    nice21 <- (length(ocontrol$colx1.index) == 1) &&
-             (names(ocontrol$colx1.index) == "(Intercept)")
-    if (!nice21) stop("Can only handle 'noRRR = ~ 1'")
-
-    if (!length(newdata) && type == "response" &&
-         length(object at fitted.values)) {
-        if (length(na.act)) {
-            return(napredict(na.act[[1]], object at fitted.values))
-        } else {
-            return(object at fitted.values)
-        }
-    }
-
-    if (!length(newdata)) {
-        X <- model.matrixvlm(object, type = "lm", ...)
-        offset <- object at offset
-        tt <- terms(object)
-        if (!length(object at x))
-            attr(X, "assign") <- attrassignlm(X, tt)
+  type <- match.arg(type, c("link", "response", "terms"))[1]
+  if (type != "link" && deriv != 0)
+    stop("Setting deriv = <positive integer> requires type='link'")
+  na.act <- object at na.action
+  object at na.action <- list()
+  ocontrol <- object at control
+  nice21 <- (length(ocontrol$colx1.index) == 1) &&
+            (names(ocontrol$colx1.index) == "(Intercept)")
+  if (!nice21)
+    stop("Can only handle 'noRRR = ~ 1'")
+
+  if (!length(newdata) && type == "response" &&
+       length(object at fitted.values)) {
+    if (length(na.act)) {
+      return(napredict(na.act[[1]], object at fitted.values))
     } else {
-        if (is.smart(object) && length(object at smart.prediction)) {
-            setup.smart("read", smart.prediction=object at smart.prediction)
-        }
+      return(object at fitted.values)
+    }
+  }
 
-        tt <- terms(object)  # 11/8/03; object at terms$terms 
-        X <- model.matrix(delete.response(tt), newdata, contrasts = 
-                  if (length(object at contrasts)) object at contrasts else NULL,
-                  xlev = object at xlevels)
+  if (!length(newdata)) {
+    X <- model.matrixvlm(object, type = "lm", ...)
+    offset <- object at offset
+    tt <- terms(object)
+    if (!length(object at x))
+      attr(X, "assign") <- attrassignlm(X, tt)
+  } else {
+    if (is.smart(object) && length(object at smart.prediction)) {
+      setup.smart("read", smart.prediction = object at smart.prediction)
+    }
 
-        if (nice21 && nrow(X)!=nrow(newdata)) {
-            as.save <- attr(X, "assign")
-            X <- X[rep(1, nrow(newdata)),, drop = FALSE]
-            dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
-            attr(X, "assign") <- as.save  # Restored 
-        }
+    tt <- terms(object)  # 11/8/03; object at terms$terms 
+    X <- model.matrix(delete.response(tt), newdata,
+                      contrasts = if (length(object at contrasts))
+                                  object at contrasts else NULL,
+                      xlev = object at xlevels)
+
+    if (nice21 && nrow(X) != nrow(newdata)) {
+      as.save <- attr(X, "assign")
+      X <- X[rep(1, nrow(newdata)),, drop = FALSE]
+      dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
+      attr(X, "assign") <- as.save  # Restored 
+    }
 
-        offset <- if (!is.null(off.num <- attr(tt, "offset"))) {
-            eval(attr(tt, "variables")[[off.num+1]], newdata)
-        } else if (!is.null(object at offset))
-            eval(object at call$offset, newdata)
+    offset <- if (!is.null(off.num <- attr(tt, "offset"))) {
+                eval(attr(tt, "variables")[[off.num+1]], newdata)
+              } else if (!is.null(object at offset))
+                eval(object at call$offset, newdata)
 
-        if (is.smart(object) && length(object at smart.prediction)) {
-            wrapup.smart() 
-        }
-
-        attr(X, "assign") <- attrassigndefault(X, tt)
+    if (is.smart(object) && length(object at smart.prediction)) {
+      wrapup.smart() 
     }
 
-    cancoefs <- ccoef(object)
+    attr(X, "assign") <- attrassigndefault(X, tt)
+  }
+
+    cancoefs <- concoef(object)
 
-    lvmat <- X[,ocontrol$colx2.index, drop = FALSE] %*% cancoefs   # n x Rank
+    latvarmat <- X[, ocontrol$colx2.index, drop = FALSE] %*% cancoefs
 
     Rank <- ocontrol$Rank
     NOS <- ncol(object at y)
     sppnames <- dimnames(object at y)[[2]]
     modelno <- ocontrol$modelno  # 1,2,3,5 or 0
     M <- if (any(slotNames(object) == "predictors") &&
-           is.matrix(object at predictors)) ncol(object at predictors) else
-           object at misc$M
-    MSratio <- M / NOS  # First value is g(mean) = quadratic form in lv
+             is.matrix(object at predictors))
+         ncol(object at predictors) else
+         object at misc$M
+    MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
     if (type == "terms") {
-        terms.mat = matrix(0,nrow(X),Rank*NOS) # 1st R cols for spp.1, etc.
-        interceptvector <- rep(0, len=NOS)
+      terms.mat <- matrix(0, nrow(X), Rank*NOS)  # 1st R cols for spp.1, etc.
+      interceptvector <- rep(0, len = NOS)
     } else {
-        etamat <- matrix(0, nrow(X), M)  # Could contain derivatives
+      etamat <- matrix(0, nrow(X), M)  # Could contain derivatives
     }
     ind8 <- 1:Rank
-    whichSpecies <- 1:NOS  # Do it all for all species
-    for(sppno in 1:length(whichSpecies)) {
-        thisSpecies <- whichSpecies[sppno]
-        indexSpecies <- if (is.character(whichSpecies))
-            match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
-        if (is.na(indexSpecies))
-            stop("mismatch found in 'whichSpecies'")
-
-        temp345 <- predictcao(object, grid=lvmat, sppno=thisSpecies,
-                             Rank=Rank, deriv=deriv, MSratio=MSratio,
-                             type=ifelse(type == "response", "link", type))
-        if (MSratio == 2) {
-            if (any(type == c("link", "response"))) {
-                etamat[,2*sppno-1] <- temp345$yvals 
-                etamat[,2*sppno  ] <- temp345$eta2 
-            } else {
-                terms.mat[,ind8] <- temp345
-                interceptvector[sppno] <- attr(temp345, "constant")
-            }
-        } else {
-            if (any(type == c("link", "response"))) {
-                etamat[,sppno] <- temp345$yvals 
-            } else {
-                terms.mat[,ind8] <- temp345
-                interceptvector[sppno] <- attr(temp345, "constant")
-            }
-        }
-        ind8 <- ind8 + Rank
+    which.species <- 1:NOS  # Do it all for all species
+    for (sppno in 1:length(which.species)) {
+      thisSpecies <- which.species[sppno]
+      indexSpecies <- if (is.character(which.species))
+        match(which.species[sppno], sppnames) else which.species[sppno]
+      if (is.na(indexSpecies))
+        stop("mismatch found in 'which.species'")
+
+     temp345 <-
+       predictcao(object, grid = latvarmat, sppno = thisSpecies,
+                  Rank = Rank, deriv = deriv, MSratio = MSratio,
+                  type = ifelse(type == "response", "link", type))
+     if (MSratio == 2) {
+       if (any(type == c("link", "response"))) {
+         etamat[, 2*sppno-1] <- temp345$yvals 
+         etamat[, 2*sppno  ] <- temp345$eta2 
+       } else {
+         terms.mat[, ind8] <- temp345
+         interceptvector[sppno] <- attr(temp345, "constant")
+       }
+     } else {
+       if (any(type == c("link", "response"))) {
+         etamat[, sppno] <- temp345$yvals 
+       } else {
+         terms.mat[, ind8] <- temp345
+         interceptvector[sppno] <- attr(temp345, "constant")
+       }
+     }
+     ind8 <- ind8 + Rank
     }
 
-    if (length(offset) && any(offset != 0))
-        etamat <- etamat + offset
-
-    if (type == "link") {
-        dimnames(etamat) <- list(dimnames(X)[[1]], if (deriv == 0) 
-                                object at misc$predictors.names else NULL)
-        return(etamat)
-    } else if (type == "response") {
-        fv <- object at family@linkinv(etamat, extra=object at extra)
-        dimnames(fv) <- list(dimnames(fv)[[1]],
-                            dimnames(object at fitted.values)[[2]])
-        return(fv)
-    } else {
-        attr(terms.mat, "constant") = interceptvector
-        terms.mat
-    }
+  if (length(offset) && any(offset != 0))
+    etamat <- etamat + offset
+
+  if (type == "link") {
+    dimnames(etamat) <-
+        list(dimnames(X)[[1]],
+             if (deriv == 0) 
+               object at misc$predictors.names else NULL)
+    return(etamat)
+  } else if (type == "response") {
+    fv <- object at family@linkinv(etamat, extra = object at extra)
+    dimnames(fv) <- list(dimnames(fv)[[1]],
+                         dimnames(object at fitted.values)[[2]])
+    return(fv)
+  } else {
+    attr(terms.mat, "constant") <- interceptvector
+    terms.mat
+  }
 }
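
A corresponding sketch for the predict method, again on the illustrative
fit:

    mu  <- predict(fit, type = "response")  # fitted means (equals fitted(fit) here)
    eta <- predict(fit, type = "link")      # additive predictors on the eta scale
    head(mu)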
 
 
@@ -1549,149 +1628,155 @@ setMethod("predict", "cao", function(object, ...)
 
 predictcao <- function(object, grid, sppno, Rank = 1,
                        deriv = 0, MSratio = 1, type = "link") {
-    if (type != "link" && type != "terms")
-        stop("'link' must be \"link\" or \"terms\"")
-    if (ncol(grid <- as.matrix(grid)) != Rank)
-        stop("'grid' must have ", Rank, " columns")
-    if (!is.Numeric(1 + deriv, allowable.length = 1,
-                    positive = TRUE, integer.valued = TRUE))
-        stop("'deriv' must be a non-negative integer")
-    if (type == "terms" && deriv != 0)
-        stop("'deriv' must be 0 when type=\"terms\"")
-
-    temp.b <- object at Bspline[[sppno]]
+  if (type != "link" && type != "terms")
+    stop("'link' must be \"link\" or \"terms\"")
+  if (ncol(grid <- as.matrix(grid)) != Rank)
+    stop("'grid' must have ", Rank, " columns")
+  if (!is.Numeric(1 + deriv, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE))
+    stop("'deriv' must be a non-negative integer")
+  if (type == "terms" && deriv != 0)
+    stop("'deriv' must be 0 when type=\"terms\"")
+
+  temp.b <- object at Bspline[[sppno]]
+  if (type == "terms") {
+    meanlatvar <- colMeans(grid)
+    answer <- matrix(0, nrow(grid), Rank)
+  } else {
+    nlfunvalues <- 0
+  }
+
+  for (rindex in 1:Rank) {
+    temp <- temp.b[[rindex]]  # temp is of class "vsmooth.spline.fit"
+    nlpart <- predict(temp, grid[, rindex], deriv = deriv)
+    yvals <- nlpart$y
     if (type == "terms") {
-        meanlv <- colMeans(grid)
-        answer <- matrix(0, nrow(grid), Rank)
+      answer[, rindex] <- yvals
     } else {
-        nlfunvalues <- 0
-    }
-    for(rindex in 1:Rank) {
-        temp <- temp.b[[rindex]]  # temp is of class "vsmooth.spline.fit"
-        nlpart <- predict(temp, grid[,rindex], deriv = deriv)
-        yvals <- nlpart$y
-        if (type == "terms") {
-            answer[,rindex] = yvals
-        } else {
-            nlfunvalues <- nlfunvalues + yvals
-        }
+      nlfunvalues <- nlfunvalues + yvals
     }
+  }
 
-    # Get the linear part of the additive predictor (intercept and slopes)
-        lcoef <- object at coefficients # linear coefs; dont use coef() (== Coef)
-        llcoef <- lcoef[(1+(sppno-1)*(MSratio+Rank)):(sppno*(MSratio+Rank))]
-        if (type == "terms") {
-            interceptvector <- llcoef[1]
-            for(rindex in 1:Rank) {
-                answer[,rindex] <- answer[,rindex] + (grid[,rindex] -
-                                  meanlv[rindex]) * llcoef[MSratio+rindex]
-                interceptvector <- interceptvector +
-                    meanlv[rindex] * llcoef[MSratio+rindex]
-            }
-        } else {
-            linpar <- if (deriv == 0) {
-                         llcoef[1]+grid %*% llcoef[-(1:MSratio)]
-                     } else {
-                         if(deriv == 1) llcoef[MSratio+rindex] else 0
-                     }
-            nlfunvalues <- nlfunvalues + linpar # Now complete
-        }
-    if (type == "terms") {
-        attr(answer, "constant") <- interceptvector
-        answer
-    } else {
-        list(xvals = grid,
-             yvals = c(nlfunvalues),
-             eta2 = if (MSratio == 2) llcoef[MSratio] else NULL)
+  lcoef <- object at coefficients  # linear coefs; dont use coef() (== Coef)
+  llcoef <- lcoef[(1+(sppno-1)*(MSratio+Rank)):(sppno*(MSratio+Rank))]
+  if (type == "terms") {
+    interceptvector <- llcoef[1]
+    for (rindex in 1:Rank) {
+      answer[, rindex] <- answer[, rindex] + (grid[, rindex] -
+                          meanlatvar[rindex]) * llcoef[MSratio+rindex]
+      interceptvector <- interceptvector +
+          meanlatvar[rindex] * llcoef[MSratio+rindex]
+    }
+  } else {
+    linpar <- if (deriv == 0) {
+                llcoef[1] + grid %*% llcoef[-(1:MSratio)]
+              } else {
+                if (deriv == 1) llcoef[MSratio + rindex] else 0
+              }
+    nlfunvalues <- nlfunvalues + linpar  # Now complete
+  }
+  if (type == "terms") {
+    attr(answer, "constant") <- interceptvector
+    answer
+  } else {
+    list(xvals = grid,
+         yvals = c(nlfunvalues),
+         eta2 = if (MSratio == 2) llcoef[MSratio] else NULL)
     }
 }
 
 
 
 
+
 plot.cao <- function(x,
-                    xlab = if (Rank == 1) "Latent Variable" else 
-                         paste("Latent Variable", 1:Rank),
-                    ylab = NULL, residuals.arg = FALSE,
-                    pcol=par()$col, pcex=par()$cex, pch=par()$pch,
-                    lcol=par()$col, lwd=par()$lwd, lty=par()$lty, 
-                    add = FALSE, 
-                    main = NULL,
-                    center.cf = Rank > 1,
-                    WhichRank = 1:Rank, 
-                    whichSpecies = NULL, # a numeric or character vector
-                    rugplot = TRUE, se.arg = FALSE, deriv = 0,
-                    scale = 0, ylim = NULL,
-                    overlay = FALSE, ...)
-{
-    Rank <- x at control$Rank
-    if (!is.logical(center.cf) || length(center.cf) != 1)
-        stop("bad input for argument 'center.cf'")
-    if (Rank > 1 &&  !center.cf)
-        stop("center.cf = TRUE is needed for models with Rank > 1")
-    NOS <- ncol(x at y)
-    sppnames <- dimnames(x at y)[[2]]
-    modelno <- x at control$modelno  # 1,2,3, or 0
-    M <- if (any(slotNames(x) == "predictors") &&
-           is.matrix(x at predictors)) ncol(x at predictors) else x at misc$M
-    if (all((MSratio <- M / NOS) != c(1,2))) stop("bad value for 'MSratio'")
-    pcol <- rep(pcol, length = Rank*NOS)
-    pcex <- rep(pcex, length = Rank*NOS)
-    pch  <- rep(pch,  length = Rank*NOS)
-    lcol <- rep(lcol, length = Rank*NOS)
-    lwd  <- rep(lwd,  length = Rank*NOS)
-    lty  <- rep(lty,  length = Rank*NOS)
-    xlab <- rep(xlab, length = Rank)
-    if (!length(whichSpecies)) whichSpecies <- 1:NOS
-    if (length(ylab)) 
-        ylab <- rep(ylab, len=length(whichSpecies)) # Too long if overlay
-    if (length(main))
-         main <- rep(main, len=length(whichSpecies)) # Too long if overlay
-    lvmat <- lv(x)
-    nice21 <- length(x at control$colx1.index) == 1 &&
-                    names(x at control$colx1.index) == "(Intercept)"
-    if (!nice21)
-        stop("can only handle intercept-only models")
-    counter <- 0
-    for(sppno in 1:length(whichSpecies)) {
-        thisSpecies <- whichSpecies[sppno]
-        indexSpecies <- if (is.character(whichSpecies))
-            match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
-        if (is.na(indexSpecies))
-            stop("mismatch found in 'whichSpecies'")
-        terms.mat <- predictcao(object = x, grid=lvmat, type = "terms",
-                               sppno=indexSpecies, Rank=Rank,
-                               deriv=deriv, MSratio=MSratio)
-        for(rindex in WhichRank) {
-            xvals <- lvmat[,rindex]
-            yvals <- terms.mat[,rindex]
-            o <- sort.list(xvals)
-            xvals <- xvals[ o ]
-            yvals <- yvals[ o ]
-            if (!center.cf) yvals <- yvals + attr(terms.mat, "constant")
-            if (!add)
-            if (sppno == 1 || !overlay) {
-                ylim.use <- if (length(ylim)) ylim else
+                     xlab = if (Rank == 1) "Latent Variable" else 
+                            paste("Latent Variable", 1:Rank),
+                     ylab = NULL, residuals.arg = FALSE,
+                     pcol = par()$col, pcex = par()$cex, pch = par()$pch,
+                     lcol = par()$col, lwd = par()$lwd, lty = par()$lty, 
+                     add = FALSE, 
+                     main = NULL,
+                     center.cf = Rank > 1,
+                     WhichRank = 1:Rank, 
+                     which.species = NULL,  # a numeric or character vector
+                     rugplot = TRUE, se.arg = FALSE, deriv = 0,
+                     scale = 0, ylim = NULL,
+                     overlay = FALSE, ...) {
+  Rank <- x at control$Rank
+  if (!is.logical(center.cf) || length(center.cf) != 1)
+    stop("bad input for argument 'center.cf'")
+  if (Rank > 1 &&  !center.cf)
+    stop("center.cf = TRUE is needed for models with Rank > 1")
+  NOS <- ncol(x at y)
+  sppnames <- dimnames(x at y)[[2]]
+  modelno <- x at control$modelno  # 1,2,3, or 0
+  M <- if (any(slotNames(x) == "predictors") &&
+         is.matrix(x at predictors)) ncol(x at predictors) else x at misc$M
+  if (all((MSratio <- M / NOS) != c(1,2)))
+    stop("bad value for 'MSratio'")
+
+  pcol <- rep(pcol, length = Rank*NOS)
+  pcex <- rep(pcex, length = Rank*NOS)
+  pch  <- rep(pch,  length = Rank*NOS)
+  lcol <- rep(lcol, length = Rank*NOS)
+  lwd  <- rep(lwd,  length = Rank*NOS)
+  lty  <- rep(lty,  length = Rank*NOS)
+  xlab <- rep(xlab, length = Rank)
+
+  if (!length(which.species)) which.species <- 1:NOS
+  if (length(ylab)) 
+    ylab <- rep(ylab, len = length(which.species))  # Too long if overlay
+  if (length(main))
+    main <- rep(main, len = length(which.species))  # Too long if overlay
+  latvarmat <- latvar(x)
+  nice21 <- length(x at control$colx1.index) == 1 &&
+                   names(x at control$colx1.index) == "(Intercept)"
+  if (!nice21)
+    stop("can only handle intercept-only models")
+
+  counter <- 0
+  for (sppno in 1:length(which.species)) {
+    thisSpecies <- which.species[sppno]
+    indexSpecies <- if (is.character(which.species))
+        match(which.species[sppno], sppnames) else which.species[sppno]
+    if (is.na(indexSpecies))
+        stop("mismatch found in 'which.species'")
+    terms.mat <- predictcao(object = x, grid = latvarmat, type = "terms",
+                            sppno = indexSpecies, Rank = Rank,
+                            deriv = deriv, MSratio = MSratio)
+    for (rindex in WhichRank) {
+      xvals <- latvarmat[, rindex]
+      yvals <- terms.mat[, rindex]
+      ooo <- sort.list(xvals)
+      xvals <- xvals[ooo]
+      yvals <- yvals[ooo]
+      if (!center.cf)
+        yvals <- yvals + attr(terms.mat, "constant")
+      if (!add)
+      if (sppno == 1 || !overlay) {
+        ylim.use <- if (length(ylim)) ylim else
                     ylim.scale(range(yvals), scale)
-                matplot(xvals, yvals, type = "n",
-                        xlab = xlab[rindex], 
-                        ylab = if (length(ylab)) ylab[sppno] else 
-                   ifelse(overlay, "Fitted functions", "Fitted function"),
-                        main = if (length(main)) main[sppno] else 
-                             ifelse(overlay, "", sppnames[thisSpecies]),
-                        ylim=ylim.use,
-                        ...)
-            }
-            if (residuals.arg) {
-                stop("cannot handle residuals = TRUE yet")
-            } 
-            counter <- counter + 1
-            lines(xvals, yvals,
-                  col=lcol[counter], lwd=lwd[counter], lty=lty[counter])
-            if (rugplot) rug(xvals)
-        }
+        matplot(xvals, yvals, type = "n",
+                xlab = xlab[rindex], 
+                ylab = if (length(ylab)) ylab[sppno] else 
+                       ifelse(overlay, "Fitted functions",
+                                       "Fitted function"),
+                main = if (length(main)) main[sppno] else 
+                       ifelse(overlay, "", sppnames[thisSpecies]),
+                ylim = ylim.use,
+                ...)
+      }
+      if (residuals.arg) {
+        stop("cannot handle residuals = TRUE yet")
+      } 
+      counter <- counter + 1
+      lines(xvals, yvals,
+            col = lcol[counter], lwd = lwd[counter], lty = lty[counter])
+      if (rugplot) rug(xvals)
     }
-    invisible(x)
+  }
+  invisible(x)
 }
 
 
@@ -1704,134 +1789,172 @@ setMethod("plot", "cao",
 
 
 
-persp.cao <- function(x,
-              plot.it = TRUE,
-              xlim = NULL, ylim = NULL, zlim = NULL, # zlim ignored if Rank == 1
-              gridlength = if (Rank == 1) 301 else c(51,51),
-              whichSpecies = NULL,
-              xlab= if (Rank == 1) "Latent Variable" else "Latent Variable 1",
-              ylab= if (Rank == 1) "Expected Value" else "Latent Variable 2",
-              zlab = "Expected value",
-              labelSpecies = FALSE,   # For Rank == 1 only
-              stretch = 1.05,  # quick and dirty, Rank == 1 only
-              main = "",
-              ticktype = "detailed",
-              col = if (Rank == 1) par()$col else "white",
-              lty=par()$lty,
-              lwd=par()$lwd,
-              rugplot = FALSE,
-              ...) {
-    object <- x  # don't like x as the primary argument 
-    coefobj <- Coef(object) 
-    if ((Rank <- coefobj@Rank) > 2)
-        stop("object must be a rank-1 or rank-2 model")
-    fvmat <- fitted(object)
-    NOS <- ncol(fvmat)    # Number of species
-    M <- if (any(slotNames(object) == "predictors") &&
-           is.matrix(object@predictors)) ncol(object@predictors) else
-           object@misc$M
-    MSratio <- M / NOS  # First value is g(mean) = quadratic form in lv
-
-    xlim <- if (length(xlim)) xlim else range(coefobj@lv[,1])
-    if (!length(ylim.orig <- ylim)) {
-        ylim <- if (Rank == 1) c(0, max(fvmat)*stretch) else
-               range(coefobj@lv[,2])
-    }
-    xlim <- rep(xlim, length = 2)
-    ylim <- rep(ylim, length = 2)
-    gridlength <- rep(gridlength, length=Rank)
-    lv1 <- seq(xlim[1], xlim[2], length=gridlength[1])
-    lv2 <- if (Rank == 2) seq(ylim[1], ylim[2], len=gridlength[2]) else NULL
-    lvmat <- if (Rank == 2) expand.grid(lv1, lv2) else cbind(lv1)
-
-    sppNames <- dimnames(object@y)[[2]]
-    if (!length(whichSpecies)) {
-        whichSpecies <- sppNames[1:NOS]
-        whichSpecies.numer <- 1:NOS
-    } else
-    if (is.numeric(whichSpecies)) {
-        whichSpecies.numer <- whichSpecies
-        whichSpecies <- sppNames[whichSpecies.numer]  # Convert to character
-    } else
-        whichSpecies.numer <- match(whichSpecies, sppNames)
-
-    LP <- matrix(as.numeric(NA),nrow(lvmat),NOS) # For 1st eta for each spp.
-    for(sppno in 1:NOS) {
-        temp <- predictcao(object=object, grid=lvmat, sppno=sppno, 
-                          Rank=Rank, deriv = 0, MSratio=MSratio)
-        LP[,sppno] <- temp$yval
-    }
-    if (MSratio == 2) {
-        LP <- kronecker(LP, matrix(1:0, 1, 2))  # n x M
-    }
-    fitvals <- object@family@linkinv(LP, extra=object@extra)   # n by NOS
-    dimnames(fitvals) <- list(NULL, dimnames(fvmat)[[2]])
+persp.cao <-
+  function(x,
+           show.plot = TRUE,
+           xlim = NULL, ylim = NULL, zlim = NULL,  # zlim ignored if Rank == 1
+           gridlength = if (Rank == 1) 301 else c(51, 51),
+           which.species = NULL,
+           xlab = if (Rank == 1) "Latent Variable" else "Latent Variable 1",
+           ylab = if (Rank == 1) "Expected Value"  else "Latent Variable 2",
+           zlab = "Expected value",
+           labelSpecies = FALSE,   # For Rank == 1 only
+           stretch = 1.05,  # quick and dirty, Rank == 1 only
+           main = "",
+           ticktype = "detailed",
+           col = if (Rank == 1) par()$col else "white",
+           lty = par()$lty,
+           lwd = par()$lwd,
+           rugplot = FALSE,
+           ...) {
+  object <- x  # don't like x as the primary argument 
+  coefobj <- Coef(object) 
+  if ((Rank <- coefobj@Rank) > 2)
+    stop("object must be a rank-1 or rank-2 model")
+  fvmat <- fitted(object)
+  NOS <- ncol(fvmat)    # Number of species
+  M <- if (any(slotNames(object) == "predictors") &&
+         is.matrix(object@predictors)) ncol(object@predictors) else
+         object@misc$M
+  MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
+
+  xlim <- if (length(xlim))
+            xlim else
+            range(coefobj@latvar[, 1])
+  if (!length(ylim.orig <- ylim)) {
+    ylim <- if (Rank == 1)
+              c(0, max(fvmat)*stretch) else
+              range(coefobj@latvar[,2])
+  }
+  xlim <- rep(xlim, length = 2)
+  ylim <- rep(ylim, length = 2)
+  gridlength <- rep(gridlength, length = Rank)
+  latvar1 <- seq(xlim[1], xlim[2], length = gridlength[1])
+  latvar2 <- if (Rank == 2)
+               seq(ylim[1], ylim[2], len = gridlength[2]) else
+               NULL
+    latvarmat <- if (Rank == 2)
+                   expand.grid(latvar1, latvar2) else
+                   cbind(latvar1)
+
+  sppNames <- dimnames(object@y)[[2]]
+  if (!length(which.species)) {
+    which.species <- sppNames[1:NOS]
+    which.species.numer <- 1:NOS
+  } else
+  if (is.numeric(which.species)) {
+    which.species.numer <- which.species
+    which.species <- sppNames[which.species.numer]  # Convert to character
+  } else {
+    which.species.numer <- match(which.species, sppNames)
+  }
 
-    if (Rank == 1) {
-        if (plot.it) {
-            if (!length(ylim.orig))
-        ylim <- c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
-            col <- rep(col, len=length(whichSpecies.numer))
-            lty <- rep(lty, len=length(whichSpecies.numer))
-            lwd <- rep(lwd, len=length(whichSpecies.numer))
-            matplot(lv1, fitvals, xlab = xlab, ylab=ylab,
-                    type = "n", main=main, xlim = xlim, ylim=ylim, ...)
-            if (rugplot) rug(lv(object)) 
-            for(sppno in 1:length(whichSpecies.numer)) {
-                ptr2 <- whichSpecies.numer[sppno]  # points to species column
-                lines(lv1, fitvals[,ptr2], col=col[sppno], 
-                      lty=lty[sppno], lwd=lwd [sppno], ...)
-                if (labelSpecies) {
-                    ptr1 <- (1:nrow(fitvals))[max(fitvals[,ptr2]) ==
-                                                 fitvals[,ptr2]]
-                    ptr1 <- ptr1[1]
-                    text(lv1[ptr1], fitvals[ptr1,ptr2]+(stretch-1) *
-                         diff(range(ylim)), label=sppNames[sppno],
-                         col=col[sppno], ...)
-                }
-            }
-        }
-    } else {
-        maxfitted <- matrix(fitvals[,whichSpecies[1]], length(lv1),
-                           length(lv2))
-        if (length(whichSpecies) > 1)
-        for(sppno in whichSpecies[-1]) {
-            maxfitted <- pmax(maxfitted, matrix(fitvals[,sppno], 
-                                               length(lv1), length(lv2)))
+  LP <- matrix(as.numeric(NA), nrow(latvarmat), NOS)
+  for (sppno in 1:NOS) {
+    temp <- predictcao(object = object, grid = latvarmat, sppno = sppno,
+                       Rank = Rank, deriv = 0, MSratio = MSratio)
+    LP[, sppno] <- temp$yval
+  }
+  if (MSratio == 2) {
+    LP <- kronecker(LP, matrix(1:0, 1, 2))  # n x M
+  }
+  fitvals <- object@family@linkinv(LP, extra = object@extra)  # n by NOS
+  dimnames(fitvals) <- list(NULL, dimnames(fvmat)[[2]])
+
+  if (Rank == 1) {
+    if (show.plot) {
+      if (!length(ylim.orig))
+        ylim <- c(0, max(fitvals[,which.species.numer]) * stretch)  # A revision
+      col <- rep(col, len = length(which.species.numer))
+      lty <- rep(lty, len = length(which.species.numer))
+      lwd <- rep(lwd, len = length(which.species.numer))
+      matplot(latvar1, fitvals, xlab = xlab, ylab = ylab,
+              type = "n", main = main, xlim = xlim, ylim = ylim, ...)
+      if (rugplot) rug(latvar(object)) 
+      for (sppno in 1:length(which.species.numer)) {
+        ptr2 <- which.species.numer[sppno]  # points to species column
+        lines(latvar1, fitvals[,ptr2], col = col[sppno], 
+              lty = lty[sppno], lwd = lwd [sppno], ...)
+        if (labelSpecies) {
+          ptr1 <- (1:nrow(fitvals))[max(fitvals[, ptr2]) ==
+                                        fitvals[, ptr2]]
+          ptr1 <- ptr1[1]
+          text(latvar1[ptr1], fitvals[ptr1, ptr2] + (stretch-1) *
+               diff(range(ylim)), label = sppNames[sppno],
+               col = col[sppno], ...)
         }
-        if (!length(zlim))
-            zlim <- range(maxfitted, na.rm = TRUE)
-        if (plot.it)
-            graphics:::persp.default(lv1, lv2, maxfitted,
-                  zlim=zlim,
-                  xlab = xlab, ylab=ylab, zlab=zlab,
-                  ticktype = ticktype, col = col, main=main, ...) 
+      }
     }
+  } else {
+    max.fitted <- matrix(fitvals[,which.species[1]],
+                         length(latvar1), length(latvar2))
+    if (length(which.species) > 1)
+      for (sppno in which.species[-1]) {
+        max.fitted <- pmax(max.fitted,
+                           matrix(fitvals[, sppno], 
+                                  length(latvar1), length(latvar2)))
+    }
+    if (!length(zlim))
+      zlim <- range(max.fitted, na.rm = TRUE)
+
+
+    perspdefault <- getS3method("persp", "default")
+    if (show.plot)
+      perspdefault(latvar1, latvar2, max.fitted,
+                   zlim = zlim,
+                   xlab = xlab, ylab = ylab, zlab = zlab,
+                   ticktype = ticktype, col = col, main = main, ...)
 
-    invisible(list(fitted = fitvals,
-                   lv1grid = lv1,
-                   lv2grid = if (Rank == 2) lv2 else NULL,
-                   maxfitted = if (Rank == 2) maxfitted else NULL))
+
+  }
+
+  invisible(list(fitted      = fitvals,
+                 latvar1grid = latvar1,
+                 latvar2grid = if (Rank == 2) latvar2 else NULL,
+                 max.fitted  = if (Rank == 2) max.fitted else NULL))
 }
 
 
 if(!isGeneric("persp"))
-    setGeneric("persp", function(x, ...) standardGeneric("persp"))
+  setGeneric("persp", function(x, ...) standardGeneric("persp"))
 setMethod("persp", "cao", function(x, ...) persp.cao(x = x, ...))
 
 
 
-lv.cao <- function(object, ...) {
-    Coef(object, ...)@lv
+latvar.cao <- function(object, ...) {
+  Coef(object, ...)@latvar
 }
 
 
 
+
 if(!isGeneric("lv"))
-    setGeneric("lv", function(object, ...) standardGeneric("lv"),
-    package = "VGAM")
+  setGeneric("lv",
+             function(object, ...) {
+    .Deprecated("latvar")
+
+               standardGeneric("lv")
+             },
+             package = "VGAM")
+
+ setMethod("lv", "cao",
+           function(object, ...) latvar.cao(object, ...))
+
+
+
+ if (!isGeneric("latvar"))
+    setGeneric("latvar",
+  function(object, ...) standardGeneric("latvar"))
+
+setMethod("latvar", "cao",
+  function(object, ...) latvar.cao(object, ...))
+
+
+
+
+
+
 
- setMethod("lv", "cao", function(object, ...) lv.cao(object, ...))
 
 
 
@@ -1864,21 +1987,22 @@ setMethod("summary", "cao", function(object, ...)
 
 
 
+
 show.summary.cao <- function(x, ...) {
-    cat("\nCall:\n")
-    dput(x@call)
+  cat("\nCall:\n")
+  dput(x@call)
 
-    show.Coef.cao(x, ...)
+  show.Coef.cao(x, ...)
 
-    cat("\nNumber of species: ", x at NOS, "\n")
+  cat("\nNumber of species: ", x at NOS, "\n")
 
-    if (length(x@misc$dispersion) == 1) {
-        cat("\nDispersion parameter(s): ", x@misc$dispersion, "\n")
-    } else if (is.Numeric(x@dispersion)) {
-        cat("\nDispersion parameter(s)\n")
-        print( x@dispersion, ... )
-    }
-    invisible(x)
+  if (length(x@misc$dispersion) == 1) {
+    cat("\nDispersion parameter(s): ", x@misc$dispersion, "\n")
+  } else if (is.Numeric(x@dispersion)) {
+    cat("\nDispersion parameter(s)\n")
+    print( x@dispersion, ... )
+  }
+  invisible(x)
 }
 
 
@@ -1892,25 +2016,42 @@ setMethod("show", "summary.cao",
 
 
 
-ccoef.cao <- function(object, ...) {
+concoef.cao <- function(object, ...) {
   Coef(object, ...)@C
 }
 
 
-ccoef.Coef.cao <- function(object, ...) {
+concoef.Coef.cao <- function(object, ...) {
   if (length(list(...)))
     warning("Too late! Ignoring the extra arguments")
   object@C
 }
 
 
-if(!isGeneric("ccoef"))
-  setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
+ if (!isGeneric("ccoef"))
+     setGeneric("ccoef", function(object, ...) {
+    .Deprecated("concoef")
+
+    standardGeneric("ccoef")
+    })
 
 setMethod("ccoef", "cao", function(object, ...)
-    ccoef.cao(object, ...))
+    concoef.cao(object, ...))
 setMethod("ccoef", "Coef.cao", function(object, ...)
-    ccoef.Coef.cao(object, ...))
+    concoef.Coef.cao(object, ...))
+
+setMethod("concoef", "cao", function(object, ...)
+    concoef.cao(object, ...))
+setMethod("concoef", "Coef.cao", function(object, ...)
+    concoef.Coef.cao(object, ...))
+
+
+
+
+
+
+
+
 
 
 if(!isGeneric("calibrate"))
@@ -1927,11 +2068,11 @@ setMethod("calibrate", "qrrvglm", function(object, ...)
 
 
 Tol.cao <- function(object, ...) {
-    stop("The tolerance for a 'cao' object is undefined")
+  stop("The tolerance for a 'cao' object is undefined")
 }
 
 if(!isGeneric("Tol"))
-    setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
+  setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
 setMethod("Tol", "cao", function(object, ...)
           Tol.cao(object, ...))
 
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 57863be..89d6495 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -16,7 +16,7 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
     return(ans)
 
  
-  ncolx <- object@misc$p   # = length(object@constraints)
+  ncolx <- object@misc$p  # = length(object@constraints)
   M <- object@misc$M
 
   Blist <- object@constraints
@@ -31,7 +31,7 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
     ncolBlist <- unlist(lapply(Blist, ncol)) 
     nasgn <- names(Blist)
     temp <- c(0, cumsum(ncolBlist))
-    for(ii in 1:length(nasgn)) {
+    for (ii in 1:length(nasgn)) {
       index <- (temp[ii] + 1):temp[ii + 1]
       cmat <- Blist[[nasgn[ii]]]
       Bmat[ii,] <- cmat %*% ans[index]
@@ -40,12 +40,12 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
 
   if (label) {
     d1 <- object@misc$colnames.x
-    d2 <- object@misc$predictors.names # Could be NULL
+    d2 <- object@misc$predictors.names  # Could be NULL
     dimnames(Bmat) <- list(d1, d2)
   }
 
   Bmat
-} # end of coefvlm
+}  # end of coefvlm
 
 
 
diff --git a/R/cqo.R b/R/cqo.R
index f708ab5..59fccf2 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -15,147 +15,146 @@ cqo <- function(formula,
                 contrasts = NULL, 
                 constraints = NULL,
                 extra = NULL, 
-                smart = TRUE, ...)
-{
-    dataname <- as.character(substitute(data))  # "list" if no data =
-    function.name <- "cqo"
-
-    ocall <- match.call()
-
-    if (smart) 
-        setup.smart("write")
-
-    mt <- terms(formula, data = data)
-    if (missing(data)) 
-        data <- environment(formula)
-
-    mf <- match.call(expand.dots = FALSE)
-    mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
-        mf$control <- mf$contrasts <- mf$constraints <- mf$extra <- NULL
-    mf$coefstart <- mf$etastart <- mf$... <- NULL
-    mf$smart <- NULL
-    mf$drop.unused.levels <- TRUE 
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame()) 
-    if (method == "model.frame")
-        return(mf)
-    na.act <- attr(mf, "na.action")
-
-    xvars <- as.character(attr(mt, "variables"))[-1]
-    if ((yvar <- attr(mt, "response")) > 0)
-        xvars <- xvars[-yvar]
-    xlev <- if (length(xvars) > 0) {
-        xlev <- lapply(mf[xvars], levels)
-        xlev[!sapply(xlev, is.null)]
+                smart = TRUE, ...) {
+  dataname <- as.character(substitute(data))  # "list" if no data =
+  function.name <- "cqo"
+
+  ocall <- match.call()
+
+  if (smart) 
+    setup.smart("write")
+
+  mt <- terms(formula, data = data)
+  if (missing(data)) 
+      data <- environment(formula)
+
+  mf <- match.call(expand.dots = FALSE)
+  mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
+    mf$control <- mf$contrasts <- mf$constraints <- mf$extra <- NULL
+  mf$coefstart <- mf$etastart <- mf$... <- NULL
+  mf$smart <- NULL
+  mf$drop.unused.levels <- TRUE 
+  mf[[1]] <- as.name("model.frame")
+  mf <- eval(mf, parent.frame()) 
+  if (method == "model.frame")
+    return(mf)
+  na.act <- attr(mf, "na.action")
+
+  xvars <- as.character(attr(mt, "variables"))[-1]
+  if ((yvar <- attr(mt, "response")) > 0)
+    xvars <- xvars[-yvar]
+  xlev <- if (length(xvars) > 0) {
+    xlev <- lapply(mf[xvars], levels)
+    xlev[!sapply(xlev, is.null)]
+  }
+
+  y <- model.response(mf, "numeric")  # model.extract(mf, "response")
+  x <- model.matrix(mt, mf, contrasts)
+  attr(x, "assign") <- attrassigndefault(x, mt)
+  offset <- model.offset(mf)
+  if (is.null(offset)) 
+    offset <- 0  # yyy ???
+  w <- model.weights(mf)
+  if (!length(w)) {
+    w <- rep(1, nrow(mf))
+  } else if (ncol(as.matrix(w)) == 1 && any(w < 0))
+    stop("negative weights not allowed")
+
+  if (is.character(family))
+    family <- get(family)
+  if (is.function(family))
+    family <- family()
+  if (!inherits(family, "vglmff")) {
+    stop("'family = ", family, "' is not a VGAM family function")
+  }
+
+  control$criterion <- "coefficients"  # Specifically 4 vcontrol.expression
+  eval(vcontrol.expression)
+
+  if (!is.null(family@first))
+    eval(family@first)
+
+
+  cqo.fitter <- get(method)
+
+
+  deviance.Bestof <- rep(as.numeric(NA), len = control$Bestof)
+  for (tries in 1:control$Bestof) {
+    if (control$trace && (control$Bestof>1))
+    cat(paste("\n========================= Fitting model", tries,
+                "=========================\n"))
+    onefit <- cqo.fitter(x = x, y = y, w = w, offset = offset,
+              etastart = etastart, mustart = mustart, coefstart = coefstart,
+              family = family, control = control, constraints = constraints,
+              extra = extra, Terms = mt, function.name = function.name, ...)
+    deviance.Bestof[tries] <- if (length(onefit$crit.list$deviance))
+      onefit$crit.list$deviance else onefit$crit.list$loglikelihood
+    if (tries == 1 ||
+        min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+      fit <- onefit
+  }
+  fit$misc$deviance.Bestof <- deviance.Bestof
+
+
+  fit$misc$dataname <- dataname
+
+  if (smart) {
+    fit$smart.prediction <- get.smart.prediction()
+    wrapup.smart()
+  }
+
+  answer <-
+  new(Class = "qrrvglm",
+    "assign"       = attr(x, "assign"),
+    "call"         = ocall,
+    "coefficients" = fit$coefficients,
+    "constraints"  = fit$constraints,
+    "criterion"    = fit$crit.list,  # list("deviance" = min(deviance.Bestof)),
+    "dispersion"   = 1,
+    "family"       = fit$family,
+    "misc"         = fit$misc,
+    "model"        = if (model) mf else data.frame(),
+    "residuals"    = as.matrix(fit$residuals),
+    "smart.prediction" = as.list(fit$smart.prediction),
+    "terms"        = list(terms = mt))
+
+  if (!smart) answer at smart.prediction <- list(smart.arg = FALSE)
+
+  if (length(attr(x, "contrasts")))
+    slot(answer, "contrasts") <- attr(x, "contrasts")
+  if (length(fit$fitted.values))
+    slot(answer, "fitted.values") <- as.matrix(fit$fitted.values)
+  slot(answer, "na.action") <- if (length(na.act)) list(na.act) else list()
+  if (length(offset))
+    slot(answer, "offset") <- as.matrix(offset)
+  if (length(fit$weights))
+    slot(answer, "weights") <- as.matrix(fit$weights)
+  if (x.arg)
+    slot(answer, "x") <- fit$x # The 'small' design matrix
+  if (length(xlev))
+    slot(answer, "xlevels") <- xlev
+  if (y.arg)
+    slot(answer, "y") <- as.matrix(fit$y)
+
+  fit$control$min.criterion <- TRUE  # needed for calibrate; a special case
+
+
+  slot(answer, "control") <- fit$control
+  slot(answer, "extra") <- if (length(fit$extra)) {
+    if (is.list(fit$extra)) fit$extra else {
+      warning("'extra' is not a list, therefore placing ",
+              "'extra' into a list")
+      list(fit$extra)
     }
-
-    y <- model.response(mf, "numeric") # model.extract(mf, "response")
-    x <- model.matrix(mt, mf, contrasts)
-    attr(x, "assign") = attrassigndefault(x, mt)
-    offset <- model.offset(mf)
-    if (is.null(offset)) 
-        offset <- 0 # yyy ???
-    w <- model.weights(mf)
-    if (!length(w))
-        w <- rep(1, nrow(mf))
-    else if (ncol(as.matrix(w)) == 1 && any(w < 0))
-        stop("negative weights not allowed")
-
-    if (is.character(family))
-        family <- get(family)
-    if (is.function(family))
-        family <- family()
-    if (!inherits(family, "vglmff")) {
-        stop("'family=", family, "' is not a VGAM family function")
-    }
-
-    control$criterion = "coefficients" # Specifically 4 vcontrol.expression
-    eval(vcontrol.expression)
-
-    if (!is.null(family@first))
-        eval(family@first)
-
-
-    cqo.fitter <- get(method)
-
-
-    deviance.Bestof = rep(as.numeric(NA), len=control$Bestof)
-    for(tries in 1:control$Bestof) {
-         if (control$trace && (control$Bestof>1))
-         cat(paste("\n========================= Fitting model", tries,
-                     "=========================\n"))
-         onefit <- cqo.fitter(x=x, y=y, w=w, offset=offset,
-                   etastart=etastart, mustart=mustart, coefstart=coefstart,
-                   family=family, control=control, constraints=constraints,
-                   extra=extra, Terms=mt, function.name=function.name, ...)
-        deviance.Bestof[tries] = if (length(onefit$crit.list$deviance))
-            onefit$crit.list$deviance else onefit$crit.list$loglikelihood
-       if (tries == 1 ||
-          min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
-            fit = onefit
-    }
-    fit$misc$deviance.Bestof = deviance.Bestof
-
-
-    fit$misc$dataname <- dataname
-
-    if (smart) {
-        fit$smart.prediction <- get.smart.prediction()
-        wrapup.smart()
-    }
-
-    answer <-
-    new(Class="qrrvglm",
-      "assign"       = attr(x, "assign"),
-      "call"         = ocall,
-      "coefficients" = fit$coefficients,
-      "constraints"  = fit$constraints,
-      "criterion"    = fit$crit.list, # list("deviance" = min(deviance.Bestof)),
-      "dispersion"   = 1,
-      "family"       = fit$family,
-      "misc"         = fit$misc,
-      "model"        = if (model) mf else data.frame(),
-      "residuals"    = as.matrix(fit$residuals),
-      "smart.prediction" = as.list(fit$smart.prediction),
-      "terms"        = list(terms=mt))
-
-    if (!smart) answer at smart.prediction <- list(smart.arg = FALSE)
-
-    if (length(attr(x, "contrasts")))
-        slot(answer, "contrasts") = attr(x, "contrasts")
-    if (length(fit$fitted.values))
-        slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
-    slot(answer, "na.action") = if (length(na.act)) list(na.act) else list()
-    if (length(offset))
-        slot(answer, "offset") = as.matrix(offset)
-    if (length(fit$weights))
-        slot(answer, "weights") = as.matrix(fit$weights)
-    if (x.arg)
-        slot(answer, "x") = fit$x # The 'small' design matrix
-    if (length(xlev))
-        slot(answer, "xlevels") = xlev
-    if (y.arg)
-        slot(answer, "y") = as.matrix(fit$y)
-
-    fit$control$min.criterion = TRUE # needed for calibrate; a special case
-
-
-    slot(answer, "control") = fit$control
-    slot(answer, "extra") = if (length(fit$extra)) {
-        if (is.list(fit$extra)) fit$extra else {
-            warning("'extra' is not a list, therefore placing ",
-                    "'extra' into a list")
-            list(fit$extra)
-        }
-    } else list() # R-1.5.0
-    slot(answer, "iter") = fit$iter
-    fit$predictors = as.matrix(fit$predictors)  # Must be a matrix 
-    dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
-                                    fit$misc$predictors.names)
-    slot(answer, "predictors") = fit$predictors
-    if (length(fit$prior.weights))
-        slot(answer, "prior.weights") = as.matrix(fit$prior.weights)
-    answer
+  } else list()  # R-1.5.0
+  slot(answer, "iter") <- fit$iter
+  fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix 
+  dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
+                                   fit$misc$predictors.names)
+  slot(answer, "predictors") <- fit$predictors
+  if (length(fit$prior.weights))
+    slot(answer, "prior.weights") <- as.matrix(fit$prior.weights)
+  answer
 }
 attr(cqo, "smart") <- TRUE
 
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index cbfaa14..05fb32c 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -6,201 +6,223 @@
 
 
 
+
 callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
-                    X_vlm_1save, modelno, Control,
-                    n, M, p1star, p2star, nice31, allofit = FALSE) {
-    ocmatrix <- cmatrix
-    control <- Control
-    Rank <- control$Rank
-    p1 <- length(control$colx1.index); p2 <- length(control$colx2.index)
-    dim(cmatrix) <- c(p2, Rank)  # for crow1C
-    pstar <- p1star + p2star
-    maxMr <- max(M, Rank)
-    nstar <- if (nice31) ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
-    NOS <- ifelse(modelno == 3 || modelno==5, M/2, M)
-    lenbeta <- pstar * ifelse(nice31, NOS, 1)
-
-    if (itol <- control$ITolerances) {
-        if (Rank > 1) {
-            numat <- xmat[,control$colx2.index,drop = FALSE] %*% cmatrix
-            evnu <- eigen(var(numat))
-            cmatrix <- cmatrix %*% evnu$vector
+                     X.vlm.1save, modelno, Control,
+                     n, M, p1star, p2star, nice31, allofit = FALSE) {
+  ocmatrix <- cmatrix
+  control <- Control
+  Rank <- control$Rank
+  p1 <- length(control$colx1.index)
+  p2 <- length(control$colx2.index)
+  dim(cmatrix) <- c(p2, Rank)  # for crow1C
+  pstar <- p1star + p2star
+  maxMr <- max(M, Rank)
+  nstar <- if (nice31) ifelse(modelno %in% c(3, 5), n*2, n) else n*M
+
+  NOS <- ifelse(modelno %in% c(3, 5), M/2, M)
+  lenbeta <- pstar * ifelse(nice31, NOS, 1)
+
+  if (itol <- control$ITolerances) {
+    if (Rank > 1) {
+      numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+      evnu <- eigen(var(numat))
+      cmatrix <- cmatrix %*% evnu$vector
+    }
+
+    cmatrix <- crow1C(cmatrix, control$Crow1positive)
+    numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+    sdnumat <- apply(numat, 2, sd)
+    for (lookat in 1:Rank)
+      if (sdnumat[lookat] >
+          control$MUXfactor[lookat] * control$isd.latvar[lookat]) {
+        muxer <- control$isd.latvar[lookat] *
+                 control$MUXfactor[lookat] / sdnumat[lookat]
+        numat[, lookat] <- numat[, lookat] * muxer
+        cmatrix[,lookat] <- cmatrix[,lookat] * muxer
+        if (control$trace) {
+          cat(paste("Taking evasive action for latent variable ",
+                    lookat, ".\n", sep = ""))
+          flush.console()
         }
+        rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+                        "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
+      }
+  } else {
+    numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+    evnu <- eigen(var(numat))
+    temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+                           evnu$vector %*% evnu$value^(-0.5)
+    cmatrix <- cmatrix %*% temp7
+    cmatrix <- crow1C(cmatrix, control$Crow1positive)
+    numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+  }
 
-        cmatrix <- crow1C(cmatrix, control$Crow1positive)
-        numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-        sdnumat <- apply(numat, 2, sd)
-        for(lookat in 1:Rank)
-            if (sdnumat[lookat] >
-                control$MUXfactor[lookat] * control$isdlv[lookat]) {
-                muxer <- control$isdlv[lookat] *
-                        control$MUXfactor[lookat] / sdnumat[lookat]
-                numat[,lookat] <- numat[,lookat] * muxer
-                cmatrix[,lookat] <- cmatrix[,lookat] * muxer
-                if (control$trace) {
-                    cat(paste("Taking evasive action for latent variable ",
-                              lookat, ".\n", sep = ""))
-                    flush.console()
-                }
-                rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
-                                "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
-            }
-    } else {
-        numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-        evnu <- eigen(var(numat))
-        temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
-                evnu$vector %*% evnu$value^(-0.5)
-        cmatrix <- cmatrix %*% temp7
-        cmatrix <- crow1C(cmatrix, control$Crow1positive)
-        numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-    }
+  inited <- ifelse(exists(".VGAM.CQO.etamat", envir = VGAMenv), 1, 0)
 
-    inited <- if (is.R()) {
-        if (exists(".VGAM.CQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
-    } else 0
 
+  usethiseta <- if (inited == 1) 
+    getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
+  usethisbeta <- if (inited == 2) 
+    getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
 
-    usethiseta <- if (inited == 1) 
-        getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
-    usethisbeta <- if (inited == 2) 
-        getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
-
-    othint <- c(Rank = Rank, control$EqualTol, pstar = pstar,
-               dimw = 1, inited = inited, modelno = modelno,
-               maxitl = control$maxitl, actnits = 0, twice = 0,
-               p1star = p1star, p2star = p2star, nice31 = nice31,
-               lenbeta = lenbeta, itol = itol, control$trace,
-               p1 = p1, p2 = p2, control$imethod)
-    bnumat <- if (nice31) matrix(0,nstar,pstar) else
-             cbind(matrix(0,nstar,p2star), X_vlm_1save)
+  othint <- c(Rank = Rank, control$EqualTol, pstar = pstar,
+              dimw = 1, inited = inited, modelno = modelno,
+              maxitl = control$maxitl, actnits = 0, twice = 0,
+              p1star = p1star, p2star = p2star, nice31 = nice31,
+              lenbeta = lenbeta, itol = itol, control$trace,
+              p1 = p1, p2 = p2, control$imethod)
+  bnumat <- if (nice31) matrix(0,nstar,pstar) else
+            cbind(matrix(0, nstar, p2star), X.vlm.1save)
 
  
 
-    ans1 <- 
-    dotC(name = if (nice31) "cqo_1" else "cqo_2",
-       numat = as.double(numat), as.double(ymat), 
-       as.double(if (p1) xmat[,control$colx1.index] else 999),
-       as.double(wvec), etamat = as.double(usethiseta),
-       moff = double(if (itol) n else 1),
-       fv = double(NOS*n), z = double(n*M), wz = double(n*M),
-       U = double(M*n), bnumat = as.double(bnumat),
-       qr = double(nstar*pstar), qraux = double(pstar),
-           qpivot = integer(pstar),
-       as.integer(n), as.integer(M), NOS = as.integer(NOS),
-           as.integer(nstar), dim1U = as.integer(M),
-           errcode = integer(1 + NOS), othint = as.integer(othint),
-       deviance = double(1+NOS), beta = as.double(usethisbeta),
-           othdbl = as.double(c(small = control$SmallNo,
-                  epsilon = control$epsilon, .Machine$double.eps,
-                  iKvector = rep(control$iKvector, len = NOS),
-                  iShape = rep(control$iShape, len = NOS))))
-
-
-    if (ans1$errcode[1] == 0) {
-        assign2VGAMenv(c("etamat", "z", "U", "beta", "deviance"),
-                            ans1, prefix = ".VGAM.CQO.")
-        if (is.R()) {
-            assign(".VGAM.CQO.cmatrix",   cmatrix, envir = VGAM:::VGAMenv)
-            assign(".VGAM.CQO.ocmatrix", ocmatrix, envir = VGAM:::VGAMenv)
-        } else {
-        }
-    } else {
- print("hi 88 20100402; all the species did not converge in callcqo")
+  ans1 <- if (nice31)
+  .C("cqo_1",
+     numat = as.double(numat), as.double(ymat), 
+     as.double(if (p1) xmat[,control$colx1.index] else 999),
+     as.double(wvec), etamat = as.double(usethiseta),
+     moff = double(if (itol) n else 1),
+     fv = double(NOS*n), z = double(n*M), wz = double(n*M),
+     U = double(M*n), bnumat = as.double(bnumat),
+     qr = double(nstar*pstar), qraux = double(pstar),
+         qpivot = integer(pstar),
+     as.integer(n), as.integer(M), NOS = as.integer(NOS),
+         as.integer(nstar), dim1U = as.integer(M),
+         errcode = integer(1 + NOS), othint = as.integer(othint),
+     deviance = double(1+NOS), beta = as.double(usethisbeta),
+         othdbl = as.double(c(small = control$SmallNo,
+                epsilon = control$epsilon, .Machine$double.eps,
+                iKvector = rep(control$iKvector, len = NOS),
+                iShape = rep(control$iShape, len = NOS))), PACKAGE = "VGAM") else
+  .C("cqo_2",
+     numat = as.double(numat), as.double(ymat), 
+     as.double(if (p1) xmat[,control$colx1.index] else 999),
+     as.double(wvec), etamat = as.double(usethiseta),
+     moff = double(if (itol) n else 1),
+     fv = double(NOS*n), z = double(n*M), wz = double(n*M),
+     U = double(M*n), bnumat = as.double(bnumat),
+     qr = double(nstar*pstar), qraux = double(pstar),
+         qpivot = integer(pstar),
+     as.integer(n), as.integer(M), NOS = as.integer(NOS),
+         as.integer(nstar), dim1U = as.integer(M),
+         errcode = integer(1 + NOS), othint = as.integer(othint),
+     deviance = double(1+NOS), beta = as.double(usethisbeta),
+         othdbl = as.double(c(small = control$SmallNo,
+                epsilon = control$epsilon, .Machine$double.eps,
+                iKvector = rep(control$iKvector, len = NOS),
+                iShape = rep(control$iShape, len = NOS))), PACKAGE = "VGAM")
+
+
+
+
+
+
+
+
+
+  if (ans1$errcode[1] == 0) {
+    assign2VGAMenv(c("etamat", "z", "U", "beta", "deviance"),
+                    ans1, prefix = ".VGAM.CQO.")
+    assign(".VGAM.CQO.cmatrix",   cmatrix, envir = VGAMenv)
+    assign(".VGAM.CQO.ocmatrix", ocmatrix, envir = VGAMenv)
+  } else {
         warning("error code in callcqoc = ", ans1$errcode[1])
     if (nice31) {
- print("ans1$errcode[-1]") # Only if (nice31)
- print( ans1$errcode[-1] )
     }
-        rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
-                        "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
-    }
-    if (control$trace)
-        flush.console()
-    if (allofit) list(deviance     = ans1$deviance[1],
-                      alldeviance  = ans1$deviance[-1],
-                      coefficients = ans1$beta) else ans1$deviance[1]
+    rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+                    "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
+  }
+  if (control$trace)
+    flush.console()
+  if (allofit) list(deviance     = ans1$deviance[1],
+                    alldeviance  = ans1$deviance[-1],
+                    coefficients = ans1$beta) else ans1$deviance[1]
 }
 
 
 
 calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
-                     X_vlm_1save, modelno, Control,
+                     X.vlm.1save, modelno, Control,
                      n, M, p1star, p2star, nice31, allofit = FALSE) {
-    control <- Control
-    Rank <- control$Rank
-    p1 <- length(control$colx1.index); p2 <- length(control$colx2.index)
-    dim(cmatrix) <- c(p2, Rank)  # for crow1C
-
-    xmat2 <- xmat[, control$colx2.index, drop = FALSE]   #ccc
-    numat <- double(n*Rank)  #ccc
-    pstar <- p1star + p2star
-    maxMr <- max(M, Rank)
-    nstar <- if (nice31)
-             ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
-    NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
-    lenbeta <- pstar * ifelse(nice31, NOS, 1)
-
-    if (itol <- control$ITolerances) {
-        if (Rank > 1) {
-            numat <- xmat[, control$colx2.index, drop=FALSE] %*% cmatrix
-            evnu <- eigen(var(numat))
-            cmatrix <- cmatrix %*% evnu$vector
-        }
+  control <- Control
+  Rank <- control$Rank
+  p1 <- length(control$colx1.index); p2 <- length(control$colx2.index)
+  dim(cmatrix) <- c(p2, Rank)  # for crow1C
+
+  xmat2 <- xmat[, control$colx2.index, drop = FALSE]   #ccc
+  numat <- double(n*Rank)  #ccc
+  pstar <- p1star + p2star
+  maxMr <- max(M, Rank)
+  nstar <- if (nice31)
+           ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
+  NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
+  lenbeta <- pstar * ifelse(nice31, NOS, 1)
+
+  if (itol <- control$ITolerances) {
+    if (Rank > 1) {
+      numat <- xmat[, control$colx2.index, drop=FALSE] %*% cmatrix
+      evnu <- eigen(var(numat))
+      cmatrix <- cmatrix %*% evnu$vector
+    }
 
-        cmatrix <- crow1C(cmatrix, control$Crow1positive)
-        numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
-        sdnumat <- apply(numat, 2, sd)
-        for(lookat in 1:Rank)
-          if (sdnumat[lookat] > control$MUXfactor[lookat] *
-                                control$isdlv[lookat]) {
-                muxer <- control$isdlv[lookat] *
-                        control$MUXfactor[lookat] / sdnumat[lookat]
-                cmatrix[,lookat] <- cmatrix[,lookat] * muxer
-                if (control$trace) {
-                    cat(paste("Taking evasive action for latent variable ",
-                              lookat, ".\n", sep=""))
-                    flush.console()
-                }
-                rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
-                                "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
+    cmatrix <- crow1C(cmatrix, control$Crow1positive)
+    numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+    sdnumat <- apply(numat, 2, sd)
+    for (lookat in 1:Rank)
+      if (sdnumat[lookat] > control$MUXfactor[lookat] *
+                            control$isd.latvar[lookat]) {
+          muxer <- control$isd.latvar[lookat] *
+                  control$MUXfactor[lookat] / sdnumat[lookat]
+          cmatrix[, lookat] <- cmatrix[, lookat] * muxer
+          if (control$trace) {
+            cat(paste("Taking evasive action for latent variable ",
+                      lookat, ".\n", sep = ""))
+            flush.console()
           }
-    } else {
-        numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
-        evnu <- eigen(var(numat))
-        temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
-                              evnu$vector %*% evnu$value^(-0.5)
+          rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+                          "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
+      }
+  } else {
+    numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+    evnu <- eigen(var(numat))
+    temp7 <- if (Rank > 1)
+                   evnu$vector %*% diag(evnu$value^(-0.5)) else
+                   evnu$vector %*% evnu$value^(-0.5)
         cmatrix <- cmatrix %*% temp7
         cmatrix <- crow1C(cmatrix, control$Crow1positive)
         numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-    }
+  }
 
-    inited <- if (is.R()) {
-        if (exists(".VGAM.CQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
-    } else 0
+    inited <- ifelse(exists(".VGAM.CQO.etamat",
+                            envir = VGAMenv), 1, 0)
 
 
     usethiseta <- if (inited == 1) 
-        getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
+      getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
     usethisbeta <- if (inited == 2) 
-        getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
+      getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
 
     othint <- c(Rank, control$EqualTol, pstar, dimw = 1, inited = inited,
-               modelno, maxitl = control$maxitl, actnits = 0, twice = 0, 
-               p1star = p1star, p2star = p2star, nice31 = nice31, lenbeta,
-               itol = itol, control$trace,
-               p1, p2, control$imethod) # other ints
+                modelno, maxitl = control$maxitl, actnits = 0, twice = 0,
+                p1star = p1star, p2star = p2star,
+                nice31 = nice31, lenbeta,
+                itol = itol, control$trace,
+                p1, p2, control$imethod)  # other ints
     bnumat <- if (nice31) matrix(0,nstar,pstar) else
-             cbind(matrix(0,nstar,p2star), X_vlm_1save)
+             cbind(matrix(0,nstar,p2star), X.vlm.1save)
     flush.console()
 
     ans1 <- 
-    dotC(name = "dcqo1", numat = as.double(numat), as.double(ymat), 
+    .C("dcqo1",
+       numat = as.double(numat), as.double(ymat), 
        as.double(if (p1) xmat[,control$colx1.index] else 999),
        as.double(wvec), etamat = as.double(usethiseta),
            moff = double(if (itol) n else 1),
            fv = double(NOS*n), z = double(n*M), wz = double(n*M),
            U = double(M*n), bnumat = as.double(bnumat),
-       qr = double(nstar * pstar), qraux = double(pstar), qpivot = integer(pstar),
+       qr = double(nstar * pstar), qraux = double(pstar),
+       qpivot = integer(pstar),
        as.integer(n), as.integer(M), NOS = as.integer(NOS),
        as.integer(nstar), dim1U = as.integer(M),
            errcode = integer(1 + NOS), othint = as.integer(othint),
@@ -212,13 +234,10 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
        xmat2 = as.double(xmat2),
            cmat = as.double(cmatrix),
        p2 = as.integer(p2), deriv = double(p2*Rank),
-           hstep = as.double(control$Hstep))
+           hstep = as.double(control$Hstep), PACKAGE = "VGAM")
 
     if (ans1$errcode[1] != 0) {
-        warning("error code in calldcqo = ", ans1$errcode[1])
- print("hi 88 20100402; all the species did not converge in calldcqo")
- print("ans1$errcode[]")
- print( ans1$errcode[] )
+      warning("error code in calldcqo = ", ans1$errcode[1])
     }
 
     flush.console()
@@ -238,16 +257,16 @@ checkCMCO <- function(Blist, control, modelno) {
     stop("an intercept term must be in the argument 'noRRR' formula")
   Blist1 <- vector("list", p1) 
   Blist2 <- vector("list", p2)
-  for(kk in 1:p1)
+  for (kk in 1:p1)
     Blist1[[kk]] <- Blist[[(colx1.index[kk])]]
-  for(kk in 1:p2)
+  for (kk in 1:p2)
     Blist2[[kk]] <- Blist[[(colx2.index[kk])]]
 
   if (modelno == 3 || modelno == 5) {
     if (p1 > 1)
-      for(kk in 2:p1)
+      for (kk in 2:p1)
         Blist1[[kk]] <- (Blist1[[kk]])[c(TRUE,FALSE),,drop = FALSE]
-    for(kk in 1:p2)
+    for (kk in 1:p2)
       Blist2[[kk]] <- (Blist2[[kk]])[c(TRUE,FALSE),,drop = FALSE]
   }
 
@@ -258,7 +277,7 @@ checkCMCO <- function(Blist, control, modelno) {
         stop("the constraint matrices for intercept term is ",
              "not trivial")
     if (p1 > 1)
-        for(kk in 2:p1)
+        for (kk in 2:p1)
             if (!trivial.constraints(list(Blist1[[kk]])))
                 stop("the constraint matrices for some 'noRRR' ",
                      "terms is not trivial")
@@ -289,6 +308,9 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
   if (!all(offset == 0))
     stop("cqo.fit() cannot handle offsets")
+
+  eff.n <- nrow(x)  # + sum(abs(w[1:nrow(x)]))
+
   specialCM <- NULL
   post <- list()
   nonparametric <- FALSE
@@ -314,74 +336,76 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
     rrcontrol <- control  #
 
     if (length(family@initialize))
-        eval(family@initialize)     # Initialize mu and M (and optionally w)
+      eval(family@initialize)  # Initialize mu and M (and optionally w)
     n <- n.save 
 
     eval(rrr.init.expression)
 
 
     if (length(etastart)) {
-        eta <- etastart
-        mu <- if (length(mustart)) mustart else
-              if (length(body(slot(family, "linkinv"))))
-                slot(family, "linkinv")(eta, extra) else
-                warning("argument 'etastart' assigned a value ",
-                        "but there is no 'linkinv' slot to use it")
+      eta <- etastart
+      mu <- if (length(mustart)) mustart else
+            if (length(body(slot(family, "linkinv"))))
+              slot(family, "linkinv")(eta, extra) else
+              warning("argument 'etastart' assigned a value ",
+                      "but there is no 'linkinv' slot to use it")
     }
 
     if (length(mustart)) {
-        mu <- mustart
-        if (length(body(slot(family, "linkfun")))) {
-          eta <- slot(family, "linkfun")(mu, extra)
-        } else {
-          warning("argument 'mustart' assigned a value ",
-                  "but there is no 'link' slot to use it")
-        }
+      mu <- mustart
+      if (length(body(slot(family, "linkfun")))) {
+        eta <- slot(family, "linkfun")(mu, extra)
+      } else {
+        warning("argument 'mustart' assigned a value ",
+                "but there is no 'link' slot to use it")
+      }
     }
 
 
     M <- if (is.matrix(eta)) ncol(eta) else 1
 
     if (is.character(rrcontrol$Dzero)) {
-        index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]]) 
-        if (any(is.na(index)))
-            stop("Dzero argument didn't fully match y-names")
-        if (length(index) == M)
-            stop("all linear predictors are linear in the",
-                 " latent variable(s); so set 'Quadratic=FALSE'")
-        rrcontrol$Dzero <- control$Dzero <- index
+      index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
+      if (any(is.na(index)))
+        stop("Dzero argument didn't fully match y-names")
+      if (length(index) == M)
+        stop("all linear predictors are linear in the",
+             " latent variable(s); so set 'Quadratic=FALSE'")
+      rrcontrol$Dzero <- control$Dzero <- index
     }
 
     if (length(family@constraints))
         eval(family@constraints)
 
 
-    special.matrix <- matrix(-34956.125, M, M)    # An unlikely used matrix
-    just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
+    special.matrix <- matrix(-34956.125, M, M)  # An unlikely used matrix
+    just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR,
+                            constraints)
     findex <- trivial.constraints(just.testing, special.matrix)
     tc1 <- trivial.constraints(constraints)
 
     if (!control$Quadratic && sum(!tc1)) {
-        for(ii in names(tc1))
-            if (!tc1[ii] && !any(ii == names(findex)[findex == 1]))
-              warning("'", ii, "' is a non-trivial constraint that will ",
-                      "be overwritten by reduced-rank regression")
+      for (ii in names(tc1))
+        if (!tc1[ii] && !any(ii == names(findex)[findex == 1]))
+          warning("'", ii, "' is a non-trivial constraint that will ",
+                  "be overwritten by reduced-rank regression")
     }
 
     if (all(findex == 1))
-        stop("use vglm(), not rrvglm()!")
+      stop("use vglm(), not rrvglm()!")
     colx1.index <- names.colx1.index <- NULL
     dx2 <- dimnames(x)[[2]]
     if (sum(findex)) {
-        asx <- attr(x, "assign")
-        for(ii in names(findex))
-            if (findex[ii]) {
-                names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
-                colx1.index <- c(colx1.index, asx[[ii]])
+      asx <- attr(x, "assign")
+      for (ii in names(findex))
+        if (findex[ii]) {
+          names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
+          colx1.index <- c(colx1.index, asx[[ii]])
         }
-        names(colx1.index) <- names.colx1.index
+      names(colx1.index) <- names.colx1.index
     }
-    rrcontrol$colx1.index <- control$colx1.index <- colx1.index
+    rrcontrol$colx1.index <-
+      control$colx1.index <- colx1.index
     colx2.index <- 1:ncol(x)
     names(colx2.index) <- dx2
     colx2.index <- colx2.index[-colx1.index]
@@ -391,91 +415,95 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
 
 
-    Amat <- if (length(rrcontrol$Ainit)) rrcontrol$Ainit else
-            matrix(rnorm(M * Rank, sd = rrcontrol$SD.Cinit), M, Rank)
+    Amat <- if (length(rrcontrol$Ainit))
+            rrcontrol$Ainit else
+            matrix(rnorm(M * Rank, sd = rrcontrol$sd.Cinit), M, Rank)
 
     Cmat <- if (length(rrcontrol$Cinit)) {
                matrix(rrcontrol$Cinit, p2, Rank)
-           } else {
-                if (!rrcontrol$Use.Init.Poisson.QO) {
-                  matrix(rnorm(p2 * Rank, sd = rrcontrol$SD.Cinit), p2, Rank)
-                } else
-                  .Init.Poisson.QO(ymat = as.matrix(y), 
-                      X1 = x[, colx1.index, drop = FALSE],
-                      X2 = x[, colx2.index, drop = FALSE],
-                      Rank = rrcontrol$Rank, trace = rrcontrol$trace,
-                      max.ncol.etamat = rrcontrol$Etamat.colmax,
-                      Crow1positive = rrcontrol$Crow1positive,
-                      isdlv = rrcontrol$isdlv,
-                      constwt = any(family@vfamily[1] ==
-                      c("negbinomial","gamma2","gaussianff")),
-                      takelog = any(family at vfamily[1] != c("gaussianff")))
-            }
+            } else {
+              if (!rrcontrol$Use.Init.Poisson.QO) {
+                matrix(rnorm(p2 * Rank, sd = rrcontrol$sd.Cinit),
+                       p2, Rank)
+              } else {
+                .Init.Poisson.QO(ymat = as.matrix(y),
+                    X1 = x[, colx1.index, drop = FALSE],
+                    X2 = x[, colx2.index, drop = FALSE],
+                    Rank = rrcontrol$Rank, trace = rrcontrol$trace,
+                    max.ncol.etamat = rrcontrol$Etamat.colmax,
+                    Crow1positive = rrcontrol$Crow1positive,
+                    isd.latvar = rrcontrol$isd.latvar,
+                    constwt = family@vfamily[1] %in%
+                              c("negbinomial", "gamma2", "gaussianff"),
+                    takelog = any(family@vfamily[1] != c("gaussianff")))
+              }
+          }
 
     if (rrcontrol$ITolerances) {
-        lvmat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
-        lvmatmeans <- t(lvmat) %*% matrix(1/n, n, 1)
-        if (!all(abs(lvmatmeans) < 4))
-            warning("ITolerances=TRUE but the variables making up the ",
-                    "latent variable(s) do not appear to be centered.")
+      latvarmat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
+      latvarmatmeans <- t(latvarmat) %*% matrix(1/n, n, 1)
+      if (!all(abs(latvarmatmeans) < 4))
+        warning("ITolerances = TRUE but the variables making up the ",
+                "latent variable(s) do not appear to be centered.")
     }
-    if (modelno == 3 || modelno == 5) 
-        Amat[c(FALSE,TRUE),] <- 0  # Intercept only for log(k)
+    if (modelno %in% c(3, 5))
+      Amat[c(FALSE, TRUE), ] <- 0  # Intercept only for log(k)
 
-    if (length(control$szero))
-        Amat[control$szero,] <- 0
+    if (length(control$str0))
+      Amat[control$str0, ] <- 0
 
-    rrcontrol$Ainit <- control$Ainit <- Amat   # Good for valt()
-    rrcontrol$Cinit <- control$Cinit <- Cmat   # Good for valt()
+    rrcontrol$Ainit <- control$Ainit <- Amat  # Good for valt()
+    rrcontrol$Cinit <- control$Cinit <- Cmat  # Good for valt()
 
     Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
     nice31 <- checkCMCO(Blist, control = control, modelno = modelno)
     ncolBlist <- unlist(lapply(Blist, ncol))
     dimB <- sum(ncolBlist)
 
-    X_vlm_save <- if (nice31) {
-        NULL 
+    X.vlm.save <- if (nice31) {
+      NULL 
     } else {
-        tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
-                                        C = Cmat, control = control)
-        xsmall.qrr <- tmp500$new.lv.model.matrix 
-        B.list <- tmp500$constraints
-        lv.mat <- tmp500$lv.mat
-        if (length(tmp500$offset)) {
-            offset <- tmp500$offset 
-        }
-        lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
+      tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
+                                       C = Cmat, control = control)
+      xsmall.qrr <- tmp500$new.latvar.model.matrix 
+      B.list <- tmp500$constraints
+      latvar.mat <- tmp500$latvar.mat
+      if (length(tmp500$offset)) {
+        offset <- tmp500$offset 
+      }
+      lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
     }
 
-    if (length(coefstart) && length(X_vlm_save)) {
-        eta <- if (ncol(X_vlm_save) > 1) X_vlm_save %*% coefstart +
-                   offset else X_vlm_save * coefstart + offset
-        eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta) 
-        mu <- family@linkinv(eta, extra)
+    if (length(coefstart) && length(X.vlm.save)) {
+      eta <- if (ncol(X.vlm.save) > 1)
+               X.vlm.save %*% coefstart + offset else
+               X.vlm.save  *  coefstart + offset
+      eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta) 
+      mu <- family@linkinv(eta, extra)
     }
 
     rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
                     "cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
 
     eval(cqo.init.derivative.expression)
-    for(iter in 1:control$optim.maxit) {
-        eval(cqo.derivative.expression)
-        if (!quasi.newton$convergence) break
+    for (iter in 1:control$optim.maxit) {
+      eval(cqo.derivative.expression)
+      if (!quasi.newton$convergence) break
     }
     if (maxitl > 1 && iter >= maxitl && quasi.newton$convergence)
-        warning("convergence not obtained in", maxitl, "iterations.")
+      warning("convergence not obtained in", maxitl, "iterations.")
 
     if (length(family@fini))
-        eval(family@fini)
+      eval(family@fini)
 
     asgn <- attr(x, "assign")
     coefs <- getfromVGAMenv("beta", prefix = ".VGAM.CQO.")
     if (control$ITolerances) {
-        if (NOS == M) {
-            coefs <- c(t(matrix(coefs, ncol = M))) # Get into right order
-        } else {
-            coefs <- coefs
-        }
+      if (NOS == M) {
+        coefs <- c(t(matrix(coefs, ncol = M)))  # Get into right order
+      } else {
+        coefs <- coefs
+      }
     }
 
     dn <- labels(x)
@@ -483,23 +511,23 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
     xn <- dn[[2]]
     residuals <- z - fv
     if (M == 1) {
-        residuals <- as.vector(residuals)
-        names(residuals) <- yn
+      residuals <- as.vector(residuals)
+      names(residuals) <- yn
     } else {
-        dimnames(residuals) <- list(yn, predictors.names)
+      dimnames(residuals) <- list(yn, predictors.names)
     }
 
     if (is.matrix(mu)) {
-          if (length(dimnames(y)[[2]])) {
-              y.names <- dimnames(y)[[2]]
-          }
-          if (length(dimnames(mu)[[2]])) {
-              y.names <- dimnames(mu)[[2]]
-          }
-          dimnames(mu) <- list(yn, y.names)
+      if (length(dimnames(y)[[2]])) {
+        y.names <- dimnames(y)[[2]]
+      }
+      if (length(dimnames(mu)[[2]])) {
+        y.names <- dimnames(mu)[[2]]
+      }
+      dimnames(mu) <- list(yn, y.names)
     } else {
-        names(mu) <- names(fv)
-        y.names <- NULL
+      names(mu) <- names(fv)
+      y.names <- NULL
     }
 
     df.residual <- 55 - 8 - Rank*p2
@@ -511,10 +539,10 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
                 fitted.values = mu,
                 offset = offset, 
                 residuals = residuals,
-                terms = Terms) # terms: This used to be done in vglm() 
+                terms = Terms)  # terms: This used to be done in vglm() 
 
     if (M == 1) {
-        wz <- as.vector(wz)  # Convert wz into a vector
+      wz <- as.vector(wz)  # Convert wz into a vector
     }
     fit$weights <- if (save.weight) wz else NULL
 
@@ -532,16 +560,17 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
         ynames = dimnames(y)[[2]])
 
     if (w[1] != 1 || any(w != w[1]))
-        fit$prior.weights <- w
+      fit$prior.weights <- w
 
     if (length(family@last))
-        eval(family@last)
+      eval(family@last)
 
     edeviance <- getfromVGAMenv("deviance", prefix = ".VGAM.CQO.")
-    crit.list <- list(deviance = edeviance[1], alldeviance = edeviance[-1])
+    crit.list <- list(   deviance = edeviance[ 1],
+                      alldeviance = edeviance[-1])
     if (is.character(y.names) &&
         length(y.names) == length(crit.list$alldeviance))
-            names(crit.list$alldeviance) = y.names
+      names(crit.list$alldeviance) <- y.names
     structure(c(fit, list(predictors = matrix(eta, n, M),
         contrasts = attr(x, "contrasts"),
         control = control,
@@ -551,7 +580,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
         iter = iter,
         misc = misc,
         post = post,
-        rss = 000,
+        res.ss = 000,
         x = x,
         y = y)),
         vclass = family@vfamily)
@@ -562,166 +591,168 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
 .Init.Poisson.QO <-
   function(ymat, X1, X2, Rank = 1, epsilon = 1/32,
            max.ncol.etamat = 10,
-           trace = FALSE, Crow1positive = rep(TRUE, len = Rank),
-           isdlv = rep(1, lengt = Rank),
+           trace = FALSE,
+           Crow1positive = rep(TRUE, len = Rank),
+           isd.latvar = rep(1, length = Rank),
            constwt = FALSE, takelog = TRUE) {
 
-    print.CQO.expression = expression({
-        if (trace && length(X2)) {
-            cat("\nUsing initial values\n")
-            dimnames(ans) <- list(dimnames(X2)[[2]],
-                            if (Rank == 1) "lv" else
-                            paste("lv", 1:Rank, sep = ""))
-            if (p2 > 5) print(ans, dig = 3) else  print(t(ans), dig = 3)
-        }
-        flush.console()
-    })
-
-    sd.scale.X2.expression <- expression({
-        if (length(isdlv)) {
-            actualSD <- c( sqrt(diag(var(X2 %*% ans))) )
-            for(ii in 1:Rank)
-                ans[,ii] <- ans[,ii] * isdlv[ii] / actualSD[ii]
-        }
-    })
-
-    Crow1positive <- if (length(Crow1positive))
-        rep(Crow1positive, len = Rank) else
-        rep(TRUE, len = Rank)
-    if (epsilon <= 0) 
-        stop("epsilon > 0 is required")
-    ymat <- cbind(ymat) + epsilon  # ymat == 0 cause problems
-    NOS <- ncol(ymat)
-    p2 <- ncol(X2)
-    if (NOS < 2*Rank) {
-      ans <- crow1C(matrix(rnorm(p2 * Rank, sd = 0.02), p2, Rank),
-                    Crow1positive)
-      eval(sd.scale.X2.expression)
-      if (NOS == 1) {
-        eval(print.CQO.expression) 
-        return(ans)
-      } else {
-        ans.save <- ans;   # ans.save contains scaled guesses
-      }
+  print.CQO.expression <- expression({
+    if (trace && length(X2)) {
+      cat("\nUsing initial values\n")
+      dimnames(ans) <- list(dimnames(X2)[[2]],
+                       if (Rank == 1) "latvar" else
+                       paste("latvar", 1:Rank, sep = ""))
+      print(if (p2 > 5) ans else t(ans), dig = 3)
     }
+    flush.console()
+  })
 
-    calS <- 1:NOS  # Set of all species available for the approximation
-    effrank <- min(Rank, floor(NOS/2))  # effective rank
-    ncol.etamat <- min(if (length(X2)) floor(NOS/2) else effrank,
-                      max.ncol.etamat)
-    etamat <-
-    wts <- matrix(0, nrow = nrow(ymat), ncol = ncol.etamat) # has >=1 coln
-    rr <- 1
-    for(ii in 1:floor(NOS/2)) {
-        if (length(calS) < 2) break
-        index <- sample(calS, size = 2)   # Randomness here
-        etamat[,rr] <- etamat[,rr] + (if (takelog)
-                      log(ymat[,index[1]] / ymat[,index[2]]) else
-                          ymat[,index[1]] - ymat[,index[2]])
-        wts[,rr] <- wts[,rr] +
-                   (if (constwt) 1 else ymat[,index[1]] + ymat[,index[2]])
-        calS <- setdiff(calS, index)
-        rr <- (rr %% ncol.etamat) + 1
+  sd.scale.X2.expression <- expression({
+    if (length(isd.latvar)) {
+      actualSD <- c( sqrt(diag(var(X2 %*% ans))) )
+      for (ii in 1:Rank)
+        ans[,ii] <- ans[,ii] * isd.latvar[ii] / actualSD[ii]
     }
-    if (trace)
-        cat("\nObtaining initial values\n")
-
-    if (length(X2)) {
-        alt <- valt(x = cbind(X1, X2), z = etamat,
-                    U = sqrt(t(wts)), Rank = effrank,
-                    Blist = NULL, Cinit = NULL, trace = FALSE,
-                    colx1.index = 1:ncol(X1), Criterion = "rss")
-        temp.control <- list(Rank = effrank, colx1.index = 1:ncol(X1),
-                             Alpha = 0.5,
-                           colx2.index = (ncol(X1)+1):(ncol(X1) + ncol(X2)),
-                             Corner = FALSE, Svd.arg = TRUE,
-                             Uncorrelated.lv = TRUE, Quadratic = FALSE)
-        
-        ans2 <- if (Rank > 1)
-               rrr.normalize(rrcontrol = temp.control, A = alt$A, 
-                             C = alt$C, x = cbind(X1, X2)) else alt
-        ans <- crow1C(ans2$C, rep(Crow1positive, length.out = effrank))
-
-        Rank.save <- Rank
-        Rank <- effrank
-        eval(sd.scale.X2.expression)
-        Rank <- Rank.save 
-
-        if (effrank < Rank) {
-            ans <- cbind(ans, ans.save[,-(1:effrank)]) # ans is better
-        }
-        eval(print.CQO.expression)
+  })
+
+  Crow1positive <- if (length(Crow1positive))
+                   rep(Crow1positive, len = Rank) else
+                   rep(TRUE, len = Rank)
+  if (epsilon <= 0) 
+    stop("epsilon > 0 is required")
+  ymat <- cbind(ymat) + epsilon  # ymat == 0 cause problems
+  NOS <- ncol(ymat)
+  p2 <- ncol(X2)
+  if (NOS < 2*Rank) {
+    ans <- crow1C(matrix(rnorm(p2 * Rank, sd = 0.02), p2, Rank),
+                  Crow1positive)
+    eval(sd.scale.X2.expression)
+    if (NOS == 1) {
+      eval(print.CQO.expression) 
+      return(ans)
     } else {
-        xij <- NULL # temporary measure
-        U <- t(sqrt(wts))
-        tmp <- vlm.wfit(xmat = X1, zmat = etamat, Blist = NULL, U = U,
-                       matrix.out = TRUE,
-                       is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij = xij)
-        ans <- crow1C(as.matrix(tmp$resid),
-                     rep(Crow1positive, length.out = effrank))
-        if (effrank < Rank) {
-            ans <- cbind(ans, ans.save[,-(1:effrank)]) # ans is better
-        }
+      ans.save <- ans;   # ans.save contains scaled guesses
+    }
+  }
 
-        if (Rank > 1) {
-            evnu <- eigen(var(ans))
-            ans <- ans %*% evnu$vector
-        }
+  calS <- 1:NOS  # Set of all species available for the approximation
+  effrank <- min(Rank, floor(NOS/2))  # Effective rank
+  ncol.etamat <- min(if (length(X2)) floor(NOS/2) else effrank,
+                     max.ncol.etamat)
+  etamat <-
+  wts <- matrix(0, nrow = nrow(ymat), ncol = ncol.etamat)  # has >=1 coln
+  rr <- 1
+  for (ii in 1:floor(NOS/2)) {
+    if (length(calS) < 2) break
+    index <- sample(calS, size = 2)  # Randomness here
+    etamat[, rr] <- etamat[, rr] + (if (takelog)
+                    log(ymat[, index[1]] / ymat[, index[2]]) else
+                        ymat[, index[1]] - ymat[, index[2]])
+    wts[, rr] <- wts[, rr] +
+                 (if (constwt) 1 else ymat[, index[1]] + ymat[, index[2]])
+    calS <- setdiff(calS, index)
+    rr <- (rr %% ncol.etamat) + 1
+  }
+  if (trace)
+    cat("\nObtaining initial values\n")
+
+  if (length(X2)) {
+    alt <- valt(x = cbind(X1, X2), z = etamat,
+                U = sqrt(t(wts)), Rank = effrank,
+                Blist = NULL, Cinit = NULL, trace = FALSE,
+                colx1.index = 1:ncol(X1), Criterion = "res.ss")
+    temp.control <- list(Rank = effrank, colx1.index = 1:ncol(X1),
+                         Alpha = 0.5,
+                         colx2.index = (ncol(X1)+1):(ncol(X1) + ncol(X2)),
+                         Corner = FALSE, Svd.arg = TRUE,
+                         Uncorrelated.latvar = TRUE, Quadratic = FALSE)
+    
+    ans2 <- if (Rank > 1)
+              rrr.normalize(rrcontrol = temp.control, A = alt$A, 
+                            C = alt$C, x = cbind(X1, X2)) else
+              alt
+    ans <- crow1C(ans2$C, rep(Crow1positive, length.out = effrank))
+
+    Rank.save <- Rank
+    Rank <- effrank
+    eval(sd.scale.X2.expression)
+    Rank <- Rank.save 
+
+    if (effrank < Rank) {
+      ans <- cbind(ans, ans.save[,-(1:effrank)])  # ans is better
+    }
+    eval(print.CQO.expression)
+  } else {
+    xij <- NULL # temporary measure
+    U <- t(sqrt(wts))
+    tmp <- vlm.wfit(xmat = X1, zmat = etamat, Blist = NULL, U = U,
+                    matrix.out = TRUE,
+                    is.vlmX = FALSE, res.ss = TRUE, qr = FALSE, xij = xij)
+    ans <- crow1C(as.matrix(tmp$resid),
+                  rep(Crow1positive, length.out = effrank))
+    if (effrank < Rank) {
+      ans <- cbind(ans, ans.save[,-(1:effrank)])  # ans is better
+    }
 
-        if (length(isdlv)) {
-            actualSD <- apply(cbind(ans), 2, sd)
-            for(ii in 1:Rank)
-                ans[,ii] <- ans[,ii] * isdlv[ii] / actualSD[ii]
-        }
-        ans <- crow1C(ans, rep(Crow1positive, length.out = Rank))
-        dimnames(ans) <- list(dimnames(X1)[[1]],
-                       if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = ""))
-        if (trace) {
-          if (nrow(ans) > 10) print(t(ans), dig = 3) else
-                              print(ans, dig = 3)
-        }
+    if (Rank > 1) {
+      evnu <- eigen(var(ans))
+      ans <- ans %*% evnu$vector
+    }
+
+    if (length(isd.latvar)) {
+      actualSD <- apply(cbind(ans), 2, sd)
+      for (ii in 1:Rank)
+        ans[,ii] <- ans[,ii] * isd.latvar[ii] / actualSD[ii]
+    }
+    ans <- crow1C(ans, rep(Crow1positive, length.out = Rank))
+    dimnames(ans) <- list(dimnames(X1)[[1]],
+                          if (Rank == 1) "latvar" else
+                                         paste("latvar", 1:Rank,
+                                               sep = ""))
+    if (trace) {
+      print(if (nrow(ans) > 10) t(ans) else ans, dig = 3)
     }
-    ans
+  }
+  ans
 }
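
The initializer above builds working responses from log-ratios of randomly
paired species columns, so that under a log link the shared site effects
cancel. A minimal sketch of that pairing step on toy counts (not a VGAM
call; eta.work and wts.work are illustrative names, and the small constant
plays the role of the epsilon argument above):

## Toy illustration of the pairing idea in .Init.Poisson.QO().
set.seed(123)
ymat <- matrix(rpois(30, lambda = 5), nrow = 10, ncol = 3) + 1/32  # avoid log(0)
index <- sample(ncol(ymat), size = 2)                 # a random pair of species
eta.work <- log(ymat[, index[1]] / ymat[, index[2]])  # working response
wts.work <- ymat[, index[1]] + ymat[, index[2]]       # crude working weights
head(cbind(eta.work, wts.work))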
 
 
 
 cqo.init.derivative.expression <- expression({
-    which.optimizer <- if (is.R()) {
-        if (control$Quadratic && control$FastAlgorithm) {
-          "BFGS" 
-        } else {
-          if (iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
-        }
-    } else "Quasi-Newton" 
-    if (trace && control$OptimizeWrtC) {
-        cat("\nUsing", which.optimizer, "algorithm\n")
-        flush.console()
-    } 
+  which.optimizer <- if (control$Quadratic && control$FastAlgorithm) {
+    "BFGS" 
+  } else {
+    ifelse(iter <= rrcontrol$Switch.optimizer, "Nelder-Mead", "BFGS")
+  }
+
+
+  if (trace && control$OptimizeWrtC) {
+    cat("\nUsing", which.optimizer, "algorithm\n")
+    flush.console()
+  } 
 
 
  if (FALSE) {
     constraints <- replace.constraints(constraints, diag(M),
-                                      rrcontrol$colx2.index)
+                                       rrcontrol$colx2.index)
 
     nice31 <- (!control$EqualTol || control$ITolerances) &&
-             all(trivial.constraints(constraints) == 1)
-}
+               all(trivial.constraints(constraints) == 1)
+  }
 
-    NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
-    canfitok <- if (is.R()) 
-        (exists("CQO.FastAlgorithm", envir=VGAM:::VGAMenv) &&
-        get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)) else
-    (exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
-    if (!canfitok)
-        stop("cannot fit this model using fast algorithm")
-
-    p2star <- if (nice31) 
-      ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
-      (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
-    p1star <- if (nice31) ifelse(modelno == 3 || modelno == 5,1+p1,p1) else
-             (ncol(X_vlm_save)-p2star)
-    X_vlm_1save <- if (p1star > 0) X_vlm_save[,-(1:p2star)] else NULL
+  NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
+  canfitok <- (exists("CQO.FastAlgorithm", envir = VGAMenv) &&
+               get("CQO.FastAlgorithm", envir = VGAMenv))
+  if (!canfitok)
+    stop("cannot fit this model using fast algorithm")
+
+  p2star <- if (nice31) 
+    ifelse(control$IToleran, Rank, Rank + Rank*(Rank+1)/2) else
+    (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol, 1, NOS))
+
+  p1star <- if (nice31) ifelse(modelno %in% c(3, 5), 1+p1, p1) else
+            (ncol(X.vlm.save) - p2star)
+  X.vlm.1save <- if (p1star > 0) X.vlm.save[, -(1:p2star)] else NULL
 })
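
As a hedged reading of the counts computed above (the Rank and NOS values
below are purely illustrative), p2star is the number of canonical-coefficient
plus tolerance parameters under the various settings, and p1star is whatever
remains of the big VLM model matrix:

## Illustrative arithmetic only; mirrors the p2star formula above.
Rank <- 2; NOS <- 4
c(nice31.ITolerances = Rank,
  nice31.tolerances  = Rank + Rank * (Rank + 1) / 2,
  EqualTolerances    = NOS * Rank + Rank * (Rank + 1) / 2,
  unequal.tolerances = NOS * Rank + NOS * Rank * (Rank + 1) / 2)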
     
 
@@ -730,159 +761,157 @@ cqo.init.derivative.expression <- expression({
 cqo.derivative.expression <- expression({
 
 
-    if (iter == 1 || quasi.newton$convergence) {
-        quasi.newton <- optim(par = Cmat, fn = callcqoc,
-                gr = if (control$GradientFunction) calldcqo else NULL,
-                method = which.optimizer,
-                control = list(fnscale = 1,trace = as.integer(control$trace),
-                    parscale = rep(control$Parscale, len = length(Cmat)),
-                    maxit = control$Maxit.optim),
-                etamat = eta, xmat = x, ymat = y, wvec = w,
-                X_vlm_1save  =  X_vlm_1save,
-                modelno = modelno, Control = control,
-                n = n, M = M, p1star = p1star,
-                p2star = p2star, nice31 = nice31)
-
-        z <- matrix(getfromVGAMenv("z", prefix = ".VGAM.CQO."), n, M)
-        U <- matrix(getfromVGAMenv("U", prefix = ".VGAM.CQO."), M, n)
-    }
+  if (iter == 1 || quasi.newton$convergence) {
+    quasi.newton <- optim(par = Cmat, fn = callcqoc,
+      gr = if (control$GradientFunction) calldcqo else NULL,
+      method = which.optimizer,
+      control = list(fnscale = 1, trace = as.integer(control$trace),
+                     parscale = rep(control$Parscale, len = length(Cmat)),
+                     maxit = control$Maxit.optim),
+      etamat = eta, xmat = x, ymat = y, wvec = w,
+      X.vlm.1save  =  X.vlm.1save,
+      modelno = modelno, Control = control,
+      n = n, M = M, p1star = p1star,
+      p2star = p2star, nice31 = nice31)
+
+    z <- matrix(getfromVGAMenv("z", prefix = ".VGAM.CQO."), n, M)
+    U <- matrix(getfromVGAMenv("U", prefix = ".VGAM.CQO."), M, n)
+  }
 
 
-    ocmatrix <- getfromVGAMenv("ocmatrix", prefix = ".VGAM.CQO.")
-    maxdiff <- max(abs(c(ocmatrix) - c(quasi.newton$par)) / (1 +
-              abs(c(ocmatrix))))
-    if (maxdiff < 1.0e-4) {
-        Cmat <- getfromVGAMenv("cmatrix", prefix = ".VGAM.CQO.")
-    } else {
-        warning("solution does not correspond to .VGAM.CQO.cmatrix")
-    }
+  ocmatrix <- getfromVGAMenv("ocmatrix", prefix = ".VGAM.CQO.")
+  maxdiff <- max(abs(c(ocmatrix) - c(quasi.newton$par)) / (1 +
+                 abs(c(ocmatrix))))
+  if (maxdiff < 1.0e-4) {
+    Cmat <- getfromVGAMenv("cmatrix", prefix = ".VGAM.CQO.")
+  } else {
+    warning("solution does not correspond to .VGAM.CQO.cmatrix")
+  }
 
-    alt <- valt.1iter(x = x, z = z, U = U, Blist = Blist,
-                      C = Cmat, nice31 = nice31,
-                     control = rrcontrol, lp.names = predictors.names,
-                     MSratio = M/NOS)
-
-    if (length(alt$offset))
-        offset <- alt$offset
-
-    B1.save <- alt$B1 # Put later into extra  
-    tmp.fitted <- alt$fitted  # contains \bI_{Rank} \bnu if Corner
-
-    if (trace && control$OptimizeWrtC) {
-       cat("\n")
-       cat(which.optimizer, "using", if (is.R()) "optim():" else
-           "nlminb():", "\n")
-       cat("Objective =", quasi.newton$value, "\n")
-       cat("Parameters (= c(C)) = ", if (length(quasi.newton$par) < 5) ""
-           else "\n")
-       cat( if (is.R()) alt$Cmat else format(alt$Cmat), fill=TRUE)
-       cat("\n")
-       if (!is.R()) {
-           cat("Gradient norm =", format(quasi.newton$grad.norm), "\n")
-           cat("Number of gradient evaluations =", quasi.newton$g.evals,
-               "\n")
-       }
-       cat("Number of function evaluations =", if (is.R()) 
-           quasi.newton$count[1] else quasi.newton$f.evals, "\n")
-       if (length(quasi.newton$message))
-           cat("Message =", quasi.newton$message, "\n")
-       cat("\n")
-       flush.console()
-    }
+  alt <- valt.1iter(x = x, z = z, U = U, Blist = Blist,
+                    C = Cmat, nice31 = nice31,
+                    control = rrcontrol, lp.names = predictors.names,
+                    MSratio = M / NOS)
+
+  if (length(alt$offset))
+    offset <- alt$offset
+
+  B1.save <- alt$B1  # Put later into extra  
+  tmp.fitted <- alt$fitted  # contains \bI_{Rank} \bnu if Corner
+
+  if (trace && control$OptimizeWrtC) {
+    cat("\n")
+    cat(which.optimizer, "using optim():", "\n")
+    cat("Objective =", quasi.newton$value, "\n")
+    cat("Parameters (= c(C)) = ",
+        if (length(quasi.newton$par) < 5) "" else "\n")
+    cat(alt$Cmat, fill = TRUE)
+    cat("\n")
+    cat("Number of function evaluations =", quasi.newton$count[1], "\n")
+    if (length(quasi.newton$message))
+      cat("Message =", quasi.newton$message, "\n")
+    cat("\n")
+    flush.console()
+  }
 
-    Amat <- alt$Amat  # 
-    Cmat <- alt$Cmat  # 
-    Dmat <- alt$Dmat  # 
+  Amat <- alt$Amat  # 
+  Cmat <- alt$Cmat  # 
+  Dmat <- alt$Dmat  # 
 
-    eval(cqo.end.expression) #
+  eval(cqo.end.expression)  #
 })
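
The expression above picks the optimiser per outer iteration: Nelder-Mead
early on and BFGS afterwards, with BFGS throughout when the fast quadratic
algorithm is in use. A self-contained sketch of that switch with a toy
objective (stats::optim only; the Switch.optimizer threshold is illustrative):

## Toy quadratic objective; mimics the which.optimizer logic above.
fn <- function(p) sum((p - c(1, -2))^2)
iter <- 1; Switch.optimizer <- 3
method <- if (iter <= Switch.optimizer) "Nelder-Mead" else "BFGS"
optim(par = c(0, 0), fn = fn, method = method,
      control = list(maxit = 250, trace = 0))$par  # close to c(1, -2)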
 
 
 
-cqo.end.expression <- expression({
 
-    rmfromVGAMenv(c("etamat"), prefix = ".VGAM.CQO.")
+cqo.end.expression <- expression({
 
+  rmfromVGAMenv(c("etamat"), prefix = ".VGAM.CQO.")
 
-    if (control$Quadratic) {
-        if (!length(extra)) extra =list()
-        extra$Amat <- Amat      # Not the latest iteration ??
-        extra$Cmat <- Cmat      # Saves the latest iteration 
-        extra$Dmat <- Dmat      # Not the latest iteration
-        extra$B1   <- B1.save   # Not the latest iteration (not good)
-    } else {
-        Blist <- replace.constraints(Blist.save, Amat, colx2.index)
-    }
 
+  if (control$Quadratic) {
+    if (!length(extra))
+      extra <- list()
+    extra$Amat <- Amat  # Not the latest iteration ??
+    extra$Cmat <- Cmat  # Saves the latest iteration 
+    extra$Dmat <- Dmat     # Not the latest iteration
+    extra$B1   <- B1.save  # Not the latest iteration (not good)
+  } else {
+    Blist <- replace.constraints(Blist.save, Amat, colx2.index)
+  }
 
-    fv <- tmp.fitted            # Contains \bI \bnu
-    eta <- fv + offset
-    mu <- family@linkinv(eta, extra)
 
-    if (any(is.na(mu)))
-        warning("there are NAs in mu") 
+  fv <- tmp.fitted  # Contains \bI \bnu
+  eta <- fv + offset
+  mu <- family@linkinv(eta, extra)
 
-    deriv.mu <- eval(family@deriv)
-    wz <- eval(family@weight)
-    if (control$checkwz)
-        wz <- checkwz(wz, M = M, trace = trace, wzeps = control$wzepsilon)
-    U <- vchol(wz, M = M, n = n, silent = !trace)
-    tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
-    z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset # Contains \bI \bnu
+  if (any(is.na(mu)))
+    warning("there are NAs in mu") 
 
+  deriv.mu <- eval(family@deriv)
+  wz <- eval(family@weight)
+  if (control$checkwz)
+    wz <- checkwz(wz, M = M, trace = trace, wzeps = control$wzepsilon)
+  U <- vchol(wz, M = M, n = n, silent = !trace)
+  tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+  z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset  # Contains \bI \bnu
 
 
 
 })
 
+
+
 crow1C <- function(cmat,
-                  crow1positive = rep(TRUE, length.out = ncol(cmat)),
-                  amat = NULL) {
+                   crow1positive = rep(TRUE, length.out = ncol(cmat)),
+                   amat = NULL) {
   if (!is.logical(crow1positive) || length(crow1positive) != ncol(cmat))
     stop("bad input in crow1C")
 
-  for(LV in 1:ncol(cmat))
-    if (( crow1positive[LV] && cmat[1,LV] < 0) ||
-       (!crow1positive[LV] && cmat[1,LV] > 0)) {
-          cmat[,LV] <- -cmat[,LV]
-          if (length(amat)) amat[,LV] <- -amat[,LV]
+  for (LV in 1:ncol(cmat))
+    if (( crow1positive[LV] && cmat[1, LV] < 0) ||
+        (!crow1positive[LV] && cmat[1, LV] > 0)) {
+      cmat[, LV] <- -cmat[, LV]
+      if (length(amat))
+        amat[, LV] <- -amat[, LV]
     }
-  if (length(amat)) list(cmat = cmat, amat = amat) else cmat
+  if (length(amat))
+    list(cmat = cmat, amat = amat) else
+    cmat
 }
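
A small usage sketch of crow1C() as defined above (the matrix values are
arbitrary): any column whose first entry has the wrong sign is negated,
which resolves the sign indeterminacy of the constrained coefficients.

## Assumes the crow1C() defined above is available.
Cmat <- matrix(c(-0.4, 1.2, 0.7,  0.3, -0.5, 0.9), nrow = 3, ncol = 2)
crow1C(Cmat, crow1positive = c(TRUE, TRUE))
## Column 1 is flipped (its first entry was negative); column 2 is unchanged.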
 
 
 
 
+printqrrvglm <- function(x, ...) {
+  if (!is.null(cl <- x@call)) {
+    cat("Call:\n")
+    dput(cl)
+  }
 
-printqrrvglm <- function(x, ...)
-{
-    if (!is.null(cl <- x@call)) {
-            cat("Call:\n")
-            dput(cl)
-    }
 
-    if (FALSE) {
-    }
+  if (FALSE) {
+  }
 
-    if (FALSE) {
-        nobs <- if (length(x@df.total)) x@df.total else length(x@residuals)
-        rdf <- x@df.residual
-        if (!length(rdf))
-            rdf <- nobs - Rank
-    }
-    cat("\n")
+  if (FALSE) {
+    nobs <- if (length(x@df.total)) x@df.total else length(x@residuals)
+    rdf <- x@df.residual
+    if (!length(rdf))
+      rdf <- nobs - Rank
+  }
+  cat("\n")
 
-    if (length(deviance(x)))
-        cat("Residual deviance:", format(deviance(x)), "\n")
+  if (length(deviance(x)))
+    cat("Residual deviance:", format(deviance(x)), "\n")
 
-    if (FALSE && length(x@criterion)) {
-      ncrit <- names(x@criterion)
-      for(ii in ncrit)
-        if (ii != "loglikelihood" && ii != "deviance")
-          cat(paste(ii, ":", sep=""), format(x@criterion[[ii]]), "\n")
-    }
+  if (FALSE && length(x@criterion)) {
+    ncrit <- names(x@criterion)
+    for (ii in ncrit)
+      if (ii != "loglikelihood" && ii != "deviance")
+        cat(paste(ii, ":", sep = ""), format(x@criterion[[ii]]), "\n")
+  }
 
-    invisible(x)
+  invisible(x)
 }
 
 
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index c68f626..08972de 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -8,17 +8,17 @@
 
 
 deviance.vlm <- function(object, ...)
-    object@criterion$deviance
+  object@criterion$deviance
 
 
 deviance.vglm <- function(object, ...)
-    object@criterion$deviance
+  object@criterion$deviance
 
 
 
 if(!isGeneric("deviance"))
-    setGeneric("deviance", function(object, ...)
-    standardGeneric("deviance"))
+  setGeneric("deviance", function(object, ...)
+  standardGeneric("deviance"))
 
 
 setMethod("deviance", "vlm", function(object, ...)
@@ -63,13 +63,13 @@ nvar_vlm <- function(object, ...) {
 
   NumPars <- rep(0, length = M)
   for (jay in 1:M) {
-    X_lm_jay <- model.matrix(object, type = "lm", lapred.index = jay)
-    NumPars[jay] <- ncol(X_lm_jay)
+    X.lm.jay <- model.matrix(object, type = "lm", linpred.index = jay)
+    NumPars[jay] <- ncol(X.lm.jay)
   }
   if (length(object@misc$predictors.names) == M)
     names(NumPars) <- object@misc$predictors.names
   if (!all(NumPars == numPars)) {
-    print(NumPars - numPars) # Should be all 0s
+    print(NumPars - numPars)  # Should be all 0s
     stop("something wrong in nvar_vlm()")
   }
 
@@ -91,27 +91,27 @@ if (FALSE) {
 
 
 set.seed(123)
-zapdat = data.frame(x2 = runif(nn <- 2000))
-zapdat = transform(zapdat, p0     = logit(-0.5 + 1*x2, inverse = TRUE),
+zapdat <- data.frame(x2 = runif(nn <- 2000))
+zapdat <- transform(zapdat, p0     = logit(-0.5 + 1*x2, inverse = TRUE),
                            lambda =  loge( 0.5 + 2*x2, inverse = TRUE),
                            f1     =  gl(4, 50, labels = LETTERS[1:4]),
                            x3     =  runif(nn))
-zapdat = transform(zapdat, y = rzapois(nn, lambda, p0))
+zapdat <- transform(zapdat, y = rzapois(nn, lambda, p0))
 with(zapdat, table(y))
 
 
-fit1 = vglm(y ~ x2, zapoisson, zapdat, trace = TRUE)
-fit1 = vglm(y ~ bs(x2), zapoisson, zapdat, trace = TRUE)
+fit1 <- vglm(y ~ x2, zapoisson, zapdat, trace = TRUE)
+fit1 <- vglm(y ~ bs(x2), zapoisson, zapdat, trace = TRUE)
 coef(fit1, matrix = TRUE)  # These should agree with the above values
 
 
-fit2 = vglm(y ~ bs(x2) + x3, zapoisson(zero = 2), zapdat, trace = TRUE)
+fit2 <- vglm(y ~ bs(x2) + x3, zapoisson(zero = 2), zapdat, trace = TRUE)
 coef(fit2, matrix = TRUE)
 
 
-clist = list("(Intercept)" = diag(2), "x2" = rbind(0,1),
+clist <- list("(Intercept)" = diag(2), "x2" = rbind(0,1),
              "x3" = rbind(1,0))
-fit3 = vglm(y ~ x2 + x3, zapoisson(zero = NULL), zapdat,
+fit3 <- vglm(y ~ x2 + x3, zapoisson(zero = NULL), zapdat,
             constraints = clist, trace = TRUE)
 coef(fit3, matrix = TRUE)
 
@@ -124,14 +124,14 @@ head(model.matrix(fit2, type = "lm"))
 
 
 
-allH = matrix(unlist(constraints(fit1)), nrow = fit1@misc$M)
-allH = matrix(unlist(constraints(fit2)), nrow = fit2@misc$M)
-allH = matrix(unlist(constraints(fit3)), nrow = fit3@misc$M)
+allH <- matrix(unlist(constraints(fit1)), nrow = fit1@misc$M)
+allH <- matrix(unlist(constraints(fit2)), nrow = fit2@misc$M)
+allH <- matrix(unlist(constraints(fit3)), nrow = fit3@misc$M)
 
 
-checkNonZero = function(m) sum(as.logical(m))
+checkNonZero <- function(m) sum(as.logical(m))
 
-(numPars = apply(allH, 1, checkNonZero))
+(numPars <- apply(allH, 1, checkNonZero))
 
 
 nvar_vlm(fit1)
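
The nvar_vlm() check exercised above counts, for each linear predictor, how
many constraint-matrix rows carry nonzero entries. A hedged sketch of that
counting step on toy constraint matrices (no model fit involved):

## Toy constraint matrices for M = 2 linear predictors.
clist <- list("(Intercept)" = diag(2), "x2" = rbind(0, 1), "x3" = rbind(1, 0))
allH  <- matrix(unlist(clist), nrow = 2)
apply(allH, 1, function(row) sum(as.logical(row)))  # parameters per predictor
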
diff --git a/R/family.actuary.R b/R/family.actuary.R
index 00bf114..434aae7 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -98,7 +98,7 @@ rgumbelII <- function(n, shape, scale = 1) {
   function(lshape = "loge", lscale = "loge",
            ishape = NULL,   iscale = NULL,
            probs.y = c(0.2, 0.5, 0.8),
-           perc.out = NULL, # 50,
+           perc.out = NULL,  # 50,
            imethod = 1, zero = -2)
 {
 
@@ -115,7 +115,7 @@ rgumbelII <- function(n, shape, scale = 1) {
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -265,7 +265,7 @@ rgumbelII <- function(n, shape, scale = 1) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .eshape
       misc$earg[[Musual*ii  ]] <- .escale
     }
@@ -635,7 +635,7 @@ perks.control <- function(save.weight = TRUE, ...) {
   lscale <- attr(escale, "function.name")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
     stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 50)
@@ -774,7 +774,7 @@ perks.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .eshape
       misc$earg[[Musual*ii  ]] <- .escale
     }
@@ -831,12 +831,12 @@ perks.control <- function(save.weight = TRUE, ...) {
     NOS <- M / Musual
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
 
-    wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
 
     ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       run.varcov <- 0
       Shape <- shape[, spp.]
       Scale <- scale[, spp.]
@@ -862,7 +862,7 @@ perks.control <- function(save.weight = TRUE, ...) {
       run.varcov <- cbind(ave.oim11, ave.oim22, ave.oim12)
     } else {
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rperks(n = n, shape = Shape, scale = Scale)
 if (ii < 3) {
 }
@@ -898,14 +898,14 @@ if (ii < 3) {
                    dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
 
-      for(jay in 1:Musual)
-        for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+        for (kay in jay:Musual) {
           cptr <- iam((spp. - 1) * Musual + jay,
                       (spp. - 1) * Musual + kay,
                       M = M)
           wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
         }
-    } # End of for(spp.) loop
+    } # End of for (spp.) loop
 
 
 
@@ -1009,7 +1009,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
 
  makeham <-
   function(lshape = "loge", lscale = "loge", lepsilon = "loge",
-           ishape = NULL,   iscale = NULL,   iepsilon = NULL, # 0.3,
+           ishape = NULL,   iscale = NULL,   iepsilon = NULL,  # 0.3,
            gshape = exp(-5:5),
            gscale = exp(-5:5),
            gepsilon = exp(-4:1),
@@ -1037,7 +1037,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
   eepsil <- link2list(lepsil)
   lepsil <- attr(eepsil, "function.name")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
     stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 50)
@@ -1224,7 +1224,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-2]] <- .eshape
       misc$earg[[Musual*ii-1]] <- .escale
       misc$earg[[Musual*ii  ]] <- .eepsil
@@ -1293,12 +1293,12 @@ makeham.control <- function(save.weight = TRUE, ...) {
     NOS <- M / Musual
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
 
-    wz <- matrix(0.0, n, M + M - 1 + M - 2) # wz has half-bw 3
+    wz <- matrix(0.0, n, M + M - 1 + M - 2)  # wz has half-bw 3
 
     ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       run.varcov <- 0
       Shape <- shape[, spp.]
       Scale <- scale[, spp.]
@@ -1324,7 +1324,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
       run.varcov <- cbind(ave.oim11, ave.oim22, ave.oim12)
     } else {
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rmakeham(n = n, shape = Shape, scale = Scale,
                          epsil = Epsil)
 if (ii < 3) {
@@ -1375,14 +1375,14 @@ if (ii < 3) {
                    dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
 
-      for(jay in 1:Musual)
-        for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+        for (kay in jay:Musual) {
           cptr <- iam((spp. - 1) * Musual + jay,
                       (spp. - 1) * Musual + kay,
                       M = M)
           wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
         }
-    } # End of for(spp.) loop
+    } # End of for (spp.) loop
 
 
 
@@ -1497,7 +1497,7 @@ gompertz.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
     stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 50)
@@ -1632,7 +1632,7 @@ gompertz.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .eshape
       misc$earg[[Musual*ii  ]] <- .escale
     }
@@ -1686,17 +1686,17 @@ gompertz.control <- function(save.weight = TRUE, ...) {
     NOS <- M / Musual
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
 
-    wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
 
     ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       run.varcov <- 0
       Shape <- shape[, spp.]
       Scale <- scale[, spp.]
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rgompertz(n = n, shape = Shape, scale = Scale)
 if (ii < 3) {
 }
@@ -1724,14 +1724,14 @@ if (ii < 3) {
                    dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
 
-      for(jay in 1:Musual)
-        for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+        for (kay in jay:Musual) {
           cptr <- iam((spp. - 1) * Musual + jay,
                       (spp. - 1) * Musual + kay,
                       M = M)
           wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
         }
-    } # End of for(spp.) loop
+    } # End of for (spp.) loop
 
 
 
@@ -1827,7 +1827,7 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
     stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 50)
@@ -1842,7 +1842,7 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
       stop("argument 'ilambda' values must be positive")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1961,7 +1961,7 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .ealpha0
       misc$earg[[Musual*ii  ]] <- .elambda
     }
@@ -2015,17 +2015,17 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
     NOS <- M / Musual
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
 
-    wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
 
     ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       run.varcov <- 0
       Alph <- alpha0[, spp.]
       Lamb <- lambda[, spp.]
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rmoe(n = n, alpha = Alph, lambda = Lamb)
 if (ii < 3) {
 }
@@ -2052,14 +2052,14 @@ if (ii < 3) {
                    dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
 
-      for(jay in 1:Musual)
-        for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+        for (kay in jay:Musual) {
           cptr <- iam((spp. - 1) * Musual + jay,
                       (spp. - 1) * Musual + kay,
                       M = M)
           wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
         }
-    } # End of for(spp.) loop
+    } # End of for (spp.) loop
 
 
 
@@ -2270,7 +2270,7 @@ if (ii < 3) {
     ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
     ned2l.dpq <- -temp5
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # M==4 means 10=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # M==4 means 10=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(3, 3, M)] <- ned2l.dp * dp.deta^2
@@ -2502,7 +2502,7 @@ dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
   shape3.q <- rep(shape3.q,  length.out = LLL)
 
   Loglik <- rep(log(0), length.out = LLL)
-  xok <- (x > 0) & !is.na(x) # Avoids log(x) if x<0, and handles NAs
+  xok <- (x > 0) & !is.na(x)  # Avoids log(x) if x<0, and handles NAs
   Loglik[xok] <- log(shape1.a[xok]) + log(shape3.q[xok]) +
                  (shape1.a[xok]-1) * log(x[xok]) -
                 shape1.a[xok] * log(scale[xok]) -
@@ -2544,7 +2544,7 @@ ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
   shape2.p <- rep(shape2.p, length.out = LLL)
 
   Loglik <- rep(log(0), length.out = LLL)
-  xok <- (x > 0) & !is.na(x) # Avoids log(x) if x<0, and handles NAs
+  xok <- (x > 0) & !is.na(x)  # Avoids log(x) if x<0, and handles NAs
   Loglik[xok] <- log(shape1.a[xok]) +
                  log(shape2.p[xok]) +
                  (shape1.a[xok] * shape2.p[xok]-1) * log(    x[xok]) -
@@ -2627,7 +2627,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     parg <- 1
 
     if (!length( .ishape1.a) || !length( .iscale )) {
-        qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+        qvec <- c( .25, .5, .75)  # Arbitrary; could be made an argument
         ishape3.q <- if (length( .ishape3.q)) .ishape3.q else 1
         xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
         fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -2745,7 +2745,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     ned2l.daq <- -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
     ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #M==3 means 6=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(3, 3, M)] <- ned2l.dq * dq.deta^2
@@ -2811,7 +2811,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
         namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
 
     if (!length( .ishape1.a) || !length( .iscale )) {
-        qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+        qvec <- c( .25, .5, .75)  # Arbitrary; could be made an argument
         ishape2.p <- if (length( .ishape2.p)) .ishape2.p else 1
         xvec <- log( qvec^(-1/ ishape2.p ) - 1 )
         fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -2927,7 +2927,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
                    ) / (Scale * (1 + parg+qq))
     ned2l.dap= -(qq   * (temp3a -temp3b) -1) / (aa*(parg+qq))
     ned2l.dscalep <-  aa * qq   / (Scale * (parg + qq))
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #M==3 means 6=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.da     * da.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(3, 3, M)] <- ned2l.dp     * dp.deta^2
@@ -2993,7 +2993,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
       namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
 
     if (!length( .iscale )) {
-      qvec <- c(0.25, .5, .75) # Arbitrary; could be made an argument
+      qvec <- c(0.25, .5, .75)  # Arbitrary; could be made an argument
       ishape3.q <- if (length( .ishape3.q)) .ishape3.q else 1
       xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -3113,7 +3113,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
     ned2l.dpq <- -temp5
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #M==3 means 6=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dp * dp.deta^2
     wz[, iam(3, 3, M)] <- ned2l.dq * dq.deta^2
@@ -3130,7 +3130,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 
 
  lomax <- function(lscale = "loge",    lshape3.q = "loge",
-                   iscale = NULL,      ishape3.q = NULL, # 2.0, 
+                   iscale = NULL,      ishape3.q = NULL,  # 2.0, 
                                        gshape3.q = exp(-5:5),
                    zero = NULL) {
 
@@ -3181,7 +3181,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 
                    
     lomax.Loglikfun <- function(shape3.q, y, x, w, extraargs) {
-      qvec <- c(0.25, 0.5, 0.75) # Arbitrary; could be made an argument
+      qvec <- c(0.25, 0.5, 0.75)  # Arbitrary; could be made an argument
       xvec <- log( (1-qvec)^(-1/ shape3.q ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
       init.scale <- exp(fit0$coef[1])
@@ -3204,7 +3204,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 
 
     if (!length( .iscale )) {
-      qvec <- c(0.25, 0.5, 0.75) # Arbitrary; could be made an argument
+      qvec <- c(0.25, 0.5, 0.75)  # Arbitrary; could be made an argument
       ishape3.q <- if (length( .ishape3.q )) .ishape3.q else Init.shape3.q
       xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -3350,7 +3350,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     qq <- parg <- 1
 
     if (!length( .iscale )) {
-      qvec <- c( 0.25, 0.5, 0.75) # Arbitrary; could be made an argument
+      qvec <- c( 0.25, 0.5, 0.75)  # Arbitrary; could be made an argument
       xvec <- log( 1 / qvec - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
     }
@@ -3443,7 +3443,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     ned2l.dascale <- (parg - qq - parg*qq*(temp3a - temp3b)) / (
                       scale * (1 + parg + qq))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #M == 2 means 3=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
@@ -3500,7 +3500,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     qq <- aa <- 1
 
     if (!length( .iscale )) {
-      qvec <- c(0.25, .5, .75) # Arbitrary; could be made an argument
+      qvec <- c(0.25, .5, .75)  # Arbitrary; could be made an argument
       ishape2.p <- if (length( .ishape2.p)) .ishape2.p else 1
       xvec <- log( qvec^(-1/ ishape2.p ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -3578,7 +3578,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     ned2l.dp <- 1 / parg^2 
     ned2l.dscalep <-  aa * qq   / (scale * (parg + qq))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #M == 2 means 3=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dp * dp.deta^2
     wz[, iam(1, 2, M)] <- ned2l.dscalep * dscale.deta * dp.deta
@@ -3635,7 +3635,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     parg <- 1
 
     if (!length( .ishape1.a) || !length( .iscale )) {
-      qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+      qvec <- c( .25, .5, .75)  # Arbitrary; could be made an argument
       ishape1.a <- if (length( .ishape1.a)) .ishape1.a else 1
       xvec <- log( (1-qvec)^(-1/ ishape1.a ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -3736,7 +3736,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     ned2l.dascale <- (parg - qq - parg*qq*(temp3a -temp3b)) /(
                       scale*(1 + parg+qq))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #M == 2 means 3=dimm(M)
     wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
     wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
@@ -3786,7 +3786,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
         namesof("scale",    .lscale ,    earg = .escale ,    tag = FALSE))
 
     if (!length( .ishape1.a) || !length( .iscale )) {
-      qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+      qvec <- c( .25, .5, .75)  # Arbitrary; could be made an argument
       ishape2.p <- if (length( .ishape1.a )) .ishape1.a else 1
       xvec <- log( qvec^(-1/ ishape2.p ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -3910,11 +3910,11 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
  if (FALSE)
  genlognormal <- function(link.sigma = "loge", link.r = "loge",
                           init.sigma = 1, init.r = 1, zero = NULL) {
-warning("2/4/04; doesn't work, possibly because first derivs are ",
+warning("20040402; does not work, possibly because first derivs are ",
         "not continuous (sign() is used). Certainly, the derivs wrt ",
         "mymu are problematic (run with maxit=4:9 and look at weight ",
         "matrices). Possibly fundamentally cannot be estimated by IRLS. ",
-        "Pooling doesn't seem to help")
+        "Pooling does not seem to help")
 
 
 
@@ -4016,7 +4016,7 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
                  dl.dr * dr.deta)
   }), list( .link.sigma = link.sigma, .link.r = link.r ))),
   weight = expression({
-    wz <- matrix(0, n, 6) # 5 will have small savings of 1 column
+    wz <- matrix(0, n, 6)  # 5 will have small savings of 1 column
 
     B <- log(r) + digamma(ss)
     ned2l.dmymu2 <- (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 06e3b35..49663b2 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -20,8 +20,9 @@ dkumar <- function(x, shape1, shape2, log = FALSE) {
 
 
   N <- max(length(x), length(shape1), length(shape2))
-  x <- rep(x, len = N); shape1 <- rep(shape1, len = N);
-  shape2 <- rep(shape2, len = N)
+  if (length(x)      != N) x      <- rep(x,      len = N)
+  if (length(shape1) != N) shape1 <- rep(shape1, len = N)
+  if (length(shape2) != N) shape2 <- rep(shape2, len = N)
 
   logdensity <- rep(log(0), len = N)
   xok <- (0 <= x & x <= 1)
@@ -84,14 +85,14 @@ pkumar <- function(q, shape1, shape2) {
 
 
   if (length(ishape1) &&
-     (!is.Numeric(ishape1, allowable.length = 1, positive = TRUE)))
+     (!is.Numeric(ishape1, length.arg = 1, positive = TRUE)))
     stop("bad input for argument 'ishape1'")
   if (length(ishape2) && !is.Numeric(ishape2))
     stop("bad input for argument 'ishape2'")
 
-  if (!is.Numeric(tol12, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(tol12, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'tol12'")
-  if (!is.Numeric(grid.shape1, allowable.length = 2, positive = TRUE))
+  if (!is.Numeric(grid.shape1, length.arg = 2, positive = TRUE))
     stop("bad input for argument 'grid.shape1'")
 
 
@@ -165,7 +166,7 @@ pkumar <- function(q, shape1, shape2) {
 
 
 
-           mediany <- colSums(y * w) / colSums(w) # weighted.mean(y, w)
+           mediany <- colSums(y * w) / colSums(w)  # weighted.mean(y, w)
 
           shape2 <- log(0.5) / log1p(-(mediany^shape1))
           sum(c(w) * dkumar(x = y, shape1 = shape1, shape2 = shape2,
@@ -181,7 +182,7 @@ pkumar <- function(q, shape1, shape2) {
 
 
 
-       mediany <- colSums(y * w) / colSums(w) # weighted.mean(y, w)
+       mediany <- colSums(y * w) / colSums(w)  # weighted.mean(y, w)
 
 
       shape2.init <- if (length( .ishape2 )) .ishape2 else
@@ -213,7 +214,7 @@ pkumar <- function(q, shape1, shape2) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .eshape1
       misc$earg[[Musual*ii  ]] <- .eshape2
     }
@@ -297,8 +298,9 @@ drice <- function(x, vee, sigma, log = FALSE) {
 
 
   N <- max(length(x), length(vee), length(sigma))
-  x <- rep(x, len = N); vee <- rep(vee, len = N);
-  sigma <- rep(sigma, len = N)
+  if (length(x)      != N) x      <- rep(x,      len = N)
+  if (length(vee)    != N) vee    <- rep(vee   , len = N)
+  if (length(sigma ) != N) sigma  <- rep(sigma , len = N)
 
   logdensity <- rep(log(0), len = N)
   xok <- (x > 0)
@@ -314,9 +316,9 @@ drice <- function(x, vee, sigma, log = FALSE) {
 
 
 rrice <- function(n, vee, sigma) {
-  if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
+  if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1))
     stop("bad input for argument 'n'")
-  theta <- 1 # any number
+  theta <- 1  # any number
   X <- rnorm(n, mean = vee * cos(theta), sd = sigma)
   Y <- rnorm(n, mean = vee * sin(theta), sd = sigma)
   sqrt(X^2 + Y^2)
@@ -348,7 +350,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
   if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
     stop("bad input for argument 'isigma'")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
     stop("'nsimEIM' should be an integer greater than 50")
@@ -364,7 +366,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
             "besselI(-z/2, nu = 0) - z * besselI(-z/2, nu = 1)) ",
             "where z=-vee^2/(2*sigma^2)"),
   constraints = eval(substitute(expression({
-    constraints = cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -381,31 +383,31 @@ riceff.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <-
-      c(namesof("vee",   .lvee, earg = .evee, tag = FALSE),
+      c(namesof("vee",   .lvee,   earg = .evee,   tag = FALSE),
         namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
 
 
 
     if (!length(etastart)) {
       riceff.Loglikfun <- function(vee, y, x, w, extraargs) {
-            sigma.init <- sd(rep(y, w))
-            sum(c(w) * (log(y) - 2*log(sigma.init) +
-                     log(besselI(y*vee/sigma.init^2, nu = 0)) -
-                     (y^2 + vee^2)/(2*sigma.init^2)))
-        }
-        vee.grid <-
-          seq(quantile(rep(y, w), probs = seq(0, 1, 0.2))["20%"],
-              quantile(rep(y, w), probs = seq(0, 1, 0.2))["80%"], len=11)
-        vee.init <- if (length( .ivee )) .ivee else
-          getMaxMin(vee.grid, objfun = riceff.Loglikfun,
-                    y = y,  x = x, w = w)
-        vee.init <- rep(vee.init, length = length(y))
-        sigma.init <- if (length( .isigma )) .isigma else
-            sqrt(max((weighted.mean(y^2, w) - vee.init^2)/2, 0.001))
-        sigma.init <- rep(sigma.init, length = length(y))
-        etastart <-
-          cbind(theta2eta(vee.init,   .lvee,   earg = .evee),
-                theta2eta(sigma.init, .lsigma, earg = .esigma))
+        sigma.init <- sd(rep(y, w))
+        sum(c(w) * (log(y) - 2*log(sigma.init) +
+                    log(besselI(y*vee/sigma.init^2, nu = 0)) -
+                   (y^2 + vee^2) / (2*sigma.init^2)))
+      }
+    vee.grid <-
+      seq(quantile(rep(y, w), probs = seq(0, 1, 0.2))["20%"],
+          quantile(rep(y, w), probs = seq(0, 1, 0.2))["80%"], len = 11)
+    vee.init <- if (length( .ivee )) .ivee else
+      getMaxMin(vee.grid, objfun = riceff.Loglikfun,
+                y = y,  x = x, w = w)
+      vee.init <- rep(vee.init, length = length(y))
+      sigma.init <- if (length( .isigma )) .isigma else
+          sqrt(max((weighted.mean(y^2, w) - vee.init^2)/2, 0.001))
+      sigma.init <- rep(sigma.init, length = length(y))
+      etastart <-
+        cbind(theta2eta(vee.init,   .lvee,   earg = .evee),
+              theta2eta(sigma.init, .lsigma, earg = .esigma))
     }
   }), list( .lvee = lvee, .lsigma = lsigma,
             .ivee = ivee, .isigma = isigma,
@@ -462,7 +464,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
             .evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
   weight = eval(substitute(expression({
     run.var <- run.cov <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- rrice(n, vee = vee, sigma = sigma)
       temp8 <- ysim * vee / sigma^2
       dl.dvee <- -vee/sigma^2 + (ysim/sigma^2) *
@@ -492,6 +494,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
 
 
 
+
 dskellam <- function(x, mu1, mu2, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -499,29 +502,37 @@ dskellam <- function(x, mu1, mu2, log = FALSE) {
 
 
   L <- max(length(x), length(mu1), length(mu2))
-  x <- rep(x, len = L);
-  mu1 <- rep(mu1, len = L);
-  mu2 <- rep(mu2, len = L);
-  ok2 <- is.finite(mu1) && is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
+  if (length(x)      != L) x      <- rep(x,      len = L)
+  if (length(mu1)    != L) mu1    <- rep(mu1,    len = L)
+  if (length(mu2)    != L) mu2    <- rep(mu2,    len = L)
+
+  ok2 <- is.finite(mu1) & is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
   ok3 <- (mu1 == 0) & (mu2 >  0)
   ok4 <- (mu1 >  0) & (mu2 == 0)
   ok5 <- (mu1 == 0) & (mu2 == 0)
     if (log.arg) {
-        ans <- -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
-               0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
-               log(besselI(2 * sqrt(mu1*mu2), nu = x,
-                           expon.scaled = TRUE))
-        ans[ok3] <- dpois(x = -x[ok3], lambda = mu2[ok3], log = TRUE)
-        ans[ok4] <- dpois(x = -x[ok4], lambda = mu1[ok4], log = TRUE)
-        ans[ok5] <- dpois(x =  x[ok5], lambda = 0.0,      log = TRUE)
-        ans[x != round(x)] = log(0.0)
+      ans <- -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
+             0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
+             log(besselI(2 * sqrt(mu1*mu2),
+
+                         nu = abs(x),
+
+                         expon.scaled = TRUE))
+      ans[ok3] <- dpois(x = -x[ok3], lambda = mu2[ok3], log = TRUE)
+      ans[ok4] <- dpois(x = -x[ok4], lambda = mu1[ok4], log = TRUE)
+      ans[ok5] <- dpois(x =  x[ok5], lambda = 0.0,      log = TRUE)
+      ans[x != round(x)] = log(0.0)
     } else {
-        ans <- (mu1/mu2)^(x/2) * exp(-mu1-mu2 + 2 * sqrt(mu1*mu2)) *
-               besselI(2 * sqrt(mu1*mu2), nu = x, expon.scaled = TRUE)
-        ans[ok3] <- dpois(x = -x[ok3], lambda = mu2[ok3])
-        ans[ok4] <- dpois(x = -x[ok4], lambda = mu1[ok4])
-        ans[ok5] <- dpois(x =  x[ok5], lambda = 0.0)
-        ans[x != round(x)] <- 0.0
+      ans <- (mu1/mu2)^(x/2) * exp(-mu1-mu2 + 2 * sqrt(mu1*mu2)) *
+             besselI(2 * sqrt(mu1*mu2),
+
+                     nu = abs(x),
+
+                     expon.scaled = TRUE)
+      ans[ok3] <- dpois(x = -x[ok3], lambda = mu2[ok3])
+      ans[ok4] <- dpois(x = -x[ok4], lambda = mu1[ok4])
+      ans[ok5] <- dpois(x =  x[ok5], lambda = 0.0)
+      ans[x != round(x)] <- 0.0
     }
     ans[!ok2] <- NaN
     ans
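
A quick simulation check of dskellam() as revised above (note the
nu = abs(x) passed to besselI(), since the Skellam pmf involves I_|k|):
the density of a difference of two independent Poissons should match the
simulated frequency. This assumes the dskellam() defined above is loaded.

set.seed(1)
mu1 <- 2.5; mu2 <- 1.0
diffs <- rpois(1e5, mu1) - rpois(1e5, mu2)
c(simulated = mean(diffs == -2),
  density   = dskellam(-2, mu1, mu2))  # should be close
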
@@ -533,7 +544,7 @@ dskellam <- function(x, mu1, mu2, log = FALSE) {
 
 
 rskellam <- function(n, mu1, mu2) {
-    rpois(n, mu1) - rpois(n, mu2)
+  rpois(n, mu1) - rpois(n, mu2)
 }
 
 
@@ -544,7 +555,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
 
 
  skellam <- function(lmu1 = "loge", lmu2 = "loge",
-                     imu1 = NULL, imu2 = NULL,
+                     imu1 = NULL,   imu2 = NULL,
                      nsimEIM = 100, parallel = FALSE, zero = NULL) {
 
   lmu1 <- as.list(substitute(lmu1))
@@ -565,7 +576,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
     stop("argument 'nsimEIM' should be an integer greater than 50")
@@ -578,9 +589,11 @@ skellam.control <- function(save.weight = TRUE, ...) {
          "Mean:     mu1-mu2", "\n",
          "Variance: mu1+mu2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
                            apply.int = TRUE)
-    constraints = cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -605,98 +618,109 @@ skellam.control <- function(save.weight = TRUE, ...) {
       junk <- lm.wfit(x = x, y = c(y), w = c(w))
       var.y.est <- sum(c(w) * junk$resid^2) / junk$df.residual
       mean.init <- weighted.mean(y, w)
-        mu1.init <- max((var.y.est + mean.init) / 2, 0.01)
-        mu2.init <- max((var.y.est - mean.init) / 2, 0.01)
-        mu1.init <- rep(if(length( .imu1 )) .imu1 else mu1.init,
-                        length <- n)
-        mu2.init <- rep(if(length( .imu2 )) .imu2 else mu2.init,
-                        length = n)
-        etastart <- cbind(theta2eta(mu1.init, .lmu1, earg = .emu1),
-                          theta2eta(mu2.init, .lmu2, earg = .emu2))
+
+      mu1.init <- max((var.y.est + mean.init) / 2, 0.01)
+      mu2.init <- max((var.y.est - mean.init) / 2, 0.01)
+      mu1.init <- rep(if (length( .imu1 )) .imu1 else mu1.init,
+                      length = n)
+      mu2.init <- rep(if (length( .imu2 )) .imu2 else mu2.init,
+                      length = n)
+
+      etastart <- cbind(theta2eta(mu1.init, .lmu1, earg = .emu1 ),
+                        theta2eta(mu2.init, .lmu2, earg = .emu2 ))
       }
   }), list( .lmu1 = lmu1, .lmu2 = lmu2,
             .imu1 = imu1, .imu2 = imu2,
             .emu1 = emu1, .emu2 = emu2 ))),
   linkinv = eval(substitute(function(eta, extra = NULL){
-      mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
-      mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
+      mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
+      mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
       mu1 - mu2
   }, list( .lmu1 = lmu1, .lmu2 = lmu2,
            .emu1 = emu1, .emu2 = emu2 ))),
   last = eval(substitute(expression({
       misc$link <-    c("mu1" = .lmu1, "mu2" = .lmu2)
 
-      misc$earg <- list("mu1" = .emu1, "mu2" = .emu2)
+      misc$earg <- list("mu1" = .emu1, "mu2" = .emu2 )
 
       misc$expected <- TRUE
       misc$nsimEIM <- .nsimEIM
   }), list( .lmu1 = lmu1, .lmu2 = lmu2,
-            .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
+            .emu1 = emu1, .emu2 = emu2,
+            .nsimEIM = nsimEIM ))),
   loglikelihood = eval(substitute(
           function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
-      mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
-      mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
+      mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
+      mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
         if (residuals)
           stop("loglikelihood residuals not implemented yet") else {
 
 
 
 
-        if ( is.logical( .parallel ) && length( .parallel )== 1 &&
-            .parallel )
-            sum(c(w) * log(besselI(2*mu1, nu = y, expon = TRUE))) else
-            sum(c(w) * (-mu1 - mu2 +
-                    0.5 * y * log(mu1) -
-                    0.5 * y * log(mu2) +
-                    2 * sqrt(mu1*mu2) +  # Use this when expon = TRUE
-                    log(besselI(2 * sqrt(mu1*mu2), nu = y, expon = TRUE))))
+        if ( is.logical( .parallel ) &&
+             length( .parallel ) == 1 &&
+             .parallel )
+          sum(c(w) * log(besselI(2*mu1, nu = y, expon = TRUE))) else
+          sum(c(w) * (-mu1 - mu2 +
+                  0.5 * y * log(mu1) -
+                  0.5 * y * log(mu2) +
+                  2 * sqrt(mu1*mu2) +  # Use this when expon = TRUE
+                  log(besselI(2 * sqrt(mu1*mu2), nu = y, expon = TRUE))))
         }
   }, list( .lmu1 = lmu1, .lmu2 = lmu2,
-           .parallel = parallel,
-           .emu1 = emu1, .emu2 = emu2 ))),
+           .emu1 = emu1, .emu2 = emu2,
+           .parallel = parallel ))),
   vfamily = c("skellam"),
   deriv = eval(substitute(expression({
-      mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
-      mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
-      dmu1.deta <- dtheta.deta(mu1, link = .lmu1, earg = .emu1)
-      dmu2.deta <- dtheta.deta(mu2, link = .lmu2, earg = .emu2)
-      temp8 <- 2 * sqrt(mu1*mu2)
-      temp9 <-  besselI(temp8, nu = y  , expon = TRUE)
-      temp7 <- (besselI(temp8, nu = y-1, expon = TRUE) +
-                besselI(temp8, nu = y+1, expon = TRUE)) / 2
-      temp6 <- temp7 / temp9
-      dl.dmu1 <- -1 + 0.5 * y / mu1 + sqrt(mu2/mu1) * temp6
-      dl.dmu2 <- -1 - 0.5 * y / mu2 + sqrt(mu1/mu2) * temp6
-      c(w) * cbind(dl.dmu1 * dmu1.deta,
-                   dl.dmu2 * dmu2.deta)
+    mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
+    mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
+
+    dmu1.deta <- dtheta.deta(mu1, link = .lmu1, earg = .emu1 )
+    dmu2.deta <- dtheta.deta(mu2, link = .lmu2, earg = .emu2 )
+
+    temp8 <- 2 * sqrt(mu1*mu2)
+    temp9 <-  besselI(temp8, nu = y  , expon = TRUE)
+    temp7 <- (besselI(temp8, nu = y-1, expon = TRUE) +
+              besselI(temp8, nu = y+1, expon = TRUE)) / 2
+    temp6 <- temp7 / temp9
+
+    dl.dmu1 <- -1 + 0.5 * y / mu1 + sqrt(mu2/mu1) * temp6
+    dl.dmu2 <- -1 - 0.5 * y / mu2 + sqrt(mu1/mu2) * temp6
+
+    c(w) * cbind(dl.dmu1 * dmu1.deta,
+                 dl.dmu2 * dmu2.deta)
   }), list( .lmu1 = lmu1, .lmu2 = lmu2,
-            .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
+            .emu1 = emu1, .emu2 = emu2,
+            .nsimEIM = nsimEIM ))),
   weight = eval(substitute(expression({
     run.var <- run.cov <- 0
-    for(ii in 1:( .nsimEIM )) {
-        ysim <- rskellam(n, mu1=mu1, mu2=mu2)
-        temp9 <-  besselI(temp8, nu = ysim,   expon = TRUE)
-        temp7 <- (besselI(temp8, nu = ysim-1, expon = TRUE) +
-                  besselI(temp8, nu = ysim+1, expon = TRUE)) / 2
-        temp6 <- temp7 / temp9
-        dl.dmu1 <- -1 + 0.5 * ysim/mu1 + sqrt(mu2/mu1) * temp6
-        dl.dmu2 <- -1 - 0.5 * ysim/mu2 + sqrt(mu1/mu2) * temp6
-        rm(ysim)
-        temp3 <- cbind(dl.dmu1, dl.dmu2)
-        run.var <- ((ii-1) * run.var + temp3^2) / ii
-        run.cov <- ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
-      }
-      wz <- if (intercept.only)
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rskellam(n, mu1=mu1, mu2=mu2)
+      temp9 <-  besselI(temp8, nu = ysim,   expon = TRUE)
+      temp7 <- (besselI(temp8, nu = ysim-1, expon = TRUE) +
+                besselI(temp8, nu = ysim+1, expon = TRUE)) / 2
+      temp6 <- temp7 / temp9
+      dl.dmu1 <- -1 + 0.5 * ysim/mu1 + sqrt(mu2/mu1) * temp6
+      dl.dmu2 <- -1 - 0.5 * ysim/mu2 + sqrt(mu1/mu2) * temp6
+      rm(ysim)
+      temp3 <- cbind(dl.dmu1, dl.dmu2)
+      run.var <- ((ii-1) * run.var + temp3^2) / ii
+      run.cov <- ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
+    }
+    wz <- if (intercept.only)
           matrix(colMeans(cbind(run.var, run.cov)),
-                 n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
+                 n, dimm(M), byrow = TRUE) else
+          cbind(run.var, run.cov)
 
-      dtheta.detas <- cbind(dmu1.deta, dmu2.deta)
-      index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-      wz <- wz * dtheta.detas[, index0$row] *
-                 dtheta.detas[, index0$col]
-      c(w) * wz
+    dtheta.detas <- cbind(dmu1.deta, dmu2.deta)
+    index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+    wz <- wz * dtheta.detas[, index0$row] *
+               dtheta.detas[, index0$col]
+    c(w) * wz
   }), list( .lmu1 = lmu1, .lmu2 = lmu2,
-            .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))))
+            .emu1 = emu1, .emu2 = emu2,
+            .nsimEIM = nsimEIM ))))
 }
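# Fitting sketch for the skellam() family above (assumes VGAM is attached;
# an intercept-only model recovers mu1 and mu2 on the default loge scale).
set.seed(2)
sdata <- data.frame(y = rskellam(500, mu1 = 3, mu2 = 1.5))
sfit <- vglm(y ~ 1, skellam, data = sdata, trace = TRUE)
coef(sfit, matrix = TRUE)   # intercepts for loge(mu1) and loge(mu2)
Coef(sfit)                  # back-transformed estimates of mu1 and mu2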
 
 
@@ -721,7 +745,7 @@ dyules <- function(x, rho, log = FALSE) {
 
 
 ryules <- function(n, rho) {
-  if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
+  if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1))
     stop("bad input for argument 'n'")
   rgeom(n, prob = exp(-rexp(n, rate=rho))) + 1
 }
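# ryules() above draws Yule-Simon variates as a geometric whose success
# probability is an exponential mixture.  A rough check (assuming VGAM is
# attached): for rho > 1 the distribution has mean rho / (rho - 1).
set.seed(3)
yy <- ryules(20000, rho = 3)
c(mean(yy), 3 / (3 - 1))   # both close to 1.5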
@@ -758,7 +782,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
     stop("argument 'nsimEIM' should be an integer greater than 50")
@@ -840,7 +864,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -870,7 +894,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
   weight = eval(substitute(expression({
 
     run.var <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- ryules(n, rho <- rho)
       dl.drho <- 1/rho + digamma(1+rho) - digamma(1+rho+ysim)
       rm(ysim)
@@ -1023,7 +1047,7 @@ rlind <- function(n, theta) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -1105,8 +1129,7 @@ if (FALSE)
  poissonlindley <-
   function(link = "loge",
            itheta = NULL, nsimEIM = 200,
-           zero = NULL)
-{
+           zero = NULL) {
 
   stop("not working since rpoislindley() not written")
 
@@ -1122,7 +1145,7 @@ if (FALSE)
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
     stop("argument 'nsimEIM' should be an integer greater than 50")
@@ -1209,7 +1232,7 @@ if (FALSE)
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -1242,7 +1265,7 @@ if (FALSE)
   weight = eval(substitute(expression({
 
     run.var <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- rpoislindley(n, theta = theta)
       dl.dtheta <- 2 / theta + 1 / (ysim + 2 + theta) -
                    (ysim + 3) / (theta + 1)
@@ -1277,9 +1300,9 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
   if (!is.Numeric(sigma) || any(sigma <= 0))
     stop("argument 'sigma' must be positive")
   L <- max(length(x), length(mu), length(sigma))
-  x     <- rep(x,     len = L);
-  mu    <- rep(mu,    len = L);
-  sigma <- rep(sigma, len = L)
+  if (length(x)     != L) x     <- rep(x,     len = L)
+  if (length(mu)    != L) mu    <- rep(mu,    len = L)
+  if (length(sigma) != L) sigma <- rep(sigma, len = L)
 
   zedd <- (x-mu)/sigma
   if (log.arg)
@@ -1294,9 +1317,9 @@ pslash <- function(q, mu = 0, sigma = 1) {
   if (!is.Numeric(sigma) || any(sigma <= 0))
     stop("argument 'sigma' must be positive")
   L <- max(length(q), length(mu), length(sigma))
-  q     <- rep(q,     len = L);
-  mu    <- rep(mu,    len = L);
-  sigma <- rep(sigma, len = L)
+  if (length(q)     != L) q     <- rep(q,     len = L)
+  if (length(mu)    != L) mu    <- rep(mu,    len = L)
+  if (length(sigma) != L) sigma <- rep(sigma, len = L)
 
   ans <- q * NA
   for (ii in 1:L) {
@@ -1346,7 +1369,7 @@ slash.control <- function(save.weight = TRUE, ...) {
     stop("bad input for argument 'zero'")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
     stop("argument 'nsimEIM' should be an integer greater than 50")
@@ -1372,7 +1395,7 @@ slash.control <- function(save.weight = TRUE, ...) {
          "\n1/(2*sigma*sqrt(2*pi))",
          "\t\t\t\t\t\t\ty=mu\n")),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1476,7 +1499,7 @@ slash.control <- function(save.weight = TRUE, ...) {
     sd3 <- deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
                   log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
                   c("mu", "sigma"))
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
         ysim <- rslash(n, mu = mu, sigma = sigma)
         seval.d3 <- eval(sd3)
 
@@ -1507,15 +1530,19 @@ slash.control <- function(save.weight = TRUE, ...) {
 
 
 dnefghs <- function(x, tau, log = FALSE) {
+
+
+
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
   N <- max(length(x), length(tau))
-  x <- rep(x, len = N); tau = rep(tau, len = N);
+  if (length(x)   != N) x   <- rep(x,   len = N)
+  if (length(tau) != N) tau <- rep(tau, len = N)
 
-  logdensity <- log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
+  logdensity <- log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1pexp(x)
   logdensity[tau < 0] <- NaN
   logdensity[tau > 1] <- NaN
   if (log.arg) logdensity else exp(logdensity)
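# The switch from log1p(exp(x)) to log1pexp(x) above is for numerical
# stability: exp(x) overflows for large x even though log(1 + exp(x)) is
# approximately x there.  A small illustration (assuming this version of
# VGAM provides log1pexp() in the usual numerically stable form):
xx <- c(1, 50, 800)
log1p(exp(xx))   # Inf for xx = 800
log1pexp(xx)     # finite; roughly xx for large xx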
@@ -1525,6 +1552,7 @@ dnefghs <- function(x, tau, log = FALSE) {
 
  nefghs <- function(link = "logit",
                     itau = NULL, imethod = 1) {
+
   if (length(itau) &&
       !is.Numeric(itau, positive = TRUE) ||
       any(itau >= 1))
@@ -1534,7 +1562,7 @@ dnefghs <- function(x, tau, log = FALSE) {
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
        imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1574,7 +1602,8 @@ dnefghs <- function(x, tau, log = FALSE) {
                       len = n)
       etastart <- theta2eta(tau.init, .link , earg = .earg )
     }
-  }), list( .link = link, .earg = earg, .itau = itau,
+  }), list( .link = link, .earg = earg,
+            .itau = itau,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     tau <- eta2theta(eta, .link , earg = .earg )
@@ -1587,7 +1616,8 @@ dnefghs <- function(x, tau, log = FALSE) {
 
     misc$expected <- TRUE
     misc$imethod <- .imethod
-  }), list( .link = link, .earg = earg, .imethod = imethod ))),
+  }), list( .link = link, .earg = earg,
+            .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
     tau <- eta2theta(eta, .link , earg = .earg )
@@ -1604,10 +1634,10 @@ dnefghs <- function(x, tau, log = FALSE) {
     w * dl.dtau * dtau.deta
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
-    d2l.dtau2 <- (pi / sin(pi * tau))^2
-    wz <- d2l.dtau2 * dtau.deta^2
-        c(w) * wz
-    }), list( .link = link ))))
+    ned2l.dtau2 <- (pi / sin(pi * tau))^2
+    wz <- ned2l.dtau2 * dtau.deta^2
+    c(w) * wz
+  }), list( .link = link ))))
 }
 
 
@@ -1619,8 +1649,12 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
   rm(log)
 
 
-  logdensity <- -shape2*x - lbeta(shape1, shape2) -
-                (shape1 + shape2) * log1p(exp(-x))
+
+
+  logdensity <- shape1*x - lbeta(shape1, shape2) -
+                (shape1 + shape2) * log1pexp(x)
+
+
   if (log.arg) logdensity else exp(logdensity)
 }
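# The rewritten dlogF() kernel is algebraically equivalent to the old one:
#   shape1*x - (shape1+shape2)*log(1+exp(x))
#     == -shape2*x - (shape1+shape2)*log(1+exp(-x)).
# A numerical spot check in base R (no VGAM functions needed):
xx <- seq(-5, 5, by = 0.5); s1 <- 2; s2 <- 3
old <- -s2 * xx - lbeta(s1, s2) - (s1 + s2) * log1p(exp(-xx))
new <-  s1 * xx - lbeta(s1, s2) - (s1 + s2) * log1p(exp( xx))
max(abs(old - new))   # effectively zero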
 
@@ -1630,6 +1664,7 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
  logF <- function(lshape1 = "loge", lshape2 = "loge",
                   ishape1 = NULL, ishape2 = 1,
                   imethod = 1) {
+
   if (length(ishape1) &&
       !is.Numeric(ishape1, positive = TRUE))
     stop("argument 'ishape1' must be positive")
@@ -1648,7 +1683,7 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
   lshape2 <- attr(eshape2, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
       stop("argument 'imethod' must be 1 or 2")
@@ -1674,8 +1709,8 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
 
 
     predictors.names <- c(
-      namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
-      namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE))
+      namesof("shape1", .lshape1 , earg = .eshape1 , tag = FALSE),
+      namesof("shape2", .lshape2 , earg = .eshape2 , tag = FALSE))
 
 
     if (!length(etastart)) {
@@ -1683,7 +1718,7 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
                 median(rep(y, w))
 
 
-      shape1.init <- shape2.init = rep( .ishape2, len = n)
+      shape1.init <- shape2.init <- rep( .ishape2 , len = n)
       shape1.init <- if (length( .ishape1))
                             rep( .ishape1, len = n) else {
                 index1 <- (y > wmeany)
@@ -1693,23 +1728,23 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
                 shape1.init
               }
       etastart <-
-          cbind(theta2eta(shape1.init, .lshape1, earg = .eshape1),
-                theta2eta(shape2.init, .lshape2, earg = .eshape2))
+          cbind(theta2eta(shape1.init, .lshape1 , earg = .eshape1 ),
+                theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))
     }
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .eshape1 = eshape1, .eshape2 = eshape2,
             .ishape1 = ishape1, .ishape2 = ishape2,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape1 <- eta2theta(eta[, 1], .lshape1, earg = .eshape1)
-    shape2 <- eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
     digamma(shape1) - digamma(shape2)
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
-    misc$link <-    c(shape1 = .lshape1, shape2 = .lshape2)
+    misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2)
 
-    misc$earg <- list(shape1 = .eshape1, shape2 = .eshape2)
+    misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 )
 
     misc$expected <- TRUE
     misc$imethod <- .imethod
@@ -1718,8 +1753,8 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
             .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
-    shape1 <- eta2theta(eta[, 1], .lshape1, earg = .eshape1)
-    shape2 <- eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
     if (residuals)
       stop("loglikelihood residuals not implemented yet") else {
         sum(c(w) * dlogF(x = y, shape1 = shape1,
@@ -1729,15 +1764,15 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = c("logF"),
   deriv = eval(substitute(expression({
-    shape1 <- eta2theta(eta[, 1], .lshape1, earg = .eshape1)
-    shape2 <- eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
 
-    tmp888 <- digamma(shape1 + shape2) - log1p(exp(-y))
+    tmp888 <- digamma(shape1 + shape2) - log1pexp(-y)
     dl.dshape1 <- tmp888 - digamma(shape1)
     dl.dshape2 <- tmp888 - digamma(shape2) - y
 
-    dshape1.deta <- dtheta.deta(shape1, .lshape1, earg = .eshape1)
-    dshape2.deta <- dtheta.deta(shape2, .lshape2, earg = .eshape2)
+    dshape1.deta <- dtheta.deta(shape1, .lshape1 , earg = .eshape1 )
+    dshape2.deta <- dtheta.deta(shape2, .lshape2 , earg = .eshape2 )
 
     c(w) * cbind(dl.dshape1 * dshape1.deta,
                  dl.dshape2 * dshape2.deta)
@@ -1764,7 +1799,7 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
 
 
 dbenf <- function(x, ndigits = 1, log = FALSE) {
-  if (!is.Numeric(ndigits, allowable.length = 1,
+  if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       ndigits > 2)
     stop("argument 'ndigits' must be 1 or 2")
@@ -1789,7 +1824,7 @@ dbenf <- function(x, ndigits = 1, log = FALSE) {
 
 
 rbenf <- function(n, ndigits = 1) {
-  if (!is.Numeric(ndigits, allowable.length = 1,
+  if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       ndigits > 2)
     stop("argument 'ndigits' must be 1 or 2")
@@ -1797,12 +1832,12 @@ rbenf <- function(n, ndigits = 1) {
   upperlimit <- ifelse(ndigits == 1, 9, 99)
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE)) 
+                           length.arg = 1, positive = TRUE)) 
              stop("bad input for argument 'n'") else n
   myrunif <- runif(use.n)
 
   ans <- rep(lowerlimit, length = use.n)
-  for(ii in (lowerlimit+1):upperlimit) {
+  for (ii in (lowerlimit+1):upperlimit) {
       indexTF <- (pbenf(ii-1, ndigits = ndigits) < myrunif) &
                  (myrunif <= pbenf(ii, ndigits = ndigits))
       ans[indexTF] <- ii
@@ -1812,7 +1847,7 @@ rbenf <- function(n, ndigits = 1) {
 
 
 pbenf <- function(q, ndigits = 1, log.p = FALSE) {
-  if (!is.Numeric(ndigits, allowable.length = 1,
+  if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       ndigits > 2)
     stop("argument 'ndigits' must be 1 or 2")
@@ -1833,7 +1868,7 @@ pbenf <- function(q, ndigits = 1, log.p = FALSE) {
 
 
 qbenf <- function(p, ndigits = 1) {
-  if (!is.Numeric(ndigits, allowable.length = 1,
+  if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       ndigits > 2)
     stop("argument 'ndigits' must be 1 or 2")
@@ -1844,7 +1879,7 @@ qbenf <- function(p, ndigits = 1) {
     stop("bad input for argument 'p'")
 
   ans <- rep(lowerlimit, length = length(p))
-  for(ii in (lowerlimit+1):upperlimit) {
+  for (ii in (lowerlimit+1):upperlimit) {
     indexTF <- is.finite(p) &
               (pbenf(ii-1, ndigits = ndigits) < p) &
               (p <= pbenf(ii, ndigits = ndigits))
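# Benford's first-digit law, as implemented by dbenf()/pbenf() above
# (a usage sketch, assuming VGAM is attached):
dbenf(1:9)                                  # log10(1 + 1/d), d = 1,...,9
sum(dbenf(1:9))                             # 1
max(abs(pbenf(1:9) - cumsum(dbenf(1:9))))   # CDF is the running sum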
@@ -1870,7 +1905,7 @@ qbenf <- function(p, ndigits = 1) {
 
 
  truncgeometric <-
-  function(upper.limit = Inf,  # lower.limit = 1, # Inclusive
+  function(upper.limit = Inf,  # lower.limit = 1,  # Inclusive
            link = "logit", expected = TRUE,
            imethod = 1, iprob = NULL, zero = NULL) {
 
@@ -1893,7 +1928,7 @@ qbenf <- function(p, ndigits = 1) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -2003,7 +2038,7 @@ qbenf <- function(p, ndigits = 1) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
diff --git a/R/family.basics.R b/R/family.basics.R
index 63e833c..ce9fce1 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -9,6 +9,7 @@
 
 
 
+
  getind <- function(constraints, M, ncolx) {
 
 
@@ -21,7 +22,7 @@
   }
 
   ans <- vector("list", M+1)
-  names(ans) <- c(paste("eta", 1:M, sep = ""), "ncolX_vlm")
+  names(ans) <- c(paste("eta", 1:M, sep = ""), "ncolX.vlm")
 
   temp2 <- matrix(unlist(constraints), nrow = M)
   for (kk in 1:M) {
@@ -34,7 +35,7 @@
       }
     }
     ans[[kk]] <- list(xindex = ansx,
-                  X_vlmindex = (1:ncol(temp2))[temp2[kk,] != 0])
+                  X.vlmindex = (1:ncol(temp2))[temp2[kk,] != 0])
   }
   ans[[M+1]] <- ncol(temp2)
 
@@ -59,7 +60,7 @@
     stop("argument 'cm' is not a matrix")
   M <- nrow(cm)
   asgn <- attr(x, "assign")
-  if(is.null(asgn))
+  if (is.null(asgn))
     stop("the 'assign' attribute is missing from 'x'; this ",
          "may be due to some missing values")  # 20100306
   nasgn <- names(asgn)
@@ -110,7 +111,20 @@
         ii <- attr(tbool, "factors")
         default <- dimnames(ii)[[1]]
         default <- default[1]
-        default <- parse(text = default[1])[[1]]
+        default <- if (is.null(default[1])) {
+          t.or.f <- attr(tbool, "variables")
+
+          t.or.f <- as.character( t.or.f )
+          if (t.or.f[1] == "list" && length(t.or.f) == 2 &&
+             (t.or.f[2] == "TRUE" || t.or.f[2] == "FALSE")) {
+            t.or.f <- as.character( t.or.f[2] )
+            parse(text = t.or.f)[[1]]
+          } else {
+            stop("something gone awry")
+          }
+        } else {
+          parse(text = default[1])[[1]]  # Original
+        }
         default <- as.logical(eval(default))
     } else {
       default <- TRUE
@@ -138,7 +152,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
   asgn <- attr(x, "assign")
   nasgn <- names(asgn)
   if (is.null(constraints)) {
-    constraints <- vector("list", length(nasgn)) # list()
+    constraints <- vector("list", length(nasgn))  # list()
     names(constraints) <- nasgn
   }
   if (!is.list(constraints))
@@ -190,10 +204,11 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
   if (is.null(zero))
     return(constraints)
+
   if (!is.numeric(zero))
-    stop("'zero' must be numeric")
+    stop("argument 'zero' must be numeric")
   if (any(zero < 1 | zero > M))
-    stop("'zero' out of range")
+    stop("argument 'zero' out of range")
   if (nasgn[1] != "(Intercept)")
     stop("cannot fit an intercept to a no-intercept model")
 
@@ -232,10 +247,10 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
   }
 
   if (is.matrix(constraints))
-      constraints <- list(constraints)
+    constraints <- list(constraints)
 
   if (!is.list(constraints))
-      stop("'constraints' must be a list")
+    stop("'constraints' must be a list")
 
   lenconstraints <- length(constraints)
   if (lenconstraints > 0)
@@ -281,7 +296,9 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
                 any(nasgn[ii] == names(specialCM))) {
               slist <- specialCM[[(nasgn[ii])]]
               slist[[ictr]]
-            } else constraints[[ii]]
+            } else {
+              constraints[[ii]]
+            }
       Blist[[jay]] <- cm 
     }
   }
@@ -335,7 +352,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
   if (empty.list(constraints))
     if (is.list(new.constraints))
       return(new.constraints) else 
-      return(list()) # Both NULL probably
+      return(list())  # Both NULL probably
 
   constraints <- as.list(constraints)
   new.constraints <- as.list(new.constraints)
@@ -383,7 +400,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
 
 
- iam <- function(j, k, M, # hbw = M,
+ iam <- function(j, k, M,  # hbw = M,
                  both = FALSE, diag = TRUE) {
 
 
@@ -459,12 +476,12 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
     stop("bad value for 'M'; it is too big") 
   }
 
-  fred <- dotC(name = "m2a", as.double(t(m)), ans=double(M*M*n),
+  fred <- .C("m2a", as.double(t(m)), ans=double(M*M*n),
       as.integer(dimm),
       as.integer(index$row-1),  
       as.integer(index$col-1),  
       as.integer(n),  as.integer(M),  
-      as.integer(as.numeric(upper)), NAOK = TRUE)
+      as.integer(as.numeric(upper)), NAOK = TRUE, PACKAGE = "VGAM")
   dim(fred$ans) <- c(M, M, n)
   alpn <- NULL
   dimnames(fred$ans) <- list(alpn, alpn, dimnames(m)[[1]])
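# The .C("m2a", ...) call above expands VGAM's compact n x dimm(M) band
# storage of working weights into an M x M x n array; iam() supplies the
# (row, col) position of each stored column (a sketch, assuming VGAM is
# attached):
iam(1, 2, M = 3)                        # column holding the (1,2) element
ind <- iam(NA, NA, M = 3, both = TRUE)  # all (row, col) pairs, storage order
cbind(ind$row, ind$col)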
@@ -488,11 +505,12 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
   index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
 
 
-  fred <- dotC(name = "a2m", as.double(a), m=double(dimm.value*n),
+  fred <- .C("a2m",
+             as.double(a), m = double(dimm.value * n),
       as.integer(dimm.value),
       as.integer(index$row-1),  
       as.integer(index$col-1),  
-      as.integer(n),  as.integer(M), NAOK = TRUE)
+      as.integer(n),  as.integer(M), NAOK = TRUE, PACKAGE = "VGAM")
   dim(fred$m) <- c(dimm.value,n)
   fred$m <- t(fred$m)
 
@@ -532,7 +550,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
 
 
-if(!exists("is.R"))
+if (!exists("is.R"))
   is.R <- function()
     exists("version") &&
     !is.null(version$language) &&
@@ -564,7 +582,7 @@ if(!exists("is.R"))
   mu <- object@fitted.values
   if (any(slotNames(object) == "predictors"))
     eta <- object@predictors
-  mt <- terms(object) # object@terms$terms; 11/8/03 
+  mt <- terms(object)  # object@terms$terms; 20030811
   Blist <- constraints <- object@constraints 
   new.coeffs <- object@coefficients
   if (any(slotNames(object) == "iter"))
@@ -579,17 +597,31 @@ if(!exists("is.R"))
   x <- object@x
   if (!length(x))
     x <- model.matrixvlm(object, type = "lm")
+
   y <- object@y
+  if (!length(y))
+    y <- depvar(object)
+
+
+
+  if (length(object@misc$form2)) {
+    Xm2 <- object@Xm2
+    if (!length(Xm2))
+      Xm2 <- model.matrix(object, type = "lm2")
+    Ym2 <- object@Ym2
+  }
+
+
 
   if (any(slotNames(object) == "control"))
-  for (ii in names(object@control)) {
+    for (ii in names(object@control)) {
       assign(ii, object@control[[ii]]) 
-  } 
+    } 
 
   if (length(object@misc))
-  for (ii in names(object@misc)) {
-    assign(ii, object@misc[[ii]]) 
-  } 
+    for (ii in names(object@misc)) {
+      assign(ii, object@misc[[ii]]) 
+    } 
 
   if (any(slotNames(object) == "family")) {
     expr <- object@family@deriv
@@ -600,7 +632,7 @@ if(!exists("is.R"))
 
 
       if (M > 1) 
-        dimnames(wz) <- list(dimnames(wz)[[1]], NULL) # Remove colnames
+        dimnames(wz) <- list(dimnames(wz)[[1]], NULL)  # Remove colnames
       wz <- if (matrix.arg) as.matrix(wz) else c(wz) 
     }
     if (deriv.arg) list(deriv = deriv.mu, weights = wz) else wz
@@ -618,7 +650,7 @@ if(!exists("is.R"))
     ans 
   } else {
     temp <- object@y
-    ans <- rep(1, nrow(temp)) # Assumed all equal and unity.
+    ans <- rep(1, nrow(temp))  # Assumed all equal and unity.
     names(ans) <- dimnames(temp)[[1]]
     ans 
   }
@@ -633,11 +665,15 @@ procVec <- function(vec, yn, Default) {
 
 
 
+
+
+
+
   if (any(is.na(vec)))
     stop("vec cannot contain any NAs")
   L <- length(vec)
-  nvec <- names(vec)     # vec[""] undefined
-  named <- length(nvec)   # FALSE for c(1,3)
+  nvec <- names(vec)  # vec[""] undefined
+  named <- length(nvec)  # FALSE for c(1,3)
   if (named) {
     index <- (1:L)[nvec == ""]
     default <- if (length(index)) vec[index] else Default
@@ -668,7 +704,8 @@ procVec <- function(vec, yn, Default) {
 if (FALSE) {
 
 if (!isGeneric("m2a"))
-    setGeneric("m2a", function(object, ...) standardGeneric("m2a"))
+    setGeneric("m2a",
+  function(object, ...) standardGeneric("m2a"))
 
 setMethod("m2a", "vglm",
          function(object, ...)
@@ -727,16 +764,8 @@ setMethod("weights", "vglm",
 
 
 
-dotFortran <- function(name, ..., NAOK = FALSE, DUP = TRUE,
-                       PACKAGE = "VGAM") {
-  .Fortran(name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
-}
 
 
-dotC <- function(name, ..., NAOK = FALSE, DUP = TRUE,
-                 PACKAGE = "VGAM") {
-  .C(name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
-}
 
 
 
@@ -750,7 +779,8 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
     dderiv <- cbind(dderiv)
     deta <- cbind(deta)
   }
-  Bs <- mux22(t(wzold), deta, M = M, upper = FALSE, as.matrix = TRUE) # n x M
+  Bs <- mux22(t(wzold), deta, M = M,
+              upper = FALSE, as.matrix = TRUE)  # n x M
   sBs <- c( (deta * Bs) %*% rep(1, M) )   # should have positive values
   sy <- c( (dderiv * deta) %*% rep(1, M) )
   wznew <- wzold
@@ -782,7 +812,7 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
 
 
 mbesselI0 <- function(x, deriv.arg = 0) {
-  if (!is.Numeric(deriv.arg, allowable.length = 1,
+  if (!is.Numeric(deriv.arg, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) &&
       deriv.arg != 0)
     stop("argument 'deriv.arg' must be a single non-negative integer")
@@ -829,11 +859,11 @@ VGAM.matrix.norm <- function(A, power = 2, suppressWarning = FALSE) {
 rmfromVGAMenv <- function(varnames, prefix = "") {
   evarnames <- paste(prefix, varnames, sep = "")
   for (ii in evarnames) {
-    mytext1 <- "exists(x = ii, envir = VGAM:::VGAMenv)"
+    mytext1 <- "exists(x = ii, envir = VGAMenv)"
     myexp1 <- parse(text = mytext1)
     is.there <- eval(myexp1)
     if (is.there) {
-      rm(list = ii, envir = VGAM:::VGAMenv)
+      rm(list = ii, envir = VGAMenv)
     }
   }
 }
@@ -845,7 +875,7 @@ existsinVGAMenv <- function(varnames, prefix = "") {
   evarnames <- paste(prefix, varnames, sep = "")
   ans <- NULL
   for (ii in evarnames) {
-    mytext1 <- "exists(x = ii, envir = VGAM:::VGAMenv)"
+    mytext1 <- "exists(x = ii, envir = VGAMenv)"
     myexp1 <- parse(text = mytext1)
     is.there <- eval(myexp1)
     ans <- c(ans, is.there)
@@ -858,7 +888,7 @@ assign2VGAMenv <- function(varnames, mylist, prefix = "") {
   evarnames <- paste(prefix, varnames, sep = "")
   for (ii in 1:length(varnames)) {
     assign(evarnames[ii], mylist[[(varnames[ii])]],
-           envir = VGAM:::VGAMenv)
+           envir = VGAMenv)
   }
 }
 
@@ -871,7 +901,7 @@ getfromVGAMenv <- function(varname, prefix = "") {
   varname <- paste(prefix, varname, sep = "")
   if (length(varname) > 1)
     stop("'varname' must be of length 1")
-  get(varname, envir = VGAM:::VGAMenv)
+  get(varname, envir = VGAMenv)
 }
 
  
@@ -881,10 +911,10 @@ lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
     stop("bad input in 'x', 's', and/or 'v'")
   if (is.complex(c(x,s,v)))
     stop("complex arguments not allowed in 'x', 's' and 'v'")
-  if (!is.Numeric(tolerance, allowable.length = 1, positive = TRUE) ||
+  if (!is.Numeric(tolerance, length.arg = 1, positive = TRUE) ||
       tolerance > 0.01)
     stop("bad input for argument 'tolerance'")
-  if (!is.Numeric(iter, allowable.length = 1,
+  if (!is.Numeric(iter, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'iter'")
 
@@ -895,10 +925,11 @@ lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
   xok <- abs(x) < 1 & !(v <= 0 & v == round(v))
   x[!xok] <- 0  # Fix this later
 
-  ans <- dotC(name = "lerchphi123", err = integer(L), as.integer(L),
+  ans <- .C("lerchphi123",
+           err = integer(L), as.integer(L),
            as.double(x), as.double(s), as.double(v),
            acc=as.double(tolerance), result=double(L),
-           as.integer(iter))
+           as.integer(iter), PACKAGE = "VGAM")
 
   ifelse(ans$err == 0 & xok , ans$result, NA)
 }
@@ -918,25 +949,25 @@ negzero.expression <- expression({
   negdotzero <-  dotzero[dotzero < 0]
 
   bigUniqInt <- 1080
-  zneg_index <- if (length(negdotzero)) {
+  zneg.index <- if (length(negdotzero)) {
 
     if (!is.Numeric(-negdotzero, positive = TRUE,
                     integer.valued = TRUE) ||
         max(-negdotzero) > Musual)
         stop("bad input for argument 'zero'")
 
-    zneg_index <- rep(0:bigUniqInt, rep(length(negdotzero),
+    zneg.index <- rep(0:bigUniqInt, rep(length(negdotzero),
                       1 + bigUniqInt)) * Musual + abs(negdotzero)
-    sort(intersect(zneg_index, 1:M))
+    sort(intersect(zneg.index, 1:M))
   } else {
     NULL
   }
 
-  zpos_index <- if (length(posdotzero)) posdotzero else NULL
-  z_Index <- if (!length(dotzero)) NULL else
-                   unique(sort(c(zneg_index, zpos_index)))
+  zpos.index <- if (length(posdotzero)) posdotzero else NULL
+  z.Index <- if (!length(dotzero)) NULL else
+                   unique(sort(c(zneg.index, zpos.index)))
 
-  constraints <- cm.zero.vgam(constraints, x, z_Index, M)
+  constraints <- cm.zero.vgam(constraints, x, z.Index, M)
 })
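# The expression above expands negative entries of 'zero': for example,
# zero = -2 constrains the 2nd linear predictor of every response to be
# intercept-only.  A sketch of the index arithmetic for Musual = 2, M = 6:
Musual <- 2; M <- 6; negdotzero <- -2
sort(intersect((0:3) * Musual + abs(negdotzero), 1:M))   # 2 4 6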
 
 
@@ -1037,7 +1068,7 @@ w.y.check <- function(w, y,
     w <- as.matrix(w)
   if (!is.matrix(y))
     y <- as.matrix(y)
-  n_lm <- nrow(y)
+  n.lm <- nrow(y)
   rn.w <- rownames(w)
   rn.y <- rownames(y)
   cn.w <- colnames(w)
@@ -1051,7 +1082,7 @@ w.y.check <- function(w, y,
   if (Is.nonnegative.y && any(y < 0))
     stop("response variable 'y' must be 0 or positive-valued")
 
-  if (nrow(w) != n_lm)
+  if (nrow(w) != n.lm)
     stop("nrow(w) should be equal to nrow(y)")
 
   if (ncol(w) > ncol.w.max)
@@ -1089,7 +1120,7 @@ w.y.check <- function(w, y,
     if (length(cn.w) < Ncol.max.w)
       cn.w <- c(cn.w, paste(prefix.w, (length(cn.w)+1):Ncol.max.w,
                             sep = ""))
-    w <- matrix(w, n_lm, Ncol.max.w, dimnames = list(rn.w, cn.w))
+    w <- matrix(w, n.lm, Ncol.max.w, dimnames = list(rn.w, cn.w))
   }
   if (out.wy && ncol(y) < Ncol.max.y) {
     nblanks <- sum(cn.y == "")
@@ -1098,7 +1129,7 @@ w.y.check <- function(w, y,
     if (length(cn.y) < Ncol.max.y)
       cn.y <- c(cn.y, paste(prefix.y, (length(cn.y)+1):Ncol.max.y,
                             sep = ""))
-    y <- matrix(y, n_lm, Ncol.max.y, dimnames = list(rn.y, cn.y))
+    y <- matrix(y, n.lm, Ncol.max.y, dimnames = list(rn.y, cn.y))
   }
        
   list(w = if (out.wy) w else NULL,
@@ -1175,3 +1206,41 @@ vweighted.mean.default <- function (x, w, ..., na.rm = FALSE) {
 
 
 
+
+
+family.name.vlm <- function(object, all = FALSE, ...) {
+  ans <- object@family@vfamily
+  if (all) ans else ans[1]
+}
+
+
+family.name.vglmff <- function(object, all = FALSE, ...) {
+  ans <- object@vfamily
+  if (all) ans else ans[1]
+}
+
+
+
+if (!isGeneric("family.name"))
+    setGeneric("family.name",
+  function(object, ...) standardGeneric("family.name"))
+
+
+setMethod("family.name", "vglmff",
+         function(object, ...)
+         family.name.vglmff(object, ...))
+
+
+
+setMethod("family.name", "vlm",
+         function(object, ...)
+         family.name.vlm(object, ...))
+
+
+
+
+
+
+
+
+
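# Usage sketch for the new family.name() generic added above (assumes VGAM
# is attached; poissonff() is just a convenient example family):
pdata <- data.frame(y = rpois(20, lambda = 3))
pfit <- vglm(y ~ 1, poissonff, data = pdata)
family.name(pfit)               # should return "poissonff"
family.name(pfit, all = TRUE)   # the full vfamily character vector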
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 8517ed0..b00fcf8 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -88,17 +88,17 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
     stop("argument 'imethod' must be 1, 2, 3 or 4")
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
       shrinkage.init < 0 ||
       shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
   if (!is.null(nsimEIM)) {
-    if (!is.Numeric(nsimEIM, allowable.length = 1,
+    if (!is.Numeric(nsimEIM, length.arg = 1,
                     integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
     if (nsimEIM <= 10)
@@ -113,7 +113,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
             "Mean:       mu", "\n",
             "Variance:   mu*(1-mu)*(1+(w-1)*rho)/w"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (!all(w == 1))
@@ -124,32 +124,32 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
     }
 
     mustart.orig <- mustart
-    eval(binomialff()@initialize)   # Note: n,w,y,mustart is changed 
+    eval(binomialff()@initialize)  # Note: n,w,y,mustart is changed 
     if (length(mustart.orig))
       mustart <- mustart.orig  # Retain it if inputted
 
 
     ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-              y * w # Convert proportions to counts
+               y * w  # Convert proportions to counts
     if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
       warning("the response (as counts) does not appear to ",
               "be integer-valued. Am rounding to integer values.")
-    ycounts <- round(ycounts) # Make sure it is an integer
+    ycounts <- round(ycounts)  # Make sure it is an integer
     predictors.names <-
       c(namesof("mu",  .lmu ,  earg = .emu  , tag = FALSE),
         namesof("rho", .lrho , earg = .erho , tag = FALSE))
+
     if (!length(etastart)) {
       betabinomial.Loglikfun <- function(rhoval, y, x, w, extraargs) {
         shape1 <-    extraargs$mustart  * (1-rhoval) / rhoval
         shape2 <- (1-extraargs$mustart) * (1-rhoval) / rhoval
-        ycounts <- extraargs$ycounts   # Ought to be integer-valued
+        ycounts <- extraargs$ycounts  # Ought to be integer-valued
         nvec <- extraargs$nvec
         sum(dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
                           shape2 = shape2, log = TRUE))
       }
-      rho.grid <- seq(0.05, 0.95, len=21)  # rvar = 
-      mustart.use =
-      if (length(mustart.orig)) {
+      rho.grid <- seq(0.05, 0.95, len = 25)  # rvar =
+      mustart.use <- if (length(mustart.orig)) {
         mustart.orig
       } else if ( .imethod == 1) {
         rep(weighted.mean(y, w), len = n)
@@ -163,18 +163,19 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
       } else {
         mustart
       }
-      try.this <- getMaxMin(rho.grid, objfun=betabinomial.Loglikfun,
-                        y = y,  x = x, w = w, extraargs = list(
-                        ycounts=ycounts,
-                        nvec = if (is.numeric(extra$orig.w))
-                               round(w / extra$orig.w) else round(w),
-                        mustart = mustart.use))
+      try.this <- getMaxMin(rho.grid, objfun = betabinomial.Loglikfun,
+                            y = y,  x = x, w = w,
+                            extraargs = list(
+                            ycounts = ycounts,
+                            nvec = if (is.numeric(extra$orig.w))
+                                   round(w / extra$orig.w) else round(w),
+                            mustart = mustart.use))
       init.rho <- if (is.Numeric( .irho ))
                     rep( .irho , length = n) else
                     rep(try.this, length = n)
       etastart <-
-        cbind(theta2eta(mustart.use,  .lmu ,  earg = .emu),
-              theta2eta(init.rho,     .lrho , earg = .erho))
+        cbind(theta2eta(mustart.use,  .lmu ,  earg = .emu ),
+              theta2eta(init.rho,     .lrho , earg = .erho ))
       mustart <- NULL  # Since etastart has been computed.
     }
   }), list( .lmu = lmu, .lrho = lrho,
@@ -182,12 +183,12 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
             .imethod = imethod, .sinit = shrinkage.init,
             .nsimEIM = nsimEIM, .irho = irho ))),
   linkinv = eval(substitute(function(eta, extra = NULL)
-    eta2theta(eta[, 1], .lmu, earg = .emu), 
+    eta2theta(eta[, 1], .lmu , earg = .emu ), 
   list( .lmu = lmu, .emu = emu ))),
   last = eval(substitute(expression({
-    misc$link <-    c(mu = .lmu, rho = .lrho)
+    misc$link <-    c(mu = .lmu , rho = .lrho)
 
-    misc$earg <- list(mu = .emu, rho = .erho)
+    misc$earg <- list(mu = .emu , rho = .erho )
 
     misc$zero <- .zero
     misc$expected <- TRUE
@@ -199,10 +200,10 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
   loglikelihood = eval(substitute(
     function(mu,y,w,residuals = FALSE, eta, extra = NULL) {
     ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-               y * w # Convert proportions to counts
+               y * w  # Convert proportions to counts
 
-    mymu <- eta2theta(eta[, 1], .lmu,  earg = .emu)
-    rho  <- eta2theta(eta[, 2], .lrho , earg = .erho)
+    mymu <- eta2theta(eta[, 1], .lmu ,  earg = .emu )
+    rho  <- eta2theta(eta[, 2], .lrho , earg = .erho )
     smallno <- 1.0e4 * .Machine$double.eps
 
     if (max(abs(ycounts - round(ycounts))) > smallno)
@@ -231,11 +232,11 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
               round(w)
     ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-              y * w # Convert proportions to counts
+              y * w  # Convert proportions to counts
 
     ycounts <- round(ycounts)
-    mymu <- eta2theta(eta[, 1], .lmu,  earg = .emu)
-    rho  <- eta2theta(eta[, 2], .lrho , earg = .erho)
+    mymu <- eta2theta(eta[, 1], .lmu ,  earg = .emu )
+    rho  <- eta2theta(eta[, 2], .lrho , earg = .erho )
     smallno <- 100 * .Machine$double.eps
     rho  <- pmax(rho, smallno)
     rho  <- pmin(rho, 1-smallno)
@@ -247,8 +248,8 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
     dshape1.drho <-       -mymu  / rho^2
     dshape2.drho <-  -(1 - mymu) / rho^2
 
-    dmu.deta  <- dtheta.deta(mymu, .lmu  , earg = .emu)
-    drho.deta <- dtheta.deta(rho,  .lrho , earg = .erho)
+    dmu.deta  <- dtheta.deta(mymu, .lmu  , earg = .emu )
+    drho.deta <- dtheta.deta(rho,  .lrho , earg = .erho )
 
     dl.dmu <- dshape1.dmu * (digamma(shape1+ycounts) -
               digamma(shape2+nvec-ycounts) -
@@ -329,14 +330,15 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-dbinom2.or <- function(mu1,
-             mu2 = if (exchangeable) mu1 else
-                   stop("'mu2' not specified"),
-             oratio = 1,
-             exchangeable = FALSE,
-             tol = 0.001,
-             colnames = c("00", "01", "10", "11"),
-             ErrorCheck = TRUE) {
+dbinom2.or <-
+  function(mu1,
+           mu2 = if (exchangeable) mu1 else
+                 stop("'mu2' not specified"),
+           oratio = 1,
+           exchangeable = FALSE,
+           tol = 0.001,
+           colnames = c("00", "01", "10", "11"),
+           ErrorCheck = TRUE) {
   if (ErrorCheck) {
     if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
       stop("bad input for argument 'mu1'") 
@@ -344,46 +346,47 @@ dbinom2.or <- function(mu1,
       stop("bad input for argument 'mu2'") 
     if (!is.Numeric(oratio, positive = TRUE))
       stop("bad input for argument 'oratio'") 
-    if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+    if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
         tol > 0.1)
       stop("bad input for argument 'tol'") 
     if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
       stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
   }
 
-  n <- max(length(mu1), length(mu2), length(oratio))
-  oratio <- rep(oratio, len = n)
-  mu1    <- rep(mu1,    len = n)
-  mu2    <- rep(mu2,    len = n)
+  L <- max(length(mu1), length(mu2), length(oratio))
+  if (length(oratio) != L) oratio <- rep(oratio, len = L)
+  if (length(mu1   ) != L) mu1    <- rep(mu1,    len = L)
+  if (length(mu2   ) != L) mu2    <- rep(mu2,    len = L)
 
   a.temp <- 1 + (mu1+mu2)*(oratio-1)
   b.temp <- -4 * oratio * (oratio-1) * mu1 * mu2
   temp <- sqrt(a.temp^2 + b.temp)
   p11 <- ifelse(abs(oratio-1) < tol,
-               mu1*mu2,
-              (a.temp-temp)/(2*(oratio-1)))
+                mu1*mu2,
+               (a.temp-temp)/(2*(oratio-1)))
   p01 <- mu2 - p11
   p10 <- mu1 - p11
   p00 <- 1 - p11 - p01 - p10
-  matrix(c(p00, p01, p10, p11), n, 4, dimnames = list(NULL, colnames))
+  matrix(c(p00, p01, p10, p11), L, 4, dimnames = list(NULL, colnames))
 }
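# dbinom2.or() above returns the joint cell probabilities (p00, p01, p10, p11)
# of two Bernoulli margins tied together by an odds ratio; each row sums to 1
# (a sketch, assuming VGAM is attached):
pp <- dbinom2.or(mu1 = 0.6, mu2 = 0.4, oratio = 3)
pp
rowSums(pp)   # 1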
 
 
 
 
-rbinom2.or <- function(n, mu1,
-                      mu2 = if (exchangeable) mu1 else
-                        stop("argument 'mu2' not specified"),
-                      oratio = 1,
-                      exchangeable = FALSE,
-                      tol = 0.001,
-                      twoCols = TRUE,
-                      colnames = if (twoCols) c("y1", "y2") else
-                                 c("00", "01", "10", "11"),
-                      ErrorCheck = TRUE) {
+rbinom2.or <-
+  function(n, mu1,
+           mu2 = if (exchangeable) mu1 else
+                   stop("argument 'mu2' not specified"),
+           oratio = 1,
+           exchangeable = FALSE,
+           tol = 0.001,
+           twoCols = TRUE,
+           colnames = if (twoCols) c("y1", "y2") else
+                         c("00", "01", "10", "11"),
+           ErrorCheck = TRUE) {
   if (ErrorCheck) {
     if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE,
-                    allowable.length = 1))
+                    length.arg = 1))
       stop("bad input for argument 'n'")
     if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
       stop("bad input for argument 'mu1'") 
@@ -391,7 +394,7 @@ rbinom2.or <- function(n, mu1,
       stop("bad input for argument 'mu2'") 
     if (!is.Numeric(oratio, positive = TRUE))
       stop("bad input for argument 'oratio'") 
-    if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+    if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
         tol > 0.1)
       stop("bad input for argument 'tol'") 
     if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
@@ -427,8 +430,10 @@ rbinom2.or <- function(n, mu1,
  binom2.or <- function(lmu = "logit", lmu1 = lmu, lmu2 = lmu,
                        loratio = "loge",
                        imu1 = NULL, imu2 = NULL, ioratio = NULL,
-                       zero = 3, exchangeable = FALSE, tol = 0.001,
-                       morerobust = FALSE) {
+                       zero = 3,
+                       exchangeable = FALSE,
+                       tol = 0.001,
+                       more.robust = FALSE) {
 
   lmu1 <- lmu1
   lmu2 <- lmu2
@@ -443,19 +448,22 @@ rbinom2.or <- function(n, mu1,
   lmu2 <- attr(emu2, "function.name")
 
 
-
-
-
   loratio <- as.list(substitute(loratio))
   eoratio <- link2list(loratio)
   loratio <- attr(eoratio, "function.name")
 
 
 
-  if (is.logical(exchangeable) && exchangeable && ((lmu1 != lmu2) ||
-     !all.equal(emu1, emu2)))
-    stop("exchangeable = TRUE but marginal links are not equal") 
-  if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+  if (!is.logical(exchangeable))
+    warning("argument 'exchangeable' should be a single logical") 
+
+  if (is.logical(exchangeable) && exchangeable &&
+     ((lmu1 != lmu2) || !all.equal(emu1, emu2)))
+    warning("exchangeable = TRUE but marginal links are not equal") 
+
+
+
+  if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
       tol > 0.1)
     stop("bad input for argument 'tol'") 
 
@@ -467,10 +475,14 @@ rbinom2.or <- function(n, mu1,
             namesof("mu2", lmu2, earg = emu2), "; ",
             namesof("oratio", loratio, earg = eoratio)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1,0,0,0, 1), 3, 2), x, 
-                           .exchangeable , constraints,
-                            apply.int = TRUE)
-      constraints = cm.zero.vgam(constraints, x, .zero , M)
+    cm.intercept.default <- diag(3)
+    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
+                           bool = .exchangeable ,
+                           constraints = constraints,
+                           apply.int = TRUE,
+                           cm.default           = cm.intercept.default,
+                           cm.intercept.default = cm.intercept.default)
+      constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
   initialize = eval(substitute(expression({
@@ -541,7 +553,7 @@ rbinom2.or <- function(n, mu1,
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     if (residuals)
       stop("loglikelihood residuals not implemented yet") else {
-      if ( .morerobust) {
+      if ( .more.robust) {
         vsmallno <-  1.0e4 * .Machine$double.xmin
         mu[mu < vsmallno] <- vsmallno
       }
@@ -561,7 +573,7 @@ rbinom2.or <- function(n, mu1,
                        log = TRUE, dochecking = FALSE))
 
     }
-  }, list( .morerobust = morerobust ))),
+  }, list( .more.robust = more.robust ))),
   vfamily = c("binom2.or", "binom2"),
   deriv = eval(substitute(expression({
     smallno <- 1.0e4 * .Machine$double.eps
@@ -647,7 +659,7 @@ dbinom2.rho <-
   mu2 <- rep(mu2, len = nn)
   eta1 <- qnorm(mu1)
   eta2 <- qnorm(mu2)
-  p11 <- pnorm2(eta1, eta2, cov12 = rho)
+  p11 <- pbinorm(eta1, eta2, cov12 = rho)
   p01 <- mu2 - p11
   p10 <- mu1 - p11
   p00 <- 1.0 - p01 - p10 - p11
@@ -669,7 +681,7 @@ rbinom2.rho <-
            ErrorCheck = TRUE) {
   if (ErrorCheck) {
     if (!is.Numeric(n, integer.valued = TRUE,
-                    positive = TRUE, allowable.length = 1))
+                    positive = TRUE, length.arg = 1))
       stop("bad input for argument 'n'")
     if (!is.Numeric(mu1, positive = TRUE) ||
         max(mu1) >= 1)
@@ -680,6 +692,8 @@ rbinom2.rho <-
     if (!is.Numeric(rho) || min(rho) <= -1 ||
         max(rho) >= 1)
       stop("bad input for argument 'rho'") 
+
+
     if (exchangeable &&
         max(abs(mu1 - mu2)) > 0.00001)
       stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
@@ -719,7 +733,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
 
  binom2.rho <- function(lrho = "rhobit",
-                        lmu = "probit", # added 20120817
+                        lmu = "probit",  # added 20120817
                         imu1 = NULL, imu2 = NULL, irho = NULL,
                         imethod = 1,
                         zero = 3, exchangeable = FALSE,
@@ -745,14 +759,14 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
 
   if (is.Numeric(nsimEIM)) {
-    if (!is.Numeric(nsimEIM, allowable.length = 1,
+    if (!is.Numeric(nsimEIM, length.arg = 1,
                     integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
     if (nsimEIM <= 100)
       warning("'nsimEIM' should be an integer greater than 100")
   }
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -767,10 +781,11 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
             namesof("mu2", lmu12, earg = emu12), ", ",
             namesof("rho", lrho,  earg = erho)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
-                           .exchangeable , constraints,
+    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
+                           bool = .exchangeable ,
+                           constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -798,11 +813,11 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
 
     ycounts <- if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
-               y * c(w) # Convert proportions to counts
+               y * c(w)  # Convert proportions to counts
     if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
        warning("the response (as counts) does not appear to ",
                "be integer-valued. Am rounding to integer values.")
-    ycounts <- round(ycounts) # Make sure it is an integer
+    ycounts <- round(ycounts)  # Make sure it is an integer
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
                                           round(w)
 
@@ -841,7 +856,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
           nvec     <-    extraargs$nvec
           eta1 <- qnorm(init.mu1)
           eta2 <- qnorm(init.mu2)
-          p11 <- pnorm2(eta1, eta2, cov12 = rhoval)
+          p11 <- pbinorm(eta1, eta2, cov12 = rhoval)
           p01 <- pmin(init.mu2 - p11, init.mu2)
           p10 <- pmin(init.mu1 - p11, init.mu1)
           p00 <- 1.0 - p01 - p10 - p11
@@ -874,7 +889,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
       etastart <- cbind(theta2eta(mu1.init, .lmu12 , earg = .emu12 ),
                         theta2eta(mu2.init, .lmu12 , earg = .emu12 ),
-                        theta2eta(rho.init, .lrho ,  earg = .erho))
+                        theta2eta(rho.init, .lrho ,  earg = .erho ))
       mustart <- NULL # Since etastart has been computed.
     }
   }), list( .lmu12 = lmu12, .lrho = lrho,
@@ -886,8 +901,8 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
   linkinv = eval(substitute(function(eta, extra = NULL) {
     pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
                      eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
-    rho <- eta2theta(eta[, 3], .lrho , earg = .erho)
-    p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rho)
+    rho <- eta2theta(eta[, 3], .lrho , earg = .erho )
+    p11 <- pbinorm(eta[, 1], eta[, 2], cov12 = rho)
     p01 <- pmin(pmargin[, 2] - p11, pmargin[, 2])
     p10 <- pmin(pmargin[, 1] - p11, pmargin[, 1])
     p00 <- 1.0 - p01 - p10 - p11
@@ -916,7 +931,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
       ycounts <- if (is.numeric(extra$orig.w))
                  y * c(w) / extra$orig.w else
-                 y * c(w) # Convert proportions to counts
+                 y * c(w)  # Convert proportions to counts
 
       smallno <- 1.0e4 * .Machine$double.eps
       if (max(abs(ycounts - round(ycounts))) > smallno)
@@ -936,12 +951,12 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
             round(w)
     ycounts <- if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
-               y * c(w) # Convert proportions to counts
+               y * c(w)  # Convert proportions to counts
 
     pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
                      eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
-    rhovec <- eta2theta(eta[, 3], .lrho , earg = .erho)
-    p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rhovec)
+    rhovec <- eta2theta(eta[, 3], .lrho , earg = .erho )
+    p11 <- pbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
     p01 <- pmargin[, 2] - p11
     p10 <- pmargin[, 1] - p11
     p00 <- 1 - p01 - p10 - p11
@@ -960,7 +975,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
     p10[p10 < smallno] <- smallno
     p11[p11 < smallno] <- smallno
 
-    dprob00 <- dnorm2(eta[, 1], eta[, 2], rho = rhovec)
+    dprob00 <- dbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
     dl.dprob1 <-     PhiB * (ycounts[, 4]/p11 - ycounts[, 2]/p01) +
                  onemPhiB * (ycounts[, 3]/p10 - ycounts[, 1]/p00)
     dl.dprob2 <-     PhiA * (ycounts[, 4]/p11 - ycounts[, 3]/p10) +
@@ -970,7 +985,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
     dprob1.deta <- dtheta.deta(pmargin[, 1], .lmu12 , earg = .emu12 )
     dprob2.deta <- dtheta.deta(pmargin[, 2], .lmu12 , earg = .emu12 )
-    drho.deta <- dtheta.deta(rhovec, .lrho , earg = .erho)
+    drho.deta <- dtheta.deta(rhovec, .lrho , earg = .erho )
     dthetas.detas <- cbind(dprob1.deta, dprob2.deta, drho.deta)
 
     (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
@@ -993,7 +1008,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
                           onemPhiA * (1/p01 + 1/p00)) * dprob00
       ned2l.drho2 <-  (1/p11 + 1/p01 + 1/p10 + 1/p00) * dprob00^2
 
-      wz <- matrix(0, n, dimm(M)) # 6=dimm(M)
+      wz <- matrix(0, n, dimm(M))  # 6=dimm(M)
       wz[, iam(1, 1, M)] <- ned2l.dprob1prob1 * dprob1.deta^2
       wz[, iam(2, 2, M)] <- ned2l.dprob2prob2 * dprob2.deta^2
       wz[, iam(3, 3, M)] <- ned2l.drho2 * drho.deta^2
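
The hunks above all touch the binom2.rho() bivariate probit family. A minimal simulation sketch is given below; it assumes the usual (0,0), (0,1), (1,0), (1,1) column order for a joint-counts response, and the coefficient and rho values are arbitrary:

    nn <- 2000
    x2 <- runif(nn)
    eta1 <-  0.5 + x2
    eta2 <- -0.5 + x2
    rho <- 0.6
    p11 <- pbinorm(eta1, eta2, cov12 = rho)
    p10 <- pnorm(eta1) - p11
    p01 <- pnorm(eta2) - p11
    p00 <- 1 - p11 - p10 - p01
    ymat <- t(sapply(seq_len(nn), function(ii)
      rmultinom(1, size = 1, prob = c(p00[ii], p01[ii], p10[ii], p11[ii]))))
    colnames(ymat) <- c("y00", "y01", "y10", "y11")
    fit <- vglm(ymat ~ x2, binom2.rho, trace = TRUE)
    coef(fit, matrix = TRUE)
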
@@ -1030,7 +1045,14 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
 
 
+
+
 dnorm2 <- function(x, y, rho = 0, log = FALSE) {
+
+
+  warning("decommissioning dnorm2() soon; use ",
+          "dbinorm(..., cov12 = rho) instead")
+
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -1049,12 +1071,70 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
 
 
 
+ pbinorm <- function(x1, x2,
+                     mean1 = 0, mean2 = 0,
+                     var1 = 1, var2 = 1,
+                     cov12 = 0) {
+
+
+
+  sd1 <- sqrt(var1)
+  sd2 <- sqrt(var2)
+  rho <- cov12 / (sd1 * sd2)
+
+  if (any(is.na(x1)    | is.na(x2)    |
+          is.na(sd1)   | is.na(sd2)   |
+          is.na(mean1) | is.na(mean2) | is.na(rho)))
+    stop("no NAs allowed in arguments or variables 'x1', 'x2', 'mean1', ",
+         "'mean2', 'sd1', 'sd2', 'cov12'")
+  if (min(rho) < -1 || max(rho) > +1)
+    stop("correlation 'rho' is out of range")
+
+
+  if (length(mean1) > 1 && length(mean2) == 1 &&
+      length(var1) == 1 && length(var2)  == 1 && length(cov12) == 1)
+    warning("the call to pnorm2() seems based on the old version ",
+            "of the arguments")
+
+  LLL <- max(length(x1), length(x2),
+             length(mean1), length(mean2),
+             length(sd1), length(sd2),
+             length(rho))
+  if (length(x1)    != LLL) x1    <- rep(x1,     len = LLL)
+  if (length(x2)    != LLL) x2    <- rep(x2,     len = LLL)
+  if (length(mean1) != LLL) mean1 <- rep(mean1,  len = LLL)
+  if (length(mean2) != LLL) mean2 <- rep(mean2,  len = LLL)
+  if (length(sd1)   != LLL) sd1   <- rep(sd1,    len = LLL)
+  if (length(sd2)   != LLL) sd2   <- rep(sd2,    len = LLL)
+  if (length(rho)   != LLL) rho   <- rep(rho,    len = LLL)
+
+  Z1 <- (x1 - mean1) / sd1
+  Z2 <- (x2 - mean2) / sd2
+
+  ans <- Z1
+  singler <- ifelse(length(rho) == 1, 1, 0)
+  answer <- .C("pnorm2",
+       ah = as.double(-Z1), ak = as.double(-Z2), r = as.double(rho),
+       size = as.integer(LLL), singler = as.integer(singler),
+       ans = as.double(ans), PACKAGE = "VGAM")$ans
+  if (any(answer < 0.0))
+    warning("some negative values returned")
+  answer
+}
+
+
+
+
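+
As a quick sanity check of pbinorm(), its value at the origin can be compared with the closed-form orthant probability P(X1 <= 0, X2 <= 0) = 1/4 + asin(rho) / (2*pi) for a standard bivariate normal:

    pbinorm(0, 0, cov12 = 0.5)     # numerical value
    1/4 + asin(0.5) / (2 * pi)     # closed form: 1/3
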
  pnorm2 <- function(x1, x2,
                     mean1 = 0, mean2 = 0,
                     var1 = 1, var2 = 1,
                     cov12 = 0) {
 
 
+  warning("decommissioning pnorm2() soon; use ",
+          "dbinorm() instead")
+
 
   sd1 <- sqrt(var1)
   sd2 <- sqrt(var2)
@@ -1091,10 +1171,10 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
 
   ans <- Z1
   singler <- ifelse(length(rho) == 1, 1, 0)
-  answer <- dotC(name = "pnorm2",
+  answer <- .C("pnorm2",
        ah = as.double(-Z1), ak = as.double(-Z2), r = as.double(rho),
        size = as.integer(LLL), singler = as.integer(singler),
-       ans = as.double(ans))$ans
+       ans = as.double(ans), PACKAGE = "VGAM")$ans
   if (any(answer < 0.0))
     warning("some negative values returned")
   answer
@@ -1104,6 +1184,11 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
 
 
 
+
+
+
+
+
 my.dbinom <- function(x,
                       size = stop("no 'size' argument"),
                       prob = stop("no 'prob' argument")) {
@@ -1222,9 +1307,9 @@ my.dbinom <- function(x,
     }
   }
 
-  okay1 <- is.na(shape1)       & is.infinite(shape2) # rho = 0 and prob == 0
+  okay1 <- is.na(shape1)       & is.infinite(shape2)  # rho = 0 and prob == 0
   okay2 <- is.infinite(shape1) & is.na(shape2)       # rho = 0 and prob == 1
-  okay3 <- is.infinite(shape1) & is.infinite(shape2) # rho = 0 and 0 < prob < 1
+  okay3 <- is.infinite(shape1) & is.infinite(shape2)  # rho = 0 and 0 < prob < 1
 
   if (sum.okay1 <- sum(okay1))
     ans[okay1] <- dbinom(x = x[okay1], size = size[okay1],
@@ -1313,7 +1398,7 @@ my.dbinom <- function(x,
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
   if (length(size)   != use.n) size   <- rep(size,   len = use.n)
   if (length(shape1) != use.n) shape1 <- rep(shape1, len = use.n)
@@ -1326,9 +1411,9 @@ my.dbinom <- function(x,
                          prob = rbeta(n = smalln, shape1 = shape1[okay0],
                                                   shape2 = shape2[okay0]))
 
-  okay1 <- is.na(shape1)       & is.infinite(shape2) # rho = 0 and prob == 0
+  okay1 <- is.na(shape1)       & is.infinite(shape2)  # rho = 0 and prob == 0
   okay2 <- is.infinite(shape1) & is.na(shape2)       # rho = 0 and prob == 1
-  okay3 <- is.infinite(shape1) & is.infinite(shape2) # rho = 0 and 0 < prob < 1
+  okay3 <- is.infinite(shape1) & is.infinite(shape2)  # rho = 0 and 0 < prob < 1
 
   if (sum.okay1 <- sum(okay1))
     ans[okay1] <- rbinom(n = sum.okay1, size = size[okay1],
@@ -1425,7 +1510,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
 
   if (!is.Numeric(i1, positive = TRUE))
     stop("bad input for argument 'i1'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1, 2 or 3")
@@ -1434,7 +1519,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
     stop("bad input for argument 'i2'")
 
   if (!is.null(nsimEIM)) {
-    if (!is.Numeric(nsimEIM, allowable.length = 1,
+    if (!is.Numeric(nsimEIM, length.arg = 1,
                     integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
     if (nsimEIM <= 10)
@@ -1451,53 +1536,53 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
             "Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
                        "where rho = 1 / (shape1+shape2+1)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
-      if (!all(w == 1))
-        extra$orig.w <- w
+    if (!all(w == 1))
+      extra$orig.w <- w
 
-      if (is.null( .nsimEIM)) {
-         save.weight <- control$save.weight <- FALSE
-      }
+    if (is.null( .nsimEIM)) {
+      save.weight <- control$save.weight <- FALSE
+    }
 
-      mustart.orig <- mustart
-      eval(binomialff()@initialize)   # Note: n,w,y,mustart is changed 
-      if (length(mustart.orig))
-        mustart <- mustart.orig  # Retain it if inputted
-      predictors.names <-
-           c(namesof("shape1", .lshape12, earg = .earg, tag = FALSE),
-             namesof("shape2", .lshape12, earg = .earg, tag = FALSE))
-
-      if (!length(etastart)) {
-
-        mustart.use <- if (length(mustart.orig)) mustart.orig else
-                      mustart
-
-        shape1 <- rep( .i1 , len = n)
-        shape2 <- if (length( .i2 )) {
-                    rep( .i2 , len = n)
-                  } else if (length(mustart.orig)) {
-                    shape1 * (1 / mustart.use - 1)
-                  } else if ( .imethod == 1) {
-                    shape1 * (1 / weighted.mean(y, w)  - 1)
-                  } else if ( .imethod == 2) {
-                    temp777 <- .sinit * weighted.mean(y, w) +
-                              (1 - .sinit) * y
-                    shape1 * (1 / temp777 - 1)
-                  } else {
-                        shape1 * (1 / weighted.mean(mustart.use, w) - 1)
-                  }
-        ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-                    y * w # Convert proportions to counts
-        if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
-           warning("the response (as counts) does not appear to ",
-                   "be integer-valued. Am rounding to integer values.")
-        ycounts <- round(ycounts) # Make sure it is an integer
-        etastart <- cbind(theta2eta(shape1, .lshape12, earg = .earg),
-                          theta2eta(shape2, .lshape12, earg = .earg))
-        mustart <- NULL  # Since etastart has been computed.
-      }
+    mustart.orig <- mustart
+    eval(binomialff()@initialize)   # Note: n,w,y,mustart is changed 
+    if (length(mustart.orig))
+      mustart <- mustart.orig  # Retain it if inputted
+    predictors.names <-
+         c(namesof("shape1", .lshape12, earg = .earg, tag = FALSE),
+           namesof("shape2", .lshape12, earg = .earg, tag = FALSE))
+
+    if (!length(etastart)) {
+
+      mustart.use <- if (length(mustart.orig)) mustart.orig else
+                    mustart
+
+      shape1 <- rep( .i1 , len = n)
+      shape2 <- if (length( .i2 )) {
+                  rep( .i2 , len = n)
+                } else if (length(mustart.orig)) {
+                  shape1 * (1 / mustart.use - 1)
+                } else if ( .imethod == 1) {
+                  shape1 * (1 / weighted.mean(y, w)  - 1)
+                } else if ( .imethod == 2) {
+                  temp777 <- .sinit * weighted.mean(y, w) +
+                            (1 - .sinit) * y
+                  shape1 * (1 / temp777 - 1)
+                } else {
+                  shape1 * (1 / weighted.mean(mustart.use, w) - 1)
+                }
+      ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+                 y * w # Convert proportions to counts
+      if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
+         warning("the response (as counts) does not appear to ",
+                 "be integer-valued. Am rounding to integer values.")
+      ycounts <- round(ycounts)  # Make sure it is an integer
+      etastart <- cbind(theta2eta(shape1, .lshape12, earg = .earg),
+                        theta2eta(shape2, .lshape12, earg = .earg))
+      mustart <- NULL  # Since etastart has been computed.
+    }
   }), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
             .nsimEIM = nsimEIM,
             .imethod = imethod, .sinit = shrinkage.init ))),
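
A minimal fitting sketch for betabinomial.ab(); it assumes the lirat data set (litter sizes N and responses R) that ships with VGAM:

    fit <- vglm(cbind(R, N - R) ~ 1, betabinomial.ab, data = lirat,
                trace = TRUE, subset = N > 1)
    coef(fit, matrix = TRUE)
    Coef(fit)    # shape1 and shape2 on the original scale
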
@@ -1513,6 +1598,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
 
     shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
     shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+
     misc$rho <- 1 / (shape1 + shape2 + 1)
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
@@ -1596,7 +1682,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
                       digamma(shape1+shape2+nvec) -
                       digamma(shape2) + digamma(shape1+shape2)
         rm(ysim)
-        temp3 <- cbind(dl.dshape1, dl.dshape2) # n x M matrix
+        temp3 <- cbind(dl.dshape1, dl.dshape2)  # n x M matrix
         run.varcov <- ((ii-1) * run.varcov +
                      temp3[, ind1$row.index]*
                      temp3[, ind1$col.index]) / ii
@@ -1631,9 +1717,9 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
   if (!is.Numeric(ishape, positive = TRUE))
     stop("bad input for argument 'ishape'")
   if (!is.Numeric(moreSummation, positive = TRUE,
-                  allowable.length = 2, integer.valued = TRUE))
+                  length.arg = 2, integer.valued = TRUE))
     stop("bad input for argument 'moreSummation'")
-  if (!is.Numeric(tolerance, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(tolerance, positive = TRUE, length.arg = 1) ||
       1.0 - tolerance >= 1.0)
       stop("bad input for argument 'tolerance'")
 
@@ -1646,7 +1732,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
             namesof("prob",  lprob,  earg = eprob), ", ",
             namesof("shape", lshape, earg = eshape)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     eval(geometric()@initialize)
@@ -1655,15 +1741,15 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
          c(namesof("prob",  .lprob,  earg = .eprob,  tag = FALSE),
            namesof("shape", .lshape, earg = .eshape, short = FALSE))
 
-      if (length( .iprob ))
-          prob.init <- rep( .iprob , len = n)
+    if (length( .iprob ))
+      prob.init <- rep( .iprob , len = n)
 
-      if (!length(etastart) ||
-          ncol(cbind(etastart)) != 2) {
-        shape.init <- rep( .ishape , len = n)
-        etastart <-
-          cbind(theta2eta(prob.init,  .lprob,  earg = .eprob),
-                theta2eta(shape.init, .lshape, earg = .eshape))
+    if (!length(etastart) ||
+      ncol(cbind(etastart)) != 2) {
+      shape.init <- rep( .ishape , len = n)
+      etastart <-
+        cbind(theta2eta(prob.init,  .lprob,  earg = .eprob),
+              theta2eta(shape.init, .lshape, earg = .eshape))
       }
   }), list( .iprob = iprob, .ishape = ishape, .lprob = lprob,
             .eprob = eprob, .eshape = eshape,
@@ -1681,7 +1767,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
     misc$earg <- list("prob" = .eprob , "shape" = .eshape )
 
     if (intercept.only) {
-      misc$shape1 <- shape1[1] # These quantities computed in @deriv
+      misc$shape1 <- shape1[1]  # These quantities computed in @deriv
       misc$shape2 <- shape2[1]
     }
     misc$expected <- TRUE
@@ -1702,10 +1788,11 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
       stop("loglikelihood residuals not implemented yet") else {
     for (ii in 1:maxy) {
       index <- (ii <= y)
-      ans[index] <- ans[index] + log1p(-prob[index]+(ii-1) *
-                    shape[index]) - log1p((ii-1)*shape[index])
+      ans[index] <- ans[index] +
+                    log1p(-prob[index] + (ii-1) * shape[index]) -
+                    log1p((ii-1) * shape[index])
     }
-    ans <- ans - log1p((y+1-1)*shape)
+    ans <- ans - log1p((y+1-1) * shape)
 
 
 
@@ -1716,23 +1803,23 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
            .eprob = eprob, .eshape = eshape ))),
   vfamily = c("betageometric"),
   deriv = eval(substitute(expression({
-    prob  <- eta2theta(eta[, 1], .lprob, earg = .eprob)
+    prob  <- eta2theta(eta[, 1], .lprob,  earg = .eprob)
     shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
     shape1 <- prob / shape; shape2 <- (1 - prob) / shape;
-    dprob.deta  <- dtheta.deta(prob,  .lprob, earg = .eprob)
+    dprob.deta  <- dtheta.deta(prob,  .lprob,  earg = .eprob)
     dshape.deta <- dtheta.deta(shape, .lshape, earg = .eshape)
     dl.dprob <- 1 / prob
     dl.dshape <- 0 * y
     maxy <- max(y)
     for (ii in 1:maxy) {
-        index <- (ii <= y)
-        dl.dprob[index] <- dl.dprob[index] -
-                           1/(1-prob[index]+(ii-1)*shape[index])
-        dl.dshape[index] <- dl.dshape[index] +
-                           (ii-1)/(1-prob[index]+(ii-1)*shape[index]) -
-                           (ii-1)/(1+(ii-1)*shape[index])
+      index <- (ii <= y)
+      dl.dprob[index] <- dl.dprob[index] -
+                         1/(1-prob[index]+(ii-1) * shape[index])
+      dl.dshape[index] <- dl.dshape[index] +
+                         (ii-1)/(1-prob[index]+(ii-1) * shape[index]) -
+                         (ii-1)/(1+(ii-1) * shape[index])
     }
-    dl.dshape <- dl.dshape - (y+1 -1)/(1+(y+1 -1)*shape)
+    dl.dshape <- dl.dshape - (y+1 -1)/(1+(y+1 -1) * shape)
     c(w) * cbind(dl.dprob * dprob.deta,
                  dl.dshape * dshape.deta)
   }), list( .lprob = lprob, .lshape = lshape,
@@ -1751,7 +1838,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
       wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] - (ii-2) * temp7 / denom1
       wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] + (ii-2)^2 * temp7 / denom1 -
                         (ii-1)^2 * temp7 / denom2
-      if (max(temp7) < .tolerance ) break;
+      if (max(temp7) < .tolerance ) break
     }
     ii <- 2
     temp7 <- 1 - pbetageom(q=ii-1-1, shape1 = shape1, shape2 = shape2)
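
A minimal sketch for betageometric(), pairing it with the companion rbetageom() sampler; the shape values are arbitrary:

    gdata <- data.frame(y = rbetageom(n = 1000, shape1 = 2, shape2 = 5))
    fit <- vglm(y ~ 1, betageometric, data = gdata, trace = TRUE)
    Coef(fit)    # prob = shape1/(shape1+shape2), shape = 1/(shape1+shape2)
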
@@ -1775,8 +1862,10 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
 
 seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
                          iprob1 = NULL,    iprob2 = NULL,
-                         parallel = FALSE, apply.parint = TRUE,
+                         parallel = FALSE,  # apply.parint = TRUE,
                          zero = NULL) {
+  apply.parint <- TRUE
+
   lprob1 <- as.list(substitute(lprob1))
   eprob1 <- link2list(lprob1)
   lprob1 <- attr(eprob1, "function.name")
@@ -1803,9 +1892,11 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
             namesof("prob1", lprob1, earg = eprob1), ", ",
             namesof("prob2", lprob2, earg = eprob2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints,
                            apply.int = .apply.parint )
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel,
             .apply.parint = apply.parint,
             .zero = zero ))),
@@ -1953,10 +2044,10 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
 
 
 
-  if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
       tol > 0.1)
       stop("bad input for argument 'tol'") 
-  if (!is.Numeric(addRidge, allowable.length = 1, positive = TRUE) ||
+  if (!is.Numeric(addRidge, length.arg = 1, positive = TRUE) ||
       addRidge > 0.5)
     stop("bad input for argument 'addRidge'") 
 
@@ -1973,7 +2064,7 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
             namesof("phi12",  lphi12,  earg = ephi12), ", ",
             namesof("oratio", loratio, earg = eoratio)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
     }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     eval(process.binomial2.data.vgam)
@@ -2161,8 +2252,8 @@ if (FALSE)
                    mu[, 1] / (mu[, 1] + mu[, 2])
       init.pee2 <- if (length( .iprob2 )) rep( .iprob2 , len = n) else
                    mu[, 3] / (mu[, 3] + mu[, 4])
-      init.rhopos <- pmax(1.1, init.pee1 / init.pee2) # Should be > 1
-      init.rhoneg <- pmin(0.4, (1 - init.pee1) / (1 - init.pee2)) # c. 0
+      init.rhopos <- pmax(1.1, init.pee1 / init.pee2)  # Should be > 1
+      init.rhoneg <- pmin(0.4, (1 - init.pee1) / (1 - init.pee2))  # c. 0
 
       if (length( .irhopos)) init.rhopos <- rep( .irhopos , len = n)
       if (length( .irhoneg)) init.rhoneg <- rep( .irhoneg , len = n)
@@ -2289,13 +2380,14 @@ if (FALSE)
 
 
 
+
  binom2.Rho <- function(rho = 0, imu1 = NULL, imu2 = NULL, 
                         exchangeable = FALSE, nsimEIM = NULL) {
   lmu12 <- "probit"
   emu12 <- list()
 
   if (is.Numeric(nsimEIM)) {
-    if (!is.Numeric(nsimEIM, allowable.length = 1,
+    if (!is.Numeric(nsimEIM, length.arg = 1,
                     integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
     if (nsimEIM <= 100)
@@ -2308,8 +2400,9 @@ if (FALSE)
             namesof("mu1", lmu12, earg = emu12), ", ",
             namesof("mu2", lmu12, earg = emu12)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1), 2, 1), x,
-                           .exchangeable , constraints,
+    constraints <- cm.vgam(matrix(c(1, 1), 2, 1), x = x,
+                           bool = .exchangeable ,
+                           constraints = constraints,
                            apply.int = TRUE)
   }), list( .exchangeable = exchangeable ))),
   deviance = Deviance.categorical.data.vgam,
@@ -2339,7 +2432,7 @@ if (FALSE)
     pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
                      eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
     rhovec <- rep( .rho , len = nrow(eta))
-    p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rhovec)
+    p11 <- pbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
     p01 <- pmin(pmargin[, 2] - p11, pmargin[, 2])
     p10 <- pmin(pmargin[, 1] - p11, pmargin[, 1])
     p00 <- 1 - p01 - p10 - p11
@@ -2382,7 +2475,7 @@ if (FALSE)
     pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
                      eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
     rhovec <- rep( .rho , len = nrow(eta))
-    p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rhovec)
+    p11 <- pbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
     p01 <- pmargin[, 2]-p11
     p10 <- pmargin[, 1]-p11
     p00 <- 1-p01-p10-p11
@@ -2401,7 +2494,7 @@ if (FALSE)
     p10[p10 < smallno] <- smallno
     p11[p11 < smallno] <- smallno
 
-    dprob00 <- dnorm2(eta[, 1], eta[, 2], rho = rhovec)
+    dprob00 <- dbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
     dl.dprob1 <- PhiB*(y[, 4]/p11-y[, 2]/p01) +
                 onemPhiB*(y[, 3]/p10-y[, 1]/p00)
     dl.dprob2 <- PhiA*(y[, 4]/p11-y[, 3]/p10) +
@@ -2479,7 +2572,7 @@ if (FALSE)
     emu12 <- emu  # list()
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -2492,10 +2585,11 @@ if (FALSE)
             namesof("mu2", lmu12, earg = emu12), ", ",
             namesof("rho", l.rho, earg = e.rho)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
-                           .exchangeable , constraints,
+    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
+                           bool = .exchangeable ,
+                           constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -2595,7 +2689,7 @@ if (FALSE)
           eta2 <- qnorm(init.mu2)
 
           smallno <- 1000 * .Machine$double.eps
-          p11 <- pmax(smallno, pnorm2(eta1, eta2, cov12 = rhoval))
+          p11 <- pmax(smallno, pbinorm(eta1, eta2, cov12 = rhoval))
           p10 <- pmax(smallno, pnorm( eta1) - p11)
           p0  <- pmax(smallno, pnorm(-eta1))
 
@@ -2646,7 +2740,7 @@ if (FALSE)
     rhovec <- eta2theta(eta[, 3], .l.rho , earg = .e.rho )
 
     smallno <- 1000 * .Machine$double.eps
-    p11 <- pmax(smallno, pnorm2(eta[, 1], eta[, 2], cov12 = rhovec))
+    p11 <- pmax(smallno, pbinorm(eta[, 1], eta[, 2], cov12 = rhovec))
     p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
     p0  <- pmax(smallno, pnorm(-eta[, 1]))
     sumprob <- p11 + p10 + p0
@@ -2680,7 +2774,7 @@ if (FALSE)
 
       smallno <- 1000 * .Machine$double.eps
       rhovec <- eta2theta(eta[, 3], .l.rho , earg = .e.rho )
-      p11 <- pmax(smallno, pnorm2(eta[, 1], eta[, 2], cov12 = rhovec))
+      p11 <- pmax(smallno, pbinorm(eta[, 1], eta[, 2], cov12 = rhovec))
       p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
       p0  <- pmax(smallno, pnorm(-eta[, 1]))
     sumprob <- p11 + p10 + p0
@@ -2705,7 +2799,7 @@ if (FALSE)
 
 
     smallno <- 1000 * .Machine$double.eps
-    p11 <- pmax(smallno, pnorm2(eta[, 1], eta[, 2], cov12 = rhovec))
+    p11 <- pmax(smallno, pbinorm(eta[, 1], eta[, 2], cov12 = rhovec))
     p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
     p0  <- pmax(smallno, pnorm(-eta[, 1]))
     sumprob <- p11 + p10 + p0
@@ -2729,7 +2823,7 @@ if (FALSE)
   mycode <- TRUE   # zz
 
  if (mycode) {
-    dprob00 <- dnorm2(eta[, 1], eta[, 2], rho = rhovec)
+    dprob00 <- dbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
     dl.dprob1 <-     PhiA *      ycounts[, 1] *      ycounts[, 2]  / p11 +
                  onemPhiA *      ycounts[, 1] * (1 - ycounts[, 2]) / p10 -
                             (1 - ycounts[, 1]) / p0
@@ -2746,9 +2840,9 @@ if (FALSE)
                               dl.dprob2 * dprob2.deta,
                               dl.drho   * drho...deta)
  } # else {
-    eta1 <- eta[, 1] # dat1 %*% params[1:X1.d2]
-    eta2 <- eta[, 2] # dat2 %*% params[(X1.d2 + 1):(X1.d2 + X2.d2)]
-    corr.st <- eta[, 3] # params[(X1.d2 + X2.d2 + 1)]
+    eta1 <- eta[, 1]  # dat1 %*% params[1:X1.d2]
+    eta2 <- eta[, 2]  # dat2 %*% params[(X1.d2 + 1):(X1.d2 + X2.d2)]
+    corr.st <- eta[, 3]  # params[(X1.d2 + X2.d2 + 1)]
     corr <- rhovec # tanh(corr.st)
 
     dat <- ycounts
@@ -2761,12 +2855,12 @@ if (FALSE)
     A <- pnorm((eta2 - corr * eta1) * d.r)
     A.c <- 1 - A
     B <- pnorm((eta1 - corr * eta2) * d.r)
-    p11 <- pmax(pnorm2(eta1, eta2, cov12 = corr), 1000 * .Machine$double.eps)
+    p11 <- pmax(pbinorm(eta1, eta2, cov12 = corr), 1000 * .Machine$double.eps)
     p10 <- pmax(pnorm( eta1) - p11, 1000 * .Machine$double.eps)
     p0  <- pmax(pnorm(-eta1), 1000 * .Machine$double.eps)
     d.n1 <- dnorm(eta1)
     d.n2 <- dnorm(eta2)
-    d.n1n2 <- dnorm2(eta1, eta2, rho = corr)
+    d.n1n2 <- dbinorm(eta1, eta2, cov12 = corr)
     drh.drh.st <- 4 * exp(2 * corr.st)/(exp(2 * corr.st) + 1)^2
 
     dl.dbe1 <- d.n1 * (y1.y2/p11 * A + y1.cy2/p10 * A.c - cy1/p0)
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 6d0ea6b..68fc84f 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -13,6 +13,921 @@
 
 
 
+dbiclaytoncop <- function(x1, x2, alpha = 0, log = FALSE){
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  A <- x1^(-alpha) + x2^(-alpha) - 1
+  logdensity <- log1p(alpha) -
+                (1 + alpha) * (log(x1) + log(x2)) - 
+                (2 + 1 / alpha) * log(abs(A))  # Avoid warning
+
+  out.square <- (x1 < 0) | (x1 > 1) | (x2 < 0) | (x2 > 1)
+  logdensity[out.square] <- log(0.0)
+
+
+  index0 <- (rep(alpha, length = length(A)) < sqrt(.Machine$double.eps))
+  if (any(index0))
+    logdensity[index0] <- log(1.0)
+
+
+  index1 <- (rep(alpha, length = length(A)) < 0.0) | (A < 0.0)
+  if (any(index1))
+    logdensity[index1] <- NaN
+
+
+
+
+
+
+
+
+  if (log.arg) logdensity else exp(logdensity)
+}
+
+
+
+rbiclaytoncop <- function(n, alpha = 0) {
+  if (any(alpha < 0))
+    stop("argument 'alpha' must be greater or equal to 0")
+
+  u1 <- runif(n = n)
+  v2 <- runif(n = n)
+
+  u2 <- (u1^(-alpha) *
+        (v2^(-alpha / (1 + alpha)) - 1) + 1)^(-1 / alpha)
+
+
+  index0 <- (rep(alpha, length = length(u1)) < sqrt(.Machine$double.eps))
+  if (any(index0))
+    u2[index0] <- runif(sum(index0))
+
+  cbind(u1, u2)
+}
+
+
+
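+
A small sketch pairing the new Clayton copula sampler and density; alpha = 2 is arbitrary:

    set.seed(1)
    cdata <- rbiclaytoncop(n = 5, alpha = 2)   # two columns of uniforms in (0, 1)
    dbiclaytoncop(cdata[, 1], cdata[, 2], alpha = 2, log = TRUE)
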
+ biclaytoncop <- function(lalpha    = "loge",
+                          ialpha    = NULL,
+                          imethod   = 1,
+                          parallel  = FALSE,
+                          zero = NULL) {
+  
+  apply.parint <- TRUE
+
+
+  lalpha <- as.list(substitute(lalpha))
+  ealpha <- link2list(lalpha)
+  lalpha <- attr(ealpha, "function.name")
+
+
+  if (length(ialpha) && any(ialpha <= 0))
+    stop("argument 'ialpha' must have values in (0, Inf)")
+
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) || imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
+  new("vglmff",
+  blurb = c(" bivariate clayton copula distribution)\n","Links:    ",
+                namesof("alpha", lalpha, earg = ealpha)),
+
+  constraints = eval(substitute(expression({
+        constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                               bool = .parallel , 
+                               constraints = constraints,
+                               apply.int = .apply.parint )
+
+        dotzero <- .zero
+        Musual <- 1
+        Yusual <- 2
+        eval(negzero.expression)
+  }), list( .zero = zero,
+            .apply.parint = apply.parint,
+            .parallel = parallel ))),
+
+  infos = eval(substitute(function(...) {
+        list(Musual = 1,
+             Yusual = 2,
+             apply.parint = .apply.parint,
+             parallel = .parallel,
+             zero = .zero )
+      }, list( .zero = zero,
+               .apply.parint = apply.parint, 
+               .parallel = parallel ))),
+
+  initialize = eval(substitute(expression({
+    Musual <- 1
+    Yusual <- 2
+
+    temp5 <-
+      w.y.check(w = w, y = y,
+                Is.positive.y = TRUE,
+                ncol.w.max = Inf,
+                ncol.y.max = Inf,
+                ncol.y.min = Yusual,
+                out.wy = TRUE,
+                colsyperw = Yusual,
+                maximize = TRUE)
+
+    w <- temp5$w
+    y <- temp5$y
+
+
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    extra$Yusual <- Yusual
+    M <- Musual * (ncoly / Yusual)
+    mynames1 <- paste("alpha", if (M / Musual > 1) 1:(M / Musual) else "",
+                      sep = "")
+    predictors.names <- c(
+      namesof(mynames1, .lalpha , earg = .ealpha , short = TRUE))
+
+
+    extra$dimnamesy1 <- dimnames(y)[[1]]
+    if (length(dimnames(y)))
+      extra$dimnamesy2 <- dimnames(y)[[2]]
+    
+    if (!length(etastart)) {
+      
+      alpha.init <- matrix(if (length( .ialpha )) .ialpha else 0 + NA,
+                           n, M / Musual, byrow = TRUE)
+
+      if (!length( .ialpha ))
+        for (spp. in 1:(M / Musual)) {
+          ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
+
+              
+              
+          alpha.init0 <- if ( .imethod == 1) {
+            k.tau <- kendall.tau(ymatj[, 1], ymatj[, 2], exact = FALSE,
+                                 max.n = 500)
+
+            max(0.1, 2 * k.tau / (1 - k.tau))  # Must be positive
+          } else if ( .imethod == 2) {
+            spearman.rho <-  max(0.05, cor(ymatj[, 1],
+                                           ymatj[, 2], meth = "spearman"))
+            rhobit(spearman.rho)
+          } else {
+            pearson.rho <- max(0.05, cor(ymatj[, 1], ymatj[, 2]))
+            rhobit(pearson.rho)
+          }
+
+
+
+
+          if (any(is.na(alpha.init[, spp.])))
+            alpha.init[, spp.] <- alpha.init0
+        }
+          
+      etastart <- theta2eta(alpha.init, .lalpha , earg = .ealpha )
+    }
+  }), list( .imethod = imethod,
+                .lalpha = lalpha,
+                .ealpha = ealpha,
+                .ialpha = ialpha ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+
+        eta <- as.matrix(eta)
+        fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
+        
+        
+        if (length(extra$dimnamesy2))
+          dimnames(fv.matrix) <- list(extra$dimnamesy1,
+                                      extra$dimnamesy2)
+        fv.matrix
+      }  , list( .lalpha = lalpha,
+                 .ealpha = ealpha ))),
+
+  last = eval(substitute(expression({
+        
+    Musual <- extra$Musual
+    Yusual <- extra$Yusual
+    misc$link <- rep( .lalpha , length = M)
+    temp.names <- mynames1
+    names(misc$link) <- temp.names
+    
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:M) {
+      misc$earg[[ii]] <- .ealpha
+    }
+
+    misc$Musual <- Musual
+    misc$Yusual <- Yusual
+    misc$imethod <- .imethod
+    misc$expected <- TRUE
+    misc$parallel  <- .parallel
+    misc$apply.parint <- .apply.parint
+    misc$multipleResponses <- TRUE
+
+  }) , list( .imethod = imethod,
+             .parallel = parallel, .apply.parint = apply.parint,
+             .lalpha = lalpha,
+             .ealpha = ealpha ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+      Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+
+      if (residuals) stop("loglikelihood residuals not ", 
+                          "implemented yet") else {
+
+      sum(c(w) * dbiclaytoncop(x1  = c(y[, c(TRUE, FALSE)]),
+                               x2  = c(y[, c(FALSE, TRUE)]),
+                               alpha = c(Alpha), log = TRUE))
+      }
+    } , list( .lalpha = lalpha,
+              .ealpha = ealpha,
+              .imethod = imethod ))),
+  vfamily = c("biclaytoncop"),
+  deriv = eval(substitute(expression({
+    Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+    Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
+    Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+
+
+
+
+    
+    AA <- y[, Yindex1]^(-Alpha) + y[, Yindex2]^(-Alpha) - 1
+    dAA.dalpha <- -y[, Yindex1]^(-Alpha) * log(y[, Yindex1]) -
+                   y[, Yindex2]^(-Alpha) * log(y[, Yindex2])
+    dl.dalpha <- 1 / (1 + Alpha) - log(y[, Yindex1] * y[, Yindex2]) -
+                 dAA.dalpha / AA * (2 + 1 / Alpha ) + log(AA) / Alpha^2
+   
+
+
+    dalpha.deta <- dtheta.deta(Alpha, .lalpha , earg = .ealpha )
+
+    dl.deta <- c(w) * cbind(dl.dalpha) * dalpha.deta
+    dl.deta
+  }), list( .lalpha = lalpha,
+            .ealpha = ealpha,
+            .imethod = imethod ))),
+
+  weight = eval(substitute(expression({
+
+
+    par <- Alpha +1 #20130808
+    denom1 <- (3 * par -2) * (2 * par - 1)
+    denom2 <- 2 * (par - 1)
+    v1 <- trigamma(1 / (denom2))
+    v2 <- trigamma(par / (denom2))
+    v3 <- trigamma((2 * par - 1) / (denom2))
+    Rho. <- 1 / denom1 * (1 + par / (denom2) * (v1 - v2) +
+            1 / (denom2) * (v2 - v3))
+    
+    out <- 1 / par^2 + 2 / (par * (par - 1) * (2 * par - 1)) +
+           4 * par / (3 * par - 2) - 2 * (2 * par - 1) * Rho. / (par - 1)
+    ned2l.dalpha  <- out
+
+
+    wz <- ned2l.dalpha * dalpha.deta^2
+    c(w) * wz
+  }), list( .lalpha = lalpha,
+            .ealpha = ealpha,
+            .imethod = imethod ))))
+}
+
+
+
+
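+
A minimal intercept-only fit of biclaytoncop() to simulated copula data; the true alpha of 2 is arbitrary:

    set.seed(2)
    cdata <- data.frame(rbiclaytoncop(n = 1000, alpha = 2))  # columns u1, u2
    fit <- vglm(cbind(u1, u2) ~ 1, biclaytoncop, data = cdata, trace = TRUE)
    Coef(fit)    # should be near 2
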
+
+
+
+
+
+
+
+
+
+
+
+
+
+dbistudentt <- function(x1, x2, df, rho = 0, log = FALSE) {
+
+
+
+
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  logdensity <-
+    -(df/2 + 1) * log1p(
+    (x1^2 + x2^2 - 2 * rho * x1 * x2) / (df * (1 - rho^2))) -
+    log(2 * pi) - 0.5 * log1p(-rho^2)  # -
+
+  logdensity[df <= 0] <- NaN  # Not picked up by dt().
+
+  if (log.arg) logdensity else exp(logdensity)
+}
+
+
+
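+
A rough numerical check of dbistudentt(): the density should integrate to about 1 over a wide grid (the grid bounds and step are arbitrary):

    ff <- function(x1, x2) dbistudentt(x1, x2, df = 4, rho = 0.5)
    gg <- seq(-30, 30, by = 0.05)
    sum(outer(gg, gg, ff)) * 0.05^2   # close to 1
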
+
+if (FALSE)
+bistudent.deriv.dof <-  function(u, v, nu, rho) {
+
+  
+  t1 <- qt(u, nu, 1, 0)
+  t2 <- qt(v, nu, 1, 0)
+  t3 <- -(nu + 2.0) / 2.0
+  t10 <- nu * (1.0 - rho * rho)
+  t4 <- -2.0 * t1 * t2 / t10
+  t11 <- (t1 * t1 + t2 * t2 - 2.0 * rho * t1 * t2)
+  t5 <- 2.0 * t11 * rho / t10 / (1.0 - rho * rho)
+  t6 <- 1.0 + (t11 / t10)
+  t7 <- rho / (1.0 - rho * rho)
+  out <- (t3 * (t4 + t5) / t6  +  t7)
+}
+
+
+
+
+
+
+
+ bistudentt <-
+   function(ldf     = "loglog",
+            lrho    = "rhobit",
+            idf     = NULL,
+            irho    = NULL,
+            imethod = 1,
+            parallel = FALSE,
+            zero = -1) {
+
+
+
+
+  apply.parint <- TRUE
+
+  ldof <- as.list(substitute(ldf))
+  edof <- link2list(ldof)
+  ldof <- attr(edof, "function.name")
+
+  lrho <- as.list(substitute(lrho))
+  erho <- link2list(lrho)
+  lrho <- attr(erho, "function.name")
+
+
+  idof <- idf
+  if (length(idof) &&
+      any(idof <= 1))
+    stop("argument 'idf' must have values in (1,Inf)")
+
+
+  if (length(irho) &&
+      any(abs(irho) >= 1))
+    stop("argument 'irho' must have values in (-1,1)")
+
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
+
+  new("vglmff",
+  blurb = c("Bivariate student-t distribution\n",
+            "Links:    ",
+            namesof("df",  ldof, earg = edof), ", ",
+            namesof("rho", lrho, earg = erho)),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
+                           apply.int = .apply.parint )
+
+    dotzero <- .zero
+    Musual <- 2
+    Yusual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero,
+            .apply.parint = apply.parint,
+            .parallel = parallel ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         Yusual = 2,
+         apply.parint = .apply.parint ,
+         parallel = .parallel ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .apply.parint = apply.parint, 
+           .parallel = parallel ))),
+
+  initialize = eval(substitute(expression({
+    Musual <- 2
+    Yusual <- 2
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              ncol.y.min = Yusual,
+              out.wy = TRUE,
+              colsyperw = Yusual,
+              maximize = TRUE)
+
+    w <- temp5$w
+    y <- temp5$y
+
+
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    extra$Yusual <- Yusual
+    M <- Musual * (ncoly / Yusual)
+    mynames1 <- paste("df",  if (M / Musual > 1) 1:(M / Musual) else "",
+                      sep = "")
+    mynames2 <- paste("rho", if (M / Musual > 1) 1:(M / Musual) else "",
+                      sep = "")
+    predictors.names <- c(
+      namesof(mynames1, .ldof , earg = .edof , short = TRUE),
+      namesof(mynames2, .lrho , earg = .erho , short = TRUE))[
+              interleave.VGAM(M, M = Musual)]
+
+
+    extra$dimnamesy1 <- dimnames(y)[[1]]
+    if (length(dimnames(y)))
+      extra$dimnamesy2 <- dimnames(y)[[2]]
+
+    if (!length(etastart)) {
+
+      dof.init <- matrix(if (length( .idof )) .idof else 0 + NA,
+                         n, M / Musual, byrow = TRUE)
+      rho.init <- matrix(if (length( .irho )) .irho else 0 + NA,
+                         n, M / Musual, byrow = TRUE)
+
+      if (!length( .idof ) || !length( .irho ))
+      for (spp. in 1:(M / Musual)) {
+        ymatj <- y[, (Musual * spp. - 1):(Musual * spp.)]
+
+
+        dof.init0 <- if ( .imethod == 1) {
+
+
+          2 + rexp(n = 1, rate = 0.1)
+        } else {
+          10
+        }
+
+        if (any(is.na(dof.init[, spp.])))
+          dof.init[, spp.] <- dof.init0
+
+
+        rho.init0 <- if ( .imethod == 2) {
+          runif(n, min = -1 + 0.1, max = 1 - 0.1)
+        } else {
+          cor(ymatj[, 1], ymatj[, 2])
+        }
+
+        if (any(is.na(rho.init[, spp.])))
+          rho.init[, spp.] <- rho.init0
+
+      }
+
+      etastart <-
+        cbind(theta2eta(dof.init, .ldof , earg = .edof ),
+              theta2eta(rho.init, .lrho , earg = .erho ))
+
+      etastart <- etastart[, interleave.VGAM(M, M = Musual)]
+
+    }
+  }), list( .imethod = imethod,
+            .lrho = lrho, .ldof = ldof,
+            .erho = erho, .edof = edof,
+            .idof = idof, .irho = irho ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+
+    eta <- as.matrix(eta)
+    fv.matrix <- matrix(0.0, nrow(eta), extra$ncoly)
+
+
+    if (length(extra$dimnamesy2))
+      dimnames(fv.matrix) <- list(extra$dimnamesy1,
+                                  extra$dimnamesy2)
+    fv.matrix
+  }  , list( .lrho = lrho, .ldof = ldof,
+             .erho = erho, .edof = edof ))),
+
+  last = eval(substitute(expression({
+
+    Musual <- extra$Musual
+    Yusual <- extra$Yusual
+    misc$link <-
+      c(rep( .ldof , length = M / Musual),
+        rep( .lrho , length = M / Musual))[
+                       interleave.VGAM(M, M = Musual)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:(M / Musual)) {
+      misc$earg[[Musual*ii-1]] <- .edof
+      misc$earg[[Musual*ii  ]] <- .erho
+    }
+
+    misc$Musual <- Musual
+    misc$Yusual <- Yusual
+    misc$imethod <- .imethod
+    misc$expected <- TRUE
+    misc$parallel  <- .parallel
+    misc$apply.parint <- .apply.parint
+    misc$multipleResponses <- TRUE
+
+  }) , list( .imethod = imethod,
+             .parallel = parallel,
+             .apply.parint = apply.parint,
+             .lrho = lrho, .ldof = ldof,
+             .erho = erho, .edof = edof ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                     .ldof , earg = .edof )
+    Rho <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                     .lrho , earg = .erho )
+
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
+      Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+      sum(c(w) * dbistudentt(x1  = y[, Yindex1, drop = FALSE],
+                             x2  = y[, Yindex2, drop = FALSE],
+                             df  = Dof,
+                             rho = Rho, log = TRUE))
+    }
+  } , list( .lrho = lrho, .ldof = ldof,
+            .erho = erho, .edof = edof,
+            .imethod = imethod ))),
+  vfamily = c("bistudentt"),
+  deriv = eval(substitute(expression({
+    Musual <- Yusual <- 2
+    Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                     .ldof , earg = .edof )
+    Rho <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                     .lrho , earg = .erho )
+    Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
+    Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+
+
+    x1 <- c(y[, Yindex1])  # Convert into a vector
+    x2 <- c(y[, Yindex2])
+
+    dee3 <- deriv3( ~
+        -(Dof/2 + 1) * log(1 +
+        (x1^2 + x2^2 - 2 * Rho * x1 * x2) / (Dof * (1 - Rho^2))) -
+        log(2 * pi) - 0.5 * log(1 - Rho^2),
+        namevec = c("Dof", "Rho"), hessian = FALSE)
+    eval.d3 <- eval(dee3)
+
+    dl.dthetas <-  attr(eval.d3, "gradient")
+   
+    dl.ddof <- matrix(dl.dthetas[, "Dof"], n, length(Yindex1))
+    dl.drho <- matrix(dl.dthetas[, "Rho"], n, length(Yindex2))
+
+  
+  if (FALSE) {
+    dd <- cbind(y, Rho, Dof)
+    pp <- apply(dd, 1, function(x)
+                BiCopPDF(x[1], x[2], family = 2, x[3], x[4]))
+    alt.dl.ddof <- apply(dd, 1, function(x)
+                     BiCopDeriv(x[1], x[2], family = 2,
+                                x[3], x[4], "par2")) / pp
+    alt.dl.drho <- apply(dd, 1, function(x)
+                     BiCopDeriv(x[1], x[2], family = 2,
+                                x[3], x[4], "par")) / pp
+
+ print("head(dl.ddof)")
+ print( head(dl.ddof) )
+ print("head(alt.dl.ddof)")
+ print( head(alt.dl.ddof) )
+
+ print("max(abs(alt.dl.drho - dl.drho))")
+ print( max(abs(alt.dl.drho - dl.drho)) )
+ print("max(abs(alt.dl.ddof - dl.ddof))")
+ print( max(abs(alt.dl.ddof - dl.ddof)) )
+    
+  }
+
+
+
+
+
+    ddof.deta <- dtheta.deta(Dof, .ldof , earg = .edof )
+    drho.deta <- dtheta.deta(Rho, .lrho , earg = .erho )
+
+    ans <- c(w) * cbind(dl.ddof * ddof.deta,
+                        dl.drho * drho.deta)
+    ans <- ans[, interleave.VGAM(M, M = Musual)]
+    ans
+  }), list( .lrho = lrho, .ldof = ldof,
+            .erho = erho, .edof = edof,
+            .imethod = imethod ))),
+
+  weight = eval(substitute(expression({
+    wz11 <- beta(2, Dof / 2) / Dof -
+            beta(3, Dof / 2) * (Dof + 2) / (4 * Dof)
+    wz12 <- -Rho / (2 * (1 - Rho^2)) * (beta(2, Dof / 2) -
+            beta(3, Dof / 2) * (Dof + 2) / 2)
+    wz22 <- (1 + Rho^2) / (1 - Rho^2)^2 +
+            (Dof^2 + 2 * Dof) * Rho^2 *
+             beta(3, Dof / 2) / (4 * (1 - Rho^2)^2)
+    wz22 <- wz22 + (Dof^2 + 2 * Dof) * (2 - 3 * Rho^2 + Rho^6) *   
+            beta(3, Dof / 2) / (16 * (1 - Rho^2)^4)
+    wz22 <- wz22 + (Dof^2 + 2 * Dof) * (1 + Rho^2) *    # Replace - by + ???
+            beta(2, Dof / 2) / (4 * (1 - Rho^2)^2)  # denom == 4 or 2 ???
+    ned2l.ddof2   <- wz11
+    ned2l.ddofrho <- wz12
+    ned2l.drho2   <- wz22
+
+    wz <- array(c(c(w) * ned2l.ddof2 * ddof.deta^2,
+                  c(w) * ned2l.drho2 * drho.deta^2,
+                  c(w) * ned2l.ddofrho * ddof.deta * drho.deta),
+                dim = c(n, M / Musual, 3))
+    wz <- arwz2wz(wz, M = M, Musual = Musual)
+    wz
+  }), list( .lrho = lrho, .ldof = ldof,
+            .erho = erho, .edof = edof,
+            .imethod = imethod ))))
+}
+
+
+
+
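+
A hedged fitting sketch for bistudentt(); the data generation assumes the mvtnorm package and arbitrary true values (df = 4, rho = 0.5):

    library(mvtnorm)
    set.seed(3)
    tdata <- data.frame(rmvt(n = 1000, sigma = matrix(c(1, 0.5, 0.5, 1), 2, 2),
                             df = 4))
    colnames(tdata) <- c("y1", "y2")
    fit <- vglm(cbind(y1, y2) ~ 1, bistudentt, data = tdata, trace = TRUE)
    Coef(fit)    # fitted df and rho
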
+  
+
+
+dbinormcop <- function(x1, x2, rho = 0, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  x1 <- qnorm(x1)
+  x2 <- qnorm(x2)
+
+  logdensity <- (2 * rho * x1 * x2 -
+                 rho^2 * (x1^2 + x2^2)) / (2 * (1 - rho^2)) -
+                0.5 * log1p(-rho^2)
+
+  if (log.arg) logdensity else exp(logdensity)
+}
+
+
+
+
+pbinormcop <- function(q1, q2, rho = 0) {
+
+  if (!is.Numeric(q1, positive = TRUE) ||
+      any(q1 >= 1))
+    stop("bad input for argument 'q1'")
+  if (!is.Numeric(q2, positive = TRUE) ||
+      any(q2 >= 1))
+    stop("bad input for argument 'q2'")
+  if (!is.Numeric(rho) ||
+      any(abs(rho) >= 1))
+    stop("bad input for argument 'rho'")
+
+  pnorm2(x1 = qnorm(q1),
+         x2 = qnorm(q2),
+         cov12 = rho)
+}
+
+
+rbinormcop <- function(n, rho = 0) {
+
+  ymat <- rbinorm(n = n, cov12 = rho)
+  cbind(y1 = pnorm(ymat[, 1]),
+        y2 = pnorm(ymat[, 2]))
+}
+
+
+
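+
A minimal fit of the Gaussian copula family to data simulated with rbinormcop(); the true rho of 0.7 is arbitrary:

    set.seed(4)
    ndata <- data.frame(rbinormcop(n = 1000, rho = 0.7))  # columns y1, y2
    fit <- vglm(cbind(y1, y2) ~ 1, binormalcop, data = ndata, trace = TRUE)
    Coef(fit)    # should be near 0.7
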
+
+
+ binormalcop <- function(lrho    = "rhobit",
+                         irho    = NULL,
+                         imethod = 1,
+                         parallel = FALSE,
+                         zero = NULL) {
+
+
+
+  apply.parint <- TRUE
+
+
+  lrho <- as.list(substitute(lrho))
+  erho <- link2list(lrho)
+  lrho <- attr(erho, "function.name")
+
+
+  if (length(irho) &&
+      any(abs(irho) >= 1))
+    stop("argument 'irho' must have values in (-1,1)")
+
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
+  new("vglmff",
+  blurb = c("Gaussian copula (based on the bivariate normal distribution)\n",
+            "Links:    ",
+            namesof("rho", lrho, earg = erho)),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
+                           apply.int = .apply.parint )
+
+    dotzero <- .zero
+    Musual <- 1
+    Yusual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero,
+            .apply.parint = apply.parint,
+            .parallel = parallel ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 1,
+         Yusual = 2,
+         apply.parint = .apply.parint ,
+         parallel = .parallel ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .apply.parint = apply.parint, 
+           .parallel = parallel ))),
+
+  initialize = eval(substitute(expression({
+    Musual <- 1
+    Yusual <- 2
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              ncol.y.min = Yusual,
+              out.wy = TRUE,
+              colsyperw = Yusual,
+              maximize = TRUE)
+
+    w <- temp5$w
+    y <- temp5$y
+
+
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    extra$Yusual <- Yusual
+    M <- Musual * (ncoly / Yusual)
+    mynames1 <- paste("rho", if (M / Musual > 1) 1:(M / Musual) else "",
+                      sep = "")
+    predictors.names <- c(
+      namesof(mynames1, .lrho , earg = .erho , short = TRUE))
+
+
+    extra$dimnamesy1 <- dimnames(y)[[1]]
+    if (length(dimnames(y)))
+      extra$dimnamesy2 <- dimnames(y)[[2]]
+
+    if (!length(etastart)) {
+
+      rho.init <- matrix(if (length( .irho )) .irho else 0 + NA,
+                         n, M / Musual, byrow = TRUE)
+
+      if (!length( .irho ))
+      for (spp. in 1:(M / Musual)) {
+        ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
+
+
+        rho.init0 <- if ( .imethod == 1) {
+          sin(kendall.tau(ymatj[, 1], ymatj[, 2],
+                          exact = FALSE,
+                          max.n = 200) * pi / 2)
+        } else if ( .imethod == 2) {
+          sin(cor(ymatj[, 1], ymatj[, 2],
+                  method = "spearman") * pi / 6) * 2
+        } else {
+          cor(ymatj[, 1], ymatj[, 2])
+        }
+
+
+
+
+
+        if (any(is.na(rho.init[, spp.])))
+          rho.init[, spp.] <- rho.init0
+      }
+
+      etastart <- theta2eta(rho.init, .lrho , earg = .erho )
+    }
+  }), list( .imethod = imethod,
+            .lrho = lrho,
+            .erho = erho,
+            .irho = irho ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+
+    eta <- as.matrix(eta)
+    fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
+
+
+    if (length(extra$dimnamesy2))
+      dimnames(fv.matrix) <- list(extra$dimnamesy1,
+                                  extra$dimnamesy2)
+    fv.matrix
+  }  , list( .lrho = lrho,
+             .erho = erho ))),
+
+  last = eval(substitute(expression({
+
+    Musual <- extra$Musual
+    Yusual <- extra$Yusual
+    misc$link <- rep( .lrho , length = M)
+    temp.names <- mynames1
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:M) {
+      misc$earg[[ii]] <- .erho
+    }
+
+    misc$Musual <- Musual
+    misc$Yusual <- Yusual
+    misc$imethod <- .imethod
+    misc$expected <- TRUE
+    misc$parallel  <- .parallel
+    misc$apply.parint <- .apply.parint
+    misc$multipleResponses <- TRUE
+
+  }) , list( .imethod = imethod,
+             .parallel = parallel,
+             .apply.parint = apply.parint,
+             .lrho = lrho,
+             .erho = erho ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    Rho <- eta2theta(eta, .lrho , earg = .erho )
+
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
+      Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+      sum(c(w) * dbinormcop(x1  = y[, Yindex1, drop = FALSE],
+                            x2  = y[, Yindex2, drop = FALSE],
+                            rho = Rho, log = TRUE))
+    }
+  } , list( .lrho = lrho,
+            .erho = erho,
+            .imethod = imethod ))),
+  vfamily = c("binormalcop"),
+  deriv = eval(substitute(expression({
+    Rho <- eta2theta(eta, .lrho , earg = .erho )
+    Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
+    Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+
+    temp7 <- 1 - Rho^2
+    q.y <- qnorm(y)
+
+    dl.drho <- ((1 + Rho^2) * q.y[, Yindex1] * q.y[, Yindex2] -
+                Rho * (q.y[, Yindex1]^2 + q.y[, Yindex2]^2)) / temp7^2 +
+                Rho / temp7
+
+    drho.deta <- dtheta.deta(Rho, .lrho , earg = .erho )
+
+    c(w) * cbind(dl.drho) * drho.deta
+  }), list( .lrho = lrho,
+            .erho = erho,
+            .imethod = imethod ))),
+
+  weight = eval(substitute(expression({
+    ned2l.drho  <- (1 + Rho^2) / temp7^2
+    wz <- ned2l.drho * drho.deta^2
+    c(w) * wz
+  }), list( .lrho = lrho,
+            .erho = erho,
+            .imethod = imethod ))))
+}
+
+
+
+
+
+
+
+
 bilogistic4.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
@@ -35,9 +950,9 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2) stop("imethod must be 1 or 2")
+     imethod > 2) stop("argument 'imethod' must be 1 or 2")
 
   new("vglmff",
   blurb = c("Bivariate logistic distribution\n\n",
@@ -49,7 +964,7 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
             "\n", "\n",
             "Means:     location1, location2"),
   constraints = eval(substitute(expression({
-    constraints = cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -85,10 +1000,10 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
         scale.init2 <- const4 * sum(c(w) *(y[, 2] - locat.init2)^2)
       }
       loc1.init <- if (length( .iloc1 ))
-                   rep( .iloc1, length.out = n) else
+                   rep( .iloc1 , length.out = n) else
                    rep(locat.init1, length.out = n)
       loc2.init <- if (length( .iloc2 ))
-                   rep( .iloc2, length.out = n) else
+                   rep( .iloc2 , length.out = n) else
                    rep(locat.init2, length.out = n)
       scale1.init <- if (length( .iscale1 ))
                      rep( .iscale1, length.out = n) else
@@ -140,8 +1055,9 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
 
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else
-    sum(c(w) * (-zedd1 - zedd2 - 3 * log1p(exp(-zedd1)+exp(-zedd2)) -
-             log(Scale1) - log(Scale2)))
+    sum(c(w) * (-zedd1 - zedd2 -
+                3 * log1p(exp(-zedd1) + exp(-zedd2)) -
+                log(Scale1) - log(Scale2)))
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   vfamily = c("bilogistic4"),
@@ -213,14 +1129,14 @@ dbilogis4 <- function(x1, x2, loc1 = 0, scale1 = 1,
     L <- max(length(x1), length(x2),
              length(loc1), length(loc2),
              length(scale1), length(scale2))
-    x1     <- rep(x1,     length.out = L);
-    x2     <- rep(x2,     length.out = L);
-    loc1   <- rep(loc1,   length.out = L);
-    loc2   <- rep(loc2,   length.out = L);
-    scale1 <- rep(scale1, length.out = L);
-    scale2 <- rep(scale2, length.out = L);
-    zedd1 <- (-(x1-loc1)/scale1)
-    zedd2 <- (-(x2-loc2)/scale2)
+    if (length(x1    ) != L) x1     <- rep(x1,     length.out = L)
+    if (length(x2    ) != L) x2     <- rep(x2,     length.out = L)
+    if (length(loc1  ) != L) loc1   <- rep(loc1,   length.out = L)
+    if (length(loc2  ) != L) loc2   <- rep(loc2,   length.out = L)
+    if (length(scale1) != L) scale1 <- rep(scale1, length.out = L)
+    if (length(scale2) != L) scale2 <- rep(scale2, length.out = L)
+    zedd1 <- (-(x1 - loc1) / scale1)
+    zedd2 <- (-(x2 - loc2) / scale2)
     logdensity <- log(2) + zedd1 + zedd2 - log(scale1) -
                   log(scale2) - 3 * log1p(exp(zedd1) + exp(zedd2))
     if (log.arg) logdensity else exp(logdensity)
@@ -240,6 +1156,8 @@ pbilogis4 <-
 
 
 rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
+
+
   y1 <- rlogis(n = n, location = loc1, scale = scale1)
   ezedd1 <- exp(-(y1-loc1)/scale1)
   y2 <- loc2 - scale2 * log(1/sqrt(runif(n) / (1 + ezedd1)^2) - 1 - ezedd1)
@@ -250,6 +1168,7 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
 
 
+
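[Aside, not part of the upstream patch: a minimal usage sketch for the bivariate
logistic functions above, assuming the VGAM package is attached. The marginals of
rbilogis4() are logistic with the stated locations and scales, so a quick simulation
check could look like this.]

set.seed(1)
yy <- rbilogis4(n = 5000, loc1 = 1, scale1 = 2, loc2 = -1, scale2 = 0.5)
colMeans(yy)      # roughly c(1, -1): logistic marginal means equal the locations
apply(yy, 2, sd)  # roughly pi * c(2, 0.5) / sqrt(3): the logistic marginal SDs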
  freund61 <- function(la  = "loge",
                       lap = "loge",
                       lb  = "loge",
@@ -284,10 +1203,11 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
             namesof("b",  lb,  earg = eb ), ", ",
             namesof("bp", lbp, earg = ebp)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1,0,0, 0,0, 1, 1), M, 2), x,
-                           .independent, constraints,
+    constraints <- cm.vgam(matrix(c(1, 1,0,0, 0,0, 1, 1), M, 2), x = x,
+                           bool = .independent ,
+                           constraints = constraints,
                            apply.int = TRUE)
-    constraints = cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list(.independent = independent, .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -412,7 +1332,7 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
     d33 <- (1-py1.lt.y2) / beta^2
     d44 <- py1.lt.y2 / betap^2
 
-    wz <- matrix(0, n, M) # diagonal
+    wz <- matrix(0, n, M)  # diagonal
     wz[, iam(1, 1, M)] <- dalpha.deta^2  * d11
     wz[, iam(2, 2, M)] <- dalphap.deta^2 * d22
     wz[, iam(3, 3, M)] <- dbeta.deta^2   * d33
@@ -430,14 +1350,14 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
 
 
- bivgamma.mckay <- function(lscale = "loge",
-                            lshape1 = "loge",
-                            lshape2 = "loge",
-                            iscale = NULL,
-                            ishape1 = NULL,
-                            ishape2 = NULL,
-                            imethod = 1,
-                            zero = 1) {
+ bigamma.mckay <- function(lscale = "loge",
+                           lshape1 = "loge",
+                           lshape2 = "loge",
+                           iscale = NULL,
+                           ishape1 = NULL,
+                           ishape2 = NULL,
+                           imethod = 1,
+                           zero = 1) {
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
@@ -453,15 +1373,15 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
   if (!is.null(iscale))
     if (!is.Numeric(iscale, positive = TRUE))
-      stop("'iscale' must be positive or NULL")
+      stop("argument 'iscale' must be positive or NULL")
   if (!is.null(ishape1))
     if (!is.Numeric(ishape1, positive = TRUE))
-      stop("'ishape1' must be positive or NULL")
+      stop("argument 'ishape1' must be positive or NULL")
   if (!is.null(ishape2))
     if (!is.Numeric(ishape2, positive = TRUE))
-      stop("'ishape2' must be positive or NULL")
+      stop("argument 'ishape2' must be positive or NULL")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2.5)
     stop("argument 'imethod' must be 1 or 2")
@@ -584,7 +1504,7 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
                y[, 2] / a))
   }, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
            .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
-  vfamily = c("bivgamma.mckay"),
+  vfamily = c("bigamma.mckay"),
   deriv = eval(substitute(expression({
     aparam <- eta2theta(eta[, 1], .lscale  ,  .escale )
     shape1 <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
@@ -633,41 +1553,43 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
 
 
-rfrank <- function(n, alpha) {
+rbifrankcop <- function(n, alpha) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
   if (!is.Numeric(alpha, positive = TRUE))
     stop("bad input for argument 'alpha'")
   alpha <- rep(alpha, length.out = use.n)
   U <- runif(use.n)
   V <- runif(use.n)
+
   T <- alpha^U + (alpha - alpha^U) * V
   X <- U
   index <- abs(alpha - 1) < .Machine$double.eps
   Y <- U
   if (any(!index))
-    Y[!index] <- logb(T[!index]/(T[!index]+(1-alpha[!index])*V[!index]),
+    Y[!index] <- logb(T[!index] / (T[!index] +
+                      (1 - alpha[!index]) * V[!index]),
                       base = alpha[!index])
   ans <- matrix(c(X, Y), nrow = use.n, ncol = 2)
   if (any(index)) {
-    ans[index, 1] <- runif(sum(index)) # Uniform density for alpha == 1
+    ans[index, 1] <- runif(sum(index))  # Uniform density for alpha == 1
     ans[index, 2] <- runif(sum(index))
   }
   ans
 }
 
 
-pfrank <- function(q1, q2, alpha) {
-  if (!is.Numeric(q1)) stop("bad input for 'q1'")
-  if (!is.Numeric(q2)) stop("bad input for 'q2'")
+pbifrankcop <- function(q1, q2, alpha) {
+  if (!is.Numeric(q1))                     stop("bad input for 'q1'")
+  if (!is.Numeric(q2))                     stop("bad input for 'q2'")
   if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
 
   L <- max(length(q1), length(q2), length(alpha))
-  alpha <- rep(alpha, length.out = L)
-  q1 <- rep(q1, length.out = L)
-  q2 <- rep(q2, length.out = L)
+  if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+  if (length(q1   ) != L) q1    <- rep(q1,    length.out = L)
+  if (length(q2   ) != L) q2    <- rep(q2,    length.out = L)
 
   x <- q1; y <- q2
   index <- (x >= 1 & y <  1) | (y >= 1 & x <  1) |
@@ -676,32 +1598,32 @@ pfrank <- function(q1, q2, alpha) {
   ans <- as.numeric(index)
   if (any(!index))
   ans[!index] <- logb(1 + ((alpha[!index])^(x[!index]) - 1)*
-                ((alpha[!index])^(y[!index]) - 1)/(alpha[!index] - 1), 
-                base=alpha[!index])
+                 ((alpha[!index])^(y[!index]) - 1)/(alpha[!index] - 1), 
+                 base = alpha[!index])
   ind2 <- (abs(alpha - 1) < .Machine$double.eps)
   ans[ind2] <- x[ind2] * y[ind2]
-  ans[x >= 1 & y <  1] <- y[x >= 1 & y < 1] # P(Y2 < q2) = q2
-  ans[y >= 1 & x <  1] <- x[y >= 1 & x < 1] # P(Y1 < q1) = q1
+  ans[x >= 1 & y <  1] <- y[x >= 1 & y < 1]  # P(Y2 < q2) = q2
+  ans[y >= 1 & x <  1] <- x[y >= 1 & x < 1]  # P(Y1 < q1) = q1
   ans[x <= 0 | y <= 0] <- 0
   ans[x >= 1 & y >= 1] <- 1
   ans
 }
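[Aside, not part of the upstream patch: a hedged cross-check of the renamed
rbifrankcop()/pbifrankcop() pair, assuming VGAM is attached. An empirical joint
probability from simulated data should agree with the copula CDF.]

set.seed(1)
uv <- rbifrankcop(n = 2000, alpha = 5)
mean(uv[, 1] <= 0.4 & uv[, 2] <= 0.7)       # empirical P(Y1 <= 0.4, Y2 <= 0.7)
pbifrankcop(q1 = 0.4, q2 = 0.7, alpha = 5)  # theoretical value; should be close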
 
 
-dfrank <- function(x1, x2, alpha, log = FALSE) {
+dbifrankcop <- function(x1, x2, alpha, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-  if (!is.Numeric(x1)) stop("bad input for 'x1'")
-  if (!is.Numeric(x2)) stop("bad input for 'x2'")
+  if (!is.Numeric(x1))                     stop("bad input for 'x1'")
+  if (!is.Numeric(x2))                     stop("bad input for 'x2'")
   if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
 
   L <- max(length(x1), length(x2), length(alpha))
-  alpha <- rep(alpha, length.out = L)
-  x1 <- rep(x1, length.out = L)
-  x2 <- rep(x2, length.out = L)
+  if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+  if (length(x1   ) != L) x1    <- rep(x1,    length.out = L)
+  if (length(x2   ) != L) x2    <- rep(x2,    length.out = L)
 
   if (log.arg) {
     denom <- alpha-1 + (alpha^x1  - 1) * (alpha^x2  - 1)
@@ -713,7 +1635,8 @@ dfrank <- function(x1, x2, alpha, log = FALSE) {
     ans <- x1
     if (any(!index))
       ans[!index] <- (alpha[!index] - 1) * log(alpha[!index]) *
-          (alpha[!index])^(x1[!index]+x2[!index]) / (temp[!index])^2
+                     (alpha[!index])^(x1[!index] +
+                                      x2[!index]) / (temp[!index])^2
     ans[x1 <= 0 | x2 <= 0 | x1 >= 1 | x2 >= 1] <- 0
     ans[index] <- 1
     ans
@@ -723,13 +1646,15 @@ dfrank <- function(x1, x2, alpha, log = FALSE) {
 
 
 
-frank.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+bifrankcop.control <- function(save.weight = TRUE, ...) {
+  list(save.weight = save.weight)
 }
 
 
 
- frank <- function(lapar = "loge", iapar = 2, nsimEIM = 250) {
+
+
+ bifrankcop <- function(lapar = "loge", iapar = 2, nsimEIM = 250) {
 
   lapar <- as.list(substitute(lapar))
   eapar <- link2list(lapar)
@@ -737,24 +1662,24 @@ frank.control <- function(save.weight = TRUE, ...) {
 
 
   if (!is.Numeric(iapar, positive = TRUE))
-    stop("'iapar' must be positive")
+    stop("argument 'iapar' must be positive")
 
 
   if (length(nsimEIM) &&
-     (!is.Numeric(nsimEIM, allowable.length = 1,
+     (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50))
-    stop("'nsimEIM' should be an integer greater than 50")
+    stop("argument 'nsimEIM' should be an integer greater than 50")
 
 
   new("vglmff",
-  blurb = c("Frank's bivariate distribution\n",
+  blurb = c("Frank's bivariate copula\n",
             "Links:    ",
             namesof("apar", lapar, earg = eapar )),
   initialize = eval(substitute(expression({
 
     if (any(y <= 0) || any(y >= 1))
-        stop("the response must have values between 0 and 1") 
+      stop("the response must have values between 0 and 1") 
 
     temp5 <-
     w.y.check(w = w, y = y,
@@ -769,20 +1694,19 @@ frank.control <- function(save.weight = TRUE, ...) {
     y <- temp5$y
 
 
-
     predictors.names <-
-      c(namesof("apar", .lapar, earg = .eapar, short = TRUE))
+      c(namesof("apar", .lapar , earg = .eapar, short = TRUE))
 
     if (length(dimnames(y)))
       extra$dimnamesy2 <- dimnames(y)[[2]]
 
     if (!length(etastart)) {
       apar.init <- rep(.iapar, length.out = n)
-      etastart <- cbind(theta2eta(apar.init, .lapar, earg = .eapar ))
+      etastart <- cbind(theta2eta(apar.init, .lapar , earg = .eapar ))
     }
   }), list( .lapar = lapar, .eapar = eapar, .iapar = iapar))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    apar <- eta2theta(eta, .lapar, earg = .eapar )
+    apar <- eta2theta(eta, .lapar , earg = .eapar )
     fv.matrix <- matrix(0.5, length(apar), 2)
     if (length(extra$dimnamesy2))
       dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
@@ -800,17 +1724,17 @@ frank.control <- function(save.weight = TRUE, ...) {
   }), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))),
   loglikelihood = eval(substitute(
         function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    apar <- eta2theta(eta, .lapar, earg = .eapar )
+    apar <- eta2theta(eta, .lapar , earg = .eapar )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-        sum(c(w) * dfrank(x1 = y[, 1], x2 = y[, 2],
-                          alpha = apar, log = TRUE))
+        sum(c(w) * dbifrankcop(x1 = y[, 1], x2 = y[, 2],
+                               alpha = apar, log = TRUE))
     }
   }, list( .lapar = lapar, .eapar = eapar ))),
-  vfamily = c("frank"),
+  vfamily = c("bifrankcop"),
   deriv = eval(substitute(expression({
-    apar <- eta2theta(eta, .lapar, earg = .eapar )
-    dapar.deta <- dtheta.deta(apar, .lapar, earg = .eapar )
+    apar <- eta2theta(eta, .lapar , earg = .eapar )
+    dapar.deta <- dtheta.deta(apar, .lapar , earg = .eapar )
 
     de3 <- deriv3(~ (log((apar - 1) * log(apar)) + (y1+y2)*log(apar) -
                       2 * log(apar-1 + (apar^y1  - 1) * (apar^y2  - 1))),
@@ -832,8 +1756,8 @@ frank.control <- function(save.weight = TRUE, ...) {
 
 
     run.mean <- 0
-    for(ii in 1:( .nsimEIM )) {
-      ysim <- rfrank(n,alpha=apar)
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rbifrankcop(n, alpha = apar)
         y1 <- ysim[, 1]; y2 <- ysim[, 2];
         eval.de3 <- eval(de3)
         d2l.dthetas2 <-  attr(eval.de3, "hessian")
@@ -854,12 +1778,12 @@ frank.control <- function(save.weight = TRUE, ...) {
       D2l.dapar2 <- 1/(apar - 1)^2 + (1+log(apar))/(apar*log(apar))^2 +
                     (y[, 1]+y[, 2])/apar^2 + 2 *
                     (nump / denom - (numerator/denom)^2)
-      d2apar.deta2 <- d2theta.deta2(apar, .lapar)
+      d2apar.deta2 <- d2theta.deta2(apar, .lapar , earg = .eapar )
       wz <- c(w) * (dapar.deta^2 * D2l.dapar2 - Dl.dapar * d2apar.deta2)
       if (TRUE && intercept.only) {
         wz <- cbind(wz)
         sumw <- sum(w)
-        for(iii in 1:ncol(wz))
+        for (iii in 1:ncol(wz))
           wz[,iii] <- sum(wz[, iii]) / sumw
         pooled.weight <- TRUE
         wz <- c(w) * wz   # Put back the weights
@@ -907,9 +1831,8 @@ frank.control <- function(save.weight = TRUE, ...) {
     y <- temp5$y
 
 
-
     predictors.names <-
-      c(namesof("theta", .ltheta, .etheta , short = TRUE))
+      c(namesof("theta", .ltheta , .etheta , short = TRUE))
 
     if (!length(etastart)) {
       theta.init <- if (length( .itheta)) {
@@ -932,14 +1855,15 @@ frank.control <- function(save.weight = TRUE, ...) {
 
     misc$expected <- .expected 
     misc$multipleResponses <- FALSE
-  }), list( .ltheta = ltheta, .etheta = etheta, .expected = expected ))),
+  }), list( .ltheta = ltheta,
+            .etheta = etheta, .expected = expected ))),
 
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     theta <- eta2theta(eta, .ltheta , .etheta )
     if (residuals) stop("loglikelihood residuals not ",
                       "implemented yet") else {
-      sum(c(w) * (-exp(-theta)*y[, 1]/theta - theta*y[, 2]))
+      sum(c(w) * (-exp(-theta) * y[, 1] / theta - theta * y[, 2]))
     }
   }, list( .ltheta = ltheta, .etheta = etheta ))),
   vfamily = c("gammahyp"),
@@ -975,16 +1899,16 @@ frank.control <- function(save.weight = TRUE, ...) {
   lapar <- attr(earg, "function.name")
 
   if (length(iapar) &&
-     (!is.Numeric(iapar, allowable.length = 1) ||
+     (!is.Numeric(iapar, length.arg = 1) ||
       abs(iapar) >= 1))
     stop("argument 'iapar' must be a single number between -1 and 1")
 
-  if (!is.Numeric(tola0, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(tola0, length.arg = 1, positive = TRUE))
       stop("argument 'tola0' must be a single positive number")
 
   if (length(iapar) && abs(iapar) <= tola0)
       stop("argument 'iapar' must not be between -tola0 and tola0")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2.5)
       stop("argument 'imethod' must be 1 or 2")
@@ -1011,7 +1935,7 @@ frank.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <-
-      c(namesof("apar", .lapar, earg = .earg , short = TRUE))
+      c(namesof("apar", .lapar , earg = .earg , short = TRUE))
 
     if (length(dimnames(y)))
       extra$dimnamesy2 = dimnames(y)[[2]]
@@ -1025,12 +1949,12 @@ frank.control <- function(save.weight = TRUE, ...) {
              expm1(-mean1) * expm1(-mean2))
           }
         etastart <-
-          theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
+          theta2eta(rep(ainit, length.out = n), .lapar , earg = .earg )
       }
   }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lapar, earg = .earg )
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
     fv.matrix <- matrix(1, length(alpha), 2)
     if (length(extra$dimnamesy2))
         dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
@@ -1047,7 +1971,7 @@ frank.control <- function(save.weight = TRUE, ...) {
   }), list( .lapar = lapar, .earg = earg ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-      alpha  <- eta2theta(eta, .lapar, earg = .earg )
+      alpha  <- eta2theta(eta, .lapar , earg = .earg )
       alpha[abs(alpha) < .tola0 ] <- .tola0
       if (residuals) stop("loglikelihood residuals not ",
                           "implemented yet") else {
@@ -1058,7 +1982,7 @@ frank.control <- function(save.weight = TRUE, ...) {
   }, list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
   vfamily = c("morgenstern"),
   deriv = eval(substitute(expression({
-    alpha  <- eta2theta(eta, .lapar, earg = .earg )
+    alpha  <- eta2theta(eta, .lapar , earg = .earg )
     alpha[abs(alpha) < .tola0 ] <- .tola0
     numerator <- 1 - 2*(exp(-y[, 1]) + exp(-y[, 2])) +
                  4*exp(-y[, 1] - y[, 2])
@@ -1066,19 +1990,19 @@ frank.control <- function(save.weight = TRUE, ...) {
              4 *alpha*exp(-y[, 1] - y[, 2]))
     dl.dalpha <- numerator / denom
 
-    dalpha.deta <- dtheta.deta(alpha,  .lapar, earg = .earg )
+    dalpha.deta <- dtheta.deta(alpha,  .lapar , earg = .earg )
 
     c(w) * cbind(dl.dalpha * dalpha.deta)
   }), list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
   weight = eval(substitute(expression({
     d2l.dalpha2 <- dl.dalpha^2
-    d2alpha.deta2 <- d2theta.deta2(alpha,  .lapar, earg = .earg )
+    d2alpha.deta2 <- d2theta.deta2(alpha,  .lapar , earg = .earg )
     wz <- c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
     if (TRUE &&
        intercept.only) {
         wz <- cbind(wz)
       sumw <- sum(w)
-      for(iii in 1:ncol(wz))
+      for (iii in 1:ncol(wz))
         wz[,iii] <- sum(wz[, iii]) / sumw
       pooled.weight <- TRUE
       wz <- c(w) * wz   # Put back the weights
@@ -1095,7 +2019,7 @@ frank.control <- function(save.weight = TRUE, ...) {
 rfgm <- function(n, alpha) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   if (!is.Numeric(alpha))
@@ -1129,8 +2053,8 @@ dfgm <- function(x1, x2, alpha, log = FALSE) {
     stop("bad input for argument 'log'")
 
   L <- max(length(x1), length(x2), length(alpha))
-  if (length(x1) != L)  x1 <- rep(x1, length.out = L)
-  if (length(x2) != L)  x2 <- rep(x2, length.out = L)
+  if (length(x1)    != L)  x1    <- rep(x1,    length.out = L)
+  if (length(x2)    != L)  x2    <- rep(x2,    length.out = L)
   if (length(alpha) != L)  alpha <- rep(alpha, length.out = L)
   ans <- 0 * x1
   xnok <- (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
@@ -1140,7 +2064,7 @@ dfgm <- function(x1, x2, alpha, log = FALSE) {
   } else {
     ans[!xnok] <-   1 + alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
     ans[xnok] <- 0
-    if (any(ans<0))
+    if (any(ans < 0))
       stop("negative values in the density (alpha out of range)")
   }
   ans
@@ -1148,17 +2072,18 @@ dfgm <- function(x1, x2, alpha, log = FALSE) {
 
 
 pfgm <- function(q1, q2, alpha) {
-  if (!is.Numeric(q1)) stop("bad input for 'q1'")
-  if (!is.Numeric(q2)) stop("bad input for 'q2'")
-  if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
+  if (!is.Numeric(q1))     stop("bad input for 'q1'")
+  if (!is.Numeric(q2))     stop("bad input for 'q2'")
+  if (!is.Numeric(alpha))  stop("bad input for 'alpha'")
   if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
 
   L <- max(length(q1), length(q2), length(alpha))
-  if (length(q1) != L)  q1 <- rep(q1, length.out = L)
-  if (length(q2) != L)  q2 <- rep(q2, length.out = L)
+  if (length(q1)    != L)     q1 <- rep(q1,    length.out = L)
+  if (length(q2)    != L)     q2 <- rep(q2,    length.out = L)
   if (length(alpha) != L)  alpha <- rep(alpha, length.out = L)
 
-  x <- q1; y <- q2
+  x <- q1
+  y <- q2
   index <- (x >= 1 & y <  1) |
            (y >= 1 & x <  1) |
            (x <= 0 | y <= 0) |
@@ -1168,8 +2093,8 @@ pfgm <- function(q1, q2, alpha) {
     ans[!index] <-    q1[!index] *   q2[!index] * (1 + alpha[!index] *
                    (1-q1[!index])*(1-q2[!index]))
   }
-  ans[x >= 1 & y<1] <- y[x >= 1 & y<1] # P(Y2 < q2) = q2
-  ans[y >= 1 & x<1] <- x[y >= 1 & x<1] # P(Y1 < q1) = q1
+  ans[x >= 1 & y<1] <- y[x >= 1 & y<1]  # P(Y2 < q2) = q2
+  ans[y >= 1 & x<1] <- x[y >= 1 & x<1]  # P(Y1 < q1) = q1
   ans[x <= 0 | y <= 0] <- 0
   ans[x >= 1 & y >= 1] <- 1
   ans
@@ -1177,29 +2102,22 @@ pfgm <- function(q1, q2, alpha) {
 
 
 
-fgm.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
-}
 
 
 
  fgm <- function(lapar = "rhobit", iapar = NULL,
-                 imethod = 1, nsimEIM = 200) {
+                 imethod = 1) {
 
   lapar <- as.list(substitute(lapar))
   earg  <- link2list(lapar)
   lapar <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2.5)
-    stop("argument 'imethod' must be 1 or 2")
-  if (!length(nsimEIM) ||
-     (!is.Numeric(nsimEIM, allowable.length = 1,
-                  integer.valued = TRUE) ||
-      nsimEIM <= 50))
-    stop("'nsimEIM' should be an integer greater than 50")
+     imethod > 3.5)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
   if (length(iapar) &&
      (abs(iapar) >= 1))
     stop("'iapar' should be less than 1 in absolute value")
@@ -1227,33 +2145,41 @@ fgm.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <-
-      namesof("apar", .lapar, earg = .earg , short = TRUE)
+      namesof("apar", .lapar , earg = .earg , short = TRUE)
 
     if (length(dimnames(y)))
-        extra$dimnamesy2 <- dimnames(y)[[2]]
+      extra$dimnamesy2 <- dimnames(y)[[2]]
 
     if (!length(etastart)) {
       ainit  <- if (length( .iapar ))  .iapar else {
-      mean1 <- if ( .imethod == 1) weighted.mean(y[, 1], w) else
-               median(y[, 1])
-      mean2 <- if ( .imethod == 1) weighted.mean(y[, 2], w) else
-               median(y[, 2])
-      Finit <- weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
-      (Finit / (mean1 * mean2) - 1) / ((1-mean1) * (1-mean2))
+
+
+      if ( .imethod == 1) {
+        3 * cor(y[, 1], y[, 2], method = "spearman")
+      } else if ( .imethod == 2) {
+        9 * kendall.tau(y[, 1], y[, 2]) / 2
+      } else {
+        mean1 <- if ( .imethod == 1) weighted.mean(y[, 1], w) else
+                 median(y[, 1])
+        mean2 <- if ( .imethod == 1) weighted.mean(y[, 2], w) else
+                 median(y[, 2])
+        Finit <- weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
+        (Finit / (mean1 * mean2) - 1) / ((1 - mean1) * (1 - mean2))
+      }
     }
 
     ainit <- min(0.95, max(ainit, -0.95))
 
     etastart <-
-      theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
+      theta2eta(rep(ainit, length.out = n), .lapar , earg = .earg )
     }
   }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lapar, earg = .earg )
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
     fv.matrix <- matrix(0.5, length(alpha), 2)
     if (length(extra$dimnamesy2))
-        dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
+      dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
     fv.matrix
   }, list( .lapar = lapar, .earg = earg ))),
   last = eval(substitute(expression({
@@ -1262,23 +2188,22 @@ fgm.control <- function(save.weight = TRUE, ...) {
     misc$earg <- list("apar" = .earg  )
 
     misc$expected <- FALSE
-    misc$nsimEIM <- .nsimEIM
     misc$multipleResponses <- FALSE
-  }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
+  }), list( .lapar = lapar, .earg = earg))),
   loglikelihood = eval(substitute(
-          function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lapar, earg = .earg )
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-        sum(c(w) * dfgm(x1 = y[, 1],
-                        x2 = y[, 2], alpha = alpha, log = TRUE))
+      sum(c(w) * dfgm(x1 = y[, 1],
+                      x2 = y[, 2], alpha = alpha, log = TRUE))
     }
   }, list( .lapar = lapar, .earg = earg ))),
   vfamily = c("fgm"),
   deriv = eval(substitute(expression({
-    alpha  <- eta2theta(eta, .lapar, earg = .earg )
+    alpha  <- eta2theta(eta, .lapar , earg = .earg )
 
-    dalpha.deta <- dtheta.deta(alpha, .lapar, earg = .earg )
+    dalpha.deta <- dtheta.deta(alpha, .lapar , earg = .earg )
 
     numerator <- (1 - 2 * y[, 1])  * (1 - 2 * y[, 2])
     denom <- 1 + alpha * numerator
@@ -1292,30 +2217,19 @@ fgm.control <- function(save.weight = TRUE, ...) {
     }
     dl.dalpha <- numerator / denom
     c(w) * cbind(dl.dalpha * dalpha.deta)
-  }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
-  weight = eval(substitute(expression({
-    run.var <- 0
-    for(ii in 1:( .nsimEIM )) {
-      ysim <- rfgm(n, alpha=alpha)
-      numerator <- (1 - 2 * ysim[, 1])  * (1 - 2 * ysim[, 2])
-      denom <- 1 + alpha * numerator
-      dl.dalpha <- numerator / denom
-      rm(ysim)
-      temp3 <- dl.dalpha
-      run.var <- ((ii - 1) * run.var + temp3^2) / ii
-    }
-    wz <- if (intercept.only)
-        matrix(colMeans(cbind(run.var)),
-               n, dimm(M), byrow = TRUE) else cbind(run.var)
+  }), list( .lapar = lapar, .earg = earg))),
 
-    wz <- wz * dalpha.deta^2
+  weight = eval(substitute(expression({
+    wz <- lerch(alpha^2, 2, 1.5) / 4  # Checked and correct
+    wz <- wz * dalpha.deta^2
     c(w) * wz
-  }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))))
+  }), list( .lapar = lapar, .earg = earg))))
 }
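[Aside, not part of the upstream patch: a minimal fitting sketch in the style of the
package's help-file examples, assuming vglm(), Coef() and the rfgm()/fgm() functions
defined in this file are available.]

set.seed(123)
ymat <- rfgm(n = 1000, alpha = 0.6)
fit <- vglm(ymat ~ 1, family = fgm, trace = TRUE)
Coef(fit)  # the fitted apar should be near the true value 0.6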
 
 
 
- gumbelIbiv <- function(lapar = "identity", iapar = NULL, imethod = 1) {
+
+ bigumbelI <- function(lapar = "identity", iapar = NULL, imethod = 1) {
 
   lapar <- as.list(substitute(lapar))
   earg  <- link2list(lapar)
@@ -1323,9 +2237,9 @@ fgm.control <- function(save.weight = TRUE, ...) {
 
 
   if (length(iapar) &&
-      !is.Numeric(iapar, allowable.length = 1))
+      !is.Numeric(iapar, length.arg = 1))
     stop("'iapar' must be a single number")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2.5)
     stop("argument 'imethod' must be 1 or 2")
@@ -1362,12 +2276,12 @@ fgm.control <- function(save.weight = TRUE, ...) {
         (log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
       }
       etastart <-
-        theta2eta(rep(ainit,  length.out = n), .lapar, earg = .earg )
+        theta2eta(rep(ainit,  length.out = n), .lapar , earg = .earg )
       }
   }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lapar, earg = .earg )
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
     cbind(rep(1, len = length(alpha)),
           rep(1, len = length(alpha)))
   }, list( .lapar = lapar, .earg = earg ))),
@@ -1382,44 +2296,44 @@ fgm.control <- function(save.weight = TRUE, ...) {
   }), list( .lapar = lapar, .earg = earg ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    alpha  <- eta2theta(eta, .lapar, earg = .earg )
+    alpha  <- eta2theta(eta, .lapar , earg = .earg )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
       denom <- (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
-            mytolerance <- .Machine$double.xmin
-            bad <- (denom <= mytolerance)   # Range violation
-            if (any(bad)) {
-                cat("There are some range violations in @deriv\n")
-                flush.console()
-            }
-            sum(bad) * (-1.0e10) + 
-            sum(w[!bad] * (-y[!bad, 1] - y[!bad, 2] +
-                alpha[!bad]*y[!bad, 1]*y[!bad, 2] + log(denom[!bad])))
-        }
+      mytolerance <- .Machine$double.xmin
+      bad <- (denom <= mytolerance)  # Range violation
+      if (any(bad)) {
+        cat("There are some range violations in @deriv\n")
+        flush.console()
+      }
+      sum(bad) * (-1.0e10) + 
+      sum(w[!bad] * (-y[!bad, 1] - y[!bad, 2] +
+          alpha[!bad] * y[!bad, 1] * y[!bad, 2] + log(denom[!bad])))
+    }
   }, list( .lapar = lapar, .earg = earg ))),
-  vfamily = c("gumbelIbiv"),
+  vfamily = c("bigumbelI"),
   deriv = eval(substitute(expression({
-    alpha  <- eta2theta(eta, .lapar, earg = .earg )
-    numerator <- (alpha*y[, 1] - 1)*y[, 2] +
-                 (alpha*y[, 2] - 1)*y[, 1] + 1
-    denom <- (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
+    alpha  <- eta2theta(eta, .lapar , earg = .earg )
+    numerator <- (alpha * y[, 1] - 1) * y[, 2] +
+                 (alpha * y[, 2] - 1) * y[, 1] + 1
+    denom <- (alpha * y[, 1] - 1) * (alpha * y[, 2] - 1) + alpha
     denom <- abs(denom)
 
-    dl.dalpha <- numerator / denom + y[, 1]*y[, 2]
+    dl.dalpha <- numerator / denom + y[, 1] * y[, 2]
 
-    dalpha.deta <- dtheta.deta(alpha,  .lapar, earg = .earg )
+    dalpha.deta <- dtheta.deta(alpha,  .lapar , earg = .earg )
 
     c(w) * cbind(dl.dalpha * dalpha.deta)
   }), list( .lapar = lapar, .earg = earg ))),
   weight = eval(substitute(expression({
     d2l.dalpha2 <- (numerator/denom)^2 - 2*y[, 1]*y[, 2] / denom
-    d2alpha.deta2 <- d2theta.deta2(alpha, .lapar, earg = .earg )
+    d2alpha.deta2 <- d2theta.deta2(alpha, .lapar , earg = .earg )
     wz <- c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
     if (TRUE &&
            intercept.only) {
             wz <- cbind(wz)
       sumw <- sum(w)
-      for(iii in 1:ncol(wz))
+      for (iii in 1:ncol(wz))
         wz[, iii] <- sum(wz[, iii]) / sumw
       pooled.weight <- TRUE
       wz <- c(w) * wz   # Put back the weights
@@ -1458,10 +2372,10 @@ pplack <- function(q1, q2, oratio) {
     ans[!index] <- 0.5 * temp2 / (oratio[!index] - 1)
   }
 
-  ind2 <- (abs(oratio - 1) < 1.0e-6) # .Machine$double.eps
+  ind2 <- (abs(oratio - 1) < 1.0e-6)  # .Machine$double.eps
   ans[ind2] <- x[ind2] * y[ind2]
-  ans[x >= 1 & y<1] <- y[x >= 1 & y<1] # P(Y2 < q2) = q2
-  ans[y >= 1 & x<1] <- x[y >= 1 & x<1] # P(Y1 < q1) = q1
+  ans[x >= 1 & y<1] <- y[x >= 1 & y<1]  # P(Y2 < q2) = q2
+  ans[y >= 1 & x<1] <- x[y >= 1 & x<1]  # P(Y1 < q1) = q1
   ans[x <= 0 | y <= 0] <- 0
   ans[x >= 1 & y >= 1] <- 1
   ans
@@ -1472,7 +2386,7 @@ pplack <- function(q1, q2, oratio) {
 rplack <- function(n, oratio) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
 
@@ -1524,7 +2438,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
   if (length(ioratio) && (!is.Numeric(ioratio, positive = TRUE)))
     stop("'ioratio' must be positive")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1628,9 +2542,9 @@ plackett.control <- function(save.weight = TRUE, ...) {
           (y1sim+y2sim-2*y1sim*y2sim)) - 1.5 *
           log((1 + (y1sim+y2sim)*(oratio - 1))^2 -
           4 * oratio * (oratio - 1)*y1sim*y2sim)),
-                    name = "oratio", hessian= FALSE)
+                    name = "oratio", hessian = FALSE)
     run.var <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- rplack(n, oratio=oratio)
       y1sim <- ysim[, 1]
       y2sim <- ysim[, 2]
@@ -1695,8 +2609,8 @@ pamh <- function(q1, q2, alpha) {
       ans[!index] <- (q1[!index]*q2[!index]) / (1 -
                      alpha[!index]*(1-q1[!index])*(1-q2[!index]))
   }
-  ans[x >= 1 & y <  1] <- y[x >= 1 & y < 1] # P(Y2 < q2) = q2
-  ans[y >= 1 & x <  1] <- x[y >= 1 & x < 1] # P(Y1 < q1) = q1
+  ans[x >= 1 & y <  1] <- y[x >= 1 & y < 1]  # P(Y2 < q2) = q2
+  ans[y >= 1 & x <  1] <- x[y >= 1 & x < 1]  # P(Y1 < q1) = q1
   ans[x <= 0 | y <= 0] <- 0
   ans[x >= 1 & y >= 1] <- 1
   ans[abs(alpha) > 1] <- NA
@@ -1707,7 +2621,7 @@ pamh <- function(q1, q2, alpha) {
 ramh <- function(n, alpha) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   if (any(abs(alpha) > 1))
@@ -1738,13 +2652,13 @@ amh.control <- function(save.weight = TRUE, ...) {
 
   if (length(ialpha) && (abs(ialpha) > 1))
     stop("'ialpha' should be less than or equal to 1 in absolute value")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 2)
     stop("imethod must be 1 or 2")
 
   if (length(nsimEIM) &&
-    (!is.Numeric(nsimEIM, allowable.length = 1,
+    (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
      nsimEIM <= 50))
   stop("'nsimEIM' should be an integer greater than 50")
@@ -1826,9 +2740,10 @@ amh.control <- function(save.weight = TRUE, ...) {
 
     y1 <- y[, 1]
     y2 <- y[, 2]
-    de3 <- deriv3(~ (log(1-alpha+(2*alpha*y1*y2/(1-alpha*(1-y1)*(1-y2))))-
-                    2*log(1-alpha*(1-y1)*(1-y2))) ,
-                    name = "alpha", hessian= FALSE)
+    de3 <- deriv3(~ (log(1 - alpha+
+                        (2 * alpha*y1*y2/(1-alpha*(1-y1)*(1-y2)))) -
+                    2 * log(1 - alpha*(1-y1)*(1-y2))) ,
+                    name = "alpha", hessian = FALSE)
     eval.de3 <- eval(de3)
 
     dl.dalpha <-  attr(eval.de3, "gradient")
@@ -1836,13 +2751,14 @@ amh.control <- function(save.weight = TRUE, ...) {
     c(w) * dl.dalpha * dalpha.deta
   }), list( .lalpha = lalpha, .ealpha = ealpha ))),
   weight = eval(substitute(expression({
-    sd3 <- deriv3(~ (log(1-alpha+
-                    (2*alpha*y1sim*y2sim/(1-alpha*(1-y1sim)*(1-y2sim)))) -
-                     2*log(1-alpha*(1-y1sim)*(1-y2sim))) ,
-                     name = "alpha", hessian= FALSE)
+    sd3 <- deriv3(~ (log(1 - alpha +
+                        (2 * alpha * y1sim * y2sim / (1 - alpha *
+                         (1 - y1sim) * (1-y2sim)))) -
+                     2 * log(1-alpha*(1-y1sim)*(1-y2sim))),
+                     name = "alpha", hessian = FALSE)
     run.var <- 0
-    for(ii in 1:( .nsimEIM )) {
-      ysim <- ramh(n, alpha=alpha)
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- ramh(n, alpha = alpha)
       y1sim <- ysim[, 1]
+      y2sim <- ysim[, 2]
       eval.sd3 <- eval(sd3)
@@ -1877,25 +2793,47 @@ amh.control <- function(save.weight = TRUE, ...) {
 
 
 
-dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
-                    rho = 0, log = FALSE) {
+dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0,
+                    var1 = 1, var2 = 1, cov12 = 0,
+                    log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
+  sd1 <- sqrt(var1)
+  sd2 <- sqrt(var2)
+  rho <- cov12 / (sd1 * sd2)
+
 
   temp5 <- 1 - rho^2
   zedd1 <- (x1 - mean1) / sd1
   zedd2 <- (x2 - mean2) / sd2
-  logpdf <- -log(2 * pi) - log(sd1 ) - log(sd2) -
+  logpdf <- -log(2 * pi) - log(sd1) - log(sd2) -
               0.5 * log1p(-rho^2) +
-            -(0.5 / temp5)  * (zedd1^2 - 2 * rho * zedd1 * zedd2 + zedd2^2)
+            -(0.5 / temp5)  * (zedd1^2 + (-2 * rho * zedd1 + zedd2) * zedd2)
   if (log.arg) logpdf else exp(logpdf)
 }
 
 
 
+rbinorm <- function(n, mean1 = 0, mean2 = 0,
+                    var1 = 1, var2 = 1, cov12 = 0) {
+
+  Y1 <- rnorm(n)
+  Y2 <- rnorm(n)
+  X1 <- sqrt(var1) * Y1 + mean1
+  delta <- sqrt(var2 - (cov12^2) / var1)
+  X2 <- cov12 * Y1 / sqrt(var1) + delta * Y2 + mean2
+
+  ans <- cbind(X1, X2)
+  ans[is.na(delta), ] <- NA
+
+  ans
+}
+
+
+
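[Aside, not part of the upstream patch: a quick consistency sketch for the
dbinorm()/rbinorm() definitions above, assuming VGAM is attached. The sample moments
of rbinorm() output should match the requested means and covariance, and dbinorm()
with cov12 = 0 reduces to a product of univariate normal densities.]

set.seed(1)
xy <- rbinorm(n = 10000, mean1 = 1, mean2 = -1, var1 = 4, var2 = 9, cov12 = 3)
colMeans(xy)  # roughly c(1, -1)
cov(xy)       # roughly matrix(c(4, 3, 3, 9), 2, 2)
dbinorm(0, 0, cov12 = 0) - dnorm(0)^2  # essentially zero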
 
  binormal <- function(lmean1 = "identity",
                       lmean2 = "identity",
@@ -1933,10 +2871,10 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
 
   trivial1 <- is.logical(eq.mean) && length(eq.mean) == 1 && !eq.mean
   trivial2 <- is.logical(eq.sd  ) && length(eq.sd  ) == 1 && !eq.sd
-  if(!trivial1 && !trivial2)
+  if (!trivial1 && !trivial2)
     stop("only one of 'eq.mean' and 'eq.sd' can be assigned a value")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1954,10 +2892,12 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
     temp8.m[2, 1] <- 1
     temp8.s <- diag(5)[, -4]
     temp8.s[4, 3] <- 1
-    constraints <- cm.vgam(temp8.m, x, .eq.mean,
-                           constraints, apply.int = TRUE)
-    constraints <- cm.vgam(temp8.s, x, .eq.sd,
-                           constraints, apply.int = TRUE)
+    constraints <- cm.vgam(temp8.m, x = x,
+                           bool = .eq.mean ,
+                           constraints = constraints, apply.int = TRUE)
+    constraints <- cm.vgam(temp8.s, x = x,
+                           bool = .eq.sd ,
+                           constraints = constraints, apply.int = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .zero = zero,
             .eq.sd   = eq.sd,
@@ -1978,11 +2918,11 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
 
 
     predictors.names <- c(
-      namesof("mean1", .lmean1, earg = .emean1, short = TRUE),
-      namesof("mean2", .lmean2, earg = .emean2, short = TRUE),
-      namesof("sd1",   .lsd1,   earg = .esd1,   short = TRUE),
-      namesof("sd2",   .lsd2,   earg = .esd2,   short = TRUE),
-      namesof("rho",   .lrho,   earg = .erho,   short = TRUE))
+      namesof("mean1", .lmean1 , earg = .emean1 , short = TRUE),
+      namesof("mean2", .lmean2 , earg = .emean2 , short = TRUE),
+      namesof("sd1",   .lsd1 ,   earg = .esd1 ,   short = TRUE),
+      namesof("sd2",   .lsd2 ,   earg = .esd2 ,   short = TRUE),
+      namesof("rho",   .lrho ,   earg = .erho ,   short = TRUE))
 
     if (length(dimnames(y)))
       extra$dimnamesy2 <- dimnames(y)[[2]]
@@ -2004,11 +2944,11 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
         imean2 <- abs(imean2) + 0.01
       }
       etastart <-
-        cbind(theta2eta(imean1, .lmean1, earg = .emean1),
-              theta2eta(imean2, .lmean2, earg = .emean2),
-              theta2eta(isd1,   .lsd1,   earg = .esd1),
-              theta2eta(isd2,   .lsd2,   earg = .esd2),
-              theta2eta(irho,   .lrho,   earg = .erho))
+        cbind(theta2eta(imean1, .lmean1 , earg = .emean1 ),
+              theta2eta(imean2, .lmean2 , earg = .emean2 ),
+              theta2eta(isd1,   .lsd1 ,   earg = .esd1 ),
+              theta2eta(isd2,   .lsd2 ,   earg = .esd2 ),
+              theta2eta(irho,   .lrho ,   earg = .erho ))
     }
   }), list( .lmean1 = lmean1, .lmean2 = lmean2,
             .emean1 = emean1, .emean2 = emean2,
@@ -2035,13 +2975,13 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
                       "mean2" = .lmean2,
                       "sd1"   = .lsd1,
                       "sd2"   = .lsd2,
-                      "rho"   = .lrho)
+                      "rho"   = .lrho )
 
     misc$earg <- list("mean1" = .emean1,
                       "mean2" = .emean2, 
                       "sd1"   = .esd1,
                       "sd2"   = .esd2,
-                      "rho"   = .erho)
+                      "rho"   = .erho )
 
     misc$expected <- TRUE
     misc$multipleResponses <- FALSE
@@ -2061,7 +3001,8 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
                           "implemented yet") else {
       sum(c(w) * dbinorm(x1 = y[, 1], x2 = y[, 2],
                          mean1 = mean1, mean2 = mean2,
-                         sd1 = sd1, sd2 = sd2, rho = Rho, log = TRUE))
+                         var1 = sd1^2, var2 = sd2^2, cov12 = Rho *sd1*sd2,
+                         log = TRUE))
     }
   } , list( .lmean1 = lmean1, .lmean2 = lmean2,
             .emean1 = emean1, .emean2 = emean2,
@@ -2123,15 +3064,14 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
     wz[, iam(3, 3, M)] <- (1 + 1 / temp5) / sd1^2
     wz[, iam(4, 4, M)] <- (1 + 1 / temp5) / sd2^2
     wz[, iam(3, 4, M)] <- -(Rho^2) / (temp5 * sd1 * sd2)
-    wz[, iam(5, 5, M)] <- 2 * (1 + 2 * Rho^2) / temp5^2 -
-                          (1 + Rho^2) / temp5^2
+    wz[, iam(5, 5, M)] <- (1 + Rho^2) / temp5^2
     wz[, iam(3, 5, M)] <- -Rho / (sd1 * temp5)
     wz[, iam(4, 5, M)] <- -Rho / (sd2 * temp5)
     for (ilocal in 1:M)
       for (jlocal in ilocal:M)
         wz[, iam(ilocal, jlocal, M)] <- wz[, iam(ilocal, jlocal, M)] *
-                                       dthetas.detas[, ilocal] *
-                                       dthetas.detas[, jlocal]
+                                        dthetas.detas[, ilocal] *
+                                        dthetas.detas[, jlocal]
       c(w) * wz
   }), list( .lmean1 = lmean1, .lmean2 = lmean2,
             .emean1 = emean1, .emean2 = emean2,
@@ -2146,6 +3086,8 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
 
 
 
+
+
 gumbelI <-
   function(la = "identity", earg = list(), ia = NULL, imethod = 1) {
 
@@ -2155,20 +3097,20 @@ gumbelI <-
 
 
 
-  if (length(ia) && !is.Numeric(ia, allowable.length = 1))
+  if (length(ia) && !is.Numeric(ia, length.arg = 1))
       stop("'ia' must be a single number")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2.5)
       stop("argument 'imethod' must be 1 or 2")
 
 
   new("vglmff",
-  blurb=c("Gumbel's Type I Bivariate Distribution\n",
+  blurb = c("Gumbel's Type I Bivariate Distribution\n",
           "Links:    ",
           namesof("a", la, earg =  earg )),
-  initialize=eval(substitute(expression({
+  initialize = eval(substitute(expression({
     if (!is.matrix(y) || ncol(y) != 2)
         stop("the response must be a 2 column matrix") 
 
@@ -2216,8 +3158,8 @@ gumbelI <-
       sum(w * (-y[,1] - y[,2] + alpha*y[,1]*y[,2] + log(denom)))
       }
   }, list( .la = la, .earg = earg ))),
-  vfamily=c("gumbelI"),
-  deriv=eval(substitute(expression({
+  vfamily = c("gumbelI"),
+  deriv = eval(substitute(expression({
       alpha  <- eta2theta(eta, .la, earg =  .earg )
       numerator <- (alpha*y[,1] - 1)*y[,2] + (alpha*y[,2] - 1)*y[,1] + 1
       denom <- (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
@@ -2226,7 +3168,7 @@ gumbelI <-
       dalpha.deta <- dtheta.deta(alpha,  .la, earg =  .earg )
       c(w) * cbind(dl.dalpha * dalpha.deta)
   }), list( .la = la, .earg = earg ))),
-  weight=eval(substitute(expression({
+  weight = eval(substitute(expression({
     d2l.dalpha2 <- (numerator/denom)^2 - 2*y[,1]*y[,2] / denom
     d2alpha.deta2 <- d2theta.deta2(alpha, .la, earg =  .earg )
     wz <- w * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
@@ -2234,7 +3176,7 @@ gumbelI <-
         intercept.only) {
         wz <- cbind(wz)
         sumw <- sum(w)
-        for(iii in 1:ncol(wz))
+        for (iii in 1:ncol(wz))
             wz[,iii] <- sum(wz[,iii]) / sumw
         pooled.weight <- TRUE
         wz <- c(w) * wz   # Put back the weights
@@ -2247,7 +3189,81 @@ gumbelI <-
 
 
 
+kendall.tau <- function(x, y, exact = FALSE, max.n = 3000) {
+
+  if ((N <- length(x)) != length(y))
+    stop("arguments 'x' and 'y' do not have equal lengths")
 
+  NN <- if (!exact && N > max.n) {
+    cindex <- sample.int(n = N, size = max.n, replace = FALSE)
+    x <- x[cindex] 
+    y <- y[cindex] 
+    max.n
+  } else {
+    N
+  }
+
+
+  ans3 <-
+    c( .C("VGAM_C_kend_tau",
+         as.double(x), as.double(y),
+         as.integer(NN), ans = double(3),
+         NAOK = TRUE, PACKAGE = "VGAM")$ans)
+
+  con <- ans3[1] + ans3[2] / 2  # Ties put half and half
+  dis <- ans3[3] + ans3[2] / 2
+  (con - dis) / (con + dis)
+}
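[Aside, not part of the upstream patch: a usage sketch for kendall.tau(), which fgm()
calls when imethod = 2 via 9 * kendall.tau(y1, y2) / 2. It assumes VGAM is installed
so that the compiled VGAM_C_kend_tau routine is available. For continuous data
without ties it should essentially agree with base R's Kendall correlation.]

set.seed(1)
u <- runif(200)
v <- plogis(2 * u + rnorm(200))
kendall.tau(u, v)
cor(u, v, method = "kendall")  # should be essentially the same value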
+
+
+
+
+if (FALSE)
+kendall.tau <- function(x, y, exact = TRUE, max.n = 1000) {
+
+  if ((N <- length(x)) != length(y))
+    stop("arguments 'x' and 'y' do not have equal lengths")
+  index <- iam(NA, NA, M = N, both = TRUE)
+
+  index$row.index <- index$row.index[-(1:N)] 
+  index$col.index <- index$col.index[-(1:N)] 
+
+  NN <- if (!exact && N > max.n) {
+    cindex <- sample.int(n = N, size = max.n, replace = FALSE)
+    index$row.index <- index$row.index[cindex] 
+    index$col.index <- index$col.index[cindex] 
+    max.n
+  } else{
+    choose(N, 2)
+  }
+
+  con <- sum((x[index$row.index] - x[index$col.index]) *
+             (y[index$row.index] - y[index$col.index]) > 0)
+  dis <- NN - con
+  (con - dis) / (con + dis)
+}
+
+
+
+
+dbistudenttcop <- function(x1, x2, df, rho = 0, log = FALSE) {
+
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  u1 <- qt(x1, df = df)
+  u2 <- qt(x2, df = df)
+
+  logdensity <-
+    -(df/2 + 1) * log1p(
+    (u1^2 + u2^2 - 2 * rho * u1 * u2) / (df * (1 - rho^2))) -
+    log(2*pi) - 0.5 * log1p(-rho^2) -
+  dt(u1, df = df, log = TRUE) -
+  dt(u2, df = df, log = TRUE)
+
+  if (log.arg) logdensity else exp(logdensity)
+}
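[Aside, not part of the upstream patch: a rough numerical check of the
dbistudenttcop() definition above. A copula density integrates to 1 over the unit
square, so a crude Riemann sum should come out near 1 (only roughly, since the
bivariate t copula puts heavy mass in the corners).]

uu <- seq(0.005, 0.995, by = 0.005)
grid <- expand.grid(u1 = uu, u2 = uu)
dens <- dbistudenttcop(grid$u1, grid$u2, df = 4, rho = 0.5)
sum(dens) * 0.005^2  # roughly 1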
 
 
 
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 2608d7c..797e3bc 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -128,7 +128,7 @@ Deviance.categorical.data.vgam <-
     M <- if (is.matrix(eta)) ncol(eta) else 1
     if (M > 1)
       return(NULL)
-    devi <- devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
+    devi <- devi %*% rep(1, ncol(devi))  # deviance = \sum_i devi[i]
     return(c(sign(y[, 1] - mu[, 1]) * sqrt(abs(devi) * w)))
   } else {
     sum(w * devi)
@@ -210,7 +210,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                    "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
                    "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
@@ -379,7 +381,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                    "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
@@ -448,7 +452,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     misc$whitespace <- .whitespace
 
 
-    extra <- list() # kill what was used 
+    extra <- list()  # kill what was used 
   }), list( .earg = earg, .link = link, .reverse = reverse,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -559,7 +563,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     warning("bad value of maxit; using 200 instead")
     maxit = 200
   }
-  list(maxit = maxit, trace = as.logical(trace)[1],
+  list(maxit = maxit,
+       trace = as.logical(trace)[1],
        panic = as.logical(panic)[1])
 }
 
@@ -571,8 +576,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
  multinomial <- function(zero = NULL, parallel = FALSE,
                          nointercept = NULL, refLevel = "last",
-                         whitespace = FALSE)
-{
+                         whitespace = FALSE) {
 
   if (length(refLevel) != 1)
     stop("the length of 'refLevel' must be one")
@@ -587,11 +591,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       warning("'refLevel' is from an ordered factor")
     refLevel <- as.character(refLevel) == levels(refLevel)
     refLevel <- (1:length(refLevel))[refLevel]
-    if (!is.Numeric(refLevel, allowable.length = 1,
+    if (!is.Numeric(refLevel, length.arg = 1,
                     integer.valued = TRUE, positive = TRUE))
       stop("could not coerce 'refLevel' into a single positive integer")
   } else
-  if (!is.Numeric(refLevel, allowable.length = 1,
+  if (!is.Numeric(refLevel, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("'refLevel' must be a single positive integer")
 
@@ -633,7 +637,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
 
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel, 
+                           constraints = constraints,
                            apply.int = FALSE)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
     constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
@@ -646,6 +652,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   infos = eval(substitute(function(...) {
     list(parallel = .parallel ,
          refLevel = .refLevel ,
+         Musual = -1,
          multipleResponses = FALSE,
          zero = .zero )
   }, list( .zero = zero,
@@ -780,12 +787,15 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
  cumulative <- function(link = "logit",
-                        parallel = FALSE, reverse = FALSE, 
+                        parallel = FALSE,  # Does not apply to the intercept
+                        reverse = FALSE, 
                         mv = FALSE,
-                        apply.parint = FALSE,
                         whitespace = FALSE) {
 
 
+  apply.parint <- FALSE
+
+
   link <- as.list(substitute(link))
   earg  <- link2list(link)
   link <- attr(earg, "function.name")
@@ -824,12 +834,16 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
           Llevels <- extra$Llevels
           NOS <- extra$NOS
           Hk.matrix <- kronecker(diag(NOS), matrix(1,Llevels-1,1))
-          constraints <- cm.vgam(Hk.matrix, x, .parallel, constraints,
-                                 apply.int = .apply.parint)
+          constraints <- cm.vgam(Hk.matrix, x = x,
+                                 bool = .parallel ,
+                                 constraints = constraints,
+                                 apply.int = .apply.parint )
       }
     } else {
-      constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
-                             apply.int = .apply.parint)
+      constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                             bool = .parallel ,
+                             constraints = constraints,
+                             apply.int = .apply.parint )
     }
   }), list( .parallel = parallel, .mv = mv,
             .apply.parint = apply.parint ))),
@@ -877,7 +891,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
           stop("the 'weights' argument must be a vector of all ones")
       Llevels <- max(y)
       delete.zero.colns <- FALSE 
-      orig.y <- cbind(y) # Convert y into a matrix if necessary
+      orig.y <- cbind(y)  # Convert y into a matrix if necessary
       NOS <- ncol(cbind(orig.y))
       use.y <- use.mustart <- NULL
       for (iii in 1:NOS) {
@@ -1087,7 +1101,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     if ( .mv ) {
       NOS <- extra$NOS
       Llevels <- extra$Llevels
-      wz <- matrix(0, n, NOS*(Llevels-1)) # Diagonal elts only for a start
+      wz <- matrix(0, n, NOS*(Llevels-1))  # Diagonal elts only for a start
       for (iii in 1:NOS) {
         cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
         aindex <- (iii-1)*(Llevels)   + 1:(Llevels-1)
@@ -1172,7 +1186,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
             "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
             "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .parallel = parallel, .zero = zero ))),
 
@@ -1268,7 +1284,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     },
   vfamily = c("acat", "vcategorical"),
   deriv = eval(substitute(expression({
-    zeta <- eta2theta(eta, .link , earg = .earg ) # May be zetar
+    zeta <- eta2theta(eta, .link , earg = .earg )  # May be zetar
 
     dzeta.deta <- dtheta.deta(zeta, .link , earg = .earg )
 
@@ -1339,15 +1355,14 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
  brat <- function(refgp = "last",
                   refvalue = 1,
-                  init.alpha = 1)
-{
+                  init.alpha = 1) {
   if (!is.Numeric(init.alpha, positive = TRUE))
     stop("'init.alpha' must contain positive values only")
-  if (!is.Numeric(refvalue, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(refvalue, length.arg = 1, positive = TRUE))
     stop("'refvalue' must be a single positive value")
 
   if (!is.character(refgp) &&
-      !is.Numeric(refgp, allowable.length = 1,
+      !is.Numeric(refgp, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("'refgp' must be a single positive integer")
 
@@ -1369,7 +1384,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
                          n, M, byrow = TRUE)
     etastart <- matrix(theta2eta(init.alpha, "loge",
                                 earg = list(theta = NULL)),
-                      n, M, byrow = TRUE)
+                       n, M, byrow = TRUE)
     refgp <- .refgp
     if (!intercept.only)
       warning("this function only works with intercept-only models")
@@ -1378,11 +1393,12 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
     predictors.names <-
       namesof(paste("alpha", uindex, sep = ""), "loge", short = TRUE)
+
   }), list( .refgp = refgp, .init.alpha=init.alpha ))),
 
   linkinv = eval(substitute( function(eta, extra = NULL) {
     probs <- NULL
-    eta <- as.matrix(eta) # in case M = 1
+    eta <- as.matrix(eta)  # in case M = 1
     for (ii in 1:nrow(eta)) {
         alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
                                        earg = list(theta = NULL)),
@@ -1479,17 +1495,16 @@ acat.deriv <- function(zeta, reverse, M, n) {
  bratt <- function(refgp = "last",
                    refvalue = 1,
                    init.alpha = 1,
-                   i0 = 0.01)
-{
-  if (!is.Numeric(i0, allowable.length = 1, positive = TRUE))
+                   i0 = 0.01) {
+  if (!is.Numeric(i0, length.arg = 1, positive = TRUE))
     stop("'i0' must be a single positive value")
   if (!is.Numeric(init.alpha, positive = TRUE))
     stop("'init.alpha' must contain positive values only")
-  if (!is.Numeric(refvalue, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(refvalue, length.arg = 1, positive = TRUE))
     stop("'refvalue' must be a single positive value")
 
   if (!is.character(refgp) && 
-     !is.Numeric(refgp, allowable.length = 1,
+     !is.Numeric(refgp, length.arg = 1,
                  integer.valued = TRUE, positive = TRUE))
     stop("'refgp' must be a single positive integer")
 
@@ -1501,7 +1516,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
   initialize = eval(substitute(expression({
     try.index <- 1:400
     M <- (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
-    if (!is.Numeric(M, allowable.length = 1, integer.valued = TRUE))
+    if (!is.Numeric(M, length.arg = 1, integer.valued = TRUE))
       stop("cannot determine 'M'")
     NCo <- M # Number of contestants
 
@@ -1529,8 +1544,8 @@ acat.deriv <- function(zeta, reverse, M, n) {
     if (!intercept.only)
       warning("this function only works with intercept-only models")
     extra$ties <- ties # Flat (1-row) matrix
-    extra$ybrat.indices <- .brat.indices(NCo=NCo, are.ties = FALSE)
-    extra$tbrat.indices <- .brat.indices(NCo=NCo, are.ties = TRUE) # unused
+    extra$ybrat.indices <- .brat.indices(NCo = NCo, are.ties = FALSE)
+    extra$tbrat.indices <- .brat.indices(NCo = NCo, are.ties = TRUE)  # unused
     extra$dnties <- dimnames(ties)
     uindex <- if (refgp == "last") 1:(NCo-1) else (1:(NCo))[-refgp ]
 
@@ -1551,8 +1566,8 @@ acat.deriv <- function(zeta, reverse, M, n) {
       alpha0 <- loge(eta[ii, M], inverse = TRUE)
       alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
       alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
-      probs <- rbind(probs, alpha1 / (alpha1+alpha2+alpha0)) #
-      qprobs <- rbind(qprobs, alpha0 / (alpha1+alpha2+alpha0)) #
+       probs <- rbind( probs, alpha1 / (alpha1 + alpha2 + alpha0))  #
+      qprobs <- rbind(qprobs, alpha0 / (alpha1 + alpha2 + alpha0))  #
     }
     if (length(extra$dnties))
       dimnames(qprobs) <- extra$dnties
@@ -1591,18 +1606,18 @@ acat.deriv <- function(zeta, reverse, M, n) {
     eta <- as.matrix(eta)
     for (ii in 1:nrow(eta)) {
       alpha <- .brat.alpha(eta2theta(eta[ii, -M], "loge",
-                                    earg = list(theta = NULL)),
-                          .refvalue, .refgp)
+                                     earg = list(theta = NULL)),
+                          .refvalue, .refgp )
       alpha0 <- loge(eta[ii, M], inverse = TRUE)
-      ymat <- InverseBrat(y[ii,], NCo = M, diag = 0)
-      tmat <- InverseBrat(ties[ii,], NCo = M, diag = 0)
-      answer <- rep(0, len=NCo-1) # deriv wrt eta[-M]
+      ymat <- InverseBrat(   y[ii, ], NCo = M, diag = 0)
+      tmat <- InverseBrat(ties[ii, ], NCo = M, diag = 0)
+      answer <- rep(0, len = NCo-1)  # deriv wrt eta[-M]
       for (aa in 1:NCo) {
         Daj <- alpha[aa] + alpha[uindex] + alpha0
         pja <- alpha[uindex] / Daj
         answer <- answer + alpha[uindex] *
-                  (-ymat[aa,uindex] + ymat[uindex,aa]*(1-pja)/pja -
-                  tmat[uindex,aa]) / Daj
+                  (-ymat[aa, uindex] + ymat[uindex, aa] * (1 - pja) / pja -
+                  tmat[uindex, aa]) / Daj
       }
       deriv0 <- 0 # deriv wrt eta[M]
       for (aa in 1:(NCo-1)) 
@@ -1610,8 +1625,8 @@ acat.deriv <- function(zeta, reverse, M, n) {
           Dab <- alpha[aa] + alpha[bb] + alpha0
           qab <- alpha0 / Dab
           deriv0 <- deriv0 + alpha0 *
-                    (-ymat[aa,bb] - ymat[bb,aa] +
-                    tmat[aa,bb]*(1-qab)/qab) / Dab
+                    (-ymat[aa, bb] - ymat[bb,aa] +
+                    tmat[aa, bb] * (1 - qab) / qab) / Dab
           }
         ans <- rbind(ans, w[ii] * c(answer, deriv0))
     }
@@ -1622,11 +1637,11 @@ acat.deriv <- function(zeta, reverse, M, n) {
     wz <- matrix(0, n, dimm(M))   # includes diagonal
     for (ii in 1:nrow(eta)) {
       alpha <- .brat.alpha(eta2theta(eta[ii, -M], "loge",
-                         earg = list(theta = NULL)),
+                           earg = list(theta = NULL)),
                           .refvalue, .refgp)
       alpha0 <- loge(eta[ii, M], inverse = TRUE)
-      ymat <- InverseBrat(y[ii,], NCo = M, diag = 0)
-      tmat <- InverseBrat(ties[ii,], NCo = M, diag = 0)
+      ymat <- InverseBrat(   y[ii, ], NCo = M, diag = 0)
+      tmat <- InverseBrat(ties[ii, ], NCo = M, diag = 0)
 
       for (aa in 1:(NCo)) {
         Daj <- alpha[aa] + alpha[uindex] + alpha0
@@ -1647,7 +1662,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
       if (NCo > 2) {
         ind5 <- iam(1, 1, M = NCo, both = TRUE, diag = FALSE)
-        alphajunk <- c(alpha, junk=NA)
+        alphajunk <- c(alpha, junk = NA)
         mat4 <- cbind(uindex[ind5$row],uindex[ind5$col])
         wz[ii,(M+1):ncol(wz)] <- -(ymat[mat4] + ymat[mat4[, 2:1]] +
            tmat[mat4]) * alphajunk[uindex[ind5$col]] *
@@ -1656,9 +1671,9 @@ acat.deriv <- function(zeta, reverse, M, n) {
       }
       for (sss in 1:length(uindex)) {
         jay <- uindex[sss]
-        naj <- ymat[, jay] + ymat[jay,] + tmat[, jay]
+        naj <- ymat[, jay] + ymat[jay, ] + tmat[, jay]
         Daj <- alpha[jay] + alpha + alpha0
-        wz[ii,iam(sss, NCo, M = NCo, diag = TRUE)] = 
+        wz[ii, iam(sss, NCo, M = NCo, diag = TRUE)] <- 
             -alpha[jay] * alpha0 * sum(naj / Daj^2)
       }
     }
@@ -1678,7 +1693,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
 
 .brat.indices <- function(NCo, are.ties = FALSE) {
-  if (!is.Numeric(NCo, allowable.length = 1,
+  if (!is.Numeric(NCo, length.arg = 1,
                   integer.valued = TRUE) ||
       NCo < 2)
     stop("bad input for 'NCo'")
@@ -1692,8 +1707,10 @@ acat.deriv <- function(zeta, reverse, M, n) {
 }
 
 
- Brat <- function(mat, ties = 0 * mat, string = c(">", "=="),
-                 whitespace = FALSE) {
+ Brat <- function(mat,
+                  ties = 0 * mat,
+                  string = c(">", "=="),
+                  whitespace = FALSE) {
 
 
   stopifnot(is.logical(whitespace) &&
@@ -1704,7 +1721,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
   allargs <- list(mat)  # ,...
   callit <- if (length(names(allargs))) names(allargs) else
-           as.character(1:length(allargs))
+            as.character(1:length(allargs))
   ans <- ans.ties <- NULL
   for (ii in 1:length(allargs)) {
     m <- allargs[[ii]]
@@ -1717,15 +1734,15 @@ acat.deriv <- function(zeta, reverse, M, n) {
     are.ties <- any(ties > 0)
     diag(ties) <- NA
 
-    diag(m) <- 0 # Could have been NAs
+    diag(m) <- 0  # Could have been NAs
     if (any(is.na(m)))
       stop("missing values not allowed (except on the diagonal)")
     diag(m) <- NA
 
     dm <- as.data.frame.table(m)
     dt <- as.data.frame.table(ties)
-    dm <- dm[!is.na(dm$Freq),]
-    dt <- dt[!is.na(dt$Freq),]
+    dm <- dm[!is.na(dm$Freq), ]
+    dt <- dt[!is.na(dt$Freq), ]
     usethis1 <- paste(dm[, 1], string[1], dm[, 2], sep = "")
     usethis2 <- paste(dm[, 1], string[2], dm[, 2], sep = "")
     ans <- rbind(ans, matrix(dm$Freq, nrow = 1))
@@ -1741,11 +1758,13 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
 
 
-InverseBrat <- function(yvec, NCo =
-                (1:900)[(1:900)*((1:900)-1) == ncol(rbind(yvec))],
-                multiplicity = if (is.matrix(yvec)) nrow(yvec) else 1,
-                diag = NA, string = c(">","=="),
-                whitespace = FALSE) {
+InverseBrat <-
+  function(yvec,
+           NCo = (1:900)[(1:900)*((1:900)-1) == ncol(rbind(yvec))],
+           multiplicity = if (is.matrix(yvec)) nrow(yvec) else 1,
+           diag = NA,
+           string = c(">", "=="),
+           whitespace = FALSE) {
 
 
 
@@ -1763,12 +1782,13 @@ InverseBrat <- function(yvec, NCo =
     for (i1 in 1:(NCo))
       for (i2 in 1:(NCo))
         if (i1 != i2) {
-          ans[i2,i1,mul] <- yvec[ptr]
+          ans[i2, i1, mul] <- yvec[ptr]
           ptr <- ptr + 1
         }
   ans <- if (multiplicity > 1) ans else matrix(ans, NCo, NCo)
 
-  if (is.array(yvec.orig) || is.matrix(yvec.orig)) {
+  if (is.array(yvec.orig) ||
+      is.matrix(yvec.orig)) {
     names.yvec <- dimnames(yvec.orig)[[2]]
     ii <- strsplit(names.yvec, string[1])
     cal <- NULL
@@ -1787,8 +1807,7 @@ InverseBrat <- function(yvec, NCo =
 
 
 tapplymat1 <- function(mat,
-                      function.arg = c("cumsum", "diff", "cumprod"))
-{
+                      function.arg = c("cumsum", "diff", "cumprod")) {
 
 
   if (!missing(function.arg))
@@ -1803,8 +1822,9 @@ tapplymat1 <- function(mat,
     mat <- as.matrix(mat)
   NR <- nrow(mat)
   NC <- ncol(mat)
-  fred <- dotC(name = "tapplymat1", mat=as.double(mat),
-               as.integer(NR), as.integer(NC), as.integer(type))
+  fred <- .C("tapplymat1",
+               mat = as.double(mat),
+               as.integer(NR), as.integer(NC), as.integer(type), PACKAGE = "VGAM")
 
   dim(fred$mat) <- c(NR, NC)
   dimnames(fred$mat) <- dimnames(mat)
@@ -1854,16 +1874,18 @@ tapplymat1 <- function(mat,
   blurb = c(paste("Ordinal Poisson model\n\n"), 
             "Link:     ", namesof("mu", link, earg = earg)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints,
                            apply.int = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
-    orig.y <- cbind(y) # Convert y into a matrix if necessary
+    orig.y <- cbind(y)  # Convert y into a matrix if necessary
     if ( .countdata ) {
       extra$NOS <- M <- NOS <- .NOS
       extra$Levels <- Levels <- .Levels
-      y.names <- dimnames(y)[[2]] # Hopefully the user inputted them
+      y.names <- dimnames(y)[[2]]  # Hopefully the user inputted them
     } else {
       if (any(w != 1) || ncol(cbind(w)) != 1)
         stop("the 'weights' argument must be a vector of all ones")
@@ -1919,7 +1941,7 @@ tapplymat1 <- function(mat,
             .init.mu = init.mu
           ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
-    mu <- eta2theta(eta, link= .link , earg = .earg ) # Poisson means
+    mu <- eta2theta(eta, link= .link , earg = .earg )  # Poisson means
     mu <- cbind(mu)
     mu
   }, list( .link = link, .earg = earg, .countdata = countdata ))),
@@ -2072,14 +2094,18 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
               ", ",
               namesof("scale_j", lscale, escale)),
     constraints = eval(substitute(expression({
-        J = M / 2
-        constraints <- cm.vgam(matrix(1,J,1), x, .parallel, constraints,
+        J <- M / 2
+        constraints <- cm.vgam(matrix(1,J,1), x = x,
+                               bool = .parallel ,
+                               constraints = constraints,
                                apply.int = FALSE)
         constraints[["(Intercept)"]] = rbind(constraints[["(Intercept)"]],
             matrix(0, J, ncol(constraints[["(Intercept)"]])))
 
-        cm2 <- cm.vgam(matrix(1,J,1), x, .sparallel, constraints = NULL,
-                       apply.int = FALSE)
+        cm2 <- cm.vgam(matrix(1,J,1), x = x,
+                           bool = .sparallel ,
+                           constraints = NULL,
+                           apply.int = FALSE)
 
         for (ii in 2:length(constraints))
             constraints[[ii]] =
@@ -2253,7 +2279,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
             wz0 = -c(w) *
                   (dcump.deta[, -J] / scalemat[, -J]) *
                   (dcump.deta[, -1]  / scalemat[, -1]) / mu.use[, 2:J]
-            wz0 = as.matrix(wz0) # Just in case J=2
+            wz0 = as.matrix(wz0)  # Just in case J=2
             for (ii in 1:(J-1))
               wz[, iam(2*ii-1, 2*ii+1, M = M)] = if (ooz) wz0[, ii] else 0
             wz0 = -c(w) * (dcump.dscale[, -1] * dscale.deta[, -1]) *
@@ -2310,7 +2336,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
             "the LM design matrix")
 
  nnn <- object@misc$n
-  M <- object@misc$M # ncol(B) # length(pvec) - 1
+  M <- object@misc$M # ncol(B)  # length(pvec) - 1
 
 
     if (model.multinomial) {
@@ -2337,10 +2363,10 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
         pvec  <- c(t(fitted(object)))
         pvec  <- rep(pvec, each=ppp)
         temp1 <- array(BB * pvec, c(ppp, M+1, nnn))
-        temp2 <- aperm(temp1, c(2,1,3)) # (M+1) x ppp x nnn
-        temp2 <- colSums(temp2) # ppp x nnn
+        temp2 <- aperm(temp1, c(2,1,3))  # (M+1) x ppp x nnn
+        temp2 <- colSums(temp2)  # ppp x nnn
         temp2 <- array(rep(temp2, each=M+1), c(M+1, ppp, nnn))
-        temp2 <- aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
+        temp2 <- aperm(temp2, c(2, 1, 3))  # ppp x (M+1) x nnn
         temp3 <- pvec
         ans <- array((BB - temp2) * temp3, c(ppp, M+1, nnn),
                      dimnames = list(dimnames(B)[[1]],
@@ -2408,7 +2434,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
                    dimnames = list(dimnames(B)[[1]],
                                    dimnames(fitted(object))[[1]],
                                    dimnames(fitted(object))[[2]]))
-    temp1 <- aperm(temp1, c(1, 3, 2)) # ppp x (M+1) x nnn
+    temp1 <- aperm(temp1, c(1, 3, 2))  # ppp x (M+1) x nnn
 
     if (is.null(ii)) {
       return(temp1)
@@ -2442,7 +2468,7 @@ prplot <- function(object,
     control <- prplot.control(...)
 
 
-  object <- plotvgam(object, plot.arg = FALSE, raw = FALSE) # , ...
+  object <- plotvgam(object, plot.arg = FALSE, raw = FALSE)  # , ...
 
   if (length(names(object@preplot)) != 1)
       stop("object needs to have only one term")
@@ -2585,11 +2611,11 @@ if (!isGeneric("is.zero"))
 
 
 setMethod("is.zero",  "matrix", function(object, ...)
-    is.zero.matrix(object, ...))
+          is.zero.matrix(object, ...))
 
 
 setMethod("is.zero",  "vglm", function(object, ...)
-    is.zero.vglm(object, ...))
+          is.zero.vglm(object, ...))
 
 
 
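The R/family.categorical.R hunks above re-wrap the Bradley-Terry helpers
Brat() and InverseBrat() without changing their behaviour, so the round trip
between the square-matrix and one-row formats still holds. Below is a minimal
sketch (not part of the upstream sources) of that round trip; it assumes a
build of this package is attached, and the 3 x 3 matrix of contest counts and
its "A"/"B"/"C" labels are invented for illustration.

    library("VGAM")

    teams  <- c("A", "B", "C")                # hypothetical contestants
    counts <- matrix(c(NA, 5, 2,
                        3, NA, 4,
                        6, 1, NA),
                     nrow = 3, byrow = TRUE,
                     dimnames = list(teams, teams))  # diagonal is ignored

    yvec <- Brat(counts)   # 1 row, one column per ordered pair, e.g. "A>B"
    colnames(yvec)
    ## Recovers the 3 x 3 matrix (NA diagonal); if InverseBrat() is not
    ## exported in this build, call it as VGAM:::InverseBrat(yvec).
    InverseBrat(yvec)
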
diff --git a/R/family.censored.R b/R/family.censored.R
index d4fb289..dfdba1b 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -62,7 +62,7 @@
         extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
         extra$leftcensored <- ifelse(temp == 2, TRUE, FALSE)
         extra$intervalcensored <- ifelse(temp == 3, TRUE, FALSE)
-        init.mu <- pmax((y[, 1] + y[, 2])/2, 1/8) # for intervalcensored
+        init.mu <- pmax((y[, 1] + y[, 2])/2, 1/8)  # for intervalcensored
         if (any(extra$uncensored))
         init.mu[extra$uncensored] <- pmax(y[extra$uncensored, 1], 1/8)
         if (any(extra$rightcensored))
@@ -189,7 +189,7 @@
 if (FALSE)
  cexpon <- 
  ecexpon <- function(link = "loge", location = 0) {
-  if (!is.Numeric(location, allowable.length = 1))
+  if (!is.Numeric(location, length.arg = 1))
     stop("bad input for 'location'")
 
   link <- as.list(substitute(link))
@@ -206,59 +206,58 @@ if (FALSE)
             if (location == 0) "Exponential: mu^2" else
             paste("(mu-",  location, ")^2", sep = "")),
   initialize = eval(substitute(expression({
-      extra$location <- .location
+    extra$location <- .location
 
-      if (any(y[, 1] <= extra$location))
+    if (any(y[, 1] <= extra$location))
         stop("all responses must be greater than ", extra$location)
 
-      predictors.names <- namesof("rate", .link , .earg , tag = FALSE)
+    predictors.names <- namesof("rate", .link , .earg , tag = FALSE)
 
-      type <- attr(y, "type")
-      if (type == "right" || type == "left"){
-        mu <- y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8
-      }else
-      if (type == "interval"){
-        temp <- y[, 3]
-        mu <- ifelse(temp == 3,
-                     y[, 2] + (abs(y[, 2] - extra$location) < 0.001) / 8,
-                     y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8)
-      }
-      if (!length(etastart))
-          etastart <- theta2eta(1/(mu-extra$location), .link , .earg )
+    type <- attr(y, "type")
+    if (type == "right" || type == "left"){
+      mu <- y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8
+    } else if (type == "interval") {
+      temp <- y[, 3]
+      mu <- ifelse(temp == 3,
+                   y[, 2] + (abs(y[, 2] - extra$location) < 0.001) / 8,
+                   y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8)
+    }
+    if (!length(etastart))
+      etastart <- theta2eta(1/(mu-extra$location), .link , .earg )
 
-      if (type == "right") {
-        temp <- y[, 2]
-        extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
-        extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
-        extra$leftcensored <- rep(FALSE, len = n)
-        extra$interval <- rep(FALSE, len = n)
-      } else
-      if (type == "left") {
-        temp <- y[, 2]
-        extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
-        extra$rightcensored <- rep(FALSE, len = n)
-        extra$leftcensored <- ifelse(temp == 0, TRUE, FALSE)
-        extra$interval <- rep(FALSE, len = n)
-      } else
-      if (type == "counting") {
-        stop("type == 'counting' not recognized")
-        extra$uncensored <- rep(temp == 1, TRUE, FALSE)
-        extra$interval <- rep(FALSE, len = n)
-        extra$leftcensored <- rep(FALSE, len = n)
-        extra$rightcensored <- rep(FALSE, len = n)
-        extra$counting <- ifelse(temp == 0, TRUE, FALSE)
-      } else
-      if (type == "interval") {
-        temp <- y[, 3]
-        extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
-        extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
-        extra$leftcensored <- ifelse(temp == 2, TRUE, FALSE)
-        extra$interval <- ifelse(temp == 3, TRUE, FALSE)
-      } else
-        stop("'type' not recognized")
-  }), list( .location=location, .link = link ))),
+    if (type == "right") {
+      temp <- y[, 2]
+      extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+      extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
+      extra$leftcensored <- rep(FALSE, len = n)
+      extra$interval <- rep(FALSE, len = n)
+    } else
+    if (type == "left") {
+      temp <- y[, 2]
+      extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+      extra$rightcensored <- rep(FALSE, len = n)
+      extra$leftcensored <- ifelse(temp == 0, TRUE, FALSE)
+      extra$interval <- rep(FALSE, len = n)
+    } else
+    if (type == "counting") {
+      stop("type == 'counting' not recognized")
+      extra$uncensored <- rep(temp == 1, TRUE, FALSE)
+      extra$interval <- rep(FALSE, len = n)
+      extra$leftcensored <- rep(FALSE, len = n)
+      extra$rightcensored <- rep(FALSE, len = n)
+      extra$counting <- ifelse(temp == 0, TRUE, FALSE)
+    } else
+    if (type == "interval") {
+      temp <- y[, 3]
+      extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+      extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
+      extra$leftcensored <- ifelse(temp == 2, TRUE, FALSE)
+      extra$interval <- ifelse(temp == 3, TRUE, FALSE)
+    } else
+      stop("'type' not recognized")
+  }), list( .location = location, .link = link ))),
   linkinv = eval(substitute(function(eta, extra = NULL)
-      extra$location + 1 / eta2theta(eta, .link , .earg ),
+    extra$location + 1 / eta2theta(eta, .link , .earg ),
   list( .link = link ) )),
   last = eval(substitute(expression({
     misc$location <- extra$location
@@ -266,7 +265,7 @@ if (FALSE)
     misc$multipleResponses <- FALSE
   }), list( .link = link ))),
   link = eval(substitute(function(mu, extra = NULL)
-    theta2eta(1/(mu-extra$location), .link , .earg ),
+    theta2eta(1 / (mu - extra$location), .link , .earg ),
   list( .link = link ) )),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -277,7 +276,8 @@ if (FALSE)
     cenI <- extra$interval
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else
-    sum(w[cenL] * log1p(-exp(-rate[cenL]*(y[cenL, 1]-extra$location)))) +
+    sum(w[cenL] * log1p(-exp(-rate[cenL] *
+                  (y[cenL, 1] - extra$location)))) +
     sum(w[cenU] * (-rate[cenU]*(y[cenU, 1]-extra$location))) +
     sum(w[cen0] * (log(rate[cen0]) -
                    rate[cen0]*(y[cen0, 1]-extra$location))) +
@@ -291,29 +291,29 @@ if (FALSE)
     cenL <- extra$leftcensored
     cenU <- extra$rightcensored
     cenI <- extra$interval
-    dl.drate <- 1/rate - (y[, 1]-extra$location) # uncensored
+    dl.drate <- 1/rate - (y[, 1]-extra$location)  # uncensored
     tmp200 <- exp(-rate*(y[, 1]-extra$location))
-    tmp200b <- exp(-rate*(y[, 2]-extra$location)) # for interval censored
+    tmp200b <- exp(-rate*(y[, 2]-extra$location))  # for interval censored
     if (any(cenL))
-        dl.drate[cenL] <- (y[cenL, 1]-extra$location) *
-                          tmp200[cenL] / (1 - tmp200[cenL])
+      dl.drate[cenL] <- (y[cenL, 1]-extra$location) *
+                        tmp200[cenL] / (1 - tmp200[cenL])
     if (any(cenU))
-        dl.drate[cenU] <- -(y[cenU, 1]-extra$location)
+      dl.drate[cenU] <- -(y[cenU, 1]-extra$location)
     if (any(cenI))
-        dl.drate[cenI] <- ((y[cenI, 2]-extra$location)*tmp200b[cenI]-
-        (y[cenI, 1]-extra$location)*tmp200[cenI])/
-        (-tmp200b[cenI]+tmp200[cenI])
+      dl.drate[cenI] <- ((y[cenI, 2] - extra$location) *
+                        tmp200b[cenI] - (y[cenI, 1] - extra$location) *
+                        tmp200[cenI]) / (-tmp200b[cenI] + tmp200[cenI])
 
     drate.deta <- dtheta.deta(rate, .link , .earg )
 
     c(w) * dl.drate * drate.deta
   }), list( .link = link ) )),
   weight = eval(substitute(expression({
-    A123 <- ((mu-extra$location)^2) # uncensored d2l.drate2
+    A123 <- ((mu-extra$location)^2)  # uncensored d2l.drate2
     Lowpt <- ifelse(cenL, y[, 1], extra$location)
-    Lowpt <- ifelse(cenI, y[, 1], Lowpt) #interval censored
+    Lowpt <- ifelse(cenI, y[, 1], Lowpt)  #interval censored
     Upppt <- ifelse(cenU, y[, 1], Inf)
-    Upppt <- ifelse(cenI, y[, 2], Upppt) #interval censored
+    Upppt <- ifelse(cenI, y[, 2], Upppt)  #interval censored
     tmp300 <- exp(-rate*(Lowpt - extra$location))
 
     d2l.drate2 <- 0 * y[, 1]
@@ -333,8 +333,9 @@ if (FALSE)
 
 
 
- cennormal1 <- function(lmu = "identity", lsd = "loge",
-                        imethod = 1, zero = 2) {
+ cennormal1 <-
+ cennormal <- function(lmu = "identity", lsd = "loge",
+                       imethod = 1, zero = 2) {
 
 
   lmu <- as.list(substitute(lmu))
@@ -347,7 +348,7 @@ if (FALSE)
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -359,8 +360,8 @@ if (FALSE)
                           namesof("sd", lsd, tag = TRUE), "\n",
             "Conditional variance: sd^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
-  }), list( .zero=zero ))),
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+  }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
     temp5 <-
@@ -380,28 +381,28 @@ if (FALSE)
         stop("some observations are both right and left censored!")
 
     predictors.names <-
-      c(namesof("mu", .lmu, earg =.emu, tag = FALSE),
+      c(namesof("mu", .lmu , earg =.emu, tag = FALSE),
         namesof("sd", .lsd, earg =.esd, tag = FALSE))
 
     if (!length(etastart)) {
       anyc <- extra$leftcensored | extra$rightcensored
-        i11 <- if ( .imethod == 1) anyc else FALSE  # can be all data
-        junk <- lm.wfit(x = cbind(x[!i11, ]),
-                        y = y[!i11], w = w[!i11])
-        sd.y.est <- sqrt(sum(w[!i11] * junk$resid^2) / junk$df.residual)
-        etastart <- cbind(mu = y,
-                          rep(theta2eta(sd.y.est, .lsd), length = n))
-        if (any(anyc))
-          etastart[anyc, 1] <- x[anyc,,drop = FALSE] %*% junk$coeff
+      i11 <- if ( .imethod == 1) anyc else FALSE  # can be all data
+      junk <- lm.wfit(x = cbind(x[!i11, ]),
+                      y = y[!i11], w = w[!i11])
+      sd.y.est <- sqrt(sum(w[!i11] * junk$resid^2) / junk$df.residual)
+      etastart <- cbind(mu = y,
+                        rep(theta2eta(sd.y.est, .lsd), length = n))
+      if (any(anyc))
+        etastart[anyc, 1] <- x[anyc, , drop = FALSE] %*% junk$coeff
     }
  }), list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd,
            .imethod = imethod ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
-    eta2theta(eta[, 1], .lmu, earg = .emu)
+    eta2theta(eta[, 1], .lmu , earg = .emu )
   }, list( .lmu = lmu, .emu = emu ))),
   last = eval(substitute(expression({
-    misc$link <-    c("mu" = .lmu, "sd" = .lsd)
+    misc$link <-    c("mu" = .lmu , "sd" = .lsd)
 
     misc$earg <- list("mu" = .emu ,"sd" = .esd )
 
@@ -415,20 +416,20 @@ if (FALSE)
     cenU <- extra$rightcensored
     cen0 <- !cenL & !cenU   # uncensored obsns
 
-    mum <- eta2theta(eta[, 1], .lmu, earg = .emu )
+    mum <- eta2theta(eta[, 1], .lmu , earg = .emu )
     sdv <- eta2theta(eta[, 2], .lsd, earg = .esd )
 
     Lower <- ifelse(cenL, y, -Inf)
     Upper <- ifelse(cenU, y,  Inf)
     ell1 <- -log(sdv[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sdv[cen0])^2
-    ell2 <- log1p(-pnorm((mum[cenL] - Lower[cenL])/sdv[cenL]))
-    ell3 <- log1p(-pnorm(( Upper[cenU] -  mum[cenU])/sdv[cenU]))
+    ell2 <- log1p(-pnorm((mum[cenL] - Lower[cenL]) / sdv[cenL]))
+    ell3 <- log1p(-pnorm(( Upper[cenU] -  mum[cenU]) / sdv[cenU]))
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else
     sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
   }, list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd ))),
-  vfamily = c("cennormal1"),
+  vfamily = c("cennormal"),
   deriv = eval(substitute(expression({
     cenL <- extra$leftcensored
     cenU <- extra$rightcensored
@@ -442,7 +443,7 @@ if (FALSE)
     dl.dmu <- (y-mum) / sdv^2
     dl.dsd <- (((y-mum)/sdv)^2 - 1) / sdv
 
-    dmu.deta <- dtheta.deta(mum, .lmu, earg = .emu )
+    dmu.deta <- dtheta.deta(mum, .lmu , earg = .emu )
     dsd.deta <- dtheta.deta(sdv, .lsd, earg = .esd )
 
     if (any(cenL)) {
@@ -461,7 +462,7 @@ if (FALSE)
       PhiU <- pnorm(temp21U)
       phiU <- dnorm(temp21U)
       fred21 <- phiU / (1 - PhiU)
-      dl.dmu[cenU] <- fred21 / sdv[cenU] # Negated
+      dl.dmu[cenU] <- fred21 / sdv[cenU]  # Negated
       dl.dsd[cenU] <- mumU[cenU] * fred21 / sdv[cenU]^2
       rm(fred21)
     }
@@ -470,12 +471,12 @@ if (FALSE)
   }), list( .lmu = lmu, .lsd = lsd,
             .emu = emu, .esd = esd ))),
   weight = eval(substitute(expression({
-    A1 <- 1 - pnorm((mum - Lower) / sdv) # Lower
-    A3 <- 1 - pnorm((Upper - mum) / sdv) # Upper
-    A2 <- 1 - A1 - A3                    # Middle; uncensored
+    A1 <- 1 - pnorm((mum - Lower) / sdv)  # Lower
+    A3 <- 1 - pnorm((Upper - mum) / sdv)  # Upper
+    A2 <- 1 - A1 - A3                     # Middle; uncensored
     wz <- matrix(0, n, 3)
-    wz[,iam(1, 1,M)] <- A2 * 1 / sdv^2 # ed2l.dmu2
-    wz[,iam(2, 2,M)] <- A2 * 2 / sdv^2 # ed2l.dsd2
+    wz[,iam(1, 1,M)] <- A2 * 1 / sdv^2  # ed2l.dmu2
+    wz[,iam(2, 2,M)] <- A2 * 2 / sdv^2  # ed2l.dsd2
     mumL <- mum - Lower
     temp21L <- mumL / sdv
     PhiL <- pnorm(temp21L)
@@ -503,9 +504,9 @@ if (FALSE)
     wzcenU22 <- mumU * phiU * (tmp9 + mumU * phiU / sdv) / (sdv * temp31U)
     wzcenU12 <- -phiU * ((1-PhiU)*(temp21U^2 - 1) -
                 temp21U*phiU) / temp31U
-    wzcenU11[!is.finite(wzcenU11)] <- 0 # Needed when Upper==Inf
-    wzcenU22[!is.finite(wzcenU22)] <- 0 # Needed when Upper==Inf
-    wzcenU12[!is.finite(wzcenU12)] <- 0 # Needed when Upper==Inf
+    wzcenU11[!is.finite(wzcenU11)] <- 0  # Needed when Upper==Inf
+    wzcenU22[!is.finite(wzcenU22)] <- 0  # Needed when Upper==Inf
+    wzcenU12[!is.finite(wzcenU12)] <- 0  # Needed when Upper==Inf
     wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] + A3 * wzcenU11
     wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] + A3 * wzcenU22
     wz[,iam(1, 2,M)] <- wz[,iam(1, 2,M)] + A3 * wzcenU12
@@ -548,15 +549,15 @@ if (FALSE)
       extra$rightcensored <- rep(FALSE, len = n)
 
     predictors.names <-
-      namesof("scale", .lscale, earg = .escale, tag = FALSE)
+      namesof("scale", .lscale , earg = .escale , tag = FALSE)
 
     if (!length(etastart)) {
       a.init <- (y+1/8) / sqrt(pi/2)
-      etastart <- theta2eta(a.init, .lscale, earg = .escale )
+      etastart <- theta2eta(a.init, .lscale , earg = .escale )
     }
   }), list( .lscale = lscale, .escale = escale ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    Scale <- eta2theta(eta, .lscale, earg = .escale )
+    Scale <- eta2theta(eta, .lscale , earg = .escale )
     Scale * sqrt(pi/2)
   }, list( .lscale = lscale, .escale = escale ))),
   last = eval(substitute(expression({
@@ -569,7 +570,7 @@ if (FALSE)
             .oim = oim ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    Scale <- eta2theta(eta, .lscale, earg = .escale )
+    Scale <- eta2theta(eta, .lscale , earg = .escale )
 
     cen0 <- !extra$rightcensored   # uncensored obsns
     cenU <- extra$rightcensored
@@ -586,11 +587,11 @@ if (FALSE)
     cen0 <- !extra$rightcensored   # uncensored obsns
     cenU <- extra$rightcensored
 
-    Scale <- eta2theta(eta, .lscale, earg = .escale )
+    Scale <- eta2theta(eta, .lscale , earg = .escale )
 
     dl.dScale <- ((y/Scale)^2 - 2) / Scale
 
-    dScale.deta <- dtheta.deta(Scale, .lscale, earg = .escale )
+    dScale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
     dl.dScale[cenU] <- y[cenU]^2 / Scale[cenU]^3
 
     c(w) * dl.dScale * dScale.deta
@@ -602,7 +603,7 @@ if (FALSE)
 
     if ( .oim ) {
       d2l.dScale2 <- 3 * (y[cenU])^2 / (Scale[cenU])^4
-      d2Scale.deta2 <- d2theta.deta2(Scale[cenU], .lscale, earg = .escale )
+      d2Scale.deta2 <- d2theta.deta2(Scale[cenU], .lscale , earg = .escale )
       wz[cenU] <- (dScale.deta[cenU])^2 * d2l.dScale2 -
                    dl.dScale[cenU] * d2Scale.deta2
     } else {
@@ -644,7 +645,7 @@ if (FALSE)
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -655,7 +656,7 @@ if (FALSE)
     stop("bad input for argument 'probs.y'")
 
 
-  if (!is.Numeric(nrfs, allowable.length = 1) ||
+  if (!is.Numeric(nrfs, length.arg = 1) ||
       nrfs < 0 ||
       nrfs > 1)
     stop("bad input for argument 'nrfs'")
@@ -794,7 +795,7 @@ if (FALSE)
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .eshape
       misc$earg[[Musual*ii  ]] <- .escale
     }
@@ -833,7 +834,7 @@ if (FALSE)
     dl.dscale <- (Shape / Scale) * (-1.0 + (y / Scale)^Shape)
 
     dshape.deta <- dtheta.deta(Shape, .lshape, earg = .eshape )
-    dscale.deta <- dtheta.deta(Scale, .lscale, earg = .escale )
+    dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
 
     myderiv <- c(w) * cbind(dl.dshape * dshape.deta,
                             dl.dscale * dscale.deta)
@@ -844,7 +845,7 @@ if (FALSE)
     EulerM <- -digamma(1.0)
 
 
-    ned2l.dshape <- (6*(EulerM - 1)^2 + pi^2)/(6*Shape^2) # KK (2003)
+    ned2l.dshape <- (6*(EulerM - 1)^2 + pi^2)/(6*Shape^2)  # KK (2003)
     ned2l.dscale <- (Shape / Scale)^2
     ned2l.dshapescale <- (EulerM-1) / Scale
 
@@ -964,7 +965,7 @@ is.SurvS4 <- function(x) inherits(x, "SurvS4")
 
 
 setIs(class1 = "SurvS4",
-      class2 = "matrix") # Forces vglm()@y to be a matrix
+      class2 = "matrix")  # Forces vglm()@y to be a matrix
 
 
 
@@ -1100,7 +1101,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1111,7 +1112,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
     stop("bad input for argument 'probs.y'")
 
 
-  if (!is.Numeric(nrfs, allowable.length = 1) ||
+  if (!is.Numeric(nrfs, length.arg = 1) ||
       nrfs < 0 ||
       nrfs > 1)
     stop("bad input for argument 'nrfs'")
@@ -1275,7 +1276,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .eAlpha
       misc$earg[[Musual*ii  ]] <- .eBetaa
     }
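A recurring edit in R/family.censored.R (and in the files below) is the rename
of is.Numeric()'s 'allowable.length' argument to 'length.arg'. The sketch
below imitates that argument-checking idiom with a self-contained,
hypothetical check.numeric() helper; it is not VGAM's implementation, and only
the call pattern is taken from the hunks above.

    ## Hypothetical stand-in for is.Numeric(x, length.arg = 1,
    ## integer.valued = TRUE, positive = TRUE).
    check.numeric <- function(x, length.arg = Inf,
                              integer.valued = FALSE, positive = FALSE) {
      is.numeric(x) && all(is.finite(x)) &&
        (is.infinite(length.arg) || length(x) == length.arg) &&
        (!integer.valued || all(x == round(x))) &&
        (!positive || all(x > 0))
    }

    imethod <- 2
    if (!check.numeric(imethod, length.arg = 1,
                       integer.valued = TRUE, positive = TRUE) ||
        imethod > 2)
      stop("argument 'imethod' must be 1 or 2")  # same guard as above
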
diff --git a/R/family.circular.R b/R/family.circular.R
index e2bb415..ab0b98e 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -17,13 +17,14 @@ dcard <- function(x, mu, rho, log = FALSE) {
 
 
   L <- max(length(x), length(mu), length(rho))
-  x   <- rep(x,   len = L);
-  mu  <- rep(mu,  len = L);
-  rho <- rep(rho, len = L);
+  if (length(x)   != L) x   <- rep(x,   len = L)
+  if (length(mu)  != L) mu  <- rep(mu,  len = L)
+  if (length(rho) != L) rho <- rep(rho, len = L)
+
   logdensity <- rep(log(0), len = L)
   xok <- (x > 0) & (x < (2*pi))
   logdensity[xok] <- -log(2*pi) + log1p(2 * rho[xok] *
-                     cos(x[xok]-mu[xok]))
+                      cos(x[xok]-mu[xok]))
   logdensity[mu  <=    0] <- NaN
   logdensity[mu  >= 2*pi] <- NaN
   logdensity[rho <= -0.5] <- NaN
@@ -52,15 +53,16 @@ qcard <- function(p, mu, rho, tolerance=1.0e-7, maxits=500) {
     stop("'rho' must be between -0.5 and 0.5 inclusive")
   if (!is.Numeric(p, positive = TRUE) || any(p > 1))
     stop("'p' must be between 0 and 1")
+
   nn <- max(length(p), length(mu), length(rho))
-  p <- rep(p, len=nn)
-  mu <- rep(mu, len=nn)
-  rho <- rep(rho, len=nn)
+  if (length(p)   != nn) p   <- rep(p,   len = nn)
+  if (length(mu)  != nn) mu  <- rep(mu,  len = nn)
+  if (length(rho) != nn) rho <- rep(rho, len = nn)
 
 
   oldans <- 2 * pi * p
 
-  for(its in 1:maxits) {
+  for (its in 1:maxits) {
     ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
            2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
     index <- (ans <= 0) | (ans > 2*pi)
@@ -82,7 +84,7 @@ rcard <- function(n, mu, rho, ...) {
   if (!is.Numeric(rho) || max(abs(rho) > 0.5))
     stop("argument 'rho' must be between -0.5 and 0.5 inclusive")
   if (!is.Numeric(n, positive = TRUE,
-                  integer.valued = TRUE, allowable.length = 1))
+                  integer.valued = TRUE, length.arg = 1))
     stop("argument 'n' must be a single positive integer")
 
   mu <- rep(mu, len = n)
@@ -121,7 +123,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
   if (!is.Numeric(irho) || max(abs(irho)) > 0.5)
     stop("bad input for argument 'irho'")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
     stop("'nsimEIM' should be an integer greater than 50")
@@ -222,7 +224,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
     run.varcov <- 0
     ind1   <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
     index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- rcard(n, mu=mu, rho=rho)
       dl.dmu <-  2 * rho * sin(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
       dl.drho <- 2 * cos(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
@@ -260,7 +262,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
   ilocat <- ilocation
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -315,7 +317,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
             theta2eta(locat.init, .llocat, earg = .elocat),
             theta2eta(scale.init, .lscale, earg = .escale))
       }
-      y <- y %% (2*pi) # Coerce after initial values have been computed
+      y <- y %% (2*pi)  # Coerce after initial values have been computed
   }), list( .imethod = imethod, .ilocat = ilocat,
             .escale = escale, .elocat = elocat,
             .lscale = lscale, .llocat = llocat,
@@ -364,7 +366,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
     ned2l.dlocat2 <- Scale * tmp6[, 2] / tmp6[, 1]
     ned2l.dscale2 <- tmp6[, 3] / tmp6[, 1] - (tmp6[, 2] / tmp6[, 1])^2
 
-    wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+    wz <- matrix(as.numeric(NA), nrow = n, ncol = 2)  # diagonal
     wz[,iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
     wz[,iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
     c(w) * wz
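In R/family.circular.R, qcard() inverts the cardioid distribution function by
Newton-Raphson, so it should agree with the density dcard() defined just above
it. A quick consistency check (not part of the upstream sources), assuming a
build of this package is attached; the parameter values are arbitrary.

    library("VGAM")

    mu  <- pi     # location, must lie strictly inside (0, 2*pi)
    rho <- 0.3    # concentration, must lie in (-0.5, 0.5)
    pp  <- 0.25

    qq <- qcard(pp, mu = mu, rho = rho)
    ## Integrating the density from 0 up to the quantile should give back pp
    ## (up to the Newton-Raphson and quadrature tolerances).
    integrate(dcard, lower = 0, upper = qq, mu = mu, rho = rho)$value
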
diff --git a/R/family.exp.R b/R/family.exp.R
index db1fe11..292054f 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -13,7 +13,7 @@
 
 
 
-qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
+qeunif <- function(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6) {
 
   ppp <- p
   vsmallno <- sqrt(.Machine$double.eps)
@@ -21,9 +21,9 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
   if (any(min >= max))
     stop("argument 'min' has values greater or equal ",
          "to argument 'max'")
-  if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
-      Tol_nr > 0.10)
-    stop("argument 'Tol_nr' is not a single positive value, ",
+  if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
+      Tol.nr > 0.10)
+    stop("argument 'Tol.nr' is not a single positive value, ",
          "or is too large")
   nrok <- ppp >= vsmallno & ppp <= 1.0 - vsmallno & is.finite(ppp)
 
@@ -32,14 +32,14 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
   eee[ppp > 1.0 -  smallno] <- 1.0 - sqrt(1.0 - ppp[ppp > 1.0 -  smallno])
 
 
-  for(iii in 1:Maxit_nr) {
+  for (iii in 1:Maxit.nr) {
     realdiff <- (peunif(eee[nrok]) - ppp[nrok]) / deunif(eee[nrok])
     eee[nrok] <- eee[nrok] - realdiff
-    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
-    if (iii == Maxit_nr) warning("did not converge")
+    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol.nr )) break
+    if (iii == Maxit.nr) warning("did not converge")
   }
 
-  if (max(abs(peunif(eee[nrok]) - ppp[nrok])) > Tol_nr)
+  if (max(abs(peunif(eee[nrok]) - ppp[nrok])) > Tol.nr)
     warning("did not converge on the second check")
 
   eee[ppp <       vsmallno] <-       sqrt(      ppp[ppp <       vsmallno])
@@ -109,12 +109,12 @@ reunif <- function(n, min = 0, max = 1) {
 
 
 
-qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10,
-                   Tol_nr = 1.0e-6) {
+qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10,
+                   Tol.nr = 1.0e-6) {
   ppp <- p
-  if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
-      Tol_nr > 0.10)
-    stop("argument 'Tol_nr' is not a single ",
+  if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
+      Tol.nr > 0.10)
+    stop("argument 'Tol.nr' is not a single ",
          "positive value, or is too large")
   nrok <- is.finite(ppp)
 
@@ -123,14 +123,14 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10,
 
   gnorm <- function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
 
-  for(iii in 1:Maxit_nr) {
+  for (iii in 1:Maxit.nr) {
     realdiff <- (penorm(eee[nrok]) - ppp[nrok]) / gnorm(eee[nrok])
     eee[nrok] <- eee[nrok] - realdiff
-    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
-    if (iii == Maxit_nr) warning("did not converge")
+    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol.nr )) break
+    if (iii == Maxit.nr) warning("did not converge")
   }
 
-  if (max(abs(penorm(eee[nrok]) - ppp[nrok])) > Tol_nr)
+  if (max(abs(penorm(eee[nrok]) - ppp[nrok])) > Tol.nr)
     warning("did not converge on the second check")
 
   eee[ppp == 0] <- -Inf
@@ -193,12 +193,12 @@ renorm <- function(n, mean = 0, sd = 1) {
 
 
 
-qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
+qeexp <- function(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6) {
   ppp <- p
   vsmallno <- sqrt(.Machine$double.eps)
-  if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
-      Tol_nr > 0.10)
-    stop("argument 'Tol_nr' is not a single positive value, or ",
+  if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
+      Tol.nr > 0.10)
+    stop("argument 'Tol.nr' is not a single positive value, or ",
          "is too large")
   nrok <- ppp >= vsmallno & is.finite(ppp)
 
@@ -211,14 +211,14 @@ qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
   eee[ppp <       vsmallno] <- sqrt(ppp[ppp < vsmallno])
 
 
-  for(iii in 1:Maxit_nr) {
+  for (iii in 1:Maxit.nr) {
     realdiff <- (peexp(eee[nrok]) - ppp[nrok]) / deexp(eee[nrok])
     eee[nrok] <- eee[nrok] - realdiff
-    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
-    if (iii == Maxit_nr) warning("did not converge")
+    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol.nr )) break
+    if (iii == Maxit.nr) warning("did not converge")
   }
 
-  if (max(abs(peexp(eee[nrok]) - ppp[nrok])) > Tol_nr)
+  if (max(abs(peexp(eee[nrok]) - ppp[nrok])) > Tol.nr)
     warning("did not converge on the second check")
 
   eee[ppp < vsmallno] <- sqrt(ppp[ppp < vsmallno])
@@ -363,7 +363,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
 
 
   if (length(ilocat) &&
-     (!is.Numeric(ilocat, allowable.length = 1, positive = TRUE)))
+     (!is.Numeric(ilocat, length.arg = 1, positive = TRUE)))
       stop("bad input for argument 'ilocation'")
   if (length(iscale) && !is.Numeric(iscale))
     stop("bad input for argument 'iscale'")
@@ -372,7 +372,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
   if (!is.Numeric(percentile, positive = TRUE) ||
       any(percentile >= 100))
     stop("bad input for argument 'percentile'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
       stop("'imethod' must be 1 or 2")
@@ -446,7 +446,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
     misc$multipleResponses <- FALSE
 
       ncoly <- ncol(y)
-      for(ii in 1:length( .percentile )) {
+      for (ii in 1:length( .percentile )) {
         y.use <- if (ncoly > 1) y[, ii] else y
         mu <- cbind(mu)
         extra$percentile[ii] <- 100 * weighted.mean(y.use <= mu[, ii], w)
@@ -488,7 +488,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
     ned2l.dlocat2 <- 0.3 / Scale^2
     ned2l.dscale2 <- 2.0 / (3 * Scale^2)
 
-    wz <- matrix(-10, n, M) # Diagonal EIM
+    wz <- matrix(-10, n, M)  # Diagonal EIM
     wz[, iam(1, 1, M = M)] <- ned2l.dlocat2 * dlocat.deta^2
     wz[, iam(2, 2, M = M)] <- ned2l.dscale2 * dscale.deta^2
 
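The R/family.exp.R hunks rename the Newton-Raphson control arguments of
qeunif(), qenorm() and qeexp() from Maxit_nr/Tol_nr to Maxit.nr/Tol.nr. A
short round-trip check (not part of the upstream sources), assuming a build of
this package is attached; penorm() is the function that qenorm() inverts
inside its Newton-Raphson loop above.

    library("VGAM")

    pp <- c(0.1, 0.5, 0.9)
    qq <- qenorm(pp, mean = 0, sd = 1, Maxit.nr = 20, Tol.nr = 1e-8)
    penorm(qq)    # returns pp, up to roughly Tol.nr
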
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 14ee420..383030f 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -21,7 +21,7 @@
 rgev <- function(n, location = 0, scale = 1, shape = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
              stop("bad input for argument 'n'") else n
 
   if (!is.Numeric(location)) 
@@ -57,10 +57,11 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
+
   if (oobounds.log > 0)
     stop("bad input for argument 'oobounds.log'")
 
-  if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'tolshape0'")
 
   use.n <- max(length(x), length(location), length(scale), length(shape))
@@ -219,17 +220,17 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
       max(percentiles) >= 100))
     stop("bad input for argument 'percentiles'")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
      imethod > 2.5)
     stop("argument 'imethod' must be 1 or 2")
   if (length(ishape) && !is.Numeric(ishape))
       stop("bad input for argument 'ishape'")
 
-  if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
+  if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE) ||
       tolshape0 > 0.1)
     stop("bad input for argument 'tolshape0'")
-  if (!is.Numeric(gshape, allowable.length = 2) ||
+  if (!is.Numeric(gshape, length.arg = 2) ||
       gshape[1] >= gshape[2])
     stop("bad input for argument 'gshape'")
   if (length(zero) &&
@@ -311,25 +312,25 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
           stop("bad input for an argument in 'lshape'")
 
       if ( .imethod == 1) {
-        nvector <- 4:10   # Arbitrary; could be made an argument
-        ynvector <- quantile(y[, 1], probs = 1-1/nvector)
-          objecFunction <- -Inf   # Actually the log-likelihood
+        nvector <- 4:10  # Arbitrary; could be made an argument
+        ynvector <- quantile(y[, 1], probs = 1 - 1/nvector)
+          objecFunction <- -Inf  # Actually the log-likelihood
           est.sigma <- !length(init.sig)
           gshape <- .gshape
           temp234 <- if (length(init.xi)) init.xi[1] else
                          seq(gshape[1], gshape[2], length.out = 12)
-          for(shapeTry in temp234) {
+          for (shapeTry in temp234) {
               xvec <- if (abs(shapeTry) < .tolshape0) log(nvector) else
                       (nvector^shapeTry - 1) / shapeTry
               fit0 <- lsfit(x = xvec, y = ynvector, intercept = TRUE)
-              sigmaTry = if (est.sigma)
+              sigmaTry <- if (est.sigma)
                 rep(fit0$coef["X"], length.out = nrow(y)) else
                 init.sig
               LocatTry <- rep(fit0$coef["Intercept"], length.out = nrow(y))
               llTry <- egev(giveWarning =
                FALSE)@loglikelihood(mu = NULL, y = y[, 1], w = w,
-               residuals = FALSE,
-               eta =
+                                    residuals = FALSE,
+                                    eta =
                cbind(theta2eta(LocatTry, .llocat , .elocat ),
                      theta2eta(sigmaTry, .lscale , .escale ),
                      theta2eta(shapeTry, .lshape , .eshape )))
@@ -366,7 +367,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
               theta2eta(init.xi,  .lshape , .eshape ))
     }
   }), list( 
-            .llocat = llocat, .lscale = lscale, .lshape = lshape,
+            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .ishape = ishape, .iscale = iscale,
 
@@ -385,7 +386,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     LP <- length(cent)
     fv <- matrix(as.numeric(NA), nrow(eta), LP)
     if (LP) {
-      for(ii in 1:LP) {
+      for (ii in 1:LP) {
         yp <- -log(cent[ii]/100)
         fv[!is.zero, ii] <- Locat[!is.zero] - sigma[!is.zero] *
                             (1 - yp^(-shape[!is.zero])) / shape[!is.zero]
@@ -403,7 +404,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     }
     fv
   }, list(
-            .llocat = llocat, .lscale = lscale, .lshape = lshape,
+            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
            .tolshape0 = tolshape0 ))),
@@ -427,7 +428,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
 
 
 
-    misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+    misc$true.mu <- !length( .percentiles)  # @fitted is not a true mu
     misc$percentiles <- .percentiles
     misc$expected <- TRUE
     misc$tolshape0 <- .tolshape0
@@ -436,7 +437,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     if (any(shape < -0.5))
       warning("some values of the shape parameter are less than -0.5")
   }), list(
-            .llocat = llocat, .lscale = lscale, .lshape = lshape,
+            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
             .tolshape0 = tolshape0, .percentiles = percentiles ))),
@@ -476,7 +477,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
             old.answer
       }
   }, list( 
-            .llocat = llocat, .lscale = lscale, .lshape = lshape,
+            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
 
@@ -534,7 +535,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
                  dl.dsi * dsi.deta,
                  dl.dxi * dxi.deta)
   }), list(
-            .llocat = llocat, .lscale = lscale, .lshape = lshape,
+            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
             .tolshape0 = tolshape0 ))),
@@ -542,14 +543,14 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
   weight = eval(substitute(expression({
     kay <- -shape
     dd <- digamma(r.vec-kay+1)
-    ddd <- digamma(r.vec+1) # Unnecessarily evaluated at each iteration
+    ddd <- digamma(r.vec+1)  # Unnecessarily evaluated at each iteration
     temp13 <- -kay * dd + (kay^2 - kay + 1) / (1-kay)
     temp33 <- 1 - 2 * kay * ddd +
               kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
     temp23 <- -kay * dd + (1+(1-kay)^2) / (1-kay)
     GR.gev <- function(j, ri, kay) gamma(ri - j*kay + 1) /  gamma(ri)
-    tmp2 <- (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
-    tmp1 <- (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
+    tmp2 <- (1-kay)^2 * GR.gev(2, r.vec, kay)  # Latter is GR2
+    tmp1 <- (1-2*kay) * GR.gev(1, r.vec, kay)  # Latter is GR1
     k0 <- (1-2*kay)
     k1 <- k0 * kay
     k2 <- k1 * kay
@@ -584,7 +585,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
         wz[, iam(1, 2, M)] <- 2 * r.vec / sigma^2
         wz[, iam(2, 2, M)] <- -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
     (4 * dgammadx(r.vec+1, deriv.arg = 1) - 
-     3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec) # Not checked
+     3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec)  # Not checked
         }
     }
 
@@ -595,7 +596,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
     wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
     c(w) * wz
-  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape ))))
 }
 
@@ -656,20 +657,20 @@ dgammadx <- function(x, deriv.arg = 1) {
 
 
 
-  if (!is.Numeric(gshape, allowable.length = 2) ||
+  if (!is.Numeric(gshape, length.arg = 2) ||
       gshape[1] >= gshape[2])
       stop("bad input for argument 'gshape'")
     if (length(percentiles) && 
       (!is.Numeric(percentiles, positive = TRUE) ||
        max(percentiles) >= 100))
       stop("bad input for argument 'percentiles'")
-    if (!is.Numeric(imethod, allowable.length = 1,
+    if (!is.Numeric(imethod, length.arg = 1,
                     positive = TRUE, integer.valued = TRUE) ||
        imethod > 2.5)
       stop("argument 'imethod' must be 1 or 2")
     if (length(ishape) && !is.Numeric(ishape))
       stop("bad input for argument 'ishape'")
-    if (!is.Numeric(tolshape0, allowable.length = 1,
+    if (!is.Numeric(tolshape0, length.arg = 1,
                     positive = TRUE) ||
         tolshape0 > 0.1)
       stop("bad input for argument 'tolshape0'")
@@ -681,7 +682,7 @@ dgammadx <- function(x, deriv.arg = 1) {
   new("vglmff",
   blurb = c("Generalized extreme value distribution\n",
           "Links:    ",
-          namesof("location", link = llocat, earg = elocat), ", ", 
+          namesof("location", link = llocat,  earg = elocat), ", ", 
           namesof("scale",    link = lscale, earg = escale), ", ",
           namesof("shape",    link = lshape, earg = eshape)),
   constraints = eval(substitute(expression({
@@ -717,7 +718,7 @@ dgammadx <- function(x, deriv.arg = 1) {
           gshape <- .gshape
           temp234 <- if (length(init.xi)) init.xi[1] else
                      seq(gshape[1], gshape[2], length.out = 12)
-          for(xi.try in temp234) {
+          for (xi.try in temp234) {
             xvec <- if (abs(xi.try) < .tolshape0) log(nvector) else
                     (nvector^xi.try - 1) / xi.try
             fit0 <- lsfit(x = xvec, y=ynvector, intercept = TRUE)
@@ -769,7 +770,7 @@ dgammadx <- function(x, deriv.arg = 1) {
               theta2eta(init.sig, .lscale ,    earg = .escale ), 
               theta2eta(init.xi,  .lshape ,    earg = .eshape ))
     }
-  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .percentiles = percentiles, .tolshape0 = tolshape0,
             .imethod = imethod,
@@ -784,7 +785,7 @@ dgammadx <- function(x, deriv.arg = 1) {
     LP <- length(cent)
     fv <- matrix(as.numeric(NA), nrow(eta), LP)
     if (LP) {
-      for(ii in 1:LP) {
+      for (ii in 1:LP) {
         yp <- -log(cent[ii]/100)
         fv[!is.zero,ii] <- loc[!is.zero] - sigma[!is.zero] *
                         (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
@@ -800,11 +801,11 @@ dgammadx <- function(x, deriv.arg = 1) {
       fv[xi >= 1] <- NA  # Mean exists only if xi < 1.
     }
     fv
-  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+  }, list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape,
            .tolshape0 = tolshape0 ))),
   last = eval(substitute(expression({
-    misc$links <-   c(location = .llocat,
+    misc$links <-   c(location = .llocat, 
                       scale    = .lscale ,
                       shape    = .lshape)
 
@@ -813,13 +814,13 @@ dgammadx <- function(x, deriv.arg = 1) {
                       shape    = .eshape)
 
 
-    misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+    misc$true.mu <- !length( .percentiles)  # @fitted is not a true mu
     misc$percentiles <- .percentiles
     misc$tolshape0 <- .tolshape0
     misc$expected <- TRUE 
     if (any(xi < -0.5))
       warning("some values of the shape parameter are less than -0.5")
-  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .tolshape0 = tolshape0,  .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
@@ -835,7 +836,7 @@ dgammadx <- function(x, deriv.arg = 1) {
                        log = TRUE, oobounds.log = -1.0e04,
                        giveWarning= .giveWarning))
       }
-  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+  }, list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape,
            .giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
   vfamily = c("egev", "vextremes"),
@@ -872,7 +873,7 @@ dgammadx <- function(x, deriv.arg = 1) {
     c(w) * cbind(dl.dmu * dmu.deta,
                  dl.dsi * dsi.deta,
                  dl.dxi * dxi.deta)
-  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .tolshape0 = tolshape0 ))),
   weight = eval(substitute(expression({
@@ -883,7 +884,7 @@ dgammadx <- function(x, deriv.arg = 1) {
     kay <- -xi  # for the formulae 
     kay[abs(kay-0.5) < .tolshape0] <- 0.501
     temp100 <- gamma(2-kay)
-    pp <- (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
+    pp <- (1-kay)^2 * gamma(1-2*kay)  # gamma(0) is undefined so kay != 0.5
     qq <- temp100 * (digamma(1-kay) - (1-kay)/kay)
     wz <- matrix(as.numeric(NA), n, 6)
     wz[, iam(1, 1, M)] <- pp / sigma^2
@@ -922,7 +923,7 @@ dgammadx <- function(x, deriv.arg = 1) {
 rgumbel <- function(n, location = 0, scale = 1) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   answer <- location - scale * log(-log(runif(use.n)))
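
Not part of the patch: the line above is the standard inverse-cdf sampler for the Gumbel distribution, so the sample mean should sit near the "location + scale*0.5772.." quoted in the gumbel() blurb further down.  A quick check, assuming the package is attached:

  set.seed(123)
  y <- rgumbel(1e5, location = 2, scale = 3)
  mean(y)             # close to 2 + 3 * 0.5772 = 3.73
  2 - 3 * digamma(1)  # exact mean; -digamma(1) is the Euler-Mascheroni constant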
@@ -995,7 +996,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
   new("vglmff",
   blurb = c("Gumbel distribution for extreme value regression\n",
             "Links:    ",
-            namesof("location", llocat, earg = elocat ), ", ",
+            namesof("location", llocat,  earg = elocat ), ", ",
             namesof("scale",    lscale, earg = escale )),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
@@ -1044,7 +1045,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
               theta2eta( sc.init, .lscale , earg = .escale ))
     }
 
-  }), list( .llocat = llocat, .lscale = lscale,
+  }), list( .llocat = llocat,  .lscale = lscale,
             .elocat = elocat, .escale = escale,
                               .iscale = iscale,
             .R = R, .mpv = mpv, .percentiles = percentiles ))),
@@ -1054,12 +1055,12 @@ pgumbel <- function(q, location = 0, scale = 1) {
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
 
     Percentiles <- extra$percentiles
-    LP <- length(Percentiles) # may be 0
+    LP <- length(Percentiles)  # may be 0
     if (LP > 0) {
       mpv <- extra$mpv
-      mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv) # LP may be 0
+      mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv)  # LP may be 0
       Rvec <- extra$R
-      for(ii in 1:LP) {
+      for (ii in 1:LP) {
         ci <- if (is.Numeric(Rvec))
                Rvec * (1 - Percentiles[ii] / 100) else
                -log(Percentiles[ii] / 100)
@@ -1076,7 +1077,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
     mu <- loc + sigma * EulerM
   }
   mu
-  }, list( .llocat = llocat, .lscale = lscale,
+  }, list( .llocat = llocat,  .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
 
   last = eval(substitute(expression({
@@ -1086,23 +1087,23 @@ pgumbel <- function(q, location = 0, scale = 1) {
 
     misc$R <- .R
     misc$mpv <- .mpv
-    misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+    misc$true.mu <- !length( .percentiles)  # @fitted is not a true mu
     misc$percentiles <- .percentiles
-  }), list( .llocat = llocat, .lscale = lscale,
+  }), list( .llocat = llocat,  .lscale = lscale,
             .elocat = elocat, .escale = escale,
             .percentiles = percentiles,
             .mpv = mpv, .R = R ))),
   vfamily = c("gumbel", "vextremes"),
   loglikelihood = eval(substitute(
   function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    loc   <- eta2theta(eta[, 1], .llocat, earg = .elocat )
+    loc   <- eta2theta(eta[, 1], .llocat,  earg = .elocat )
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
 
     r.vec <- rowSums(cbind(!is.na(y)))
     yiri <- y[cbind(1:nrow(y),r.vec)]
     ans <- -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
     max.r.vec <- max(r.vec)
-    for(jay in 1:max.r.vec) {
+    for (jay in 1:max.r.vec) {
       index <- (jay <= r.vec)
       ans[index] <- ans[index] - (y[index,jay]-loc[index]) / sigma[index]
     }
@@ -1112,7 +1113,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
 
         sum(c(w) * ans)
       }
-  }, list( .llocat = llocat, .lscale = lscale,
+  }, list( .llocat = llocat,  .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   deriv = eval(substitute(expression({
     loc   <- eta2theta(eta[, 1], .llocat , earg = .elocat )
@@ -1124,7 +1125,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
     temp2 <- (yiri - loc) / sigma
     term2 <- exp(-temp2)
 
-    dloc.deta <- dtheta.deta(loc, .llocat, earg = .elocat)
+    dloc.deta <- dtheta.deta(loc, .llocat,  earg = .elocat)
     dsigma.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
 
     dl.dloc <- (r.vec - term2) / sigma
@@ -1133,16 +1134,16 @@ pgumbel <- function(q, location = 0, scale = 1) {
 
     c(w) * cbind(dl.dloc   * dloc.deta,
                  dl.dsigma * dsigma.deta)
-  }), list( .llocat = llocat, .lscale = lscale,
+  }), list( .llocat = llocat,  .lscale = lscale,
             .elocat = elocat, .escale = escale ))),
   weight = eval(substitute(expression({
-    temp6 <- digamma(r.vec) # , integer = T
-    temp5 <- digamma(1:max(r.vec)) # , integer=T
+    temp6 <- digamma(r.vec)  # , integer = T
+    temp5 <- digamma(1:max(r.vec))  # , integer=T
     temp5 <- matrix(temp5, n, max(r.vec), byrow = TRUE)
     temp5[col(temp5) > r.vec] <- 0
     temp5 <- temp5 %*% rep(1, ncol(temp5))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M = 2)) # 3=dimm(M = 2)
+    wz <- matrix(as.numeric(NA), n, dimm(M = 2))  # 3=dimm(M = 2)
     wz[, iam(1, 1, M)] <- r.vec / sigma^2
     wz[, iam(2, 1, M)] <- -(1 + r.vec * temp6) / sigma^2
     wz[, iam(2, 2, M)] <- (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
@@ -1161,7 +1162,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
 rgpd <- function(n, location = 0, scale = 1, shape = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
              stop("bad input for argument 'n'") else n
 
   if (!is.Numeric(location))
@@ -1202,7 +1203,7 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
   if (oobounds.log > 0)
     stop("bad input for argument 'oobounds.log'")
 
-  if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'tolshape0'")
 
 
@@ -1352,7 +1353,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
     stop("bad input for argument 'giveWarning'")
   if (!is.Numeric(threshold)) 
     stop("bad input for argument 'threshold'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
      imethod > 2.5)
     stop("argument 'imethod' must be 1 or 2")
@@ -1370,7 +1371,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
     (!is.Numeric(percentiles, positive = TRUE) ||
      max(percentiles) >= 100))
     stop("bad input for argument 'percentiles'")
-  if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
+  if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE) ||
       tolshape0 > 0.1)
     stop("bad input for argument 'tolshape0'")
 
@@ -1428,7 +1429,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
     if (is.Numeric(  .threshold )) {
       orig.y <- y
     }
-    ystar <- as.matrix(y - Threshold) # Operate on ystar
+    ystar <- as.matrix(y - Threshold)  # Operate on ystar
     extra$threshold <- Threshold
 
 
@@ -1496,7 +1497,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
 
     Musual <- 2
     pcent <- .percentiles
-    LP <- length(pcent) # NULL means LP == 0 and the mean is returned
+    LP <- length(pcent)  # NULL means LP == 0 and the mean is returned
     ncoly <- ncol(eta) / Musual
     if (!length(y.names <- extra$y.names))
       y.names <- paste("Y", 1:ncoly, sep = "")
@@ -1515,12 +1516,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
                        percentiles = c(90, 95),
                        y.name = NULL,
                        tolshape0 = 0.001) {
-      is.zero <- (abs(shape) < tolshape0 ) # A matrix
+      is.zero <- (abs(shape) < tolshape0 )  # A matrix
 
       LP <- length(percentiles)
       fv <- matrix(as.numeric(NA), length(shape), LP)
       is.zero <- (abs(shape) < tolshape0)
-      for(ii in 1:LP) {
+      for (ii in 1:LP) {
         temp <- 1 - percentiles[ii] / 100
         fv[!is.zero, ii] <- threshold[!is.zero] +
                            (temp^(-shape[!is.zero]) - 1) *
@@ -1543,7 +1544,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
 
       fv <- matrix(-1, nrow(sigma),  LP * ncoly)
       colnames.cumsum.fv <- NULL
-      for(jlocal in 1:ncoly) {
+      for (jlocal in 1:ncoly) {
         block.mat.fv <-
           do.one(yvec = y[, jlocal],
                  shape = shape[, jlocal],
@@ -1583,7 +1584,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .escale
       misc$earg[[Musual*ii  ]] <- .eshape
     }
@@ -1629,7 +1630,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
     mytolerance <- .Machine$double.eps
     bad <- (A <= mytolerance)
     if (any(bad) && any(w[bad] != 0)) {
-      cat(sum(w[bad],na.rm = TRUE), # "; ignoring them"
+      cat(sum(w[bad],na.rm = TRUE),  # "; ignoring them"
           "observations violating boundary constraints\n")
       flush.console()
     }
@@ -1662,7 +1663,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
 
     ned2l.dscale2 <- 1 / ((1+2*Shape) * sigma^2)
     ned2l.dshape2 <- 2 / ((1+2*Shape) * (1+Shape))
-    ned2l.dshapescale <- 1 / ((1+2*Shape) * (1+Shape) * sigma) # > 0 !
+    ned2l.dshapescale <- 1 / ((1+2*Shape) * (1+Shape) * sigma)  # > 0 !
 
     NOS <- M / Musual
 
@@ -1822,7 +1823,7 @@ setMethod("guplot", "vlm",
   new("vglmff",
   blurb = c("Gumbel distribution (univariate response)\n\n",
             "Links:    ",
-            namesof("location", llocat,
+            namesof("location", llocat, 
                     earg = elocat, tag = TRUE), ", ", 
             namesof("scale", lscale, earg = escale , tag = TRUE), "\n",
             "Mean:     location + scale*0.5772..\n",
@@ -1861,12 +1862,12 @@ setMethod("guplot", "vlm",
         cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
               theta2eta(sca.init, .lscale , earg = .escale ))
     }
-  }), list( .llocat = llocat, .lscale = lscale,
+  }), list( .llocat = llocat,  .lscale = lscale,
             .elocat = elocat, .escale = escale,
                               .iscale = iscale, 
             .R = R, .mpv = mpv, .percentiles = percentiles ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
-    locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+    locat <- eta2theta(eta[, 1], .llocat,  earg = .elocat)
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
     EulerM <- -digamma(1)
     Percentiles <- extra$percentiles
@@ -1876,7 +1877,7 @@ setMethod("guplot", "vlm",
     mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv)
     Rvec <- extra$R
     if (1 <= LP)
-    for(ii in 1:LP) {
+    for (ii in 1:LP) {
       ci <- if (is.Numeric(Rvec)) Rvec * (1 - Percentiles[ii] / 100) else
           -log(Percentiles[ii] / 100)
       mu[,ii] <- locat - sigma * log(ci)
@@ -1889,16 +1890,16 @@ setMethod("guplot", "vlm",
       dmn2 <- c(dmn2, "MPV")
     dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
     mu
-  }, list( .llocat = llocat, .lscale = lscale,
+  }, list( .llocat = llocat,  .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   last = eval(substitute(expression({
-    misc$link <-    c(location = .llocat, scale = .lscale) 
+    misc$link <-    c(location = .llocat,  scale = .lscale) 
     misc$earg <- list(location = .elocat, scale = .escale)
-    misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+    misc$true.mu <- !length( .percentiles)  # @fitted is not a true mu
     misc$R <- .R
     misc$mpv <- .mpv
     misc$percentiles = .percentiles
-  }), list( .llocat = llocat, .lscale = lscale, .mpv = mpv,
+  }), list( .llocat = llocat,  .lscale = lscale, .mpv = mpv,
             .elocat = elocat, .escale = escale,
             .R = R, .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
@@ -1909,7 +1910,7 @@ setMethod("guplot", "vlm",
                         "implemented yet") else {
        sum(w * dgumbel(x = y, location = loc, scale = sca, log = TRUE))
     }
-  }, list( .llocat = llocat, .lscale = lscale,
+  }, list( .llocat = llocat,  .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   vfamily = "egumbel",
   deriv = eval(substitute(expression({
@@ -1923,7 +1924,7 @@ setMethod("guplot", "vlm",
     dsca.deta <- dtheta.deta(sca, .lscale , earg = .escale )
     c(w) * cbind(dl.dloc * dloc.deta,
                  dl.dsca * dsca.deta)
-  }), list( .llocat = llocat, .lscale = lscale,
+  }), list( .llocat = llocat,  .lscale = lscale,
             .elocat = elocat, .escale = escale ))),
   weight=expression({
     digamma1 <- digamma(1)
@@ -1964,20 +1965,20 @@ setMethod("guplot", "vlm",
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
-      stop("bad input for argument 'zero'")
+    stop("bad input for argument 'zero'")
 
 
   new("vglmff",
   blurb = c("Censored Gumbel distribution\n\n",
             "Links:    ",
-            namesof("location", llocat, earg = elocat, tag = TRUE),
+            namesof("location", llocat,  earg = elocat, tag = TRUE),
             ", ", 
             namesof("scale", lscale, earg = escale, tag = TRUE),
             "\n",
             "Mean:     location + scale*0.5772..\n",
             "Variance: pi^2 * scale^2 / 6"),
   constraints = eval(substitute(expression({
-      constraints = cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- cbind(y)
@@ -1998,7 +1999,7 @@ setMethod("guplot", "vlm",
       stop("some observations are both right and left censored!")
 
     predictors.names <-
-    c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+    c(namesof("location", .llocat,  earg = .elocat, tag = FALSE),
       namesof("scale",    .lscale ,    earg = .escale   , tag = FALSE))
 
     if (!length(etastart)) {
@@ -2013,7 +2014,7 @@ setMethod("guplot", "vlm",
               theta2eta(sc.init,  .lscale    , earg = .escale ))
     }
   }), list( .lscale = lscale, .iscale = iscale,
-            .llocat = llocat,
+            .llocat = llocat, 
             .elocat = elocat, .escale = escale ))), 
   linkinv = eval(substitute( function(eta, extra = NULL) {
     loc  <- eta2theta(eta[, 1], .llocat)
@@ -2022,7 +2023,7 @@ setMethod("guplot", "vlm",
     if (.mean) loc + sc * EulerM else {
       LP <- length(.percentiles)  # 0 if NULL
       mu <- matrix(as.numeric(NA), nrow(eta), LP)
-      for(ii in 1:LP) {
+      for (ii in 1:LP) {
           ci <- -log( .percentiles[ii] / 100)
           mu[, ii] <- loc - sc * log(ci)
       }
@@ -2031,100 +2032,100 @@ setMethod("guplot", "vlm",
       mu
     }
   }, list( .lscale = lscale, .percentiles = percentiles,
-           .llocat = llocat,
+           .llocat = llocat, 
            .elocat = elocat, .escale = escale ,
            .mean=mean ))), 
   last = eval(substitute(expression({
-        misc$link = c(location= .llocat, scale = .lscale) 
-        misc$earg = list(location= .elocat, scale= .escale )
-        misc$true.mu = .mean    # if FALSE then @fitted is not a true mu 
+        misc$link <- c(location= .llocat,  scale = .lscale) 
+        misc$earg <- list(location= .elocat, scale= .escale )
+        misc$true.mu <- .mean    # if FALSE then @fitted is not a true mu 
         misc$percentiles = .percentiles
-    }), list( .lscale = lscale, .mean=mean,
-              .llocat = llocat,
-              .elocat = elocat, .escale = escale ,
-              .percentiles = percentiles ))),
-    loglikelihood = eval(substitute(
+  }), list( .lscale = lscale, .mean=mean,
+            .llocat = llocat, 
+            .elocat = elocat, .escale = escale ,
+            .percentiles = percentiles ))),
+  loglikelihood = eval(substitute(
             function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
-        loc <- eta2theta(eta[, 1], .llocat, earg = .elocat )
-        sc  <- eta2theta(eta[, 2], .lscale , earg = .escale )
-        zedd <- (y-loc) / sc
-
-        cenL <- extra$leftcensored
-        cenU <- extra$rightcensored
-        cen0 <- !cenL & !cenU   # uncensored obsns
-        Fy <- exp(-exp(-zedd))
-        ell1 <- -log(sc[cen0]) - zedd[cen0] - exp(-zedd[cen0])
-        ell2 <- log(Fy[cenL])
-        ell3 <- log1p(-Fy[cenU])
-        if (residuals) stop("loglikelihood residuals not ",
-                            "implemented yet") else
-            sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
-    }, list( .lscale = lscale,
-             .llocat = llocat,
-             .elocat = elocat, .escale = escale ))),
-    vfamily = "cgumbel",
-    deriv = eval(substitute(expression({
-        cenL <- extra$leftcensored
-        cenU <- extra$rightcensored
-        cen0 <- !cenL & !cenU   # uncensored obsns
-
-        loc <- eta2theta(eta[, 1], .llocat, earg = .elocat )
-        sc  <- eta2theta(eta[, 2], .lscale , earg = .escale )
-        zedd <- (y-loc) / sc
-        temp2 <- -expm1(-zedd)
-        dl.dloc <- temp2 / sc
-        dl.dsc <- -1/sc + temp2 * zedd / sc
-        dloc.deta <- dtheta.deta(loc, .llocat, earg = .elocat )
-        dsc.deta <- dtheta.deta(sc, .lscale , earg = .escale )
-
-        ezedd <- exp(-zedd)
-        Fy <- exp(-ezedd)
-        dFy.dloc <- -ezedd * Fy / sc
-        dFy.dsc <- zedd * dFy.dloc # -zedd * exp(-zedd) * Fy / sc
-        if (any(cenL)) {
-            dl.dloc[cenL] <- -ezedd[cenL] / sc[cenL]
-            dl.dsc[cenL] <- -zedd[cenL] * ezedd[cenL] / sc[cenL]
-        }
-        if (any(cenU)) {
-            dl.dloc[cenU] <- -dFy.dloc[cenU] / (1-Fy[cenU])
-            dl.dsc[cenU] <- -dFy.dsc[cenU] / (1-Fy[cenU])
-        }
-        c(w) * cbind(dl.dloc * dloc.deta,
-                     dl.dsc * dsc.deta)
-    }), list( .lscale = lscale,
-              .llocat = llocat,
-              .elocat = elocat, .escale = escale ))),
-    weight=expression({
-        A1 <- ifelse(cenL, Fy, 0)
-        A3 <- ifelse(cenU, 1-Fy, 0)
-        A2 <- 1 - A1 - A3   # Middle; uncensored
-        digamma1 <- digamma(1)
-        ed2l.dsc2 <- ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
-        ed2l.dloc2 <- 1 / sc^2
-        ed2l.dlocsc <- -(1 + digamma1) / sc^2 
-        wz <- matrix(as.numeric(NA), n, dimm(M = 2))
-        wz[, iam(1, 1, M)] <- A2 * ed2l.dloc2 * dloc.deta^2
-        wz[, iam(2, 2, M)] <- A2 * ed2l.dsc2 * dsc.deta^2
-        wz[, iam(1, 2, M)] <- A2 * ed2l.dlocsc * dloc.deta * dsc.deta
-        d2l.dloc2 <- -ezedd / sc^2
-        d2l.dsc2 <- (2 - zedd) * zedd * ezedd / sc^2
-        d2l.dlocsc <- (1 - zedd) * ezedd / sc^2
-        wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)]-A1^2 * d2l.dloc2 * dloc.deta^2
-        wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)]-A1^2 * d2l.dsc2 * dsc.deta^2
-        wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)]-A1^2 * d2l.dlocsc *
-                            dloc.deta * dsc.deta
-        d2Fy.dloc2 <- dFy.dloc * dl.dloc + Fy * d2l.dloc2
-        d2Fy.dsc2 <- dFy.dsc * dl.dsc + Fy * d2l.dsc2
-        d2Fy.dlocsc <- dFy.dsc * dl.dloc + Fy * d2l.dlocsc
-        d2l.dloc2 <- -((1-Fy) * d2Fy.dloc2 - dFy.dloc^2) / (1-Fy)^2
-        d2l.dsc2 <- -((1-Fy) * d2Fy.dsc2 - dFy.dsc^2) / (1-Fy)^2
-        d2l.dlocsc  <- -((1-Fy) * d2Fy.dlocsc - dFy.dloc * dFy.dsc) / (1-Fy)^2
-        wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)]-A3^2 * d2l.dloc2 * dloc.deta^2
-        wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)]-A3^2 * d2l.dsc2 * dsc.deta^2
-        wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)]-A3^2 * d2l.dlocsc *
-                            dloc.deta * dsc.deta
-        c(w) * wz
-    }))
+    loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    sc  <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    zedd <- (y-loc) / sc
+
+    cenL <- extra$leftcensored
+    cenU <- extra$rightcensored
+    cen0 <- !cenL & !cenU   # uncensored obsns
+    Fy <- exp(-exp(-zedd))
+    ell1 <- -log(sc[cen0]) - zedd[cen0] - exp(-zedd[cen0])
+    ell2 <- log(Fy[cenL])
+    ell3 <- log1p(-Fy[cenU])
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else
+      sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+  }, list( .lscale = lscale,
+           .llocat = llocat, 
+           .elocat = elocat, .escale = escale ))),
+  vfamily = "cgumbel",
+  deriv = eval(substitute(expression({
+    cenL <- extra$leftcensored
+    cenU <- extra$rightcensored
+    cen0 <- !cenL & !cenU   # uncensored obsns
+
+    loc <- eta2theta(eta[, 1], .llocat,  earg = .elocat )
+    sc  <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    zedd <- (y-loc) / sc
+    temp2 <- -expm1(-zedd)
+    dl.dloc <- temp2 / sc
+    dl.dsc <- -1/sc + temp2 * zedd / sc
+    dloc.deta <- dtheta.deta(loc, .llocat,  earg = .elocat )
+    dsc.deta <- dtheta.deta(sc, .lscale , earg = .escale )
+
+    ezedd <- exp(-zedd)
+    Fy <- exp(-ezedd)
+    dFy.dloc <- -ezedd * Fy / sc
+    dFy.dsc <- zedd * dFy.dloc # -zedd * exp(-zedd) * Fy / sc
+    if (any(cenL)) {
+      dl.dloc[cenL] <- -ezedd[cenL] / sc[cenL]
+      dl.dsc[cenL] <- -zedd[cenL] * ezedd[cenL] / sc[cenL]
+    }
+    if (any(cenU)) {
+      dl.dloc[cenU] <- -dFy.dloc[cenU] / (1-Fy[cenU])
+      dl.dsc[cenU] <- -dFy.dsc[cenU] / (1-Fy[cenU])
+    }
+    c(w) * cbind(dl.dloc * dloc.deta,
+                 dl.dsc * dsc.deta)
+  }), list( .lscale = lscale,
+            .llocat = llocat, 
+            .elocat = elocat, .escale = escale ))),
+  weight = expression({
+    A1 <- ifelse(cenL, Fy, 0)
+    A3 <- ifelse(cenU, 1-Fy, 0)
+    A2 <- 1 - A1 - A3   # Middle; uncensored
+    digamma1 <- digamma(1)
+    ed2l.dsc2 <- ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
+    ed2l.dloc2 <- 1 / sc^2
+    ed2l.dlocsc <- -(1 + digamma1) / sc^2 
+    wz <- matrix(as.numeric(NA), n, dimm(M = 2))
+    wz[, iam(1, 1, M)] <- A2 * ed2l.dloc2 * dloc.deta^2
+    wz[, iam(2, 2, M)] <- A2 * ed2l.dsc2 * dsc.deta^2
+    wz[, iam(1, 2, M)] <- A2 * ed2l.dlocsc * dloc.deta * dsc.deta
+    d2l.dloc2 <- -ezedd / sc^2
+    d2l.dsc2 <- (2 - zedd) * zedd * ezedd / sc^2
+    d2l.dlocsc <- (1 - zedd) * ezedd / sc^2
+    wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)]-A1^2 * d2l.dloc2 * dloc.deta^2
+    wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)]-A1^2 * d2l.dsc2 * dsc.deta^2
+    wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)]-A1^2 * d2l.dlocsc *
+                        dloc.deta * dsc.deta
+    d2Fy.dloc2 <- dFy.dloc * dl.dloc + Fy * d2l.dloc2
+    d2Fy.dsc2 <- dFy.dsc * dl.dsc + Fy * d2l.dsc2
+    d2Fy.dlocsc <- dFy.dsc * dl.dloc + Fy * d2l.dlocsc
+    d2l.dloc2 <- -((1-Fy) * d2Fy.dloc2 - dFy.dloc^2) / (1-Fy)^2
+    d2l.dsc2 <- -((1-Fy) * d2Fy.dsc2 - dFy.dsc^2) / (1-Fy)^2
+    d2l.dlocsc  <- -((1-Fy) * d2Fy.dlocsc - dFy.dloc * dFy.dsc) / (1-Fy)^2
+    wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)]-A3^2 * d2l.dloc2 * dloc.deta^2
+    wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)]-A3^2 * d2l.dsc2 * dsc.deta^2
+    wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)]-A3^2 * d2l.dlocsc *
+                          dloc.deta * dsc.deta
+    c(w) * wz
+  }))
 }
 
 
@@ -2248,7 +2249,7 @@ frechet2.control <- function(save.weight = TRUE, ...) {
         namesof("shape", .lshape , earg = .eshape, short = TRUE))
 
 
-    extra$location <- rep( .location , length.out = n) # stored here
+    extra$location <- rep( .location , length.out = n)  # stored here
 
 
     if (!length(etastart)) {
@@ -2273,7 +2274,7 @@ frechet2.control <- function(save.weight = TRUE, ...) {
 
       shape.init <- if (length( .ishape ))
         rep( .ishape , length.out = n) else {
-        rep(try.this , length.out = n) # variance exists if shape > 2
+        rep(try.this , length.out = n)  # variance exists if shape > 2
       }
 
 
@@ -2336,7 +2337,7 @@ frechet2.control <- function(save.weight = TRUE, ...) {
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
 
-    rzedd <- Scale / (y - loctn) # reciprocial of zedd
+    rzedd <- Scale / (y - loctn)  # reciprocial of zedd
     dl.dloctn <- (shape + 1) / (y - loctn) -
                 (shape / (y - loctn)) * (rzedd)^shape
     dl.dScale <- shape * (1 - rzedd^shape) / Scale
@@ -2356,7 +2357,7 @@ frechet2.control <- function(save.weight = TRUE, ...) {
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
 
     if (length( .nsimEIM )) {
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
           ysim <- rfrechet(n, location = loctn, scale = Scale, shape = shape)
 
           rzedd <- Scale / (ysim - loctn)   # reciprocial of zedd
@@ -2442,7 +2443,7 @@ if (FALSE)
       namesof("scale",      .lscale , earg = .escale, short = TRUE),
       namesof("shape",      .lshape , earg = .eshape, short = TRUE))
 
-    anchorpt <- if (is.Numeric( .anchor, allowable.length = 1))
+    anchorpt <- if (is.Numeric( .anchor, length.arg = 1))
                .anchor else min(y)
     if (min(y) < anchorpt)
       stop("anchor point is too large")
@@ -2465,10 +2466,10 @@ if (FALSE)
                            y = y,  x = x, w = w, maximize = FALSE,
                            abs.arg = TRUE)
 
-      shape.init =
+      shape.init <-
         if (length( .ishape ))
           rep( .ishape , length.out = n) else {
-          rep(try.this , length.out = n) # variance exists if shape > 2
+          rep(try.this , length.out = n)  # variance exists if shape > 2
       }
 
 
@@ -2592,7 +2593,7 @@ if (FALSE)
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
 
     if (length( .nsimEIM )) {
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
           ysim <- rfrechet(n, location = loctn, scale = Scale, shape = shape)
 
           rzedd <- Scale / (ysim - loctn)   # reciprocial of zedd
@@ -2632,14 +2633,14 @@ if (FALSE)
 }
 
 
-recnormal1.control <- function(save.weight = TRUE, ...) {
+recnormal.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
 
- recnormal1 <- function(lmean = "identity", lsd = "loge",
-                        imean = NULL, isd = NULL, imethod = 1,
-                        zero = NULL) {
+ recnormal <- function(lmean = "identity", lsd = "loge",
+                       imean = NULL, isd = NULL, imethod = 1,
+                       zero = NULL) {
   lmean <- as.list(substitute(lmean))
   emean <- link2list(lmean)
   lmean <- attr(emean, "function.name")
@@ -2651,7 +2652,7 @@ recnormal1.control <- function(save.weight = TRUE, ...) {
   isdev <- isd
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 3.5)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -2666,7 +2667,7 @@ recnormal1.control <- function(save.weight = TRUE, ...) {
             "\n",
             "Variance: sd^2"),
   constraints = eval(substitute(expression({
-      constraints = cm.zero.vgam(constraints, x, .zero, M)
+      constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2724,7 +2725,7 @@ recnormal1.control <- function(save.weight = TRUE, ...) {
         sum(w[-NN] * pnorm(zedd[-NN], lower.tail = FALSE, log.p = TRUE))
       }
   }, list( .lsdev = lsdev, .esdev = esdev ))),
-  vfamily = c("recnormal1"),
+  vfamily = c("recnormal"),
   deriv = eval(substitute(expression({
     NN <- nrow(eta)
     mymu <- eta2theta(eta[, 1], .lmean)
@@ -2756,9 +2757,10 @@ recnormal1.control <- function(save.weight = TRUE, ...) {
           wznew <- cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
       } else {
         wzold <- wznew
-        wznew <- qnupdate(w=w, wzold = wzold, dderiv=(derivold - derivnew),
-                         deta=etanew-etaold, M = M,
-                         trace=trace)  # weights incorporated in args
+        wznew <- qnupdate(w = w, wzold = wzold,
+                          dderiv = (derivold - derivnew),
+                          deta = etanew-etaold, M = M,
+                          trace = trace)  # weights incorporated in args
     }
     wznew
   }))
@@ -2778,10 +2780,10 @@ recexp1.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                     integer.valued = TRUE, positive = TRUE) ||
-     imethod > 3.5)
-      stop("argument 'imethod' must be 1 or 2 or 3")
+      imethod > 3.5)
+    stop("argument 'imethod' must be 1 or 2 or 3")
 
 
 
@@ -2865,14 +2867,55 @@ recexp1.control <- function(save.weight = TRUE, ...) {
 
 
 
- poissonp <- function(ostatistic, dimension = 2,
-                     link = "loge",
-                     idensity = NULL, imethod = 1) {
-  if (!is.Numeric(ostatistic, positive = TRUE,
-                  allowable.length = 1, integer.valued = TRUE))
+dpois.points <- function(x, lambda, ostatistic,
+                         dimension = 2, log = FALSE) {
+
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  L <- max(length(x), length(lambda),
+           length(ostatistic), length(dimension))
+  if (length(x) != L)
+    x          <- rep(x,          length.out = L)
+  if (length(lambda) != L)
+    lambda     <- rep(lambda,     length.out = L)
+  if (length(ostatistic) != L)
+    ostatistic <- rep(ostatistic, length.out = L)
+  if (length(dimension) != L)
+    dimension  <- rep(dimension,  length.out = L)
+
+  if (!all(dimension %in% c(2, 3)))
+    stop("argument 'dimension' must have values 2 and/or 3")
+
+
+  ans2 <- log(2) + ostatistic * log(pi * lambda) -
+          lgamma(ostatistic) + (2 * ostatistic - 1) * log(x) -
+          lambda * pi * x^2
+
+  ans3 <- log(3) + ostatistic * log(4 * pi * lambda / 3) -
+          lgamma(ostatistic) + (3 * ostatistic - 1) * log(x) -
+          (4/3) * lambda * pi * x^3
+
+  ans <- ifelse(dimension == 2, ans2, ans3)
+
+
+  if (log.arg) ans else exp(ans)
+}
+
+
+ poisson.points <-
+  function(ostatistic, dimension = 2,
+           link = "loge",
+           idensity = NULL, imethod = 1) {
+
+
+  if (!is.Numeric(ostatistic,
+                  length.arg = 1,
+                  positive = TRUE))
     stop("argument 'ostatistic' must be a single positive integer")
   if (!is.Numeric(dimension, positive = TRUE,
-                  allowable.length = 1, integer.valued = TRUE) ||
+                  length.arg = 1, integer.valued = TRUE) ||
       dimension > 3)
     stop("argument 'dimension' must be 2 or 3")
 
@@ -2882,7 +2925,7 @@ recexp1.control <- function(save.weight = TRUE, ...) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       imethod > 2.5)
     stop("argument 'imethod' must be 1 or 2")
@@ -2901,9 +2944,9 @@ recexp1.control <- function(save.weight = TRUE, ...) {
             "Mean:    gamma(s+1/3) / (gamma(s) * (4*density*pi/3)^(1/3))"),
   initialize = eval(substitute(expression({
     if (ncol(cbind(y)) != 1)
-        stop("response must be a vector or a one-column matrix")
+      stop("response must be a vector or a one-column matrix")
     if (any(y <= 0))
-        stop("response must contain positive values only")
+      stop("response must contain positive values only")
 
 
 
@@ -2913,37 +2956,39 @@ recexp1.control <- function(save.weight = TRUE, ...) {
 
 
     if (!length(etastart)) {
-        use.this <- if ( .imethod == 1) median(y) + 1/8 else
-                   weighted.mean(y,w)
-        if ( .dimension == 2) {
-            myratio <- exp(lgamma( .ostatistic + 0.5) -
-                          lgamma( .ostatistic ))
-            density.init <- if (is.Numeric( .idensity ))
-                rep( .idensity , len = n) else
-                rep(myratio^2 / (pi * use.this^2), len = n)
-            etastart <- theta2eta(density.init, .link, earg = .earg)
-        } else {
-            myratio <- exp(lgamma( .ostatistic +1/3) -
-                          lgamma( .ostatistic ))
-            density.init <- if (is.Numeric( .idensity ))
-                rep( .idensity , len = n) else
-                rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
-            etastart <- theta2eta(density.init, .link, earg = .earg)
-        }
+      use.this <- if ( .imethod == 1) median(y) + 1/8 else
+                  weighted.mean(y,w)
+      if ( .dimension == 2) {
+        myratio <- exp(lgamma( .ostatistic + 0.5) -
+                       lgamma( .ostatistic ))
+        density.init <- if (is.Numeric( .idensity ))
+            rep( .idensity , len = n) else
+            rep(myratio^2 / (pi * use.this^2), len = n)
+        etastart <- theta2eta(density.init, .link , earg = .earg )
+      } else {
+        myratio <- exp(lgamma( .ostatistic + 1/3) -
+                       lgamma( .ostatistic ))
+        density.init <- if (is.Numeric( .idensity ))
+            rep( .idensity , len = n) else
+            rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
+        etastart <- theta2eta(density.init, .link , earg = .earg )
+      }
     }
-  }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+  }), list( .link = link, .earg = earg,
+            .ostatistic = ostatistic,
             .dimension = dimension, .imethod = imethod,
             .idensity = idensity ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    density = eta2theta(eta, .link, earg = .earg)
+    density <- eta2theta(eta, .link, earg = .earg)
     if ( .dimension == 2) {
-      myratio <- exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
+      myratio <- exp(lgamma( .ostatistic + 0.5) - lgamma( .ostatistic ))
       myratio / sqrt(density * pi)
     } else {
-      myratio <- exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
+      myratio <- exp(lgamma( .ostatistic + 1/3) - lgamma( .ostatistic))
       myratio / (4 * density * pi/3)^(1/3)
     }
-  }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
+  }, list( .link = link, .earg = earg,
+           .ostatistic = ostatistic,
            .dimension = dimension ))),
   last = eval(substitute(expression({
     misc$link <-    c("density" = .link)
@@ -2952,42 +2997,43 @@ recexp1.control <- function(save.weight = TRUE, ...) {
     misc$expected <- TRUE
     misc$ostatistic <- .ostatistic
     misc$dimension <- .dimension
-  }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+  }), list( .link = link, .earg = earg,
+            .ostatistic = ostatistic,
             .dimension = dimension ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
-    density = eta2theta(eta, .link, earg = .earg)
+    density <- eta2theta(eta, .link, earg = .earg)
     if (residuals) stop("loglikelihood residuals not ",
-                        "implemented yet") else
-      if ( .dimension == 2)
-          sum(w * (log(2) + .ostatistic * log(pi * density) -
-               lgamma( .ostatistic) + (2* .ostatistic-1) * log(y) -
-               density * pi * y^2)) else
-          sum(w * (log(3) + .ostatistic * log(4*pi * density/3) -
-               lgamma( .ostatistic) + (3* .ostatistic-1) * log(y) -
-               (4/3) * density * pi * y^3))
-  }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
+                        "implemented yet") else {
+      sum(c(w) * dpois.points(y, lambda = density,
+                              ostatistic = .ostatistic ,
+                              dimension = .dimension , log = TRUE))
+    }
+  }, list( .link = link, .earg = earg,
+           .ostatistic = ostatistic,
            .dimension = dimension ))),
-  vfamily = c("poissonp"),
+  vfamily = c("poisson.points"),
   deriv = eval(substitute(expression({
     density <- eta2theta(eta, .link, earg = .earg)
 
-    if ( .dimension == 2) {
-        dl.ddensity <- .ostatistic / density - pi * y^2
+    dl.ddensity <- if ( .dimension == 2) {
+      .ostatistic / density - pi * y^2
     } else {
-        dl.ddensity <- .ostatistic / density - (4/3) * pi * y^3
+      .ostatistic / density - (4/3) * pi * y^3
     }
 
-    ddensity.deta <- dtheta.deta(density, .link, earg = .earg)
+    ddensity.deta <- dtheta.deta(density, .link , earg = .earg )
 
     c(w) * dl.ddensity * ddensity.deta
-  }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+  }), list( .link = link, .earg = earg,
+            .ostatistic = ostatistic,
             .dimension = dimension ))),
   weight = eval(substitute(expression({
     ned2l.ddensity2 <- .ostatistic / density^2
     wz <- ddensity.deta^2 * ned2l.ddensity2
     c(w) * wz
-  }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+  }), list( .link = link, .earg = earg,
+            .ostatistic = ostatistic,
             .dimension = dimension ))))
 }
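
Not part of the patch: a short sanity sketch of the new dpois.points() added above.  In two dimensions it is the density of the distance from a fixed point to the ostatistic-th nearest event of a homogeneous Poisson process with intensity lambda, so it integrates to one and its mean matches the gamma-function ratio used in the linkinv slot of poisson.points().  Assumes this patched VGAM is attached:

  # f(x) = 2 * (lambda*pi)^s * x^(2s-1) * exp(-lambda*pi*x^2) / gamma(s),  dimension = 2
  integrate(dpois.points, 0, Inf, lambda = 1, ostatistic = 2)    # ~ 1
  integrate(function(x) x * dpois.points(x, lambda = 1, ostatistic = 2),
            0, Inf)$value                                        # mean distance, ~ 0.75
  exp(lgamma(2 + 0.5) - lgamma(2)) / sqrt(1 * pi)                # same value, as in linkinv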
 
diff --git a/R/family.fishing.R b/R/family.fishing.R
deleted file mode 100644
index 1ac7499..0000000
--- a/R/family.fishing.R
+++ /dev/null
@@ -1,125 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-
-
-
-DeLury <- function(catch, effort,
-                   type = c("DeLury", "Leslie"),
-                   ricker = FALSE) {
-  type <- match.arg(type, c("DeLury", "Leslie"))[1]
-  if (!is.logical(ricker))
-    stop("bad input for argument 'ricker'")
-  if ((LLL <- Lcatch <- length(catch)) != (Leffort <- length(effort)))
-    stop("length(catch) != length(effort)")
-
-  CPUE <- catch / effort
-  if (type == "DeLury") {
-    Et <- cumsum(effort) - ifelse(ricker, 0.5, 1) * effort
-    logCPUE <- log(CPUE)
-    lmfit <- lm(logCPUE ~ Et, x = TRUE)
-    myq <- catchabilityCoefficient <- -coef(lmfit)[2]
-    N0 <- exp(coef(lmfit)["(Intercept)"]) / myq
-  } else {
-    Kt <- cumsum(catch) - ifelse(ricker, 0.5, 1) * catch
-    lmfit <- lm(CPUE ~ Kt, x = TRUE)
-    myq <- catchabilityCoefficient <- -coef(lmfit)[2]
-    N0 <- coef(lmfit)["(Intercept)"] / myq
-  }
-
-  rlist <-
-  list(catch = catch,
-       effort = effort,
-       type = type,
-       N0 = N0,
-       CPUE = CPUE,
-       lmfit = lmfit)
-  if (type == "DeLury") {
-    rlist$E <- Et
-  } else {
-    rlist$K <- Kt
-  }
-  rlist
-}
-
-
-
-
-
-
-wffc.P1     <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-  ifelse(length >= min.eligible, c1 + (ppm/100) *
-         ceiling(  signif(100 * length, digits = 8)  ), 0)
-
-
-wffc.P1star <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-  ifelse(length >= min.eligible, c1 + ppm * length, 0)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-wffc.P2     <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-  wffc.P1(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
-  ifelse(length >= min.eligible,
-           ceiling(100*(length-min.eligible))^2, 0)
-
-wffc.P2star <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-  wffc.P1star(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
-  ifelse(length >= min.eligible, 10000 * (length-min.eligible)^2, 0)
-
-
-
-
-
-wffc.P3     <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
-
-  temp1 <- floor((ceiling(100*length)/100) / min.eligible) # zz not sure
-  temp1 <- floor(length / min.eligible)
-  ans <- ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
-  ans <- ans + ifelse(temp1 >= 1, ppm * (ceiling(100*length)/100), 0)
-  maxtemp1 <- max(temp1, na.rm = TRUE)
-  if (maxtemp1 > 1)
-    for (ii in 2:maxtemp1) {
-      ans <- ans +
-            ifelse(ii <  temp1,         min.eligible  * (ii-1) * ppm, 0) +
-            ifelse(ii == temp1, (ceiling(100*length)/100 -
-                   ii*min.eligible) * (ii-1) * ppm, 0)
-    }
-  ans
-}
-
-
-
-wffc.P3star <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
-  temp1 <- floor(length / min.eligible)
-  ans <- ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
-  ans <- ans + ifelse(temp1 >= 1, length * ppm, 0)
-  maxtemp1 <- max(temp1, na.rm = TRUE)
-  if (maxtemp1 > 1)
-    for (ii in 2:maxtemp1) {
-      ans <- ans + ifelse(ii <  temp1,  min.eligible  * (ii-1) * ppm, 0) +
-                   ifelse(ii == temp1, (length - ii*min.eligible) *
-                                       (ii-1) * ppm, 0)
-    }
-  ans
-}
-
-
-
-
-
-
-
diff --git a/R/family.functions.R b/R/family.functions.R
index cb3b0f5..1cf75f5 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -68,10 +68,14 @@ get.arg <- function(string) {
 
 
 
+
+
  eifun <- function(i, n)
     cbind(as.numeric((1:n) == i))
 
- eifun <- function(i, n)
+
+ eifun <-
+ I.col <- function(i, n)
     diag(n)[, i, drop = FALSE]
 
  eijfun <- function(i, n) {
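
Not part of the patch: the hunk above gives eifun() the additional name I.col(); it simply returns column i of an n x n identity matrix.  A one-line illustration (redefined locally, so nothing is assumed about whether the name is exported):

  I.col <- function(i, n) diag(n)[, i, drop = FALSE]  # as in the hunk above
  I.col(2, 4)
  #      [,1]
  # [1,]    0
  # [2,]    1
  # [3,]    0
  # [4,]    0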
@@ -109,7 +113,7 @@ tapplymat1 <-
     mat <- as.matrix(mat)
   nr <- nrow(mat)
   nc <- ncol(mat)
-  fred <- dotC(name = "tapplymat1", mat = as.double(mat),
+  fred <- .C("tapplymat1", mat = as.double(mat),
       as.integer(nr), as.integer(nc), as.integer(type))
 
   dim(fred$mat) <- c(nr, nc)
@@ -140,7 +144,7 @@ matrix.power <- function(wz, M, power, fast = TRUE) {
   }
 
   if (fast) {
-    k <- veigen(t(wz), M = M) # matrix.arg)
+    k <- veigen(t(wz), M = M)  # matrix.arg)
     evals <- k$values           # M x n
     evects <- k$vectors         # M x M x n
   } else {
@@ -212,9 +216,9 @@ veigen <- function(x, M) {
 
   n <- ncol(x)
   index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-  dimm.value <- nrow(x) # usually M or M(M+1)/2
+  dimm.value <- nrow(x)  # usually M or M(M+1)/2
 
-  z <- dotFortran(name = "veigen",
+  z <- .Fortran("veigen",
       as.integer(M),
       as.integer(n),
       as.double(x),
@@ -226,7 +230,7 @@ veigen <- function(x, M) {
       wk = double(M*M),
       as.integer(index$row), as.integer(index$col),
       as.integer(dimm.value),
-      error.code = integer(1))
+      error.code = integer(1), PACKAGE = "VGAM")
 
   if (z$error.code)
     stop("eigen algorithm (rs) returned error code ", z$error.code)
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 1068685..5029962 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -131,9 +131,9 @@
   weight = eval(substitute(expression({
     dPP <- array(c(dP1, dP2, dP3), c(n, 6, 3))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
-    for(i1 in 1:M)
-      for(i2 in i1:M) {
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==6 because M==3
+    for (i1 in 1:M)
+      for (i2 in i1:M) {
         index <- iam(i1,i2, M)
         wz[,index] <- rowSums(dPP[, , i1, drop = TRUE] *
                               dPP[, , i2, drop = TRUE] / mu) *
@@ -183,7 +183,7 @@
       pA <- if (is.numeric( .ipA )) rep( .ipA , len = n) else
             c(sqrt(mustart[, 1] - mustart[, 2] / 2))
       f <- if (is.numeric( .iF )) rep( .iF , len = n) else
-           rep(0.01, len = n) # 1- mustart[, 2]/(2*pA*(1-pA))
+           rep(0.01, len = n)  # 1- mustart[, 2]/(2*pA*(1-pA))
       if (any(pA <= 0) || any(pA >= 1))
         stop("bad initial value for 'pA'")
       etastart <- cbind(theta2eta(pA, .link , earg = .earg ),
@@ -237,9 +237,9 @@
     dPP <- array(c(dP1, dP2), c(n, 3, 2))
     dPP.deta <- cbind(dtheta.deta(pA, link = .link , earg = .earg ),
                       dtheta.deta(f,  link = "identity"))
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
-    for(i1 in 1:M)
-      for(i2 in i1:M) {
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
+    for (i1 in 1:M)
+      for (i2 in i1:M) {
         index <- iam(i1, i2, M)
         wz[,index] <- rowSums(dPP[,,i1,drop = TRUE] *
                               dPP[,,i2,drop = TRUE] / mu) *
@@ -419,7 +419,7 @@
   }), list( .link = link, .earg = earg))),
   weight = eval(substitute(expression({
     qq <- 1-p1-p2
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
     ned2l.dp12  <-  2 * (1/p1 + 1/qq)
     ned2l.dp22  <-  2 * (1/p2 + 1/qq)
     ned2l.dp1dp2 <-  2 / qq
@@ -521,7 +521,7 @@
     ns <- 1-mS-ms-nS
     dP1 <- cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
     dP2 <- cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
-    dP3 <- cbind(0, 0, 2*ms, -2*ms,  2*ns, -2*ns) # n x 6
+    dP3 <- cbind(0, 0, 2*ms, -2*ms,  2*ns, -2*ns)  # n x 6
     dl1 <- rowSums(y * dP1 / mu)
     dl2 <- rowSums(y * dP2 / mu)
     dl3 <- rowSums(y * dP3 / mu)
@@ -530,9 +530,9 @@
   }), list( .link = link, .earg = earg))),
   weight = eval(substitute(expression({
     dPP <- array(c(dP1,dP2,dP3), c(n,6, 3))
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
-    for(i1 in 1:M)
-      for(i2 in i1:M) {
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==6 because M==3
+    for (i1 in 1:M)
+      for (i2 in i1:M) {
         index <- iam(i1,i2, M)
         wz[,index] <- rowSums(dPP[,,i1,drop = TRUE] *
                               dPP[,,i2,drop = TRUE] / mu) *
@@ -647,7 +647,7 @@
   }), list( .link = link, .earg = earg))),
 
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
 
     ned2l.dp2  <- (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
     ned2l.dq2  <- (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index ea46f8b..f7dd023 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -15,7 +15,7 @@
 
  binomialff <- function(link = "logit",
                         dispersion = 1, mv = FALSE, onedpar = !mv,
-                        parallel = FALSE, apply.parint = FALSE,
+                        parallel = FALSE,  # apply.parint = FALSE,
                         zero = NULL,
                         bred = FALSE,
                         earg.link = FALSE) {
@@ -26,6 +26,7 @@
 
 
 
+  apply.parint <- FALSE
   estimated.dispersion <- dispersion == 0
 
 
@@ -50,7 +51,9 @@
          "Link:     ", namesof("mu", link, earg = earg), "\n",
          "Variance: mu * (1 - mu)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+                           bool = .parallel , 
+                           constraints = constraints,
                            apply.int = .apply.parint )
 
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
@@ -66,10 +69,10 @@
   initialize = eval(substitute(expression({
     assign("CQO.FastAlgorithm",
            ( .link == "logit" || .link == "cloglog"),
-           envir = VGAM:::VGAMenv)
+           envir = VGAMenv)
     assign("modelno", if ( .link == "logit") 1 else
                       if ( .link == "cloglog") 4 else NULL,
-           envir = VGAM:::VGAMenv)
+           envir = VGAMenv)
 
 
 
@@ -178,20 +181,20 @@
   }, list( .link = link, .earg = earg  ))),
 
   last = eval(substitute(expression({
-    if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
-        rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
-    if (exists("modelno", envir = VGAM:::VGAMenv))
-        rm("modelno", envir = VGAM:::VGAMenv)
+    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
+        rm("CQO.FastAlgorithm", envir = VGAMenv)
+    if (exists("modelno", envir = VGAMenv))
+        rm("modelno", envir = VGAMenv)
 
     dpar <- .dispersion
     if (!dpar) {
         temp87 <- (y-mu)^2 * wz / (dtheta.deta(mu, link = .link ,
-                                  earg = .earg )^2) # w cancel
+                                  earg = .earg )^2)  # w cancel
       if (.mv && ! .onedpar) {
         dpar <- rep(as.numeric(NA), len = M)
         temp87 <- cbind(temp87)
         nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
-        for(ii in 1:M)
+        for (ii in 1:M)
           dpar[ii] <- sum(temp87[, ii]) / (nrow.mu - ncol(x))
         if (is.matrix(y) && length(dimnames(y)[[2]]) == length(dpar))
           names(dpar) <- dimnames(y)[[2]]
@@ -212,7 +215,7 @@
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
   }), list( .dispersion = dispersion,
@@ -228,11 +231,11 @@
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
       if (residuals) {
-        w * (y / mu - (1-y) / (1-mu))
+        c(w) * (y / mu - (1-y) / (1-mu))
       } else {
 
         ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-                   y * w # Convert proportions to counts
+                   y * w  # Convert proportions to counts
         nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
                   round(w)
 
@@ -257,8 +260,8 @@
 
   deriv = eval(substitute(expression({
     yBRED <- if ( .bred ) {
-      Hvector <- hatvaluesbasic(X_vlm = X_vlm_save,
-                                diagWm = c(t(w * mu))) # Handles M>1
+      Hvector <- hatvaluesbasic(X.vlm = X.vlm.save,
+                                diagWm = c(t(w * mu)))  # Handles M>1
 
       varY <- mu * (1 - mu) / w  # Is a matrix if M>1. Seems the most correct.
       d1.ADJ <-   dtheta.deta(mu, .link , earg = .earg )
@@ -300,7 +303,7 @@
       cbind(c(w) * dtheta.deta(mu, link = .link ,
                                earg = .earg )^2 / tmp100)
     }
-    for(ii in 1:M) {
+    for (ii in 1:M) {
       index500 <- !is.finite(tmp200[, ii]) |
                    (abs(tmp200[, ii]) < .Machine$double.eps)
       if (any(index500)) { # Diagonal 0's are bad
@@ -389,7 +392,7 @@
         dpar <- sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
       } else {
         dpar <- rep(0, len = M)
-        for(spp in 1:M) {
+        for (spp in 1:M) {
           temp <- w * dmu.deta[,spp]^2
           dpar[spp] <- sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) / (
                        length(mu[,spp]) - ncol(x))
@@ -405,7 +408,7 @@
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
@@ -511,7 +514,7 @@
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
@@ -586,7 +589,7 @@ pinv.gaussian <- function(q, mu, lambda) {
 rinv.gaussian <- function(n, mu, lambda) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   mu     <- rep(mu,     len = use.n);
@@ -614,11 +617,13 @@ rinv.gaussian <- function(n, mu, lambda) {
 
  inv.gaussianff <- function(lmu = "loge", llambda = "loge",
                             imethod = 1,  ilambda = NULL,
-                            parallel = FALSE, apply.parint = FALSE,
+                            parallel = FALSE,
                             shrinkage.init = 0.99,
                             zero = NULL) {
 
 
+  apply.parint <- FALSE
+
 
   lmu <- as.list(substitute(lmu))
   emu <- link2list(lmu)
@@ -629,11 +634,11 @@ rinv.gaussian <- function(n, mu, lambda) {
   llambda <- attr(elambda, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -658,7 +663,9 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Mean:     ", "mu\n",
             "Variance: mu^3 / lambda"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+                           bool = .parallel , 
+                           constraints = constraints,
                            apply.int = .apply.parint )
 
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
@@ -706,7 +713,7 @@ rinv.gaussian <- function(n, mu, lambda) {
           mediany <- apply(y, 2, median)
           matrix(1.1 * mediany + 1/8, n, ncoly, byrow = TRUE)
         } else if ( .imethod == 3) {
-          use.this <- colSums(y * w) / colSums(w) # weighted.mean(y, w)
+          use.this <- colSums(y * w) / colSums(w)  # weighted.mean(y, w)
           (1 - .sinit) * y  + .sinit * use.this
         } else {
           matrix(colSums(y * w) / colSums(w) + 1/8,
@@ -742,7 +749,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .emu
       misc$earg[[Musual*ii  ]] <- .elambda
     }
@@ -838,7 +845,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -852,7 +859,9 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Link:     ", namesof("mu", link, earg = earg), "\n",
             "Variance: mu"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+                           bool = .parallel , 
+                           constraints = constraints)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -885,7 +894,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     M <- ncoly <- ncol(y)
 
-    assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAM:::VGAMenv)
+    assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAMenv)
 
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) {
@@ -908,7 +917,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     if (!length(etastart)) {
       mu.init <- pmax(y, 1/8)
-      for(iii in 1:ncol(y)) {
+      for (iii in 1:ncol(y)) {
         if ( .imethod == 2) {
           mu.init[, iii] <- weighted.mean(y[, iii], w[, iii]) + 1/8
         } else if ( .imethod == 3) {
@@ -928,17 +937,17 @@ rinv.gaussian <- function(n, mu, lambda) {
   }, list( .link = link, .earg = earg))),
 
   last = eval(substitute(expression({
-    if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
-      rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
+      rm("CQO.FastAlgorithm", envir = VGAMenv)
     dpar <- .dispersion
     if (!dpar) {
       temp87 <- (y-mu)^2 *
-          wz / (dtheta.deta(mu, link = .link , earg = .earg )^2) # w cancel
+          wz / (dtheta.deta(mu, link = .link , earg = .earg )^2)  # w cancel
       if (M > 1 && ! .onedpar ) {
         dpar <- rep(as.numeric(NA), length = M)
         temp87 <- cbind(temp87)
         nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
-        for(ii in 1:M)
+        for (ii in 1:M)
           dpar[ii] <- sum(temp87[, ii]) / (nrow.mu - ncol(x))
         if (is.matrix(y) && length(dimnames(y)[[2]]) == length(dpar))
           names(dpar) <- dimnames(y)[[2]]
@@ -961,7 +970,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
   }), list( .dispersion = dispersion, .imethod = imethod,
@@ -975,15 +984,15 @@ rinv.gaussian <- function(n, mu, lambda) {
 
   loglikelihood =
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    if (residuals) w * (y / mu - 1) else {
-      sum(w * dpois(x = y, lambda = mu, log = TRUE))
+    if (residuals) c(w) * (y / mu - 1) else {
+      sum(c(w) * dpois(x = y, lambda = mu, log = TRUE))
     }
   },
   vfamily = "poissonff",
   deriv = eval(substitute(expression({
     yBRED <- if ( .bred ) {
-      Hvector <- hatvaluesbasic(X_vlm = X_vlm_save,
-                                diagWm = c(t(w * mu))) # Handles M>1
+      Hvector <- hatvaluesbasic(X.vlm = X.vlm.save,
+                                diagWm = c(t(w * mu)))  # Handles M>1
 
 
       varY <- mu # Is a matrix if M>1.
@@ -1067,7 +1076,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
- dexppoisson <- function(lmean = "loge",
+ double.exppoisson <- function(lmean = "loge",
                          ldispersion = "logit",
                          idispersion = 0.8,
                          zero = NULL) {
@@ -1095,7 +1104,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Mean:     ", "mean\n",
             "Variance: mean / dispersion"),
   constraints = eval(substitute(expression({
-    constraints = cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1159,7 +1168,7 @@ rinv.gaussian <- function(n, mu, lambda) {
       }
   }, list( .lmean = lmean, .emean = emean,
            .ldisp = ldisp, .edisp = edisp ))),
-  vfamily = "dexppoisson",
+  vfamily = "double.exppoisson",
   deriv = eval(substitute(expression({
     lambda <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
     Disper <- eta2theta(eta[, 2], link = .ldisp,
@@ -1178,7 +1187,7 @@ rinv.gaussian <- function(n, mu, lambda) {
   }), list( .lmean = lmean, .emean = emean,
             .ldisp = ldisp, .edisp = edisp ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+    wz <- matrix(as.numeric(NA), nrow = n, ncol = 2)  # diagonal
     usethis.lambda <- pmax(lambda, .Machine$double.eps / 10000)
     wz[, iam(1, 1, M)] <- (Disper / usethis.lambda) * dlambda.deta^2
     wz[, iam(2, 2, M)] <- (0.5 / Disper^2) * dDisper.deta^2
@@ -1190,7 +1199,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
- dexpbinomial <- function(lmean = "logit", ldispersion = "logit",
+ double.expbinomial <- function(lmean = "logit", ldispersion = "logit",
                           idispersion = 0.25, zero = 2) {
 
   lmean <- as.list(substitute(lmean))
@@ -1257,7 +1266,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             init.mu <-
             mustart <- (0.5 + nvec * y) / (1 + nvec)
         } else
-            stop("for the dexpbinomial family, response 'y' must be a ",
+            stop("for the double.expbinomial family, response 'y' must be a ",
                  "vector of 0 and 1's\n",
                      "or a factor (first level = fail, ",
                      "other levels = success),\n",
@@ -1304,20 +1313,20 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
-      temp1 <- y * log(ifelse(y > 0, y, 1)) # y*log(y)
-      temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
+      temp1 <- y * log(ifelse(y > 0, y, 1))  # y*log(y)
+      temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0))  # (1-y)*log(1-y)
       sum(0.5 * log(Disper) + w * (y * Disper * log(prob) +
          (1-y) * Disper * log1p(-prob) +
          temp1 * (1-Disper) + temp2 * (1 - Disper)))
     }
   }, list( .lmean = lmean, .emean = emean,
            .ldisp = ldisp, .edisp = edisp ))),
-  vfamily = "dexpbinomial",
+  vfamily = "double.expbinomial",
   deriv = eval(substitute(expression({
     prob   <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
     Disper <- eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
-    temp1 <- y * log(ifelse(y > 0, y, 1)) # y*log(y)
-    temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
+    temp1 <- y * log(ifelse(y > 0, y, 1))  # y*log(y)
+    temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0))  # (1-y)*log(1-y)
     temp3 <- prob * (1.0-prob)
     temp3 <- pmax(temp3, .Machine$double.eps * 10000)
 
@@ -1333,7 +1342,7 @@ rinv.gaussian <- function(n, mu, lambda) {
   }), list( .lmean = lmean, .emean = emean,
             .ldisp = ldisp, .edisp = edisp ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+    wz <- matrix(as.numeric(NA), nrow = n, ncol = 2)  # diagonal
     wz[, iam(1, 1, M)] <- w * (Disper / temp3) * dprob.deta^2
     wz[, iam(2, 2, M)] <- (0.5 / Disper^2) * dDisper.deta^2
     wz
@@ -1344,16 +1353,16 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
- mbinomial <- function(mvar = NULL, link = "logit",
-                      parallel = TRUE,
-                      smallno = .Machine$double.eps^(3/4)) {
+ matched.binomial <- function(mvar = NULL, link = "logit",
+                              parallel = TRUE,
+                              smallno = .Machine$double.eps^(3/4)) {
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
   if (!is.Numeric(smallno, positive = TRUE,
-                  allowable.length = 1) ||
+                  length.arg = 1) ||
       smallno > 1e-4)
     stop("bad input for 'smallno'")
   if (is.logical(parallel) && !parallel)
@@ -1370,12 +1379,14 @@ rinv.gaussian <- function(n, mu, lambda) {
     blurb = c("Matched binomial model (intercepts fitted)\n\n", 
               "Link:     ", namesof("mu[,j]", link, earg = earg)),
     constraints = eval(substitute(expression({
-        constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+        constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                               bool = .parallel ,
+                               constraints = constraints,
                                apply.int = TRUE)
         constraints[[extra$mvar]] <- diag(M)
 
         specialCM <- list(a = vector("list", M-1))
-        for(ii in 1:(M-1)) {
+        for (ii in 1:(M-1)) {
           specialCM[[1]][[ii]] <-
             (constraints[[extra$mvar]])[, 1+ii,drop = FALSE]
         }
@@ -1440,7 +1451,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
@@ -1465,10 +1476,10 @@ rinv.gaussian <- function(n, mu, lambda) {
 
       sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
           dbinom(x = ycounts, size = nvec, prob = mu,
-                       log = TRUE))
+                 log = TRUE))
       }
   },
-  vfamily = c("mbinomial", "vcategorical"),
+  vfamily = c("matched.binomial", "vcategorical"),
   deriv = eval(substitute(expression({
     answer <- if ( .link == "logit") {
       w * (y - mu)
@@ -1483,7 +1494,7 @@ rinv.gaussian <- function(n, mu, lambda) {
         (y/mu - 1)/(1-mu)
       }
       result <- matrix(0, n, M)
-      result[cbind(1:n, extra$index9)] = answer
+      result[cbind(1:n, extra$index9)] <- answer
     result
   }), list( .link = link, .earg = earg))),
   weight = eval(substitute(expression({
@@ -1509,7 +1520,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 mypool <- function(x, index) {
   answer <- x
   uindex <- unique(index)
-  for(ii in uindex) {
+  for (ii in uindex) {
     ind0 <- (index == ii)
     answer[ind0] <- sum(x[ind0])
   }
@@ -1543,7 +1554,9 @@ mypool <- function(x, index) {
     blurb = c("Matched binomial model (intercepts not fitted)\n\n", 
               "Link:     ", namesof("mu[,j]", link, earg = earg)),
     constraints = eval(substitute(expression({
-        constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+        constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
                                apply.int = FALSE)
     }), list( .parallel = parallel ))),
     initialize = eval(substitute(expression({
@@ -1656,201 +1669,201 @@ mypool <- function(x, index) {
   link <- attr(earg, "function.name")
 
 
-    new("vglmff",
-    blurb = if (mv) c("Augmented multivariate binomial model\n\n", 
-           "Link:     ",
-           namesof("mu.1[,j]", link, earg = earg), ", ",
-           namesof("mu.2[,j]", link, earg = earg),
-           "\n",
-           "Variance: mu[,j]*(1-mu[,j])") else
-           c("Augmented binomial model\n\n", 
-           "Link:     ",
-           namesof("mu.1[,j]", link, earg = earg), ", ",
-           namesof("mu.2[,j]", link, earg = earg),
-           "\n",
-           "Variance: mu*(1-mu)"),
-    deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-        Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu), y=cbind(y, 1-y),
-                                       w = w, residuals = residuals,
-                                       eta = eta, extra = extra)
-    },
-    infos = eval(substitute(function(...) {
-      list(Musual = 2,
-           parallel = .parallel)
-    }, list( .parallel = parallel ))),
-    initialize = eval(substitute(expression({
+  new("vglmff",
+  blurb = if (mv) c("Augmented multivariate binomial model\n\n", 
+         "Link:     ",
+         namesof("mu.1[,j]", link, earg = earg), ", ",
+         namesof("mu.2[,j]", link, earg = earg),
+         "\n",
+         "Variance: mu[,j]*(1-mu[,j])") else
+         c("Augmented binomial model\n\n", 
+         "Link:     ",
+         namesof("mu.1[,j]", link, earg = earg), ", ",
+         namesof("mu.2[,j]", link, earg = earg),
+         "\n",
+         "Variance: mu*(1-mu)"),
+  deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+      Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu), y=cbind(y, 1-y),
+                                     w = w, residuals = residuals,
+                                     eta = eta, extra = extra)
+  },
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         parallel = .parallel)
+  }, list( .parallel = parallel ))),
+  initialize = eval(substitute(expression({
 
-        Musual = 2
+    Musual = 2
 
-        if ( .mv ) {
-            y = as.matrix(y)
-            M = Musual * ncol(y)
-            if (!all(y == 0 | y == 1))
-                stop("response must contain 0's and 1's only")
-            dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
-            dn2 = if (length(dn2)) {
-                paste("E[", dn2, "]", sep = "") 
-            } else {
-                paste("mu", 1:M, sep = "") 
-            }
-            predictors.names <-
-              c(namesof(if (M > 1) dn2 else
-                        "mu.1", .link , earg = .earg , short = TRUE),
-                namesof(if (M > 1) dn2 else
-                        "mu.2", .link , earg = .earg , short = TRUE))
-            NOS = M / Musual
-            predictors.names <-
-            predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
+    if ( .mv ) {
+        y = as.matrix(y)
+        M = Musual * ncol(y)
+        if (!all(y == 0 | y == 1))
+            stop("response must contain 0's and 1's only")
+        dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+        dn2 = if (length(dn2)) {
+            paste("E[", dn2, "]", sep = "") 
+        } else {
+            paste("mu", 1:M, sep = "") 
+        }
+        predictors.names <-
+          c(namesof(if (M > 1) dn2 else
+                    "mu.1", .link , earg = .earg , short = TRUE),
+            namesof(if (M > 1) dn2 else
+                    "mu.2", .link , earg = .earg , short = TRUE))
+        NOS = M / Musual
+        predictors.names <-
+        predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
 
 
+        if (!length(mustart) && !length(etastart))
+          mustart = (0.5 + w * y) / (1 + w)
+    } else {
+
+      dn2 = c("mu1.", "mu2.")
+      M = Musual
+
+
+
+        if (!all(w == 1))
+          extra$orig.w = w
+
+
+        NCOL = function (x) if (is.array(x) && length(dim(x)) > 1 ||
+                          is.data.frame(x)) ncol(x) else as.integer(1)
+        if (NCOL(y) == 1) {
+            if (is.factor(y)) y = (y != levels(y)[1])
+            nvec = rep(1, n)
+            y[w == 0] <- 0
+            if (!all(y == 0 | y == 1))
+                stop("response values 'y' must be 0 or 1")
             if (!length(mustart) && !length(etastart))
               mustart = (0.5 + w * y) / (1 + w)
-        } else {
 
-            dn2 = c("mu1.", "mu2.")
-            M = Musual
-
-
-
-            if (!all(w == 1))
-              extra$orig.w = w
-
-
-            NCOL = function (x) if (is.array(x) && length(dim(x)) > 1 ||
-                              is.data.frame(x)) ncol(x) else as.integer(1)
-            if (NCOL(y) == 1) {
-                if (is.factor(y)) y = (y != levels(y)[1])
-                nvec = rep(1, n)
-                y[w == 0] <- 0
-                if (!all(y == 0 | y == 1))
-                    stop("response values 'y' must be 0 or 1")
-                if (!length(mustart) && !length(etastart))
-                  mustart = (0.5 + w * y) / (1 + w)
-
-
-                no.successes = y
-                if (min(y) < 0)
-                    stop("Negative data not allowed!")
-                if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
-                    stop("Number of successes must be integer-valued")
-            } else if (NCOL(y) == 2) {
-                if (min(y) < 0)
-                    stop("Negative data not allowed!")
-                if (any(abs(y - round(y)) > 1.0e-8))
-                    stop("Count data must be integer-valued")
-                y = round(y)
-                nvec = y[, 1] + y[, 2]
-                y = ifelse(nvec > 0, y[, 1] / nvec, 0)
-                w = w * nvec
-                if (!length(mustart) && !length(etastart))
-                  mustart = (0.5 + nvec * y) / (1 + nvec)
-            } else {
-                stop("for the binomialff family, response 'y' must be a ",
-                     "vector of 0 and 1's\n",
-                     "or a factor (first level = fail, ",
-                                   "other levels = success),\n",
-                     "or a 2-column matrix where col 1 is the no. of ",
-                     "successes and col 2 is the no. of failures")
-            }
-            predictors.names <-
-              c(namesof("mu.1", .link , earg = .earg , short = TRUE),
-                namesof("mu.2", .link , earg = .earg , short = TRUE))
-        }
-    }), list( .link = link, .mv = mv, .earg = earg))),
-    linkinv = eval(substitute(function(eta, extra = NULL) {
-        Mdiv2  =  ncol(eta) / 2
-        index1 =  2*(1:Mdiv2) - 1
-        mu =  eta2theta(eta[, index1],
-                        link = .link , earg = .earg )
-        mu
-    }, list( .link = link, .earg = earg  ))),
-    last = eval(substitute(expression({
-        misc$link <- rep( .link , length = M)
-        names(misc$link) <- if (M > 1) dn2 else "mu"
 
-        misc$earg <- vector("list", M)
-        names(misc$earg) <- names(misc$link)
-        for(ii in 1:M)
-          misc$earg[[ii]] <- .earg
-
-        misc$parallel <- .parallel
-        misc$expected <- TRUE
-        misc$mv <- .mv
-    }), list( .link = link, .mv = mv, .earg = earg,
-              .parallel = parallel ))),
-    linkfun = eval(substitute(function(mu, extra = NULL) {
-        usualanswer = theta2eta(mu, .link , earg = .earg )
-        kronecker(usualanswer, matrix(1, 1, 2))
-    }, list( .link = link, .earg = earg))),
-    loglikelihood =
-      function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-        if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
+              no.successes = y
+              if (min(y) < 0)
+                  stop("Negative data not allowed!")
+              if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+                  stop("Number of successes must be integer-valued")
+          } else if (NCOL(y) == 2) {
+              if (min(y) < 0)
+                  stop("Negative data not allowed!")
+              if (any(abs(y - round(y)) > 1.0e-8))
+                  stop("Count data must be integer-valued")
+              y = round(y)
+              nvec = y[, 1] + y[, 2]
+              y = ifelse(nvec > 0, y[, 1] / nvec, 0)
+              w = w * nvec
+              if (!length(mustart) && !length(etastart))
+                mustart = (0.5 + nvec * y) / (1 + nvec)
+          } else {
+              stop("for the binomialff family, response 'y' must be a ",
+                   "vector of 0 and 1's\n",
+                   "or a factor (first level = fail, ",
+                                 "other levels = success),\n",
+                   "or a 2-column matrix where col 1 is the no. of ",
+                   "successes and col 2 is the no. of failures")
+          }
+          predictors.names <-
+            c(namesof("mu.1", .link , earg = .earg , short = TRUE),
+              namesof("mu.2", .link , earg = .earg , short = TRUE))
+      }
+  }), list( .link = link, .mv = mv, .earg = earg))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    Mdiv2  =  ncol(eta) / 2
+    index1 =  2*(1:Mdiv2) - 1
+    mu =  eta2theta(eta[, index1],
+                    link = .link , earg = .earg )
+    mu
+  }, list( .link = link, .earg = earg  ))),
+  last = eval(substitute(expression({
+    misc$link <- rep( .link , length = M)
+    names(misc$link) <- if (M > 1) dn2 else "mu"
 
-          ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-                    y * w # Convert proportions to counts
-          nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
-                    round(w)
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:M)
+      misc$earg[[ii]] <- .earg
 
-          smallno = 1.0e6 * .Machine$double.eps
-          smallno = sqrt(.Machine$double.eps)
-          if (max(abs(ycounts - round(ycounts))) > smallno)
-              warning("converting 'ycounts' to integer in @loglikelihood")
-          ycounts = round(ycounts)
+    misc$parallel <- .parallel
+    misc$expected <- TRUE
+    misc$mv <- .mv
+  }), list( .link = link, .mv = mv, .earg = earg,
+            .parallel = parallel ))),
+  linkfun = eval(substitute(function(mu, extra = NULL) {
+    usualanswer = theta2eta(mu, .link , earg = .earg )
+    kronecker(usualanswer, matrix(1, 1, 2))
+  }, list( .link = link, .earg = earg))),
+  loglikelihood =
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+      if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
 
-          sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
-              dbinom(x = ycounts, size = nvec, prob = mu,
-                           log = TRUE))
-        }
-    },
-    vfamily = c("augbinomial", "vcategorical"),
-    deriv = eval(substitute(expression({
-      Musual = 2
-      Mdiv2 =  M / 2
+      ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+                y * w # Convert proportions to counts
+      nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+                round(w)
 
-      NOS = M / Musual
+        smallno = 1.0e6 * .Machine$double.eps
+        smallno = sqrt(.Machine$double.eps)
+        if (max(abs(ycounts - round(ycounts))) > smallno)
+            warning("converting 'ycounts' to integer in @loglikelihood")
+        ycounts = round(ycounts)
 
-      Konst1 = 1  # Works with this
-      deriv1 = Konst1 * w *
-        if ( .link == "logit") {
-            y * (1 - mu)
-        } else  {
-            stop("this is not programmed in yet")
-            dtheta.deta(mu, link = .link , earg = .earg ) *
-            (y / mu - 1.0) / (1.0 - mu)
-        }
-      deriv2 = Konst1 * w *
-        if ( .link == "logit") {
-           -(1 - y) * mu
-        } else  {
-            stop("this is not programmed in yet")
-            dtheta.deta(mu, link = .link , earg = .earg ) *
-            (y / mu - 1.0) / (1.0 - mu)
-        }
+        sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+            dbinom(x = ycounts, size = nvec, prob = mu,
+                         log = TRUE))
+      }
+  },
+  vfamily = c("augbinomial", "vcategorical"),
+  deriv = eval(substitute(expression({
+    Musual = 2
+    Mdiv2 =  M / 2
+
+    NOS = M / Musual
+
+    Konst1 = 1  # Works with this
+    deriv1 = Konst1 * w *
+      if ( .link == "logit") {
+          y * (1 - mu)
+      } else  {
+          stop("this is not programmed in yet")
+          dtheta.deta(mu, link = .link , earg = .earg ) *
+          (y / mu - 1.0) / (1.0 - mu)
+      }
+    deriv2 = Konst1 * w *
+      if ( .link == "logit") {
+         -(1 - y) * mu
+      } else  {
+          stop("this is not programmed in yet")
+          dtheta.deta(mu, link = .link , earg = .earg ) *
+          (y / mu - 1.0) / (1.0 - mu)
+      }
 
-      myderiv = (cbind(deriv1,
-                       deriv2))[, interleave.VGAM(Musual * NOS,
-                                                  M = Musual)]
-      myderiv
-    }), list( .link = link, .earg = earg))),
-    weight = eval(substitute(expression({
-        tmp100 = mu * (1.0 - mu)
+    myderiv = (cbind(deriv1,
+                     deriv2))[, interleave.VGAM(Musual * NOS,
+                                                M = Musual)]
+    myderiv
+  }), list( .link = link, .earg = earg))),
+  weight = eval(substitute(expression({
+      tmp100 = mu * (1.0 - mu)
 
-        tmp200 = if ( .link == "logit") {
+      tmp200 = if ( .link == "logit") {
           cbind(w * tmp100)
         } else {
           cbind(w * dtheta.deta(mu, link = .link , earg = .earg )^2 / tmp100)
         }
 
-        wk_wt1 = (Konst1^2) * tmp200 * (1 - mu)
-        wk_wt2 = (Konst1^2) * tmp200 *      mu
+      wk.wt1 = (Konst1^2) * tmp200 * (1 - mu)
+      wk.wt2 = (Konst1^2) * tmp200 *      mu
 
 
 
 
-        my_wk_wt = cbind(wk_wt1, wk_wt2)
-        my_wk_wt = my_wk_wt[, interleave.VGAM(Musual * NOS, M = Musual)]
-        my_wk_wt
-    }), list( .link = link, .earg = earg))))
+    my.wk.wt = cbind(wk.wt1, wk.wt2)
+    my.wk.wt = my.wk.wt[, interleave.VGAM(Musual * NOS, M = Musual)]
+    my.wk.wt
+  }), list( .link = link, .earg = earg))))
 }
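
The family.binomial.R hunks above rename several family functions: dexppoisson becomes double.exppoisson, dexpbinomial becomes double.expbinomial, and mbinomial becomes matched.binomial (their "vfamily" slots change to match). A minimal sketch of updating a call to the first of these, assuming VGAM 0.9-3 is installed; the simulated data, formula and coef() call are illustrative only and not part of the patch:

library(VGAM)
set.seed(123)
mydat <- transform(data.frame(x2 = runif(200)),
                   y = rpois(200, lambda = exp(1 + 2 * x2)))
## Up to 0.9-2 the family was called dexppoisson():
## fit <- vglm(y ~ x2, dexppoisson(), data = mydat)
## From 0.9-3 the same model uses the new name:
fit <- vglm(y ~ x2, double.exppoisson(), data = mydat)
coef(fit, matrix = TRUE)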
 
 
diff --git a/R/family.loglin.R b/R/family.loglin.R
index eda531c..6d96144 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -6,7 +6,13 @@
 
 
 
- loglinb2 <- function(exchangeable = FALSE, zero = NULL) {
+ loglinb2 <- function(exchangeable = FALSE, zero = 3) {
+
+
+
+  if (!is.logical(exchangeable))
+    warning("argument 'exchangeable' should be a single logical")
+
 
   new("vglmff",
   blurb = c("Log-linear model for binary data\n\n",
@@ -14,9 +20,14 @@
             "Identity: u1, u2, u12",
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x,
-                           .exchangeable , constraints,
-                           apply.int = TRUE)
+    cm.intercept.default <- diag(3)
+
+    constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x = x,
+                           bool = .exchangeable ,
+                           constraints = constraints,
+                           apply.int = TRUE,
+                           cm.default           = cm.intercept.default,
+                           cm.intercept.default = cm.intercept.default)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   initialize = expression({
@@ -119,7 +130,12 @@
 
 
 
- loglinb3 <- function(exchangeable = FALSE, zero = NULL) {
+ loglinb3 <- function(exchangeable = FALSE, zero = 4:6) {
+
+
+  if (!is.logical(exchangeable))
+    warning("argument 'exchangeable' should be a single logical")
+
 
   new("vglmff",
   blurb = c("Log-linear model for trivariate binary data\n\n",
@@ -127,9 +143,14 @@
             "Identity: u1, u2, u3, u12, u13, u23",
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
-                           .exchangeable, constraints,
-                           apply.int = TRUE)
+    cm.intercept.default <- diag(6)
+
+    constraints <- cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x = x,
+                           bool = .exchangeable ,
+                           constraints = constraints,
+                           apply.int = TRUE,
+                           cm.default           = cm.intercept.default,
+                           cm.intercept.default = cm.intercept.default)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   initialize = expression({
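
Note the changed defaults above: loglinb2() moves from zero = NULL to zero = 3, and loglinb3() to zero = 4:6, so the association terms (u12; u12, u13, u23) are modelled as intercept-only unless zero is reset. A minimal sketch of both behaviours; the simulated data are illustrative only and not part of the patch:

library(VGAM)
set.seed(1)
ldat <- data.frame(x2 = runif(100),
                   y1 = rbinom(100, size = 1, prob = 0.5),
                   y2 = rbinom(100, size = 1, prob = 0.5))
## New default (zero = 3): u12 is intercept-only
fit.new <- vglm(cbind(y1, y2) ~ x2, loglinb2(), data = ldat)
## Old behaviour (zero = NULL): u12 may also depend on x2
fit.old <- vglm(cbind(y1, y2) ~ x2, loglinb2(zero = NULL), data = ldat)
coef(fit.new, matrix = TRUE)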
diff --git a/R/family.math.R b/R/family.math.R
index 241435d..08ded5a 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -12,6 +12,27 @@
 
 
 
+log1pexp <- function(x) {
+
+  ans <- log1p(exp(x))
+  big <- (x > 10)
+  ans[big] <- x[big] + log1p(exp(-x[big]))
+  ans
+}
+
+
+
+
+
+
+
+erf <- function(x)
+  2 * pnorm(x * sqrt(2)) - 1
+
+erfc <- function(x)
+  2 * pnorm(x * sqrt(2), lower.tail = FALSE)
+
+
 
 
 lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
@@ -26,7 +47,7 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
   cutpt <- 3.0
   if (any(myTF <- !is.na(x) & x > cutpt)) {
     L1 <- log(x[!is.na(x) & x > cutpt])  # log(as.complex(x))
-    L2 <- log(L1) # log(as.complex(L1))
+    L2 <- log(L1)  # log(as.complex(L1))
     wzinit <- L1 - L2 +
           (L2 +
           (L2*( -2 + L2)/(2) +
@@ -69,7 +90,7 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
   if (!is.Numeric(shape, positive = TRUE))
     stop("bad input for argument 'shape'")
 
-  if (!is.Numeric(tmax, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(tmax, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'tmax'")
   if (tmax < 10)
     warning("probably argument 'tmax' is too small")
@@ -83,14 +104,14 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
   psidp1 <- psidp - 1 / shape^2
 
   fred <-
-    dotC(name = "VGAM_C_vdigami",
+    .C("VGAM_C_vdigami",
          d = as.double(matrix(0, 6, nnn)),
          x = as.double(q), p = as.double(shape),
          as.double(gplog), as.double(gp1log), as.double(psip),
          as.double(psip1), as.double(psidp), as.double(psidp1),
          ifault = integer(nnn),
          tmax = as.double(tmax),
-         as.integer(nnn))
+         as.integer(nnn), PACKAGE = "VGAM")
   answer <- matrix(fred$d, nnn, 6, byrow = TRUE)
   dimnames(answer) <- list(names(q),
                            c("q", "q^2", "shape", "shape^2",
@@ -109,4 +130,63 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
 
 
 
+expint <- function(x) {
+
+
+  LLL <- length(x)
+  answer <- .C("sf_C_expint",
+                 x = as.double(x),
+                 size = as.integer(LLL),
+                 ans = double(LLL), PACKAGE = "VGAM")$ans
+
+  answer[x  < 0] <- NA
+  answer[x == 0] <- NA
+
+  answer
+}
+
+
+
+expexpint <- function(x) {
+
+
+
+
+  LLL <- length(x)
+  answer <- .C("sf_C_expexpint",
+                 x = as.double(x),
+                 size = as.integer(LLL),
+                 ans = double(LLL), PACKAGE = "VGAM")$ans
+
+  answer[x  < 0] <- NA
+  answer[x == 0] <- NA
+
+  answer
+}
+
+
+
+
+
+
+expint.E1 <- function(x) {
+
+
+
+
+  LLL <- length(x)
+  answer <- .C("sf_C_expint_e1",
+                 x = as.double(x),
+                 size = as.integer(LLL),
+                 ans = double(LLL), PACKAGE = "VGAM")$ans
+
+  answer[x  < 0] <- NA
+  answer[x == 0] <- NA
+
+  answer
+}
+
+
+
+
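
family.math.R gains small special-function helpers: log1pexp() computes log(1 + exp(x)) without overflow, erf()/erfc() wrap pnorm(), and expint(), expexpint() and expint.E1() call compiled code in the package. The first two can be checked in plain R; the lines below are a standalone illustration of the formulas added above, not part of the patch:

## log1p(exp(x)) overflows to Inf for large x; log1pexp() switches to
## x + log1p(exp(-x)) when x > 10, which is exact up to rounding:
x <- c(-5, 0, 10, 50, 800)
naive  <- log1p(exp(x))                                   # Inf at x = 800
stable <- ifelse(x > 10, x + log1p(exp(-x)), log1p(exp(x)))
cbind(x, naive, stable)

## erf()/erfc() via the normal CDF, exactly as in the added code:
erf  <- function(x) 2 * pnorm(x * sqrt(2)) - 1
erfc <- function(x) 2 * pnorm(x * sqrt(2), lower.tail = FALSE)
erf(1) + erfc(1)    # equals 1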
 
diff --git a/R/family.mixture.R b/R/family.mixture.R
index dd4bc3a..fec81b2 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -13,12 +13,12 @@
 
 
 
-mix2normal1.control <- function(trace = TRUE, ...) {
-    list(trace = trace)
+mix2normal.control <- function(trace = TRUE, ...) {
+  list(trace = trace)
 }
 
 
- mix2normal1 <-
+ mix2normal <-
     function(lphi = "logit",
              lmu = "identity",
              lsd = "loge",
@@ -26,7 +26,7 @@ mix2normal1.control <- function(trace = TRUE, ...) {
              imu1 = NULL, imu2 = NULL,
              isd1 = NULL, isd2 = NULL,
              qmu = c(0.2, 0.8),
-             equalsd = TRUE,
+             eq.sd = TRUE,
              nsimEIM = 100,
              zero = 1) {
   lphi <- as.list(substitute(lphi))
@@ -46,14 +46,14 @@ mix2normal1.control <- function(trace = TRUE, ...) {
   esd1 <- esd2 <- esd
 
 
-  if (!is.Numeric(qmu, allowable.length = 2,
+  if (!is.Numeric(qmu, length.arg = 2,
                   positive = TRUE) ||
       any(qmu >= 1))
     stop("bad input for argument 'qmu'")
 
 
   if (length(iphi) &&
-     (!is.Numeric(iphi, allowable.length = 1,
+     (!is.Numeric(iphi, length.arg = 1,
                   positive = TRUE) ||
       iphi>= 1))
       stop("bad input for argument 'iphi'")
@@ -67,9 +67,9 @@ mix2normal1.control <- function(trace = TRUE, ...) {
     stop("bad input for argument 'isd2'")
 
 
-  if (!is.logical(equalsd) || length(equalsd) != 1)
-    stop("bad input for argument 'equalsd'")
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.logical(eq.sd) || length(eq.sd) != 1)
+    stop("bad input for argument 'eq.sd'")
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10)
     stop("'nsimEIM' should be an integer greater than 10")
@@ -87,10 +87,12 @@ mix2normal1.control <- function(trace = TRUE, ...) {
             "Variance: phi*sd1^2 + (1 - phi)*sd2^2 + ",
                       "phi*(1 - phi)*(mu1-mu2)^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(rbind(diag(4), c(0, 0, 1,0)), x, .equalsd ,
-                           constraints, apply.int = TRUE)
+    constraints <- cm.vgam(rbind(diag(4), c(0, 0, 1,0)), x = x,
+                           bool = .eq.sd ,
+                           constraints = constraints,
+                           apply.int = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
-  }), list( .zero = zero, .equalsd = equalsd ))),
+  }), list( .zero = zero, .eq.sd = eq.sd ))),
   initialize = eval(substitute(expression({
 
     temp5 <-
@@ -129,10 +131,10 @@ mix2normal1.control <- function(trace = TRUE, ...) {
                       len = n)
       init.sd2 <- rep(if(length( .isd2 )) .isd2 else sd(sorty[ind.2]),
                       len = n)
-      if ( .equalsd ) {
+      if ( .eq.sd ) {
         init.sd1 <- init.sd2 = (init.sd1 + init.sd2)/2
         if (!all.equal( .esd1, .esd2 ))
-          stop("'esd1' and 'esd2' must be equal if 'equalsd = TRUE'")
+          stop("'esd1' and 'esd2' must be equal if 'eq.sd = TRUE'")
       }
       etastart <- cbind(
                   theta2eta(init.phi, .lphi, earg = .ephi),
@@ -144,7 +146,7 @@ mix2normal1.control <- function(trace = TRUE, ...) {
   }), list(.lphi = lphi, .lmu = lmu,
            .iphi = iphi, .imu1 = imu1, .imu2 = imu2,
            .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
-           .esd1 = esd1, .esd2 = esd2, .equalsd = equalsd,
+           .esd1 = esd1, .esd2 = esd2, .eq.sd = eq.sd,
            .lsd = lsd, .isd1 = isd1, .isd2 = isd2, .qmu = qmu))),
   linkinv = eval(substitute(function(eta, extra = NULL){
       phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
@@ -162,10 +164,10 @@ mix2normal1.control <- function(trace = TRUE, ...) {
                       "sd1" = .esd1, "mu2" = .emu2, "sd2" = .esd2)
 
     misc$expected <- TRUE
-    misc$equalsd <- .equalsd
+    misc$eq.sd <- .eq.sd
     misc$nsimEIM <- .nsimEIM
     misc$multipleResponses <- FALSE
-  }), list(.lphi = lphi, .lmu = lmu, .lsd = lsd, .equalsd = equalsd,
+  }), list(.lphi = lphi, .lmu = lmu, .lsd = lsd, .eq.sd = eq.sd,
            .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
            .esd1 = esd1, .esd2 = esd2,
            .nsimEIM = nsimEIM ))),
@@ -185,7 +187,7 @@ mix2normal1.control <- function(trace = TRUE, ...) {
           .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
           .esd1 = esd1, .esd2 = esd2,
           .lsd = lsd ))),
-  vfamily = c("mix2normal1"),
+  vfamily = c("mix2normal"),
   deriv = eval(substitute(expression({
     phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
     mu1 <- eta2theta(eta[, 2], link = .lmu,  earg = .emu1)
@@ -227,7 +229,7 @@ mix2normal1.control <- function(trace = TRUE, ...) {
         (1 - phi) * dnorm((ysim-mu2)/sd2) / sd2),
         c("phi","mu1","sd1","mu2","sd2"), hessian= TRUE)
     run.mean <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- ifelse(runif(n) < phi, rnorm(n, mu1, sd1),
                                      rnorm(n, mu2, sd2))
 
@@ -236,8 +238,8 @@ mix2normal1.control <- function(trace = TRUE, ...) {
       rm(ysim)
 
       temp3 <- matrix(0, n, dimm(M))
-      for(ss in 1:M)
-          for(tt in ss:M)
+      for (ss in 1:M)
+          for (tt in ss:M)
               temp3[,iam(ss,tt, M)] <-  -d2l.dthetas2[,ss,tt]
 
       run.mean <- ((ii-1) * run.mean + temp3) / ii
@@ -282,11 +284,11 @@ mix2poisson.control <- function(trace = TRUE, ...) {
 
 
 
-  if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
+  if (!is.Numeric(qmu, length.arg = 2, positive = TRUE) ||
       any(qmu >= 1))
     stop("bad input for argument 'qmu'")
   if (length(iphi) &&
-     (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
+     (!is.Numeric(iphi, length.arg = 1, positive = TRUE) ||
      iphi >= 1))
     stop("bad input for argument 'iphi'")
   if (length(il1) && !is.Numeric(il1))
@@ -295,7 +297,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
     stop("bad input for argument 'il2'")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10)
     stop("'nsimEIM' should be an integer greater than 10")
@@ -408,7 +410,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
            .nsimEIM = nsimEIM ))),
   weight = eval(substitute(expression({
     run.mean <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- ifelse(runif(n) < phi, rpois(n, lambda1),
                                     rpois(n, lambda2))
       f1 <- dpois(x = ysim, lam = lambda1)
@@ -486,11 +488,11 @@ mix2exp.control <- function(trace = TRUE, ...) {
   el1 <- el2 <- elambda
 
 
-  if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
+  if (!is.Numeric(qmu, length.arg = 2, positive = TRUE) ||
       any(qmu >= 1))
     stop("bad input for argument 'qmu'")
   if (length(iphi) &&
-     (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
+     (!is.Numeric(iphi, length.arg = 1, positive = TRUE) ||
       iphi >= 1))
     stop("bad input for argument 'iphi'")
   if (length(il1) && !is.Numeric(il1))
@@ -501,7 +503,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
 
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
+  if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE) ||
       nsimEIM <= 10)
     stop("'nsimEIM' should be an integer greater than 10")
 
@@ -611,7 +613,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
            .ephi = ephi, .el1 = el1, .el2 = el2 ))),
   weight = eval(substitute(expression({
     run.mean <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- ifelse(runif(n) < phi, rexp(n, lambda1),
                                      rexp(n, lambda2))
       f1 <- dexp(x = ysim, rate=lambda1)
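
In family.mixture.R, mix2normal1() is renamed mix2normal() and its 'equalsd' argument becomes 'eq.sd' (mix2normal1.control() is renamed likewise). A minimal sketch of updating a call; the simulated two-component data are illustrative only, and mixture fits remain sensitive to starting values:

library(VGAM)
set.seed(2)
mdat <- data.frame(y = c(rnorm(150, mean = 0, sd = 1),
                         rnorm(50,  mean = 4, sd = 1)))
## Up to 0.9-2:
## fit <- vglm(y ~ 1, mix2normal1(equalsd = TRUE), data = mdat)
## From 0.9-3:
fit <- vglm(y ~ 1, mix2normal(eq.sd = TRUE), data = mdat)
coef(fit, matrix = TRUE)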
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index 0a81284..a56b1fa 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -22,13 +22,13 @@ vnonlinear.control <- function(save.weight = TRUE, ...) {
 
 
 
-subset_lohi <- function(xvec, yvec,
+subset.lohi <- function(xvec, yvec,
                         probs.x = c(0.15, 0.85),
                         type = c("median", "wtmean", "unwtmean"),
                         wtvec = rep(1, len = length(xvec))) {
 
 
-  if (!is.Numeric(probs.x, allowable.length = 2))
+  if (!is.Numeric(probs.x, length.arg = 2))
     stop("argument 'probs.x' must be numeric and of length two")
 
   min.q <- quantile(xvec, probs = probs.x[1] )
@@ -93,10 +93,10 @@ micmen.control <- function(save.weight = TRUE, ...) {
 
   firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("argument 'imethod' must be integer")
-  if (!is.Numeric(probs.x, allowable.length = 2))
+  if (!is.Numeric(probs.x, length.arg = 2))
     stop("argument 'probs.x' must be numeric and of length two")
   if (!is.logical(oim) || length(oim) != 1)
     stop("argument 'oim' must be single logical")
@@ -104,7 +104,7 @@ micmen.control <- function(save.weight = TRUE, ...) {
     stopifnot(nsimEIM > 10, length(nsimEIM) == 1,
               nsimEIM == round(nsimEIM))
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("'imethod' must be 1 or 2 or 3")
@@ -159,7 +159,7 @@ micmen.control <- function(save.weight = TRUE, ...) {
     if (ncol(as.matrix(Xm2)) != 1)
       stop("regressor not found or is not a vector. Use the ",
            "'form2' argument without an intercept")
-    Xm2 <- as.vector(Xm2) # Make sure
+    Xm2 <- as.vector(Xm2)  # Make sure
     extra$Xm2 <- Xm2          # Needed for @linkinv
 
     predictors.names <-
@@ -176,7 +176,7 @@ micmen.control <- function(save.weight = TRUE, ...) {
         init2 <- median(init1 * Xm2 / y - Xm2)
       }
       if ( .imethod == 1 || .imethod == 2) {
-        mysubset <- subset_lohi(Xm2, y, probs.x = .probs.x,
+        mysubset <- subset.lohi(Xm2, y, probs.x = .probs.x,
                   type = ifelse( .imethod == 1, "median", "wtmean"),
                   wtvec = w)
 
@@ -218,13 +218,13 @@ micmen.control <- function(save.weight = TRUE, ...) {
     misc$earg <- list(theta1 = .earg1 , theta2 = .earg2 )
 
     misc$rpar <- rpar
-    fit$df.residual <- n - rank   # Not nrow_X_vlm - rank
-    fit$df.total <- n             # Not nrow_X_vlm
+    fit$df.residual <- n - rank   # Not nrow.X.vlm - rank
+    fit$df.total <- n             # Not nrow.X.vlm
 
     extra$Xm2 <- NULL             # Regressor is in control$regressor 
     dpar <- .dispersion
     if (!dpar) {
-      dpar <- sum(c(w) * (y - mu)^2) / (n - ncol_X_vlm)
+      dpar <- sum(c(w) * (y - mu)^2) / (n - ncol.X.vlm)
     }
     misc$dispersion <- dpar
 
@@ -377,7 +377,7 @@ skira.control <- function(save.weight = TRUE, ...) {
 
   firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
 
-  if (!is.Numeric(probs.x, allowable.length = 2))
+  if (!is.Numeric(probs.x, length.arg = 2))
     stop("argument 'probs.x' must be numeric and of length two")
 
   estimated.dispersion <- dispersion == 0
@@ -386,7 +386,7 @@ skira.control <- function(save.weight = TRUE, ...) {
   if (mode(link2) != "character" && mode(link2) != "name")
     link2 <- as.character(substitute(link2))
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("argument 'imethod' must be integer")
 
@@ -453,7 +453,7 @@ skira.control <- function(save.weight = TRUE, ...) {
         max.q <- quantile(Xm2, probs = .probs.x[2] )
       if ( .imethod == 3 || .imethod == 2 ) {
 
-        mysubset <- subset_lohi(Xm2, y, probs.x = .probs.x,
+        mysubset <- subset.lohi(Xm2, y, probs.x = .probs.x,
                   type = ifelse( .imethod == 2, "median", "wtmean"),
                   wtvec = w)
 
@@ -486,8 +486,8 @@ skira.control <- function(save.weight = TRUE, ...) {
         fitted(smooth.spline(Xm2, y, w = w, df = 2.0))
       }
 
-      mysubset <- subset_lohi(Xm2, y, probs.x = .probs.x,
-                type = "wtmean", wtvec = w)
+      mysubset <- subset.lohi(Xm2, y, probs.x = .probs.x,
+                              type = "wtmean", wtvec = w)
 
 
       mat.x <- with(mysubset, cbind(c(1, 1),
@@ -543,7 +543,7 @@ skira.control <- function(save.weight = TRUE, ...) {
     fit$df.total <- n
     dpar <- .dispersion
     if (!dpar) {
-      dpar <- sum(c(w) * (y - mu)^2) / (n - ncol_X_vlm)
+      dpar <- sum(c(w) * (y - mu)^2) / (n - ncol.X.vlm)
     }
     misc$dispersion <- dpar
     misc$default.dispersion <- 0
@@ -624,7 +624,7 @@ skira.control <- function(save.weight = TRUE, ...) {
       mysigma <- sqrt( median( (y - mu)^2 ) ) / 100
       mysigma <- 1
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- 1 / (theta1 + theta2 * Xm2) + rnorm(n, sd = mysigma)
         temp3 <- (ysim - mu) * dmus.dthetas * dthetas.detas
         run.varcov <- run.varcov +
diff --git a/R/family.normal.R b/R/family.normal.R
index c55f5dc..9bd8173 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -14,7 +14,7 @@ VGAM.weights.function <- function(w, M, n) {
 
   ncolw <- ncol(as.matrix(w))
   if (ncolw == 1) {
-    wz <- matrix(w, nrow = n, ncol = M) # w_i * diag(M)
+    wz <- matrix(w, nrow = n, ncol = M)  # w_i * diag(M)
   } else if (ncolw == M) {
     wz <- as.matrix(w)
   } else if (ncolw < M && M > 1) {
@@ -40,7 +40,7 @@ VGAM.weights.function <- function(w, M, n) {
 
  gaussianff <- function(dispersion = 0, parallel = FALSE, zero = NULL) {
 
-  if (!is.Numeric(dispersion, allowable.length = 1) ||
+  if (!is.Numeric(dispersion, length.arg = 1) ||
       dispersion < 0)
     stop("bad input for argument 'dispersion'")
   estimated.dispersion <- dispersion == 0
@@ -50,7 +50,9 @@ VGAM.weights.function <- function(w, M, n) {
   blurb = c("Vector linear/additive model\n",
             "Links:    identity for Y1,...,YM"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -113,7 +115,7 @@ VGAM.weights.function <- function(w, M, n) {
       wz <- VGAM.weights.function(w = w, M = M, n = n)
       temp5 <- ResSS.vgam(y-mu, wz = wz, M = M)
         dpar <- temp5 / (length(y) -
-        (if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
+        (if (is.numeric(ncol(X.vlm.save))) ncol(X.vlm.save) else 0))
     }
     misc$dispersion <- dpar
     misc$default.dispersion <- 0
@@ -145,14 +147,11 @@ VGAM.weights.function <- function(w, M, n) {
     M <- if (is.matrix(y)) ncol(y) else 1
     n <- if (is.matrix(y)) nrow(y) else length(y)
     wz <- VGAM.weights.function(w = w, M = M, n = n)
- print("head(wz) -----------------------------")
- print( head(wz) )
     temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
 
 
 
     if (M == 1 || ncol(wz) == M) {
- print("hi3 ooooo")
       -0.5 * temp1 + 0.5 * sum(log(wz)) - n * (M / 2) * log(2*pi)
     } else {
       if (all(wz[1, ] == apply(wz, 2, min)) &&
@@ -207,9 +206,9 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
 
 
   L <- max(length(x), length(mean), length(sd))
-  x    <- rep(x,    len = L);
-  mean <- rep(mean, len = L);
-  sd   <- rep(sd,   len = L);
+  if (length(x)    != L) x    <- rep(x,    len = L)
+  if (length(mean) != L) mean <- rep(mean, len = L)
+  if (length(sd)   != L) sd   <- rep(sd,   len = L)
 
   if (log.arg) {
     ifelse(x < 0, log(0), dnorm(x, mean = mean, sd = sd, log = TRUE) -
@@ -222,41 +221,36 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
 
 pposnorm <- function(q, mean = 0, sd = 1) {
   L <- max(length(q), length(mean), length(sd))
-  q <- rep(q, len = L);
-  mean <- rep(mean, len = L);
-  sd <- rep(sd, len = L);
+  if (length(q)    != L) q    <- rep(q,    len = L)
+  if (length(mean) != L) mean <- rep(mean, len = L)
+  if (length(sd)   != L) sd   <- rep(sd,   len = L)
+
   ifelse(q < 0, 0, (pnorm(q, mean = mean, sd = sd) -
-                    pnorm(0, mean = mean, sd = sd)) / pnorm(q = mean/sd))
+                    pnorm(0, mean = mean, sd = sd)) / pnorm(q = mean / sd))
 }
 
 
 qposnorm <- function(p, mean = 0, sd = 1) {
-  if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
-    stop("bad input for argument 'p'")
-  qnorm(p = p + (1-p) * pnorm(0, mean = mean, sd = sd),
+  qnorm(p = p + (1 - p) * pnorm(0, mean = mean, sd = sd),
         mean = mean, sd = sd)
 }
 
 
 rposnorm <- function(n, mean = 0, sd = 1) {
-  if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'n'")
-  mean <- rep(mean, length = n)
-  sd <- rep(sd, length = n)
   qnorm(p = runif(n, min = pnorm(0, mean = mean, sd = sd)),
         mean = mean, sd = sd)
 }
 
 
 
- posnormal1.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+ posnormal.control <- function(save.weight = TRUE, ...) {
+  list(save.weight = save.weight)
 }
 
 
 
 
- posnormal1 <- function(lmean = "identity", lsd = "loge",
+ posnormal <- function(lmean = "identity", lsd = "loge",
                         imean = NULL, isd = NULL,
                         nsimEIM = 100, zero = NULL) {
  warning("this VGAM family function is not working properly yet")
@@ -281,7 +275,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
 
   if (length(nsimEIM))
-    if (!is.Numeric(nsimEIM, allowable.length = 1,
+    if (!is.Numeric(nsimEIM, length.arg = 1,
                     integer.valued = TRUE) ||
         nsimEIM <= 10)
       stop("argument 'nsimEIM' should be an integer greater than 10")
@@ -329,7 +323,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
         init.me <- rep(quantile(y, probs = 0.40), len = n)
       if (!length(init.sd))
         init.sd <- rep(sd(c(y)) * 1.2, len = n)
-      etastart <- cbind(theta2eta(init.me, .lmean, earg = .emean),
+      etastart <- cbind(theta2eta(init.me, .lmean, earg = .emean ),
                         theta2eta(init.sd, .lsd,   earg = .esd ))
     }
   }), list( .lmean = lmean, .lsd = lsd,
@@ -355,7 +349,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
             .nsimEIM = nsimEIM ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
     mysd <- eta2theta(eta[, 2], .lsd,   earg = .esd )
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
@@ -364,9 +358,9 @@ rposnorm <- function(n, mean = 0, sd = 1) {
     }
   }, list( .lmean = lmean, .lsd = lsd,
            .emean = emean, .esd = esd ))),
-  vfamily = c("posnormal1"),
+  vfamily = c("posnormal"),
   deriv = eval(substitute(expression({
-    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
     mysd <- eta2theta(eta[, 2], .lsd,  earg = .esd )
 
     zedd <- (y-mymu) / mysd
@@ -376,7 +370,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
     dl.dmu <- zedd / mysd^2 - temp7 / temp8
     dl.dsd <- (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
 
-    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean )
     dsd.deta <- dtheta.deta(mysd, .lsd,   earg = .esd )
     dthetas.detas <- cbind(dmu.deta, dsd.deta)
     c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
@@ -386,7 +380,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
     if (length( .nsimEIM )) {
-        for(ii in 1:( .nsimEIM )) {
+        for (ii in 1:( .nsimEIM )) {
           ysim <- rposnorm(n, m=mymu, sd = mysd)
           zedd <- (ysim-mymu) / mysd
           temp7 <- dnorm(-mymu/mysd)
@@ -460,16 +454,12 @@ pbetanorm <- function(q, shape1, shape2, mean = 0, sd = 1,
 
 
 qbetanorm <- function(p, shape1, shape2, mean = 0, sd = 1) {
-  if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
-    stop("bad input for argument 'p'")
   qnorm(p = qbeta(p = p, shape1 = shape1, shape2 = shape2),
         mean = mean, sd = sd)
 }
 
 
 rbetanorm <- function(n, shape1, shape2, mean = 0, sd = 1) {
-  if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'n'")
   qnorm(p = qbeta(p = runif(n), shape1 = shape1, shape2 = shape2),
         mean = mean, sd = sd)
 }
@@ -483,13 +473,16 @@ dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
   rm(log)
 
 
-  if (!is.Numeric(d, allowable.length = 1) ||
+  if (!is.Numeric(d, length.arg = 1) ||
       max(d) >= 2)
     stop("bad input for argument 'd'")
 
   L <- max(length(x), length(mean), length(sigma))
-  x <- rep(x, len = L); mean <- rep(mean, len = L);
-  sigma <- rep(sigma, len = L);
+  if (length(x)     != L) x     <- rep(x,     len = L)
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sigma) != L) sigma <- rep(sigma, len = L)
+
+
   hh <- 2 - d
   KK <- 1 / (1 + 1/hh + 0.75/hh^2)
   if (log.arg) {
@@ -503,14 +496,15 @@ dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
 
 
 ptikuv <- function(q, d, mean = 0, sigma = 1) {
-  if (!is.Numeric(d, allowable.length = 1) ||
+  if (!is.Numeric(d, length.arg = 1) ||
       max(d) >= 2)
     stop("bad input for argument 'd'")
 
   L <- max(length(q), length(mean), length(sigma))
-  q    <- rep(q,      len = L);
-  mean <- rep(mean,   len = L);
-  sigma <- rep(sigma, len = L);
+  if (length(q)     != L) q     <- rep(q,     len = L)
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sigma) != L) sigma <- rep(sigma, len = L)
+
   zedd1 <- 0.5 * ((q - mean) / sigma)^2
   ans <- q*0 + 0.5
   hh <- 2 - d
@@ -532,7 +526,7 @@ ptikuv <- function(q, d, mean = 0, sigma = 1) {
 qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
   if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
     stop("bad input for argument 'p'")
-  if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
+  if (!is.Numeric(d, length.arg = 1) || max(d) >= 2)
     stop("bad input for argument 'd'")
   if (!is.Numeric(mean))
     stop("bad input for argument 'mean'")
@@ -540,9 +534,9 @@ qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
     stop("bad input for argument 'sigma'")
 
   L <- max(length(p), length(mean), length(sigma))
-  p <- rep(p,         len = L);
-  mean <- rep(mean,   len = L);
-  sigma <- rep(sigma, len = L);
+  if (length(p)     != L) p     <- rep(p,     len = L)
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sigma) != L) sigma <- rep(sigma, len = L)
   ans <- rep(0.0, len = L)
 
   myfun <- function(x, d, mean = 0, sigma = 1, p)
@@ -568,13 +562,13 @@ qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
 rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
   if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE))
     stop("bad input for argument 'n'")
-  if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
+  if (!is.Numeric(d, length.arg = 1) || max(d) >= 2)
     stop("bad input for argument 'd'")
-  if (!is.Numeric(mean, allowable.length = 1))
+  if (!is.Numeric(mean, length.arg = 1))
     stop("bad input for argument 'mean'")
-  if (!is.Numeric(sigma, allowable.length = 1))
+  if (!is.Numeric(sigma, length.arg = 1))
     stop("bad input for argument 'sigma'")
-  if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(Smallno, positive = TRUE, length.arg = 1) ||
       Smallno > 0.01 ||
       Smallno < 2 * .Machine$double.eps)
       stop("bad input for argument 'Smallno'")
@@ -596,7 +590,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
       Upper <- Upper + sigma
     x <- runif(2*n, min = Lower, max = Upper)
     index <- runif(2*n, max = ymax) <
-            dtikuv(x, d = d, mean = mean, sigma = sigma)
+             dtikuv(x, d = d, mean = mean, sigma = sigma)
     sindex <- sum(index)
     if (sindex) {
       ptr2 <- min(n, ptr1 + sindex - 1)
@@ -628,7 +622,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
      (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
      max(zero) > 2))
     stop("bad input for argument 'zero'")
-  if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
+  if (!is.Numeric(d, length.arg = 1) || max(d) >= 2)
       stop("bad input for argument 'd'")
 
 
@@ -669,14 +663,14 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
       }
       mean.init <- rep(weighted.mean(y, w), len = n) 
       etastart <-
-        cbind(theta2eta(mean.init,  .lmean,  earg = .emean),
+        cbind(theta2eta(mean.init,  .lmean,  earg = .emean ),
               theta2eta(sigma.init, .lsigma, earg = .esigma))
     }
   }),list( .lmean = lmean, .lsigma = lsigma,
                            .isigma = isigma, .d = d,
            .emean = emean, .esigma = esigma ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta[, 1], .lmean, earg = .emean)
+    eta2theta(eta[, 1], .lmean, earg = .emean )
   }, list( .lmean = lmean,
            .emean = emean, .esigma = esigma ))),
   last = eval(substitute(expression({
@@ -690,7 +684,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
             .emean = emean, .esigma = esigma ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    mymu  <- eta2theta(eta[, 1], .lmean,  earg = .emean)
+    mymu  <- eta2theta(eta[, 1], .lmean,  earg = .emean )
     sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
@@ -701,10 +695,10 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
            .emean = emean, .esigma = esigma ))),
   vfamily = c("tikuv"),
   deriv = eval(substitute(expression({
-    mymu  <- eta2theta(eta[, 1], .lmean,  earg = .emean)
+    mymu  <- eta2theta(eta[, 1], .lmean,  earg = .emean )
     sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
 
-    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean )
     dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
 
     zedd <- (y - mymu) / sigma
@@ -726,7 +720,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
     ned2l.dmymu2 <- Dnos / sigma^2
     ned2l.dnu2   <- Dstar / sigma^2
 
-    wz <- matrix(as.numeric(NA), n, M) # diagonal matrix
+    wz <- matrix(as.numeric(NA), n, M)  # diagonal matrix
     wz[, iam(1, 1, M)] <- ned2l.dmymu2 * dmu.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dnu2 * dsigma.deta^2
     c(w) * wz
@@ -736,108 +730,106 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
 
 
 
-dfnorm <- function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
-  if (!is.Numeric(a1, positive = TRUE) ||
-      !is.Numeric(a2, positive = TRUE))
-    stop("bad input for arguments 'a1' and 'a2'")
-  if (any(a1 <= 0 | a2 <= 0))
-    stop("arguments 'a1' and 'a2' must have positive values only")
-  ans <- dnorm(x = x/(a1*sd) - mean/sd)/(a1*sd) +
-         dnorm(x = x/(a2*sd) + mean/sd)/(a2*sd)
+dfoldnorm <- function(x, mean = 0, sd = 1, a1 = 1, a2 = 1,
+                      log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+
+  ans <- dnorm(x = x/(a1*sd) - mean/sd) / (a1*sd) +
+         dnorm(x = x/(a2*sd) + mean/sd) / (a2*sd)
   ans[x < 0] <- 0
-  ans[a1 <= 0 | a2 <= 0 | is.na(a1) | is.na(a2)] <- NA
-  ans
+
+  ans[a1 <= 0 | a2 <= 0] <- NA
+  ans[sd <= 0] <- NA
+
+  if (log.arg) log(ans) else ans
 }
 
 
-pfnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2=1) {
-  if (!is.Numeric(a1, positive = TRUE) ||
-      !is.Numeric(a2, positive = TRUE))
-    stop("bad input for arguments 'a1' and 'a2'")
-  if (any(a1 <= 0 | a2 <= 0))
-    stop("arguments 'a1' and 'a2' must have positive values only")
-  L <- max(length(q), length(mean), length(sd))
-  q <- rep(q, len = L);
-  mean <- rep(mean, len = L);
-  sd <- rep(sd, len = L);
+pfoldnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2 = 1) {
+
 
-  ifelse(q < 0, 0,
-         pnorm(q =  q/(a1*sd) - mean/sd) -
-         pnorm(q = -q/(a2*sd) - mean/sd))
+  L <- max(length(q), length(mean), length(sd))
+  if (length(q)    != L) q    <- rep(q,    len = L)
+  if (length(mean) != L) mean <- rep(mean, len = L)
+  if (length(sd)   != L) sd   <- rep(sd,   len = L)
+
+  ans <- ifelse(q < 0, 0, pnorm(q =  q/(a1*sd) - mean/sd) -
+                          pnorm(q = -q/(a2*sd) - mean/sd))
+  ans[a1 <= 0 | a2 <= 0] <- NA
+  ans[sd <= 0] <- NA
+  ans
 }
 
 
-qfnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
+qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
   if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
     stop("bad input for argument 'p'")
-  if (!is.Numeric(a1, positive = TRUE) ||
-     !is.Numeric(a2, positive = TRUE))
-    stop("bad input for arguments 'a1' and 'a2'")
-  if (any(a1 <= 0 | a2 <= 0))
-    stop("arguments 'a1' and 'a2' must have positive values only")
 
   L <- max(length(p), length(mean), length(sd), length(a1), length(a2))
-  p    <- rep(p,    len = L);
-  mean <- rep(mean, len = L);
-  sd   <- rep(sd,   len = L);
-  a1   <- rep(a1,   len = L);
-  a2   <- rep(a2,   len = L);
+  if (length(p)    != L) p    <- rep(p,    len = L)
+  if (length(mean) != L) mean <- rep(mean, len = L)
+  if (length(sd)   != L) sd   <- rep(sd,   len = L)
+  if (length(a1)   != L) a1   <- rep(a1,   len = L)
+  if (length(a2)   != L) a2   <- rep(a2,   len = L)
   ans  <- rep(0.0 , len = L)
 
-  myfun <- function(x, mean = 0, sd = 1, a1 = 1, a2=2, p)
-    pfnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
+  myfun <- function(x, mean = 0, sd = 1, a1 = 1, a2 = 2, p)
+    pfoldnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
 
-  for(i in 1:L) {
-    mytheta <- mean[i]/sd[i]
-    EY <- sd[i] * ((a1[i]+a2[i]) *
+  for (ii in 1:L) {
+    mytheta <- mean[ii]/sd[ii]
+    EY <- sd[ii] * ((a1[ii]+a2[ii]) *
          (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
-         a2[i] * mytheta)
+          a2[ii] * mytheta)
     Upper <- 2 * EY
-    while (pfnorm(q = Upper, mean = mean[i], sd = sd[i],
-                  a1 = a1[i], a2 = a2[i]) < p[i])
-        Upper <- Upper + sd[i]
-    ans[i] <- uniroot(f = myfun, lower = 0, upper = Upper,
-                     mean = mean[i],
-                     sd = sd[i], a1 = a1[i], a2 = a2[i],
-                     p = p[i], ...)$root
+    while (pfoldnorm(q = Upper, mean = mean[ii], sd = sd[ii],
+                     a1 = a1[ii], a2 = a2[ii]) < p[ii])
+      Upper <- Upper + sd[ii]
+    ans[ii] <- uniroot(f = myfun, lower = 0, upper = Upper,
+                       mean = mean[ii], sd = sd[ii],
+                       a1 = a1[ii], a2 = a2[ii],
+                       p = p[ii], ...)$root
   }
+
+  ans[a1 <= 0 | a2 <= 0] <- NA
+  ans[sd <= 0] <- NA
+
   ans
 }
 
 
-rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
-  if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'n'")
-  if (!is.Numeric(a1, positive = TRUE) ||
-      !is.Numeric(a2, positive = TRUE))
-    stop("bad input for arguments 'a1' and 'a2'")
-  if (any(a1 <= 0 | a2 <= 0))
-    stop("arguments 'a1' and 'a2' must have positive values only")
+rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
+
   X <- rnorm(n, mean = mean, sd = sd)
-  pmax(a1 * X, -a2*X)
+  ans <- pmax(a1 * X, -a2*X)
+  ans[a1 <= 0 | a2 <= 0] <- NA
+  ans[sd <= 0] <- NA
+  ans
 }
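
Editorial sketch (not from the upstream sources) of the folded-normal d/p/q/r functions renamed above, using only the signatures shown in this hunk and assuming VGAM 0.9-3 is attached:

  library(VGAM)
  x <- seq(0.5, 4, by = 0.5)
  dfoldnorm(x, mean = 1, sd = 2)            # density on the positive half-line
  pp <- pfoldnorm(x, mean = 1, sd = 2)      # distribution function
  qfoldnorm(pp, mean = 1, sd = 2)           # recovers x via uniroot()
  set.seed(1)
  mean(rfoldnorm(10000, mean = 1, sd = 2))  # sample mean of simulated draws
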
 
 
 
 
- fnormal1 <- function(lmean = "identity", lsd = "loge",
+ foldnormal <- function(lmean = "identity", lsd = "loge",
                       imean = NULL,       isd = NULL,
                       a1 = 1, a2 = 1,
                       nsimEIM = 500, imethod = 1, zero = NULL) {
-  if (!is.Numeric(a1, positive = TRUE, allowable.length = 1) ||
-      !is.Numeric(a2, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(a1, positive = TRUE, length.arg = 1) ||
+      !is.Numeric(a2, positive = TRUE, length.arg = 1))
     stop("bad input for arguments 'a1' and 'a2'")
   if (any(a1 <= 0 | a2 <= 0))
     stop("arguments 'a1' and 'a2' must each be a positive value")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
 
 
-
   lmean <- as.list(substitute(lmean))
   emean <- link2list(lmean)
   lmean <- attr(emean, "function.name")
@@ -852,7 +844,7 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10)
     stop("argument 'nsimEIM' should be an integer greater than 10")
@@ -908,20 +900,20 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
             stddev <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
             Ahat <- m1d^2 / m2d
             thetahat <- sqrt(max(1/Ahat -1, 0.1))
-            mean.init <- rep(if(length( .imean)) .imean else
+            mean.init <- rep(if (length( .imean)) .imean else
                 thetahat * sqrt((stddev^2 + meany^2) * Ahat), len = n)
-            sd.init <- rep(if(length( .isd)) .isd else
+            sd.init <- rep(if (length( .isd)) .isd else
                 sqrt((stddev^2 + meany^2) * Ahat), len = n)
 }
 
 
       stddev <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
       meany <- weighted.mean(y, w)
-      mean.init <- rep(if(length( .imean )) .imean else
-          {if( .imethod == 1) median(y) else meany}, len = n)
-      sd.init <- rep(if(length( .isd )) .isd else
-          {if( .imethod == 1)  stddev else 1.2*sd(c(y))}, len = n)
-      etastart <- cbind(theta2eta(mean.init, .lmean, earg = .emean),
+      mean.init <- rep(if (length( .imean )) .imean else
+          {if ( .imethod == 1) median(y) else meany}, len = n)
+      sd.init <- rep(if (length( .isd )) .isd else
+          {if ( .imethod == 1)  stddev else 1.2*sd(c(y))}, len = n)
+      etastart <- cbind(theta2eta(mean.init, .lmean, earg = .emean ),
                         theta2eta(sd.init,   .lsd,   earg = .esd ))
     }
   }), list( .lmean = lmean, .lsd = lsd,
@@ -929,7 +921,7 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
             .imean = imean, .isd = isd,
             .a1 = a1, .a2 = a2, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
     mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
     mytheta <- mymu / mysd
     mysd * (( .a1 + .a2 ) * (mytheta * pnorm(mytheta) +
@@ -955,24 +947,26 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
             .a1 = a1, .a2 = a2 ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
     mysd <- eta2theta(eta[, 2], .lsd,   earg = .esd )
     a1vec <- .a1
     a2vec <- .a2
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
-          sum(c(w)*log(dnorm(x=y/(a1vec*mysd) - mymu/mysd)/(a1vec*mysd) +
-                    dnorm(x=y/(a2vec*mysd) + mymu/mysd)/(a2vec*mysd)))
+
+        sum(c(w) * dfoldnorm(y, mean = mymu, sd = mysd,
+                             a1 = a1vec, a2 = a2vec, log = TRUE))
       }
   }, list( .lmean = lmean, .lsd = lsd,
-           .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
-  vfamily = c("fnormal1"),
+           .emean = emean, .esd = esd,
+           .a1 = a1, .a2 = a2 ))),
+  vfamily = c("foldnormal"),
   deriv = eval(substitute(expression({
     Musual <- 2
-    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+    mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
     mysd <- eta2theta(eta[, 2], .lsd,   earg = .esd )
 
-    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+    dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean )
     dsd.deta <- dtheta.deta(mysd, .lsd,   earg = .esd )
 
     a1vec <- .a1
@@ -982,7 +976,7 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
                            mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
                 name = c("mymu", "mysd"), hessian = FALSE)
     eval.d3 <- eval(d3)
-    dl.dthetas <-  attr(eval.d3, "gradient") # == cbind(dl.dmu, dl.dsd)
+    dl.dthetas <-  attr(eval.d3, "gradient")  # == cbind(dl.dmu, dl.dsd)
     DTHETA.detas <- cbind(dmu.deta, dsd.deta)
     c(w) * DTHETA.detas * dl.dthetas
   }), list( .lmean = lmean, .lsd = lsd,
@@ -994,17 +988,17 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
                              mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
                   name = c("mymu", "mysd"), hessian = TRUE)
     run.mean <- 0
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- abs(rnorm(n, m = mymu, sd = mysd))
-      ysim <- rfnorm(n = n, mean = mymu, sd = mysd,
+      ysim <- rfoldnorm(n = n, mean = mymu, sd = mysd,
                      a1 = a1vec, a2 = a2vec)
       eval.de3 <- eval(de3)
       d2l.dthetas2 <- attr(eval.de3, "hessian")
       rm(ysim)
 
       temp3 <- matrix(0, n, dimm(M))
-      for(ss in 1:M)
-        for(tt in ss:M)
+      for (ss in 1:M)
+        for (tt in ss:M)
           temp3[, iam(ss,tt, M)] <-  -d2l.dthetas2[, ss,tt]
 
       run.mean <- ((ii-1) * run.mean + temp3) / ii
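
For orientation only (editorial, not part of the diff): the family function renamed from fnormal1() to foldnormal() is supplied to vglm() in the usual way; 'fdata' and 'y1' below are invented names.

  library(VGAM)
  set.seed(2)
  fdata <- data.frame(y1 = rfoldnorm(500, mean = 1, sd = 1))
  fit <- vglm(y1 ~ 1, foldnormal, data = fdata, trace = TRUE)
  coef(fit, matrix = TRUE)  # intercepts on the 'lmean' and 'lsd' link scales
  head(fitted(fit), 3)      # fitted folded-normal means
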
@@ -1043,14 +1037,14 @@ lqnorm <- function(qpower = 2,
 
 
 
-  if (!is.Numeric(qpower, allowable.length = 1) || qpower <= 1)
+  if (!is.Numeric(qpower, length.arg = 1) || qpower <= 1)
     stop("bad input for argument 'qpower'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
       shrinkage.init < 0 ||
       shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -1085,11 +1079,11 @@ lqnorm <- function(qpower = 2,
 
     if (!length(etastart))  {
         meany <- weighted.mean(y, w)
-        mean.init <- rep(if(length( .i.mu)) .i.mu else
-            {if( .imethod == 2) median(y) else 
-             if ( .imethod == 1) meany else
-             .sinit * meany + (1 - .sinit) * y
-            }, len = n)
+        mean.init <- rep(if (length( .i.mu )) .i.mu else {
+          if ( .imethod == 2) median(y) else 
+          if ( .imethod == 1) meany else
+            .sinit * meany + (1 - .sinit) * y
+        }, len = n)
         etastart <- theta2eta(mean.init, link = .link, earg = .earg)
     }
   }), list( .imethod = imethod, .i.mu = imu,
@@ -1146,13 +1140,13 @@ dtobit <- function(x, mean = 0, sd = 1,
   rm(log)
 
 
-  L <- max(length(x), length(mean), length(sd), length(Lower),
-          length(Upper))
-  x     <- rep(x,     len = L);
-  mean  <- rep(mean,  len = L);
-  sd    <- rep(sd,    len = L);
-  Lower <- rep(Lower, len = L);
-  Upper <- rep(Upper, len = L);
+  L <- max(length(x), length(mean), length(sd),
+           length(Lower), length(Upper))
+  if (length(x)     != L) x     <- rep(x,     len = L)
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sd)    != L) sd    <- rep(sd,    len = L)
+  if (length(Lower) != L) Lower <- rep(Lower, len = L)
+  if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
   ans <- dnorm(x = x, mean = mean, sd = sd, log = log.arg)
   ans[x <  Lower] <- if (log.arg) log(0.0) else 0.0
@@ -1192,13 +1186,13 @@ ptobit <- function(q, mean = 0, sd = 1,
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("argument 'log.p' must be a single logical")
 
-  L <- max(length(q), length(mean), length(sd), length(Lower),
-          length(Upper))
-  q     <- rep(q,     len = L);
-  mean  <- rep(mean,  len = L);
-  sd    <- rep(sd,    len = L);
-  Lower <- rep(Lower, len = L);
-  Upper <- rep(Upper, len = L);
+  L <- max(length(q), length(mean), length(sd),
+           length(Lower), length(Upper))
+  if (length(q)     != L) q     <- rep(q,     len = L)
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sd)    != L) sd    <- rep(sd,    len = L)
+  if (length(Lower) != L) Lower <- rep(Lower, len = L)
+  if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
   ans <- pnorm(q = q, mean = mean, sd = sd, lower.tail = lower.tail)
   ind1 <- (q <  Lower)
@@ -1217,13 +1211,13 @@ ptobit <- function(q, mean = 0, sd = 1,
 qtobit <- function(p, mean = 0, sd = 1,
                   Lower = 0, Upper = Inf) {
 
-  L <- max(length(p), length(mean), length(sd), length(Lower),
-           length(Upper))
-  p     <- rep(p, len = L);
-  mean  <- rep(mean, len = L);
-  sd    <- rep(sd, len = L);
-  Lower <- rep(Lower, len = L);
-  Upper <- rep(Upper, len = L);
+  L <- max(length(p), length(mean), length(sd),
+           length(Lower), length(Upper))
+  if (length(p)     != L) p     <- rep(p,     len = L)
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sd)    != L) sd    <- rep(sd,    len = L)
+  if (length(Lower) != L) Lower <- rep(Lower, len = L)
+  if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
   ans <- qnorm(p = p, mean = mean, sd = sd)
   pnorm.Lower <- ptobit(q = Lower, mean = mean, sd = sd)
@@ -1248,14 +1242,13 @@ rtobit <- function(n, mean = 0, sd = 1,
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
             stop("bad input for argument 'n'") else n
-  L <- max(use.n, length(mean), length(sd), length(Lower),
-          length(Upper))
-  mean  <- rep(mean,  len = L);
-  sd    <- rep(sd,    len = L);
-  Lower <- rep(Lower, len = L);
-  Upper <- rep(Upper, len = L);
+  L <- use.n
+  if (length(mean)  != L) mean  <- rep(mean,  len = L)
+  if (length(sd)    != L) sd    <- rep(sd,    len = L)
+  if (length(Lower) != L) Lower <- rep(Lower, len = L)
+  if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
   ans <- rnorm(n = use.n, mean = mean, sd = sd)
   cenL <- (ans < Lower)
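
Editorial illustration (not in the upstream sources) of the tobit d/p/q/r helpers whose argument recycling is tidied above; argument names follow the signatures visible in these hunks.

  library(VGAM)
  qq <- seq(-0.5, 2, by = 0.5)
  ptobit(qq, mean = 0.5, sd = 1, Lower = 0, Upper = 1.5)  # point masses sit at the bounds
  qtobit(c(0.25, 0.5, 0.75), mean = 0.5, sd = 1, Lower = 0, Upper = 1.5)
  set.seed(3)
  ysim <- rtobit(10, mean = 0.5, sd = 1, Lower = 0, Upper = 1.5)
  attr(ysim, "cenL")  # flags left-censored draws, as set in the code above
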
@@ -1302,7 +1295,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1316,12 +1309,12 @@ tobit.control <- function(save.weight = TRUE, ...) {
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10)
     stop("argument 'nsimEIM' should be an integer greater than 10")
 
-  if(mode(type.fitted) != "character" && mode(type.fitted) != "name")
+  if (mode(type.fitted) != "character" && mode(type.fitted) != "name")
         type.fitted <- as.character(substitute(type.fitted))
   type.fitted <- match.arg(type.fitted,
                            c("uncensored", "censored", "mean.obs"))[1]
@@ -1403,7 +1396,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
       mu.init <-
       sd.init <- matrix(0.0, n, ncoly)
-      for(ii in 1:ncol(y)) {
+      for (ii in 1:ncol(y)) {
         use.i11 <- i11[, ii]
         mylm <- lm.wfit(x = cbind(x[!use.i11, ]),
                        y = y[!use.i11, ii], w = w[!use.i11, ii])
@@ -1474,7 +1467,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .emu
       misc$earg[[Musual*ii  ]] <- .esd
     }
@@ -1593,7 +1586,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
             .Lower = Lower, .Upper = Upper ))),
   weight = eval(substitute(expression({
 
-    wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
     ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
 
 
@@ -1603,12 +1596,12 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     run.varcov <- 0
 
-    for(spp. in 1:ncoly) {
+    for (spp. in 1:ncoly) {
       run.varcov <- 0
       muvec <- mum[, spp.]
       sdvec <- sdm[, spp.]
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rtobit(n = n, mean = muvec, sd = sdvec,
                       Lower = Lowmat[, spp.], Upper = Uppmat[, spp.])
         cenL <- attr(ysim, "cenL")
@@ -1655,19 +1648,19 @@ tobit.control <- function(save.weight = TRUE, ...) {
                    dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
 
-      for(jay in 1:Musual)
-        for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+        for (kay in jay:Musual) {
           cptr <- iam((spp. - 1) * Musual + jay,
                       (spp. - 1) * Musual + kay,
                       M = M)
           wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
         }
-    } # End of for(spp.) loop
+    } # End of for (spp.) loop
 
     } else {
 
       wz1 <- matrix(0.0, n, dimm(Musual))
-      for(spp. in 1:ncoly) {
+      for (spp. in 1:ncoly) {
         zedd  <- (y[, spp.] - mum[, spp.]) / sdm[, spp.]
         zedd0 <- (            mum[, spp.]) / sdm[, spp.]
         phivec <- dnorm(zedd0)
@@ -1688,14 +1681,14 @@ tobit.control <- function(save.weight = TRUE, ...) {
       wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
                    dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
-      for(jay in 1:Musual)
-          for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+          for (kay in jay:Musual) {
               cptr <- iam((spp. - 1) * Musual + jay,
                           (spp. - 1) * Musual + kay,
                           M = M)
               wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
       }
-      } # End of for(spp.) loop
+      } # End of for (spp.) loop
 
     } # End of EIM
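
Hypothetical fit (editorial, not from the diff) with the tobit() family whose EIM code appears above; 'tdata', 'x2' and 'y1' are invented names, and the 'Lower' argument is assumed from the .Lower usage in these hunks.

  library(VGAM)
  set.seed(4)
  tdata <- data.frame(x2 = runif(200))
  tdata <- transform(tdata, y1 = pmax(0, 1 + 2 * x2 + rnorm(200)))  # left-censored at 0
  fit <- vglm(y1 ~ x2, tobit(Lower = 0), data = tdata, trace = TRUE)
  coef(fit, matrix = TRUE)  # mean and log(sd) coefficients
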
 
@@ -1714,18 +1707,23 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
- normal1 <- function(lmean = "identity", lsd = "loge", lvar = "loge",
-                     var.arg = FALSE,
-                     imethod = 1,
-                     isd = NULL,
-                     parallel = FALSE,
-                     apply.parint = FALSE,
-                     smallno = 1.0e-5,
-                     zero = -2) {
+
+
+ normal1 <-
+ uninormal <- function(lmean = "identity", lsd = "loge", lvar = "loge",
+                       var.arg = FALSE,
+                       imethod = 1,
+                       isd = NULL,
+                       parallel = FALSE,
+                       smallno = 1.0e-5,
+                       zero = -2) {
+
 
 
 
 
+  apply.parint <- FALSE
+
 
   lmean <- as.list(substitute(lmean))
   emean <- link2list(lmean)
@@ -1748,7 +1746,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
       stop("bad input for argument 'zero'")
 
 
-  if (!is.Numeric(smallno, allowable.length = 1,
+  if (!is.Numeric(smallno, length.arg = 1,
                   positive = TRUE))
       stop("argument 'smallno' must be positive and close to 0")
   if (smallno > 0.1) {
@@ -1756,7 +1754,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
     smallno <- 0.1
   }
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
       stop("argument 'imethod' must be 1 or 2 or 3 or 4")
@@ -1787,8 +1785,11 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
   constraints = eval(substitute(expression({
 
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
-                           apply.int = .apply.parint )
+    constraints <-
+      cm.vgam(matrix(1, M, 1), x = x,
+              bool = .parallel ,
+              constraints = constraints,
+              apply.int = .apply.parint )
 
     dotzero <- .zero
     Musual <- 2
@@ -1870,21 +1871,21 @@ tobit.control <- function(save.weight = TRUE, ...) {
         jfit <- lm.wfit(x = x,  y = y[, jay], w = w[, jay])
         mean.init[, jay] <- if ( .lmean == "loge")
                             pmax(1/1024, y[, jay]) else
-          if( .imethod == 1) median(y[, jay]) else
-          if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
-          if( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
+          if ( .imethod == 1) median(y[, jay]) else
+          if ( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
+          if ( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
                              0.5 + y[, jay] * 0.5 else
                                  mean(jfit$fitted)
 
         sdev.init[, jay] <-
-          if( .imethod == 1) {
+          if ( .imethod == 1) {
             sqrt( sum(w[, jay] *
                 (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
-          } else if( .imethod == 2) {
+          } else if ( .imethod == 2) {
             if (jfit$df.resid > 0)
               sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
               sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
-          } else if( .imethod == 3) {
+          } else if ( .imethod == 3) {
             sqrt( sum(w[, jay] * 
                   (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
           } else {
@@ -1950,7 +1951,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", Musual * ncoly)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .emean
       misc$earg[[Musual*ii  ]] <- if ( .var.arg ) .evare else .esdev
     }
@@ -1998,7 +1999,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
            .lmean = lmean,
            .smallno = smallno,
            .var.arg = var.arg ))),
-  vfamily = c("normal1"),
+  vfamily = c("uninormal"),
   deriv = eval(substitute(expression({
     ncoly <- extra$ncoly
     Musual <- extra$Musual
@@ -2040,15 +2041,25 @@ tobit.control <- function(save.weight = TRUE, ...) {
                  if ( .var.arg ) dl.dva * dva.deta else
                                  dl.dsd * dsd.deta)
     ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+
+
+
+
+
+
     ans
   }), list( .lmean = lmean, .lsdev = lsdev, .lvare = lvare,
             .emean = emean, .esdev = esdev, .evare = evare,
             .smallno = smallno,
             .var.arg = var.arg ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is 1-column too
+    wz <- matrix(as.numeric(NA), n, M)  # diag matrix; y is 1-column too
+
+
+
 
     ned2l.dmu2 <- 1 / sdev^2
+
     if ( .var.arg ) {
       ned2l.dva2 <- 0.5 / Varm^2
     } else {
@@ -2064,7 +2075,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
   }), list( .var.arg = var.arg ))))
-}  #  End of normal1()
+}  #  End of uninormal()
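
Editorial usage sketch (not part of the upstream changes): uninormal() is the new name for normal1() and is supplied to vglm() as before; 'ndata' is an invented name.

  library(VGAM)
  set.seed(5)
  ndata <- data.frame(x2 = runif(300))
  ndata <- transform(ndata, y1 = 2 + 3 * x2 + rnorm(300, sd = 0.5))
  fit <- vglm(y1 ~ x2, uninormal, data = ndata, trace = TRUE)
  coef(fit, matrix = TRUE)  # mean coefficients (identity link) and log(sd)
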
 
 
 
@@ -2074,15 +2085,15 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
- normal1.term <-
-  function(linklist = NULL, # list(),
-           earglist = NULL, # list(),
+ normal.vcm <-
+  function(link.list = list("(Default)" = "identity"),
+           earg.list = list("(Default)" = list()),
            lsd = "loge", lvar = "loge",
            esd = list(), evar = list(),
            var.arg = FALSE,
            imethod = 1,
+           icoefficients = NULL,
            isd = NULL,
-           ieta.coeffs = NULL,
            zero = "M") {
 
 
@@ -2090,7 +2101,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
-
+  orig.esd  <- esd
+  orig.evar <- evar
 
   lsd <- as.list(substitute(lsd))
   esd <- link2list(lsd)
@@ -2102,13 +2114,12 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
-
   if (is.character(zero) && zero != "M")
     stop("bad input for argument 'zero'")
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
       stop("argument 'imethod' must be 1 or 2 or 3 or 4")
@@ -2118,11 +2129,13 @@ tobit.control <- function(save.weight = TRUE, ...) {
     stop("argument 'var.arg' must be a single logical")
 
 
-
   new("vglmff",
   blurb = c("Univariate normal distribution with ",
-            "varying coefficients links/constraints\n\n",
+            "varying coefficients\n\n",
             "Links:    ",
+            "G1: g1(coeff:v1), ",
+            "G2: g2(coeff:v2)",
+            ", ..., ",
             if (var.arg)
             namesof("var",  lvar, earg = evar, tag = TRUE) else
             namesof("sd" ,  lsd,  earg = esd,  tag = TRUE), "; ",
@@ -2131,12 +2144,11 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
   constraints = eval(substitute(expression({
 
-
     dotzero <- .zero
     if (is.character(dotzero) && dotzero == "M")
       dotzero <- M
 
-    Musual <- M
+    Musual <- NA
     eval(negzero.expression)
   }), list( .zero = zero 
           ))),
@@ -2148,101 +2160,119 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
   initialize = eval(substitute(expression({
 
-    asgn <- attr(x, "assign")
-    nasgn <- names(asgn)
     asgn2 <- attr(Xm2, "assign")
     nasgn2 <- names(asgn2)
 
 
- print("head(x)")
- print( head(x) )
- print("head(Xm2)")
- print( head(Xm2) )
-
 
- print("attributes(x)")
- print( attributes(x) )
- print("attributes(Xm2)")
- print( attributes(Xm2) )
 
+  link.list.lengths <- unlist(lapply(asgn2, length))
 
 
+  link.list <- .link.list
+  earg.list <- .earg.list
+  if (FALSE) {
+    if (length(link.list) > 0)
+      if (length(nasgn2) != length(names(link.list)) ||
+          !all(sort(nasgn2) == sort(names(link.list))))
+        stop("names of 'link.list' do not match argument 'form2'")
+    if (length(earg.list) > 0)
+      if (length(nasgn2) != length(names(earg.list)) ||
+          !all(sort(nasgn2) == sort(names(earg.list))))
+        stop("names of 'earg.list' do not match argument 'form2'")
+  }
 
 
- print("names(constraints)")
- print( names(constraints) )
- print('nasgn')
- print( nasgn )
- print('nasgn2')
- print( nasgn2 )
 
+  link.list.ordered <- vector("list", ncol(Xm2))
+  earg.list.ordered <- vector("list", ncol(Xm2))
 
-    linklist <- .linklist
-    Linklist <- vector("list", length(nasgn2))
-    names(Linklist) <- nasgn2
-    for (ilocal in 1:length(nasgn2))
-      Linklist[[ilocal]] <- "identity"
-    if (length( linklist ) > 0) {
-      for (ilocal in 1:length(nasgn2))
-        if (any(names(linklist) == nasgn2[ilocal]))
-          Linklist[[ilocal]] <- linklist[[(nasgn2[ilocal])]]
-    }
- print('linklist')
- print( linklist )
- print('Linklist')
- print( Linklist )
- print('unlist(Linklist)')
- print( unlist(Linklist) )
 
 
 
+  if (sum(names(link.list) == "(Default)") > 1)
+    stop("only one default allowed in argument 'link.list'!")
+  if (sum(names(earg.list) == "(Default)") > 1)
+    stop("only one default allowed in argument 'earg.list'!")
+  default.link <- if (any(names(link.list) == "(Default)"))
+    link.list[["(Default)"]] else "identity"
+  default.earg <- if (any(names(earg.list) == "(Default)"))
+    earg.list[["(Default)"]] else list()
 
 
+  names(link.list.ordered) <-
+  names(earg.list.ordered) <- colnames(Xm2)
+  i.ptr <- 1
+  for (jlocal in 1:length(nasgn2)) {
+    for (klocal in 1:link.list.lengths[jlocal]) {
+      link.list.ordered[[i.ptr]] <-
+        if (any(names(link.list) == nasgn2[jlocal]))
+          link.list[[(nasgn2[jlocal])]] else
+          default.link
+      earg.list.ordered[[i.ptr]] <-
+        if (any(names(earg.list) == nasgn2[jlocal]))
+          earg.list[[(nasgn2[jlocal])]] else
+          default.earg
+      i.ptr <- i.ptr + 1
+    }
+  }
+  link.list <- link.list.ordered
+  earg.list <- earg.list.ordered
+  extra$link.list <- link.list
+  extra$earg.list <- earg.list
 
 
-    orig.y <- y
+    
 
+  if (any(is.mlogit <- (unlist(link.list.ordered) == "mlogit"))) {
+    if (sum(is.mlogit) < 2)
+      stop("at least two 'mlogit' links need to be specified, else none")
+    col.index.is.mlogit <- (1:length(is.mlogit))[is.mlogit]
+    extra$col.index.is.mlogit <- col.index.is.mlogit
+    extra$is.mlogit <- is.mlogit
+  }
 
+    
 
 
     temp5 <-
     w.y.check(w = w, y = y,
               ncol.w.max = 1,
-              ncol.y.max = 1,
+              ncol.y.max = 1,  # M-1 ?
               out.wy = TRUE,
-              colsyperw = 1,
+              colsyperw = 1,  # Use M-1, not 1, for plotvgam(y=TRUE)
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
 
 
-    number.mlogit <- sum(unlist(Linklist) == "mlogit")
-    print("number.mlogit")
-    print( number.mlogit )
-    if (number.mlogit == 1)
-      stop('cannot have only one "mlogit"')
 
+    extra$ncoly <- ncoly <- ncol(y)
+    extra$M <- M <- ncol(Xm2) + 1 -
+                    (length(extra$is.mlogit) > 0)
+    Musual <- NA  # Since this cannot be determined apriori.
 
-    ncoly <- ncol(y)
-    Musual <- NA
-    extra$ncoly <- ncoly
     extra$Musual <- Musual
-    M <- ncol(Xm2) - (number.mlogit > 0) + 1
- print("M ,,,,,,,,,")
- print( M )
-    extra$Xm2 <- Xm2
+    extra$Xm2 <- Xm2  # Needed for @linkinv
+    extra$depvar <- y
+
 
 
 
-    cn.Xm2 <- colnames(Xm2)
-    mynames1 <- NULL
-    for (ilocal in 1:length(cn.Xm2))
-      mynames1 <- c(mynames1,
-                namesof(cn.Xm2[ilocal], Linklist[[ilocal]],
-                        list(), tag = FALSE))
 
- print("mynames1")
- print( mynames1 )
+  mynames1 <- paste("coeff:", colnames(Xm2), sep = "")
+
+
+  for (jlocal in 1:length(mynames1)) {
+    mynames1[jlocal] <- namesof(mynames1[jlocal],
+                                link = link.list[[jlocal]],
+                                earg = earg.list[[jlocal]], short = TRUE)
+  }
+  extra$all.mynames1 <- all.mynames1 <- mynames1
+
+  if (LLL <- length(extra$is.mlogit)) {
+    mynames1 <- mynames1[-max(extra$col.index.is.mlogit)]
+  }
 
     mynames2 <- paste(if ( .var.arg ) "var" else "sd",
                       if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -2252,106 +2282,176 @@ tobit.control <- function(save.weight = TRUE, ...) {
           if ( .var.arg ) 
           namesof(mynames2, .lvar  , earg = .evar  , tag = FALSE) else
           namesof(mynames2, .lsd   , earg = .esd   , tag = FALSE))
- print("predictors.names ,,,,,,,,,")
- print( predictors.names )
     extra$predictors.names <- predictors.names
 
 
     if (!length(etastart)) {
-      sdev.init <- mean.init <- matrix(0, n, ncoly)
-      for (jay in 1:ncoly) {
-        jfit <- lm.wfit(x = Xm2,  y = y[, jay], w = w[, jay])
-        mean.init[, jay] <- if ( mynames2 == "loge")
-                            pmax(1/1024, y[, jay]) else
-          if( .imethod == 1) median(y[, jay]) else
-          if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
-          if( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
-                             0.5 + y[, jay] * 0.5 else
-                                 mean(jfit$fitted)
 
-        sdev.init[, jay] <-
-          if( .imethod == 1) {
-            sqrt( sum(w[, jay] *
-                (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
-          } else if( .imethod == 2) {
-            if (jfit$df.resid > 0)
-              sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
-              sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
-          } else if( .imethod == 3) {
-            sqrt( sum(w[, jay] * 
-                  (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
-          } else {
-            sqrt( sum(w[, jay] * abs(y[, jay] -
-                                     mean.init[, jay])) / sum(w[, jay]) )
-          }
+      jfit <- lm.wfit(x = Xm2,  y = c(y), w = c(w))
+      jfit.coeff <- jfit$coeff
 
-        if (any(sdev.init[, jay] <= sqrt( .Machine$double.eps ) ))
-          sdev.init[, jay] <- 1.01
 
- print("head(sdev.init[, jay])9")
- print( head(sdev.init[, jay])  )
+
+
+      if (icoefficients.given <- is.numeric( .icoefficients ))
+        jfit.coeff <- rep( .icoefficients , length = length(jfit.coeff))
+
+
+
+      if (!icoefficients.given)
+      for (jlocal in 1:length(nasgn2)) {
+        if (link.list[[jlocal]] %in%
+            c("cauchit", "probit", "cloglog", "logit",
+              "logc", "golf", "polf", "nbolf") &&
+            abs(jfit.coeff[jlocal] - 0.5) >= 0.5)
+          jfit.coeff[jlocal] <- 0.5 +
+            sign(jfit.coeff[jlocal] - 0.5) * 0.25
+
+        if (link.list[[jlocal]] %in% c("rhobit", "fisherz") &&
+            abs(jfit.coeff[jlocal]) >= 1)
+          jfit.coeff[jlocal] <- sign(jfit.coeff[jlocal]) * 0.5
+
+        if (link.list[[jlocal]] == "loglog" &&
+            abs(jfit.coeff[jlocal]) <= 1)
+          jfit.coeff[jlocal] <- 1 + 1/8
+
+        if (link.list[[jlocal]] == "logoff" &&
+            is.numeric(LLL <- (earg.list[[jlocal]])$offset) &&
+            jfit.coeff[jlocal] <= -LLL)
+          jfit.coeff[jlocal] <- max((-LLL) * 1.05,
+                                    (-LLL) * 0.95, -LLL + 1)
+
+        if (link.list[[jlocal]] == "loge" &&
+            jfit.coeff[jlocal] <= 0.001)
+          jfit.coeff[jlocal] <- 1/8
+      }
+
+      if (!icoefficients.given)
+      if (LLL <- length(extra$is.mlogit)) {
+        raw.coeffs <- jfit.coeff[extra$col.index.is.mlogit]
+        possum1 <- (0.01 + abs(raw.coeffs)) / sum(0.01 + abs(raw.coeffs))
+        jfit.coeff[extra$is.mlogit] <- possum1
+      }
+
+
+      thetamat.init <- matrix(jfit.coeff, n, length(jfit.coeff),
+                              byrow = TRUE)
+      etamat.init <- 1 * thetamat.init  # May delete a coln later
+      for (jlocal in 1:ncol(etamat.init)) {
+        earg.use <- if (!length(extra$earg.list)) {
+          list(theta = NULL)
+        } else {
+          extra$earg.list[[jlocal]]
+        }
+
+        if (length(extra$is.mlogit) && !extra$is.mlogit[jlocal])
+          etamat.init[, jlocal] <-
+            theta2eta(thetamat.init[, jlocal],
+                      link = extra$link.list[[jlocal]],
+                      earg = earg.use)
       }
 
+      if (LLL <- length(extra$col.index.is.mlogit)) {
+        etamat.init[, extra$col.index.is.mlogit[-LLL]] <-
+          mlogit(thetamat.init[, extra$col.index.is.mlogit])
+        etamat.init <- etamat.init[, -max(extra$col.index.is.mlogit)]
+      }
+      
+
+      mean.init <- jfit$fitted
+      sdev.init <-
+          if ( .imethod == 1) {
+            sqrt( sum(w * (y - mean.init)^2) / sum(w) )
+          } else if ( .imethod == 2) {
+            if (jfit$df.resid > 0)
+              sqrt( sum(w * jfit$resid^2) / jfit$df.resid ) else
+              sqrt( sum(w * jfit$resid^2) / sum(w) )
+          } else if ( .imethod == 3) {
+            sqrt( sum(w * (y - mean.init)^1.5) / sum(w) )
+          } else {
+            sqrt( sum(w * abs(y - mean.init)) / sum(w) )
+          }
+
+      inflation.factor <- 1.5
+      sdev.init <- sdev.init * inflation.factor
+      sdev.init[sdev.init <= sqrt( .Machine$double.eps )] <- 0.01
 
       if (length( .isdev )) {
         sdev.init <- matrix( .isdev , n, ncoly, byrow = TRUE)
       }
 
-
       etastart <-
-        cbind(eta.equi.probs,
+        cbind(etamat.init,  # eta.equi.probs,
               if ( .var.arg )
               theta2eta(sdev.init^2, .lvar , earg = .evar ) else
               theta2eta(sdev.init  , .lsd  , earg = .esd  ))
 
       colnames(etastart) <- predictors.names
- print("head(etastart)9")
- print( head(etastart) )
-
-      new.coeffs <- weighted.mean(y, w)
-      extra$new.coeffs <- new.coeffs 
-
     }
-  }), list( .linklist = linklist,
-            .earglist = earglist,
+  }), list( .link.list = link.list,
+            .earg.list = earg.list,
             .lsd = lsd, .lvar = lvar,
             .esd = esd, .evar = evar,
+            .orig.esd = orig.esd, .orig.evar = orig.evar,
+            .var.arg = var.arg,
             .isdev = isd,
-            .ieta.coeffs = ieta.coeffs,
-            .var.arg = var.arg, .imethod = imethod ))),
+            .icoefficients = icoefficients,
+            .imethod = imethod ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
- print("hi9")
-
     M <- ncol(eta)
-    betas.matrix <- 1 / (1 + exp(-eta[, -M, drop = FALSE]))
-    betas.matrix <- cbind(betas.matrix,
-                          1 / (1 + rowSums(exp(eta[, -M, drop = FALSE]))))
- print("head(betas.matrix)1")
- print( head(betas.matrix) )
 
-      betas.matrix <- cbind(extra$new.coeffs[1], betas.matrix)
+  coffs <- eta[, -M, drop = FALSE]
+
+  if (LLL <- length(extra$col.index.is.mlogit)) {
+    last.one <- extra$col.index.is.mlogit[LLL]
+    coffs <- cbind(coffs[, 1:(last.one-1)],
+                   probs.last.mlogit = 0,  # \eta_j\equiv0 for last "mlogit"
+                   if (last.one == M) NULL else
+                   coffs[, last.one:ncol(coffs)])
+    colnames(coffs) <- extra$all.mynames1 
+  }
+
+
+  for (jlocal in 1:ncol(coffs)) {
+    earg.use <- if (!length(extra$earg.list[[jlocal]])) {
+      list(theta = NULL)
+    } else {
+      extra$earg.list[[jlocal]]
+    }
 
- print("head(betas.matrix)2")
- print( head(betas.matrix) )
- print("head(extra$Xm2)")
- print( head(extra$Xm2) )
+    if (length(extra$is.mlogit) && !extra$is.mlogit[jlocal]) {
+      iskip <- (jlocal > max(extra$col.index.is.mlogit))
+      coffs[, jlocal] <- eta2theta(eta[, jlocal - iskip],
+                                   link = extra$link.list[[jlocal]],
+                                   earg = earg.use)
+    }
+  }
 
 
+    if (LLL <- length(extra$col.index.is.mlogit)) {
+      coffs[, extra$col.index.is.mlogit] <-
+        mlogit(eta[, extra$col.index.is.mlogit[-LLL], drop = FALSE],
+               inverse = TRUE)
+    }
 
-    rowSums(extra$Xm2 * betas.matrix)
-  }, list( .linklist = linklist,
-           .earglist = earglist,
+    rowSums(extra$Xm2 * coffs)
+  }, list( .link.list = link.list,
+           .earg.list = earg.list,
            .esd = esd , .evar = evar ))),
 
   last = eval(substitute(expression({
     Musual <- extra$Musual
-    misc$link <- c(rep( "mlogit", length = M - 1),
-                   rep( .lsd   , length = ncoly))
-    temp.names <- c(mynames1, mynames2)
-    names(misc$link) <- temp.names
 
 
+    misc$link <- c(link.list.ordered,
+                   "sd" = if ( .var.arg ) .lvar else .lsd )
+
+
+    temp.earg.list <- c(earg.list.ordered,
+                        "sd" = if ( .var.arg ) list( .orig.evar ) else
+                                               list( .orig.esd  ))
+    misc$earg <- temp.earg.list
 
 
     misc$var.arg <- .var.arg
@@ -2359,21 +2459,23 @@ tobit.control <- function(save.weight = TRUE, ...) {
     misc$expected <- TRUE
     misc$imethod <- .imethod
     misc$multipleResponses <- FALSE
-  }), list( .linklist = linklist,
-            .earglist = earglist,
+    misc$icoefficients <- .icoefficients
+  }), list( .link.list = link.list,
+            .earg.list = earg.list,
             .lsd = lsd, .lvar = lvar,
             .esd = esd, .evar = evar,
+            .orig.esd = orig.esd, .orig.evar = orig.evar,
+            .icoefficients = icoefficients,
             .var.arg = var.arg, .imethod = imethod ))),
 
+
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    ncoly <- extra$ncoly
-    Musual <- 1 # extra$Musual
     if ( .var.arg ) {
-      Varm <- eta2theta(eta[, Musual*(1:ncoly)], .lvar , earg = .evar )
+      Varm <- eta2theta(eta[, ncol(eta)], .lvar , earg = .evar )
       sdev <- sqrt(Varm)
     } else {
-      sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd  , earg = .esd  )
+      sdev <- eta2theta(eta[, ncol(eta)], .lsd  , earg = .esd  )
     }
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
@@ -2382,56 +2484,86 @@ tobit.control <- function(save.weight = TRUE, ...) {
   }, list( .lsd = lsd, .lvar = lvar,
            .esd = esd, .evar = evar,
            .var.arg = var.arg ))),
-  vfamily = c("normal1.term"),
-  deriv = eval(substitute(expression({
- print("------ in @ deriv -------------")
-    extra$new.coeffs <- new.coeffs
+  vfamily = c("normal.vcm"),
+
 
-    ncoly <- extra$ncoly
-    Musual <- 1 # extra$Musual
+
+  deriv = eval(substitute(expression({
 
     if ( .var.arg ) {
-      Varm <- eta2theta(eta[, Musual*(1:ncoly)    ], .lvar , earg = .evar )
+      Varm <- eta2theta(eta[, M], .lvar , earg = .evar )
       sdev <- sqrt(Varm)
     } else {
-      sdev <- eta2theta(eta[, Musual*(1:ncoly)    ], .lsd  , earg = .esd  )
+      sdev <- eta2theta(eta[, M], .lsd  , earg = .esd  )
     }
 
+    zedd <- (y - mu) / sdev
+    dl.dmu <- c(zedd / sdev)  #   dl.dmu <- (y - mymu) / sdev^2
+
+    dmu.dcoffs <- Xm2
+
+    mymu <- mu
+
 
+    coffs <- eta[, -M, drop = FALSE]  # Exclude log(sdev) or log(var)
 
-    betas.matrix <- 1 / (1 + exp(-eta[, -M, drop = FALSE]))
-    betas.matrix <- cbind(betas.matrix,
-                          1 / (1 + rowSums(exp(eta[, -M, drop = FALSE]))))
- print("head(betas.matrix)5")
- print( head(betas.matrix) )
+    if (LLL <- length(extra$is.mlogit)) {
+      last.one <- max(extra$col.index.is.mlogit)
+      coffs <- cbind(coffs[, 1:(last.one-1)],
+                     probsLastmlogit = 0,  # \eta_j\equiv0 for last "mlogit"
+                     if (last.one == M) NULL else
+                     coffs[, last.one:ncol(coffs)])
+      colnames(coffs) <- extra$all.mynames1
+    }
 
-    if ( !extra$sum1.intercept &&
-          any(colnames(extra$X_LM) == "(Intercept)"))
-      betas.matrix <- cbind(extra$new.coeffs[1], betas.matrix)
+    dcoffs.deta <- coffs  # Includes any last "mlogit"
 
- print("head(betas.matrix)6")
- print( head(betas.matrix) )
- print("head(extra$Xm2)")
- print( head(extra$Xm2) )
+    for (jlocal in 1:ncol(coffs)) {
+      earg.use <- if (!length(extra$earg.list[[jlocal]])) {
+        list(theta = NULL)
+      } else {
+        extra$earg.list[[jlocal]]
+      }
+
+      if (!length(extra$is.mlogit) ||
+          !extra$is.mlogit[jlocal]) {
+        iskip <- length(extra$is.mlogit) &&
+                 (jlocal  > max(extra$col.index.is.mlogit))
+        coffs[, jlocal] <- eta2theta(eta[, jlocal - iskip],
+                                     link = extra$link.list[[jlocal]],
+                                     earg = earg.use)
+      }
+    }
+
+    if (LLL <- length(extra$col.index.is.mlogit)) {
+      coffs[, extra$col.index.is.mlogit] <-
+        mlogit(eta[, extra$col.index.is.mlogit[-LLL], drop = FALSE],
+               inverse = TRUE)
+    }
+
+
+  for (jlocal in 1:ncol(coffs)) {
+    if (!length(extra$is.mlogit) ||
+        !extra$is.mlogit[jlocal]) {
+      earg.use <- if (!length(extra$earg.list[[jlocal]])) {
+        list(theta = NULL)
+      } else {
+        extra$earg.list[[jlocal]]
+      }
+      dcoffs.deta[, jlocal] <-
+        dtheta.deta(coffs[, jlocal], 
+                    link = extra$link.list[[jlocal]],
+                    earg = earg.use)
+    }
+  }
 
-    use.x <- if ( sum1.intercept )
-             Xm2[, -ncol(Xm2), drop = FALSE] else
-             Xm2[, -c(1, ncol(Xm2)), drop = FALSE]
-    mymu <- rowSums(Xm2 * betas.matrix)
-    dMu.deta <- mymu * (1 - mymu) * use.x
 
 
- print("head(mymu)9")
- print( head(mymu) )
- print("head(dMu.deta)9")
- print( head(dMu.deta) )
     if ( .var.arg ) {
       dl.dva <- -0.5 / Varm + 0.5 * (y - mymu)^2 / sdev^4
     } else {
       dl.dsd <- -1.0 / sdev +       (y - mymu)^2 / sdev^3
     }
-    dl.dmu <- (y - mymu) / sdev^2
-
 
     if ( .var.arg ) {
       dva.deta <- dtheta.deta(Varm, .lvar , earg = .evar )
@@ -2439,66 +2571,95 @@ tobit.control <- function(save.weight = TRUE, ...) {
       dsd.deta <- dtheta.deta(sdev, .lsd  , earg = .esd )
     }
 
-    ans <- c(w) *
-           cbind(dl.dmu * dMu.deta,
-                 if ( .var.arg ) dl.dva * dva.deta else
-                                 dl.dsd * dsd.deta)
- print("head(deriv.ans)9")
- print( head(ans) )
-    ans
-  }), list( .linklist = linklist, .lsd = lsd, .lvar = lvar,
-            .earglist = earglist, .esd = esd, .evar = evar,
+    
+    dMu.deta <- dmu.dcoffs * dcoffs.deta  # n x pLM, but may change below
+    if (LLL <- length(extra$col.index.is.mlogit)) {
+      dMu.deta[, extra$col.index.is.mlogit[-LLL]] <-
+         coffs[, extra$col.index.is.mlogit[-LLL]] *
+        (dmu.dcoffs[, extra$col.index.is.mlogit[-LLL]] -
+         rowSums(dmu.dcoffs[, extra$col.index.is.mlogit]  *
+                      coffs[, extra$col.index.is.mlogit]))
+      dMu.deta <- dMu.deta[, -extra$col.index.is.mlogit[LLL]]
+    }
+    
+
+    dl.deta <- if ( .var.arg )
+               c(w) * cbind(dl.dmu * dMu.deta,
+                            "var" = c(dl.dva * dva.deta)) else
+               c(w) * cbind(dl.dmu * dMu.deta,
+                            "sd"  = c(dl.dsd * dsd.deta))
+ 
+    dl.deta
+  }), list( .link.list = link.list, .lsd = lsd, .lvar = lvar,
+            .earg.list = earg.list, .esd = esd, .evar = evar,
             .var.arg = var.arg ))),
-  weight = eval(substitute(expression({
- print("------ in @ weight -------------")
-    wz <- matrix(0, n, dimm(M)) # diag matrix; y is 1-column too
- print("head(wz)")
- print( head(wz) )
 
-    if ( .var.arg ) {
-      ned2l.dva2 <- 0.5 / Varm^2
-    } else {
-      ned2l.dsd2 <- 2 / sdev^2
-    }
+      
 
 
 
+  weight = eval(substitute(expression({
+    wz <- matrix(0.0, n, dimm(M))  # Treated as a general full matrix
 
 
     wz[, iam(M, M, M = M)] <- if ( .var.arg ) {
+      ned2l.dva2 <- 0.5 / Varm^2
       ned2l.dva2 * dva.deta^2
     } else {
+      ned2l.dsd2 <- 2 / sdev^2
       ned2l.dsd2 * dsd.deta^2
     }
 
 
+
+    if (length(extra$col.index.is.mlogit)) {
+      LLL <- max(extra$col.index.is.mlogit)
+      dmu.dcoffs <- dmu.dcoffs[, -LLL]
+      dcoffs.deta <- dcoffs.deta[, -LLL]
+    }
+
+
     index <- iam(NA, NA, M  , both = TRUE, diag = TRUE)
     indtw <- iam(NA, NA, M-1, both = TRUE, diag = TRUE)
- print("index")
- print( index )
- print("indtw")
- print( indtw )
+    ned2l.dmu2 <- 1 / sdev^2
 
-    
-    twz <- dMu.deta[, indtw$row.index, drop = FALSE] *
-           dMu.deta[, indtw$col.index, drop = FALSE]
- print("head(twz)9------------------------------------------------")
- print( head(twz) )
+ 
+
+
+
+ 
+    if ((LLL <- length(extra$col.index.is.mlogit))) {
+       dmu.dcoffs[, extra$col.index.is.mlogit[-LLL]] <-
+         dMu.deta[, extra$col.index.is.mlogit[-LLL]]
+      dcoffs.deta[, extra$col.index.is.mlogit[-LLL]] <- 1
+     }
+  
+    twz  <- crossprod(dmu.dcoffs * sqrt(c(w))) / sum(w)
 
+    twz <- matrix(twz[cbind(indtw$row.index,
+                            indtw$col.index)],
+                  n, dimm(M-1), byrow = TRUE)
+    if (length(indtw$row.index) != dimm(M-1))
+      stop("dim of twz incorrect")
 
-    for (ilocal in 1:ncol(twz))
-      wz[, iam(index$row.index[ilocal],
-               index$col.index[ilocal], M = M)] <-
+    twz <- twz *
+           dcoffs.deta[, indtw$row.index, drop = FALSE] *
+           dcoffs.deta[, indtw$col.index, drop = FALSE] *
+           ned2l.dmu2
+
+    for (ilocal in 1:length(indtw$row.index))
+      wz[, iam(indtw$row.index[ilocal],
+               indtw$col.index[ilocal], M = M)] <-
      twz[, iam(indtw$row.index[ilocal],
                indtw$col.index[ilocal], M = M-1)]
 
 
- print("head(wz)9------------------------------------------------")
- print( head(wz) )
-
-    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+    c(w) * wz
   }), list( .var.arg = var.arg ))))
-} # End of normal1.term()
+}  # End of normal.vcm()
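
A hedged sketch of calling the new normal.vcm() family (editorial; the pairing with a 'form2' formula is inferred from the Xm2 and 'form2' handling above, and 'vdata', 'x2', 'y1' are invented names). Each column of the form2 model matrix receives its own coefficient, with links chosen through 'link.list'.

  library(VGAM)
  set.seed(7)
  vdata <- data.frame(x2 = runif(500))
  vdata <- transform(vdata, y1 = 2 + 3 * x2 + rnorm(500))
  fit <- vglm(y1 ~ 1, form2 = ~ 1 + x2,
              normal.vcm(link.list = list("(Default)" = "identity")),
              data = vdata, trace = TRUE)
  coef(fit, matrix = TRUE)  # one coefficient per form2 column, plus log(sd)
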
+
+
+
 
 
 
@@ -2597,7 +2758,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
   }), list( .lmulog = lmulog, .lsdlog = lsdlog,
             .emulog = emulog, .esdlog = esdlog ))),
   weight = expression({
-    wz <- matrix(as.numeric(NA), n, 2) # Diagonal!
+    wz <- matrix(as.numeric(NA), n, 2)  # Diagonal!
     ned2l.dmulog2 <- 1 / sdlog^2
     ned2l.dsdlog2 <- 2 * ned2l.dmulog2
 
@@ -2672,7 +2833,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
       } else {
         pvalue.vec <- NULL
         powers.try <- .powers.try
-        for(delta in 10^powers.try) {
+        for (delta in 10^powers.try) {
           pvalue.vec <- c(pvalue.vec,
                          shapiro.test(sample(log(y-miny+delta),
                          size=min(5000, length(y ))))$p.value) 
@@ -2769,15 +2930,15 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
-dsnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
+dskewnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-  if (!is.Numeric(scale, positive = TRUE))
-    stop("bad input for argument 'scale'")
+
+
   zedd <- (x - location) / scale
   loglik <- log(2) + dnorm(zedd, log = TRUE) +
             pnorm(shape * zedd, log.p = TRUE) - log(scale)
@@ -2790,12 +2951,12 @@ dsnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
 
 
 
-rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
+rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
 
   rho <- shape / sqrt(1 + shape^2)
   u0 <- rnorm(n)
-  v <- rnorm(n)
-  u1 <- rho*u0 + sqrt(1 - rho^2) * v
+  v  <- rnorm(n)
+  u1 <- rho * u0 + sqrt(1 - rho^2) * v
   ans <- location + scale * ifelse(u0 >= 0, u1, -u1)
   ans[scale <= 0] <- NA
   ans
@@ -2804,9 +2965,11 @@ rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
 
 
 
- skewnormal1 <- function(lshape = "identity",
-                         ishape = NULL,
-                         nsimEIM = NULL) {
+
+
+ skewnormal <- function(lshape = "identity",
+                        ishape = NULL,
+                        nsimEIM = NULL) {
 
 
   lshape <- as.list(substitute(lshape))
@@ -2815,7 +2978,7 @@ rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
 
 
   if (length(nsimEIM) &&
-     (!is.Numeric(nsimEIM, allowable.length = 1,
+     (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10))
     stop("argument 'nsimEIM' should be an integer greater than 10")
@@ -2849,8 +3012,8 @@ rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
       namesof("shape", .lshape , earg = .eshape , tag = FALSE)
 
     if (!length(etastart)) {
-      init.shape <- if (length( .ishape))
-        rep( .ishape, len = n) else {
+      init.shape <- if (length( .ishape ))
+        rep( .ishape , len = n) else {
         temp <- y
         index <- abs(y) < sqrt(2/pi)-0.01
         temp[!index] <- y[!index]
@@ -2862,10 +3025,13 @@ rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
   }), list( .lshape = lshape, .eshape = eshape,
             .ishape = ishape ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lshape, earg = .eshape)
+    alpha <- eta2theta(eta, .lshape , earg = .eshape )
     alpha * sqrt(2/(pi * (1+alpha^2 )))
   }, list( .eshape = eshape, .lshape = lshape ))),
   last = eval(substitute(expression({
+
+
+
     misc$link <-    c(shape = .lshape) 
 
     misc$earg <- list(shape = .eshape )
@@ -2876,60 +3042,62 @@ rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
             .nsimEIM = nsimEIM ))),
   linkfun = eval(substitute(function(mu, extra = NULL) {
     alpha <- mu / sqrt(2/pi - mu^2)
-    theta2eta(alpha, .lshape, earg = .eshape)
+    theta2eta(alpha, .lshape , earg = .eshape )
   }, list( .eshape = eshape, .lshape = lshape ))),
   loglikelihood = eval(substitute(
-     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-        alpha <- eta2theta(eta, .lshape, earg = .eshape)
-    if (residuals) stop("loglikelihood residuals not ",
-                        "implemented yet") else {
-      sum(c(w) * dsnorm(x = y, location = 0, scale = 1,
-                        shape = alpha, log = TRUE))
-    }
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+      alpha <- eta2theta(eta, .lshape , earg = .eshape )
+      if (residuals) stop("loglikelihood residuals not ",
+                          "implemented yet") else {
+        sum(c(w) * dskewnorm(x = y, location = 0, scale = 1,
+                             shape = alpha, log = TRUE))
+      }
   }, list( .eshape = eshape, .lshape = lshape ))), 
-  vfamily = c("skewnormal1"),
+  vfamily = c("skewnormal"),
   deriv = eval(substitute(expression({
-    alpha <- eta2theta(eta, .lshape, earg = .eshape)
+    alpha <- eta2theta(eta, .lshape , earg = .eshape )
 
     zedd <- y*alpha
     tmp76 <- pnorm(zedd)
     tmp86 <- dnorm(zedd)
     dl.dshape <- tmp86 * y / tmp76
 
-    dshape.deta <- dtheta.deta(alpha, .lshape, earg = .eshape)
+    dshape.deta <- dtheta.deta(alpha, .lshape , earg = .eshape )
 
     c(w) * dl.dshape * dshape.deta
-  }), list( .eshape = eshape, .lshape = lshape ))),
+  }), list( .eshape = eshape,
+            .lshape = lshape ))),
   weight = eval(substitute(expression({
     if ( length( .nsimEIM )) {
       run.mean <- 0
-      for(ii in 1:( .nsimEIM)) {
-          ysim <- rsnorm(n, location = 0, scale = 1, shape = alpha)
-          zedd <- ysim*alpha
-          tmp76 <- pnorm(zedd)
-          tmp86 <- dnorm(zedd)
-          d2l.dshape2 <- -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
-          rm(ysim)
-          run.mean <- ((ii-1) * run.mean + d2l.dshape2) / ii
+      for (ii in 1:( .nsimEIM)) {
+        ysim <- rsnorm(n, location = 0, scale = 1, shape = alpha)
+        zedd <- ysim*alpha
+        tmp76 <- pnorm(zedd)
+        tmp86 <- dnorm(zedd)
+        d2l.dshape2 <- -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
+        rm(ysim)
+        run.mean <- ((ii-1) * run.mean + d2l.dshape2) / ii
       }
       if (intercept.only)
         run.mean <- mean(run.mean)
       wz <-  -c(w) * (dshape.deta^2) * run.mean
     } else {
-      d2shape.deta2 <- d2theta.deta2(alpha, .lshape, earg = .eshape)
+      d2shape.deta2 <- d2theta.deta2(alpha, .lshape , earg = .eshape )
       d2l.dshape2 <- -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
       wz <- -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
       wz <- c(w) * wz
     }
     wz
-  }), list( .eshape = eshape, .lshape = lshape, .nsimEIM = nsimEIM ))))
+  }), list( .eshape = eshape,
+            .lshape = lshape, .nsimEIM = nsimEIM ))))
 }
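
Editorial sketch of the renamed skew-normal helpers and family (not part of the diff); 'sdata' is an invented name. The fitted values follow the linkinv slot above, i.e. E(Y) = alpha * sqrt(2 / (pi * (1 + alpha^2))).

  library(VGAM)
  set.seed(8)
  sdata <- data.frame(y1 = rskewnorm(1000, location = 0, scale = 1, shape = 3))
  dskewnorm(c(-1, 0, 1), location = 0, scale = 1, shape = 3)  # density values
  fit <- vglm(y1 ~ 1, skewnormal, data = sdata, trace = TRUE)
  coef(fit, matrix = TRUE)  # estimated shape on the 'lshape' link
  head(fitted(fit), 3)
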
 
 
 
 
 if (FALSE)
- halfnormal1 <-
+ halfuninormal <-
   function(lsd = "loge", lvar = "loge",
            var.arg = FALSE,
            imethod = 1,
@@ -2962,7 +3130,7 @@ if (FALSE)
       stop("bad input for argument 'zero'")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
       stop("argument 'imethod' must be 1 or 2 or 3 or 4")
@@ -2991,7 +3159,9 @@ if (FALSE)
 
   constraints = eval(substitute(expression({
 
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
                            apply.int = .apply.parint )
 
     dotzero <- .zero
@@ -3069,21 +3239,21 @@ if (FALSE)
         jfit <- lm.wfit(x = x,  y = y[, jay], w = w[, jay])
         mean.init[, jay] <- if ( .lmean == "loge")
                             pmax(1/1024, y[, jay]) else
-          if( .imethod == 1) median(y[, jay]) else
-          if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
-          if( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
+          if ( .imethod == 1) median(y[, jay]) else
+          if ( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
+          if ( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
                              0.5 + y[, jay] * 0.5 else
                                  mean(jfit$fitted)
 
         sdev.init[, jay] <-
-          if( .imethod == 1) {
+          if ( .imethod == 1) {
             sqrt( sum(w[, jay] *
                 (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
-          } else if( .imethod == 2) {
+          } else if ( .imethod == 2) {
             if (jfit$df.resid > 0)
               sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
               sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
-          } else if( .imethod == 3) {
+          } else if ( .imethod == 3) {
             sqrt( sum(w[, jay] * 
                   (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
           } else {
@@ -3132,7 +3302,7 @@ if (FALSE)
 
     misc$earg <- vector("list", Musual * ncoly)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii  ]] <- if ( .var.arg ) .evar else .esd
     }
     names(misc$earg) <- temp.names
@@ -3166,7 +3336,7 @@ if (FALSE)
   }, list( .lsd = lsd, .lvar = lvar,
            .esd = esd, .evar = evar,
            .var.arg = var.arg ))),
-  vfamily = c("halfnormal1"),
+  vfamily = c("halfuninormal"),
   deriv = eval(substitute(expression({
     ncoly <- extra$ncoly
     Musual <- extra$Musual
@@ -3201,7 +3371,7 @@ if (FALSE)
             .emean = emean, .esd = esd, .evar = evar,
             .var.arg = var.arg ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is 1-column too
+    wz <- matrix(as.numeric(NA), n, M)  # diag matrix; y is 1-column too
 
     ned2l.dmu2 <- 1 / sdev^2
     if ( .var.arg ) {
diff --git a/R/family.others.R b/R/family.others.R
index b788d64..63dc0b8 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -317,7 +317,7 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 50)
       stop("argument 'nsimEIM' should be an integer greater than 50")
@@ -442,7 +442,7 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
 
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
         ysim <- rgenray(n = n, shape = shape, scale = Scale)
 
         temp1 <- ysim / Scale
@@ -572,7 +572,7 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
     stop("bad input for argument 'zero'")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 50)
@@ -616,7 +616,7 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
       scale.init <- if (is.Numeric( .iscale , positive = TRUE)) {
                       rep( .iscale , len = n)
                     } else {
-                      stats::sd(c(y)) # The papers scale parameter beta
+                      stats::sd(c(y))  # The papers scale parameter beta
                     }
 
       shape.init <- if (is.Numeric( .ishape , positive = TRUE)) {
@@ -707,7 +707,7 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
         ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
 
         if (length( .nsimEIM )) {
-            for(ii in 1:( .nsimEIM )) {
+            for (ii in 1:( .nsimEIM )) {
                 ysim <- rexpgeom(n, scale=Scale, shape=shape)
 
                 temp2 <- exp(-ysim / Scale)
@@ -745,6 +745,8 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
 
 
 
+
+
 dexplog <- function(x, scale = 1, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -752,7 +754,7 @@ dexplog <- function(x, scale = 1, shape, log = FALSE) {
 
 
   N <- max(length(x), length(scale), length(shape))
-  x <- rep(x, len = N)
+  x     <- rep(x,     len = N)
   scale <- rep(scale, len = N)
   shape <- rep(shape, len = N)
 
@@ -810,14 +812,16 @@ rexplog <- function(n, scale = 1, shape) {
 
 
 
-explogarithmic.control <- function(save.weight = TRUE, ...) {
+
+explogff.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
- explogarithmic <- function(lscale = "loge", lshape = "logit",
-                            iscale = NULL,   ishape = NULL,
-                            tol12 = 1.0e-05, zero = 1,
-                            nsimEIM = 400) {
+
+ explogff <- function(lscale = "loge", lshape = "logit",
+                      iscale = NULL,   ishape = NULL,
+                      tol12 = 1.0e-05, zero = 1,
+                      nsimEIM = 400) {
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
@@ -843,7 +847,7 @@ explogarithmic.control <- function(save.weight = TRUE, ...) {
     stop("bad input for argument 'zero'")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 50)
@@ -940,7 +944,7 @@ explogarithmic.control <- function(save.weight = TRUE, ...) {
   }, list( .lscale = lscale , .lshape = lshape ,
            .escale = escale , .eshape = eshape ))),
 
-  vfamily = c("explogarithmic"),
+  vfamily = c("explogff"),
 
   deriv = eval(substitute(expression({
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -971,31 +975,32 @@ explogarithmic.control <- function(save.weight = TRUE, ...) {
         ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
 
         if (length( .nsimEIM )) {
-            for(ii in 1:( .nsimEIM )) {
-                ysim <- rexplog(n, scale=Scale, shape=shape)
-
-                temp2 <- exp(-ysim / Scale)
-                temp3 <- ysim / Scale^2
-                temp4 <- 1 - shape
-                dl.dscale <- (-1 / Scale) + temp3 + (temp4 * temp3 *
-                             temp2) / (1 - temp4 * temp2)
-                dl.dshape <- -1 / (shape * log(shape)) - 1 / temp4 -
-                             temp2 / (1 - temp4 * temp2)
-
-                temp6 <- cbind(dl.dscale, dl.dshape)
-                run.varcov <- run.varcov +
-                           temp6[,ind1$row.index] *
-                           temp6[,ind1$col.index]
-            }
-
-            run.varcov <- run.varcov / .nsimEIM
-
-            wz <- if (intercept.only)
+          for (ii in 1:( .nsimEIM )) {
+            ysim <- rexplog(n, scale = Scale, shape = shape)
+
+            temp2 <- exp(-ysim / Scale)
+            temp3 <- ysim / Scale^2
+            temp4 <- 1 - shape
+            dl.dscale <- (-1 / Scale) + temp3 + (temp4 * temp3 *
+                         temp2) / (1 - temp4 * temp2)
+            dl.dshape <- -1 / (shape * log(shape)) - 1 / temp4 -
+                         temp2 / (1 - temp4 * temp2)
+
+            temp6 <- cbind(dl.dscale, dl.dshape)
+            run.varcov <- run.varcov +
+                       temp6[,ind1$row.index] *
+                       temp6[,ind1$col.index]
+          }
+
+          run.varcov <- run.varcov / .nsimEIM
+
+          wz <- if (intercept.only)
                 matrix(colMeans(run.varcov),
-                       n, ncol(run.varcov), byrow = TRUE) else run.varcov
+                       n, ncol(run.varcov), byrow = TRUE) else
+                run.varcov
 
-            wz <- wz * dthetas.detas[, ind1$row] *
-                      dthetas.detas[, ind1$col]
+          wz <- wz * dthetas.detas[, ind1$row] *
+                    dthetas.detas[, ind1$col]
         }
 
     c(w) * wz
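
The loop above approximates the expected (Fisher) information by simulation: the score is evaluated at 'nsimEIM' simulated responses and the averaged products of its elements estimate E[score %o% score]. A one-parameter sketch of the same idea (not part of the commit), using the exponential distribution, whose true information per observation is 1/rate^2:

set.seed(1)
rate <- 2
nsimEIM <- 5000
run.varcov <- 0
for (ii in 1:nsimEIM) {
  ysim <- rexp(1, rate = rate)
  dl.drate <- 1 / rate - ysim           # score for one observation
  run.varcov <- run.varcov + dl.drate^2
}
run.varcov / nsimEIM                    # should be close to 1 / rate^2 = 0.25
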
@@ -1091,7 +1096,7 @@ ptpn <- function(q, location = 0, scale = 1, skewpar = 0.5) {
 
  zedd <- (q - location) / scale
 
-  s1 <- 2 * skewpar * pnorm(zedd, sd = 2 * skewpar) #/ scale
+  s1 <- 2 * skewpar * pnorm(zedd, sd = 2 * skewpar)  #/ scale
   s2 <- skewpar + (1 - skewpar) *
         pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
  
@@ -1153,12 +1158,12 @@ rtpn <- function(n, location = 0, scale = 1, skewpar = 0.5) {
 tpnff <- function(llocation = "identity", lscale = "loge",
                   pp = 0.5, method.init = 1,  zero = 2)
 {
-  if (!is.Numeric(method.init, allowable.length = 1,
+  if (!is.Numeric(method.init, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       method.init > 4)
      stop("argument 'imethod' must be 1 or 2 or 3 or 4")
 
-  if (!is.Numeric(pp, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(pp, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'pp'")
 
 
@@ -1289,7 +1294,7 @@ tpnff <- function(llocation = "identity", lscale = "loge",
             .elocat = elocat, .escale = escale,
             .pp      = pp ))),
   weight = eval(substitute(expression({
-    wz   <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-col too
+    wz   <- matrix(as.numeric(NA), n, M)  # diag matrix; y is one-col too
     temp10 <- mypp * (1 - mypp)
     ned2l.dlocat2        <- 1 / ((4 * temp10) * myscale^2)
     ned2l.dscale2        <- 2 /  myscale^2
@@ -1314,7 +1319,7 @@ tpnff3 <- function(llocation = "identity",
                     lskewpar = "identity",
                     method.init = 1,  zero = 2)
 {
-  if (!is.Numeric(method.init, allowable.length = 1,
+  if (!is.Numeric(method.init, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       method.init > 4)
     stop("argument 'imethod' must be 1 or 2 or 3 or 4")
@@ -1458,7 +1463,7 @@ tpnff3 <- function(llocation = "identity",
             .elocat = elocat, .escale = escale, .eskewp = eskewp
             ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # diag matrix; y is one-col too
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # diag matrix; y is one-col too
    
     temp10 <- myskew * (1 - myskew)
 
diff --git a/R/family.positive.R b/R/family.positive.R
index 5585ec0..17d08af 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -16,69 +16,96 @@
 N.hat.posbernoulli <-
   function(eta, link, earg = list(),
            R = NULL, w = NULL,
-           X_vlm = NULL, Hlist = NULL,
+           X.vlm = NULL, Hlist = NULL,
            extra = list(),
-           model.type = c("b", "t", "tb")
+           model.type = c("0", "b", "t", "tb")
           ) {
 
 
 
-  if (!is.null(w) && !all(1 == w))
-    warning("estimate of N may be wrong when prior weights ",
-            "are not all unity")
 
-  model.type <- match.arg(model.type, c("b", "t", "tb"))[1]
 
+  if (!is.null(w) && !all(w[1] == w))
+    warning("estimate of N may be wrong when prior weights ",
+            "are not all the same")
+
+  model.type <- match.arg(model.type, c("0", "b", "t", "tb"))[1]
+  if (!is.matrix(eta))
+    eta <- as.matrix(eta)  # May be needed for "0"
+ 
   tau <-
     switch(model.type,
+           "0"  = extra$tau,
            "b"  = extra$tau,
            "t"  = ncol(eta),
            "tb" = (ncol(eta) + 1) / 2)
   if (length(extra$tau) && extra$tau != tau)
     warning("variable 'tau' is mistaken")  # Checking only
 
+
   jay.index <-
     switch(model.type,
-           "b"  = rep(1, length = tau),  # Subset: 1 out of 1:2
+           "0"  = rep(1, length = tau),
+           "b"  = rep(1, length = tau),  # Subset: 2 out of 1:2
            "t"  = 1:tau,  # All of them
-           "tb" = 1:tau)  # Subset: first tau of them out of M = 2*tau-1
+           "tb" = 1:tau)  # Subset: first tau of them out of M = 2*tau-2
+
   prc <- eta2theta(eta[, jay.index], link, earg = earg)  # cap.probs
+  prc <- as.matrix(prc)  # Might be needed for Mtb(tau=2).
+
+
+ 
+  if (FALSE && model.type == "tb") {
+    if (tau == 2)
+      prc <- cbind(prc, 1 - prc)
+    if (tau >  3)
+      stop("cannot handle tau > 3 yet")
+    jay.index <- 1:tau  # 'Restore' it because it's used below. zz??
+  }
+  
   QQQ <- exp(rowSums(log1p(-prc)))
   pibbeta <- exp(log1p(-QQQ))  # One.minus.QQQ
   N.hat <- sum(1 / pibbeta)  # Point estimate
   ss2 <- sum(QQQ / pibbeta^2)  # Assumes bbeta is known
 
 
+  if (length(extra$p.small) &&
+     any(pibbeta < extra$p.small) &&
+     !extra$no.warning)
+    warning("The abundance estimation for this model can be unstable")
+
+
   if (length(R)) {
 
-    dvect <- matrix(0, length(pibbeta), ncol = ncol(X_vlm))
+    dvect <- matrix(0, length(pibbeta), ncol = ncol(X.vlm))
     M <- nrow(Hlist[[1]])
-    n_lm <- nrow(X_vlm) / M  # Number of rows of the LM matrix
+    n.lm <- nrow(X.vlm) / M  # Number of rows of the LM matrix
     dprc.deta <- dtheta.deta(prc, link, earg = earg)
     Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
     for (jay in 1:tau) {
-      lapred.index <- jay.index[jay]
-      Index0 <- Hmatrices[lapred.index, ] != 0
-      X_lm_jay <- X_vlm[(0:(n_lm - 1)) * M + lapred.index, Index0,
+      linpred.index <- jay.index[jay]
+      Index0 <- Hmatrices[linpred.index, ] != 0
+      X.lm.jay <- X.vlm[(0:(n.lm - 1)) * M + linpred.index, Index0,
                         drop = FALSE]
 
       dvect[, Index0] <-
-      dvect[, Index0] + (QQQ / (1-prc[, jay])) * dprc.deta[, jay] * X_lm_jay
+      dvect[, Index0] +
+        (QQQ / (1-prc[, jay])) * dprc.deta[, jay] * X.lm.jay
     }
 
 
    dvect <- dvect * (-1 / pibbeta^2)
    dvect <- colSums(dvect)  # Now a vector
 
-    ncol_X_vlm <- nrow(R)
-    rinv <- diag(ncol_X_vlm)
+    ncol.X.vlm <- nrow(R)
+    rinv <- diag(ncol.X.vlm)
     rinv <- backsolve(R, rinv)
-    rowlen <- drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
+    rowlen <- drop(((rinv^2) %*% rep(1, ncol.X.vlm))^0.5)
     covun <- rinv %*% t(rinv)
     vecTF <- FALSE
     for (jay in 1:tau) {
-      lapred.index <- jay.index[jay]
-      vecTF <- vecTF | (Hmatrices[lapred.index, ] != 0)
+      linpred.index <- jay.index[jay]
+      vecTF <- vecTF | (Hmatrices[linpred.index, ] != 0)
     }
     vecTF.index <- (1:length(vecTF))[vecTF]
     covun <- covun[vecTF.index, vecTF.index, drop = FALSE]
@@ -86,17 +113,19 @@ N.hat.posbernoulli <-
   }
  
   list(N.hat    = N.hat,
-       SE.N.hat = if (length(R)) sqrt(ss2 + t(dvect) %*% covun %*% dvect) else
-                                 sqrt(ss2)
+       SE.N.hat = if (length(R))
+                    c(sqrt(ss2 + t(dvect) %*% covun %*% dvect)) else
+                    c(sqrt(ss2))
       )
 }
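
N.hat.posbernoulli() above returns the Horvitz-Thompson-type abundance estimate used by the positive-Bernoulli families, together with a standard error that adds an extra term based on the QR factor R of the fit when it is supplied. A minimal sketch of the point estimate and the 'coefficients known' part of the variance (not part of the commit), for a hypothetical matrix prc of fitted capture probabilities (rows = captured animals, columns = occasions):

prc <- matrix(c(0.3, 0.5, 0.2,
                0.4, 0.4, 0.4), nrow = 2, byrow = TRUE)  # hypothetical values
QQQ     <- exp(rowSums(log1p(-prc)))   # P(never captured), per animal
pibbeta <- 1 - QQQ                     # P(captured at least once)
N.hat <- sum(1 / pibbeta)              # point estimate of abundance
ss2   <- sum(QQQ / pibbeta^2)          # variance term, treating the coefficients as known
c(N.hat = N.hat, SE.N.hat = sqrt(ss2))
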
 
 
 
 
-aux.posbernoulli <- function(y, check.y = FALSE) {
-
-
+ aux.posbernoulli.t <-
+  function(y, check.y = FALSE,
+           rename = TRUE,
+           name = "bei") {
 
 
 
@@ -111,10 +140,15 @@ aux.posbernoulli <- function(y, check.y = FALSE) {
       stop("response 'y' must contain 0s and 1s only")
   }
 
-  zeddij <- cbind(0, t(apply(y, 1, cumsum))) # tau + 1 columns
-  zij <- (0 + (zeddij > 0))[, 1:tau] # 0 or 1.
-  if (length(colnames(y)))
-    colnames(zij) <- colnames(y)
+  zeddij <- cbind(0, t(apply(y, 1, cumsum)))  # tau + 1 columns
+  zij <- (0 + (zeddij > 0))[, 1:tau]  # 0 or 1.
+  if (rename) {
+    colnames(zij) <- paste(name, 1:ncol(y), sep = "")
+  } else {
+    if (length(colnames(y)))
+      colnames(zij) <- colnames(y)
+  }
+
 
   cp1 <- numeric(nrow(y))
   for (jay in tau:1)
@@ -123,8 +157,8 @@ aux.posbernoulli <- function(y, check.y = FALSE) {
     warning("some individuals were never captured!")
 
   yr1i <- zeddij[, tau + 1] - 1
-  list(cap.hist1 = zij,
-       cap1      = cp1, # aka ti1
+  list(cap.hist1 = zij,  # A matrix of the same dimension as 'y'
+       cap1      = cp1,  # Aka ti1
        y0i       = cp1 - 1,
        yr0i      = tau - cp1 - yr1i,
        yr1i      = yr1i)
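
aux.posbernoulli.t() above reduces a 0/1 capture-history matrix to the quantities needed by the behavioural-effect families. A small worked sketch (not part of the commit) reproducing those quantities for two hypothetical animals over tau = 3 occasions:

y <- rbind(c(0, 1, 1),
           c(1, 0, 0))                        # hypothetical capture histories
tau    <- ncol(y)
zeddij <- cbind(0, t(apply(y, 1, cumsum)))    # tau + 1 columns of running capture counts
zij    <- (0 + (zeddij > 0))[, 1:tau]         # indicator of a capture before occasion j
cp1 <- numeric(nrow(y))
for (jay in tau:1)
  cp1[y[, jay] > 0] <- jay                    # occasion of first capture
yr1i <- zeddij[, tau + 1] - 1                 # number of recaptures
y0i  <- cp1 - 1                               # noncaptures before the first capture
yr0i <- tau - cp1 - yr1i                      # noncaptures after the first capture
cbind(cap1 = cp1, y0i = y0i, yr0i = yr0i, yr1i = yr1i)
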
@@ -142,9 +176,9 @@ aux.posbernoulli <- function(y, check.y = FALSE) {
 rposbern <-
   function(n, nTimePts = 5, pvars = length(xcoeff),
            xcoeff = c(-2, 1, 2),
-           cap.effect = -1,
-           link = "logit",
+           cap.effect =  1,
            is.popn = FALSE,
+           link = "logit",
            earg.link = FALSE) {
 
 
@@ -154,9 +188,10 @@ rposbern <-
 
 
 
+
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
                stop("bad input for argument 'n'") else n
   orig.n <- use.n
   if (!is.popn)
@@ -185,7 +220,9 @@ rposbern <-
 
   CHmatrix <- matrix(0, use.n, nTimePts,
                      dimnames = list(as.character(1:use.n),
-                                     paste("ch", 0:(nTimePts-1), sep = "")))
+                                     paste("ch", 1:(nTimePts  ),
+                                           sep = "")))
+
 
   Xmatrix <- cbind(x1 = rep(1.0, len = use.n))
   if (pvars > 1)
@@ -216,6 +253,7 @@ rposbern <-
   }
 
 
+
   index0 <- (sumrowy == 0)
   if (all(!index0))
     stop("bug in this code: cannot handle no animals being caught")
@@ -223,13 +261,11 @@ rposbern <-
    Xmatrix <-  Xmatrix[!index0, , drop = FALSE]
   CHmatrix <- CHmatrix[!index0, , drop = FALSE]
 
-  zCHmatrix <- matrix(0, nrow(CHmatrix), ncol(CHmatrix),
-                      dimnames = list(as.character(1:nrow(CHmatrix)),
-                      paste("zch", 0:(ncol(CHmatrix)-1), sep = "")))
 
 
-  ans <- data.frame(Ymatrix, Xmatrix, CHmatrix, zCHmatrix,
-                    Chistory = rep(0, length = nrow(Ymatrix)))
+
+  ans <- data.frame(Ymatrix, Xmatrix, CHmatrix  # zCHmatrix,
+                   )
 
 
   if (!is.popn) {
@@ -303,9 +339,9 @@ dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
 
 
   LLL <- max(length(x), length(prob), length(size))
-  x    <- rep(x,    len = LLL);
-  prob <- rep(prob, len = LLL);
-  size <- rep(size, len = LLL);
+  if (length(x)    != LLL) x    <- rep(x,    len = LLL)
+  if (length(prob) != LLL) prob <- rep(prob, len = LLL)
+  if (length(size) != LLL) size <- rep(size, len = LLL)
 
   ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
   index0 <- (x == 0)
@@ -333,9 +369,9 @@ pposnegbin <- function(q, size, prob = NULL, munb = NULL) {
   }
   L <- max(length(q), length(prob), length(size))
   if (length(q)    != L)
-    q    <- rep(q,    length.out = L);
+    q    <- rep(q,    length.out = L)
   if (length(prob) != L)
-    prob <- rep(prob, length.out = L);
+    prob <- rep(prob, length.out = L)
   if (length(size) != L)
     size <- rep(size, length.out = L)
 
@@ -384,13 +420,13 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
                             nsimEIM = 250,
                             shrinkage.init = 0.95, imethod = 1) {
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
       stop("bad input for argument 'isize'")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -405,7 +441,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
   lsize <- attr(esize, "function.name")
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE))
     stop("argument 'nsimEIM' must be a positive integer")
   if (nsimEIM <= 30)
@@ -473,7 +509,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
 
     if (!length(etastart)) {
       mu.init <- y
-      for(iii in 1:ncol(y)) {
+      for (iii in 1:ncol(y)) {
         use.this <- if ( .imethod == 1) {
           weighted.mean(y[, iii], w[, iii])
         } else {
@@ -493,7 +529,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
               }
             k.grid <- 2^((-6):6)
             kmat0 <- matrix(0, nrow = n, ncol = NOS)
-            for(spp. in 1:NOS) {
+            for (spp. in 1:NOS) {
               kmat0[, spp.] <- getMaxMin(k.grid,
                                 objfun = posnegbinomial.Loglikfun,
                                 y = y[, spp.], x = x, w = w[, spp.],
@@ -533,7 +569,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", Musual*NOS)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- .emunb
       misc$earg[[Musual*ii  ]] <- .esize
     }
@@ -618,7 +654,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
 
     {
       ind2 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rposnegbin(n = n*NOS, mu = c(munb), size = c(kmat))
         dim(ysim) <- c(n, NOS)
 
@@ -628,7 +664,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
                     (ysim + kmat) / (munb + kmat) + 1 + log(tempk) +
                     df0.dkmat / oneminusf0
 
-        for(kk in 1:NOS) {
+        for (kk in 1:NOS) {
           temp2 <- cbind(dl.dmunb[, kk],
                          dl.dsize[, kk]) *
                    cbind(dmunb.deta[, kk],
@@ -669,8 +705,9 @@ pposgeom <- function(q, prob) {
   if (!is.Numeric(prob, positive = TRUE))
     stop("bad input for argument 'prob'")
   L <- max(length(q), length(prob))
-  if (length(q)    != L) q    = rep(q,    length.out = L);
-  if (length(prob) != L) prob = rep(prob, length.out = L);
+  if (length(q)    != L) q    <- rep(q,    length.out = L)
+  if (length(prob) != L) prob <- rep(prob, length.out = L)
+
   ifelse(q < 1, 0,
         (pgeom(q, prob) -
          dgeom(0, prob))
@@ -716,7 +753,8 @@ dpospois <- function(x, lambda, log = FALSE) {
   if (!is.Numeric(lambda, positive = TRUE))
     stop("bad input for argument 'lambda'")
   L <- max(length(x), length(lambda))
-  x <- rep(x, len = L); lambda <- rep(lambda, len = L); 
+  if (length(x)      != L) x      <- rep(x,      len = L)
+  if (length(lambda) != L) lambda <- rep(lambda, len = L)
 
   ans <- if (log.arg) {
     ifelse(x == 0, log(0.0), dpois(x, lambda, log = TRUE) -
@@ -732,8 +770,8 @@ ppospois <- function(q, lambda) {
   if (!is.Numeric(lambda, positive = TRUE))
     stop("bad input for argument 'lambda'")
   L <- max(length(q), length(lambda))
-  if (length(q)      != L) q      <- rep(q,      length.out = L);
-  if (length(lambda) != L) lambda <- rep(lambda, length.out = L);
+  if (length(q)      != L) q      <- rep(q,      length.out = L)
+  if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
 
   ifelse(q < 1, 0,
         (ppois(q, lambda) -
@@ -794,7 +832,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
   if (length( ilambda) && !is.Numeric(ilambda, positive = TRUE))
     stop("bad input for argument 'ilambda'")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -874,7 +912,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$Musual <- Musual
@@ -928,9 +966,9 @@ pposbinom <- function(q, size, prob
   if (!is.Numeric(prob, positive = TRUE)) 
     stop("no zero or non-numeric values allowed for argument 'prob'")
   L <- max(length(q), length(size), length(prob))
-  if (length(q)      != L) q      <- rep(q,      length.out = L);
-  if (length(size)   != L) size   <- rep(size,   length.out = L);
-  if (length(prob)   != L) prob   <- rep(prob,   length.out = L);
+  if (length(q)      != L) q      <- rep(q,      length.out = L)
+  if (length(size)   != L) size   <- rep(size,   length.out = L)
+  if (length(prob)   != L) prob   <- rep(prob,   length.out = L)
 
   ifelse(q < 1, 0,
         (pbinom(q = q, size = size, prob = prob) -
@@ -970,9 +1008,9 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
 
   L <- max(length(x), length(size), length(prob))
-  x    <- rep(x,    len = L);
-  size <- rep(size, len = L);
-  prob <- rep(prob, len = L);
+  if (length(x)      != L) x    <- rep(x,    len = L)
+  if (length(size)   != L) size <- rep(size, len = L)
+  if (length(prob)   != L) prob <- rep(prob, len = L)
 
   answer <- NaN * x
   is0 <- (x == 0)
@@ -999,7 +1037,14 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
  posbinomial <-
   function(link = "logit",
-           mv = FALSE, parallel = FALSE, zero = NULL) {
+           mv = FALSE, parallel = FALSE,
+           omit.constant = FALSE,
+
+           p.small = 1e-4, no.warning = FALSE,
+
+           zero = NULL) {
+
+
 
 
   link <- as.list(substitute(link))
@@ -1011,11 +1056,18 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   if (!is.logical(mv) || length(mv) != 1)
     stop("bad input for argument 'mv'")
 
+  if (!is.logical(omit.constant) || length(omit.constant) != 1)
+    stop("bad input for argument 'omit.constant'")
+
   if (mv && length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
 
 
+  if (!is.Numeric(p.small, positive = TRUE, length.arg = 1))
+    stop("bad input for argument 'p.small'")
+
+
   new("vglmff",
   blurb = c("Positive-binomial distribution\n\n",
             "Links:    ",
@@ -1026,7 +1078,9 @@ dposbinom <- function(x, size, prob, log = FALSE) {
             namesof("prob", link, earg = earg, tag = FALSE),
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+                           bool = .parallel , 
+                           constraints = constraints)
 
     dotzero <- .zero
     Musual <- 1
@@ -1034,8 +1088,12 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(Musual = 1,
-         zero = .zero)
-  }, list( .zero = zero ))),
+         p.small    = .p.small ,
+         no.warning = .no.warning ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .p.small    = p.small,
+           .no.warning = no.warning ))),
 
   initialize = eval(substitute(expression({
 
@@ -1059,12 +1117,16 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     extra$Musual <- Musual
     M <- Musual * ncoly
 
+
+    extra$p.small    <- .p.small
+    extra$no.warning <- .no.warning
+
       extra$orig.w <- w
-      mustart <- matrix(colSums(y) / colSums(w), # Not colSums(y * w)...
+      mustart <- matrix(colSums(y) / colSums(w),  # Not colSums(y * w)...
                         n, ncoly, byrow = TRUE)
 
     } else {
-      eval(binomialff(link = .earg , # earg = .earg ,
+      eval(binomialff(link = .earg ,  # earg = .earg ,
                       earg.link = TRUE)@initialize)
     }
 
@@ -1077,11 +1139,12 @@ dposbinom <- function(x, size, prob, log = FALSE) {
       } else {
         paste("prob", 1:M, sep = "")
       }
-      predictors.names <- namesof(if (M > 1) dn2 else
-        "prob", .link , earg = .earg, short = TRUE)
+      predictors.names <-
+        namesof(if (M > 1) dn2 else "prob",
+                .link , earg = .earg, short = TRUE)
 
       w <- matrix(w, n, ncoly)
-      y <- y / w # Now sample proportion
+      y <- y / w  # Now sample proportion
     } else {
       predictors.names <-
         namesof("prob", .link , earg = .earg , tag = FALSE)
@@ -1094,7 +1157,20 @@ dposbinom <- function(x, size, prob, log = FALSE) {
       etastart <- cbind(theta2eta(mustart.use, .link , earg = .earg ))
     }
     mustart <- NULL
+
+
+
+    nvec <- if (ncol(as.matrix(y)) > 1) {
+              NULL
+            } else {
+              if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+              round(w)
+            }
+    extra$tau <- if (length(nvec) && length(unique(nvec)) == 1)
+                   nvec[1] else NULL
   }), list( .link = link,
+            .p.small    = p.small,
+            .no.warning = no.warning,
             .earg = earg, .mv = mv ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -1119,39 +1195,65 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
-
+    misc$omit.constant <- .omit.constant
+    misc$needto.omit.constant <- TRUE  # Safety mechanism
+    
+    
     misc$mv   <- .mv
     w <- as.numeric(w)
-  }), list( .link = link, .earg = earg, .mv = mv ))),
+
+
+
+if (length(extra$tau)) {
+    R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
+    R[lower.tri(R)] <- 0
+    tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
+                               R = R, w = w,
+                               X.vlm = X.vlm.save, Hlist = constraints,
+                               extra = extra, model.type = "0")
+    extra$N.hat    <- tmp6$N.hat
+    extra$SE.N.hat <- tmp6$SE.N.hat
+}
+
+    
+  }), list( .link = link, .earg = earg, .mv = mv,
+            .omit.constant = omit.constant ))),
 
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
 
       ycounts <- if ( .mv ) {
                   round(y * extra$orig.w)
-                } else {
-                  if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-                  y * w # Convert proportions to counts
-                }
+                 } else {
+                   if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+                   y * w  # Convert proportions to counts
+                 }
       nvec <- if ( .mv ) {
-               w
-             } else {
-               if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
-                 round(w)
-             }
+                w
+              } else {
+                if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+                  round(w)
+              }
       use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else 1
     binprob <- eta2theta(eta, .link , earg = .earg )
 
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
-      sum(use.orig.w * dposbinom(x = ycounts, size = nvec,
-                                 prob = binprob, log = TRUE))
+
+      answer <- sum(use.orig.w * dposbinom(x = ycounts, size = nvec,
+                                           prob = binprob, log = TRUE))
+      if ( .omit.constant ) {
+        answer <- answer - sum(use.orig.w * lchoose(n = nvec, k = ycounts))
+      }
+      answer
     }
-  }, list( .link = link, .earg = earg, .mv = mv ))),
+  }, list( .link = link, .earg = earg,
+          .mv = mv,
+          .omit.constant = omit.constant ))),
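
The new 'omit.constant' argument subtracts the binomial coefficients from the log-likelihood above, which appears intended to put posbinomial() fits (counts out of nvec trials) on the same log-likelihood scale as Bernoulli-format fits. The identity being used, checked numerically (a sketch, not part of the commit):

k <- 3; n <- 5; p <- 0.4                     # hypothetical values
dbinom(k, n, p, log = TRUE) - lchoose(n, k)  # binomial log-density minus the constant
k * log(p) + (n - k) * log(1 - p)            # sum of the n Bernoulli log-probabilities
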
 
   vfamily = c("posbinomial"),
   deriv = eval(substitute(expression({
@@ -1198,15 +1300,25 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
  posbernoulli.t <-
   function(link = "logit",
-           parallel.t = FALSE,
-           apply.parint = TRUE,
-           iprob = NULL) {
 
+           parallel.t = FALSE ~ 1,
+
+
+
+           iprob = NULL,
+
+           p.small = 1e-4, no.warning = FALSE) {
 
 
 
 
 
+
+
+  apply.parint <- FALSE
+
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
@@ -1221,18 +1333,23 @@ dposbinom <- function(x, size, prob, log = FALSE) {
       length(apply.parint) != 1)
     stop("argument 'apply.parint' must be a single logical")
 
+  if (!is.Numeric(p.small, positive = TRUE, length.arg = 1))
+    stop("bad input for argument 'p.small'")
+
 
   new("vglmff",
-  blurb = c("(Multiple) positive-Bernoulli (capture-recapture) model ",
-            "with temporal effects (M_t)\n\n",
+  blurb = c("Positive-Bernoulli (capture-recapture) model ",
+            "with temporal effects (M_{t}/M_{th})\n\n",
             "Links:    ",
             namesof("prob1", link, earg = earg, tag = FALSE), ", ",
             namesof("prob2", link, earg = earg, tag = FALSE), ", ..., ",
             namesof("probM", link, earg = earg, tag = FALSE),
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel.t , constraints,
-                           apply.int = .apply.parint , #  TRUE,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+                           bool = .parallel.t , 
+                           constraints = constraints,
+                           apply.int = .apply.parint ,  #  TRUE,
                            cm.default = diag(M),
                            cm.intercept.default = diag(M))
   }), list( .parallel.t = parallel.t,
@@ -1240,9 +1357,13 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   infos = eval(substitute(function(...) {
     list(Musual = 1,
          multipleResponses = TRUE,
+         p.small    = .p.small ,
+         no.warning = .no.warning ,
          apply.parint = .apply.parint ,
          parallel.t = .parallel.t )
-  }, list( .parallel.t = parallel.t,
+  }, list( .parallel.t   = parallel.t,
+           .p.small    = p.small,
+           .no.warning = no.warning,          
            .apply.parint = apply.parint ))),
 
   initialize = eval(substitute(expression({
@@ -1251,9 +1372,14 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     mustart.orig <- mustart
     y <- as.matrix(y)
     M <- ncoly <- ncol(y)
+    extra$ncoly       <- ncoly <- ncol(y)
     extra$tau <- tau <- ncol(y)
     extra$orig.w <- w
 
+    extra$p.small    <- .p.small
+    extra$no.warning <- .no.warning
+    
+
     w <- matrix(w, n, ncoly)
     mustart <- matrix(colSums(y) / colSums(w),
                     n, ncol(y), byrow = TRUE)
@@ -1297,13 +1423,21 @@ dposbinom <- function(x, size, prob, log = FALSE) {
       etastart <- cbind(theta2eta(mustart.use, .link , earg = .earg ))
     }
     mustart <- NULL
-  }), list( .link = link, .earg = earg ))),
+  }), list( .link = link, .earg = earg,
+            .p.small    = p.small,
+            .no.warning = no.warning
+           ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    tau <- extra$ncoly
     probs <- eta2theta(eta, .link , earg = .earg )
     logAA0 <- rowSums(log1p(-probs))
     AA0 <- exp(logAA0)
     AAA <- exp(log1p(-AA0))  # 1 - AA0
-    probs / AAA
+
+
+
+    fv <- probs / AAA
+    fv
   }, list( .link = link, .earg = earg ))),
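
The linkinv slot above returns capture probabilities conditioned on at least one capture, fv[, j] = p_j / (1 - prod_k (1 - p_k)). A one-animal sketch (not part of the commit):

probs <- c(0.2, 0.3, 0.4)    # hypothetical capture probabilities, tau = 3
AA0   <- prod(1 - probs)     # P(never captured)
probs / (1 - AA0)            # conditional capture probabilities (the fitted values)
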
   last = eval(substitute(expression({
     extra$w   <- NULL   # Kill it off 
@@ -1314,19 +1448,18 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M) misc$earg[[ii]] <- .earg
+    for (ii in 1:M) misc$earg[[ii]] <- .earg
 
 
     misc$mv           <- TRUE
     misc$iprob        <- .iprob
 
 
-
-    R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+    R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
     R[lower.tri(R)] <- 0
     tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
                                R = R, w = w,
-                               X_vlm = X_vlm_save, Hlist = constraints,
+                               X.vlm = X.vlm.save, Hlist = constraints,
                                extra = extra, model.type = "t")
     extra$N.hat    <- tmp6$N.hat
     extra$SE.N.hat <- tmp6$SE.N.hat
@@ -1351,20 +1484,17 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
 
-      sum(dposbern(x = ycounts, # size = 1, # Bernoulli trials
+      sum(dposbern(x = ycounts,  # size = 1,  # Bernoulli trials
                    prob = probs, prob0 = probs, log = TRUE))
 
 
       sum(use.orig.w *
-          dposbern(x = ycounts, # size = 1, # Bernoulli trials
+          dposbern(x = ycounts,  # size = 1,  # Bernoulli trials
                    prob = probs, prob0 = probs, log = TRUE))
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("posbernoulli.t"),
   deriv = eval(substitute(expression({
-
- 
-
     probs <- eta2theta(eta, .link , earg = .earg )
     dprobs.deta <- dtheta.deta(probs, .link , earg = .earg )
 
@@ -1372,38 +1502,34 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     AA0 <- exp(logAA0)
     AAA <- exp(log1p(-AA0))  # 1 - AA0
 
-    B_s <- AA0 / (1 - probs)
-    B_st <- array(AA0, c(n, M, M))
-    for(slocal in 1:(M-1))
-      for(tlocal in (slocal+1):M)
-        B_st[, slocal, tlocal] =
-        B_st[, tlocal, slocal] <- B_s[, slocal] / (1 - probs[, tlocal])
-
-
-
+    B.s <- AA0 / (1 - probs)
+    B.st <- array(AA0, c(n, M, M))
+    for (slocal in 1:(M-1))
+      for (tlocal in (slocal+1):M)
+        B.st[, slocal, tlocal] <-
+        B.st[, tlocal, slocal] <- B.s[, slocal] / (1 - probs[, tlocal])
 
     temp2 <-     (1 - probs)^2
+    dl.dprobs <- y / probs - (1 - y) / (1 - probs) - B.s / AAA
 
-    dl.dprobs <- y / probs - (1 - y) / (1 - probs) - B_s / AAA
-
-    deriv.ans <- w * dl.dprobs * dprobs.deta
+    deriv.ans <- c(w) * dl.dprobs * dprobs.deta
     deriv.ans
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
 
-    ed2l.dprobs2 <- 1 / (probs * AAA) + 1 / temp2 -
-                probs / (AAA * temp2) - (B_s / AAA)^2
+    ned2l.dprobs2 <- 1 / (probs * AAA) + 1 / temp2 -
+                     probs / (AAA * temp2) - (B.s / AAA)^2
 
     wz <- matrix(as.numeric(NA), n, dimm(M))
-    wz[, 1:M] <- ed2l.dprobs2 * (dprobs.deta^2)
+    wz[, 1:M] <- ned2l.dprobs2 * (dprobs.deta^2)
 
-    for(slocal in 1:(M-1))
-      for(tlocal in (slocal+1):M)
+    for (slocal in 1:(M-1))
+      for (tlocal in (slocal+1):M)
         wz[, iam(slocal, tlocal, M = M)] <- dprobs.deta[, slocal] *
                                             dprobs.deta[, tlocal] *
-                                            (B_st[,slocal,tlocal] +
-                                             B_s [,slocal] *
-                                             B_s [,tlocal] / AAA) / (-AAA)
+                                            (B.st[,slocal,tlocal] +
+                                             B.s [,slocal] *
+                                             B.s [,tlocal] / AAA) / (-AAA)
 
 
 
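
The score in the deriv slot above follows from the conditional log-likelihood sum_j [y_j log p_j + (1 - y_j) log(1 - p_j)] - log(1 - prod_k (1 - p_k)), with B.s equal to the product of (1 - p_k) over k != s. A finite-difference check for one hypothetical animal (a sketch, not part of the commit):

probs <- c(0.2, 0.3, 0.4)            # capture probabilities, tau = 3
y     <- c(0, 1, 1)                  # one capture history
ll <- function(p)
  sum(y * log(p) + (1 - y) * log(1 - p)) - log(1 - prod(1 - p))
AA0 <- prod(1 - probs); AAA <- 1 - AA0
B.s <- AA0 / (1 - probs)
y / probs - (1 - y) / (1 - probs) - B.s / AAA      # analytic score, as in the deriv slot
eps <- 1e-6
sapply(1:3, function(s) {
  p2 <- probs; p2[s] <- p2[s] + eps
  (ll(p2) - ll(probs)) / eps                       # numerical score
})
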
@@ -1415,63 +1541,89 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
 
 
+
  posbernoulli.b <-
   function(link = "logit",
-           parallel.b = FALSE,  # TRUE,
-           apply.parint = TRUE,
-           icap.prob = NULL,
-           irecap.prob = NULL
-          ) {
 
 
+           drop.b = FALSE ~ 1,
+
+
+           type.fitted = c("likelihood.cond", "mean.uncond"),
 
+           I2 = FALSE,
+           ipcapture = NULL,
+           iprecapture = NULL,
+           p.small = 1e-4, no.warning = FALSE
+           ) {
 
-  fit.type <- 1  # Currently only this is implemented
+
+
+
+  type.fitted <- match.arg(type.fitted,
+                           c("likelihood.cond", "mean.uncond"))[1]
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
+  apply.parint.b <- FALSE
+
+
+  if (length(ipcapture))
+  if (!is.Numeric(ipcapture, positive = TRUE) ||
+        max(ipcapture) >= 1)
+    stop("argument 'ipcapture' must have values in (0, 1)")
+  if (length(iprecapture))
+  if (!is.Numeric(iprecapture, positive = TRUE) ||
+        max(iprecapture) >= 1)
+    stop("argument 'iprecapture' must have values in (0, 1)")
 
-  if (length(icap.prob))
-  if (!is.Numeric(icap.prob, positive = TRUE) ||
-        max(icap.prob) >= 1)
-    stop("argument 'icap.prob' must have values in (0, 1)")
-  if (length(irecap.prob))
-  if (!is.Numeric(irecap.prob, positive = TRUE) ||
-        max(irecap.prob) >= 1)
-    stop("argument 'irecap.prob' must have values in (0, 1)")
+  if (!is.logical(I2) ||
+      length(I2) != 1)
+    stop("argument 'I2' must be a single logical")
 
-  if (!is.logical(parallel.b) ||
-      length(parallel.b) != 1)
-    stop("argument 'parallel.b' must be a single logical")
+
+  if (!is.Numeric(p.small, positive = TRUE, length.arg = 1))
+    stop("bad input for argument 'p.small'")
+
+ 
 
 
   new("vglmff",
-  blurb = c("(Multiple) positive-Bernoulli (capture-recapture) model ",
-            "with behavioural effects (M_b)\n\n",
+  blurb = c("Positive-Bernoulli (capture-recapture) model ",
+            "with behavioural effects (M_{b}/M_{bh})\n\n",
             "Links:    ",
-            namesof("cap.prob",   link, earg = earg, tag = FALSE), ", ",
-            namesof("recap.prob", link, earg = earg, tag = FALSE),
+            namesof("pcapture",   link, earg = earg, tag = FALSE), ", ",
+            namesof("precapture", link, earg = earg, tag = FALSE),
             "\n"),
 
   constraints = eval(substitute(expression({
 
+    cm.intercept.default <- if ( .I2 ) diag(2) else cbind(0:1, 1)
+
     constraints <- cm.vgam(matrix(1, 2, 1), x = x,
-                           bool = .parallel.b ,
+                           bool = .drop.b ,
                            constraints = constraints,
-                           apply.int = .apply.parint ,  # TRUE, 
-                           cm.default = matrix(1, 2, 1),
-                           cm.intercept.default = cbind(1, 0:1))
-  }), list( .parallel.b = parallel.b,
-            .apply.parint = apply.parint ))),
+                           apply.int = .apply.parint.b ,  # TRUE, 
+                           cm.default = cm.intercept.default,  # diag(2),
+                           cm.intercept.default = cm.intercept.default)
+  }), list( .drop.b = drop.b,
+            .I2 = I2,
+            .apply.parint.b = apply.parint.b ))),
 
   infos = eval(substitute(function(...) {
-    list( Musual = 2,
-         apply.parint = .apply.parint ,
+    list(Musual = 2,
+         p.small    = .p.small ,
+         no.warning = .no.warning ,
+         type.fitted = .type.fitted ,
+         apply.parint.b = .apply.parint.b ,
          multipleResponses = FALSE)
   }, list(
-           .apply.parint = apply.parint
+           .apply.parint.b = apply.parint.b,
+           .p.small    = p.small,
+           .no.warning = no.warning,
+           .type.fitted = type.fitted
          ))),
 
   initialize = eval(substitute(expression({
@@ -1484,12 +1636,23 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
     orig.y <- y
     extra$orig.w <- w
-    extra$tau <- tau <- ncol(y)
+    extra$tau     <- tau   <- ncol(y)
+    extra$ncoly   <- ncoly <- ncol(y)
+    extra$type.fitted      <- .type.fitted
+    extra$dimnamesy <- dimnames(y)
+
+
+    extra$p.small    <- .p.small
+    extra$no.warning <- .no.warning
+
+
+    
+
     mustart.orig <- mustart
     M <- 2
 
 
-    tmp3 <- aux.posbernoulli(y)
+    tmp3 <- aux.posbernoulli.t(y, rename = FALSE)
     y0i        <- extra$y0i  <-       tmp3$y0i
     yr0i       <- extra$yr0i <-       tmp3$yr0i
     yr1i       <- extra$yr1i <-       tmp3$yr1i
@@ -1517,12 +1680,8 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
 
     predictors.names <-
-      c(namesof(  "cap.prob",  .link , earg = .earg, short = TRUE),
-        namesof("recap.prob",  .link , earg = .earg, short = TRUE))
-
-    if (tau >= 4) {
-      pbd <- posbern.aux(tau = tau)
-    }
+      c(namesof(  "pcapture",  .link , earg = .earg, short = TRUE),
+        namesof("precapture",  .link , earg = .earg, short = TRUE))
 
     if (!length(etastart)) {
       mustart.use <- if (length(mustart.orig)) {
@@ -1535,40 +1694,73 @@ dposbinom <- function(x, size, prob, log = FALSE) {
         cbind(theta2eta(rowMeans(mustart.use), .link , earg = .earg ),
               theta2eta(rowMeans(mustart.use), .link , earg = .earg ))
 
-      if (length(   .icap.prob ))
-        etastart[, 1] <- theta2eta(   .icap.prob , .link , earg = .earg )
-      if (length( .irecap.prob ))
-        etastart[, 2] <- theta2eta( .irecap.prob , .link , earg = .earg )
+      if (length(   .ipcapture ))
+        etastart[, 1] <- theta2eta(   .ipcapture , .link , earg = .earg )
+      if (length( .iprecapture ))
+        etastart[, 2] <- theta2eta( .iprecapture , .link , earg = .earg )
     }
     mustart <- NULL
   }), list( .link = link, .earg = earg,
-              .icap.prob =   icap.prob,
-            .irecap.prob = irecap.prob
+            .type.fitted = type.fitted,
+            .p.small    = p.small,
+            .no.warning = no.warning,
+            .ipcapture =   ipcapture,
+            .iprecapture = iprecapture
           ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
     rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
-    cap.probs <- matrix(cap.probs, nrow(eta), extra$tau)
-    rec.probs <- matrix(rec.probs, nrow(eta), extra$tau)
     tau <- extra$tau
+    prc <- matrix(cap.probs, nrow(eta), tau)
+    prr <- matrix(rec.probs, nrow(eta), tau)
+    logQQQ <- rowSums(log1p(-prc))
+    QQQ <- exp(logQQQ)
+    AAA <- exp(log1p(-QQQ))  # 1 - QQQ
+
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning 'likelihood.cond'.")
+                     "likelihood.cond"
+                   }
+
+
+    type.fitted <- match.arg(type.fitted,
+                             c("likelihood.cond", "mean.uncond"))[1]
+
+
+    if ( type.fitted == "likelihood.cond") {
+      probs.numer <- prr 
+      mat.index <- cbind(1:nrow(prc), extra$cap1)
+      probs.numer[mat.index] <- prc[mat.index]
+      probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
+      fv <- probs.numer / AAA
 
-    if ( .fit.type == 1) {
-      fv <- rec.probs
-      mat.index <- cbind(1:nrow(fv), extra$cap1)
-      fv[mat.index] <- cap.probs[mat.index]
-      fv[extra$cap.hist1 == 0] <- cap.probs[extra$cap.hist1 == 0]
-    } else if ( .fit.type == 2) {
-      fv <- cap.probs
-    } else if ( .fit.type == 3) {
-      fv <- rec.probs
-    } else if ( .fit.type == 4) {
-      stop("argument 'fit.type' unmatched")
     } else {
-      stop("argument 'fit.type' unmatched")
+
+
+      fv <- prc - prr
+      for (jay in 2:tau)
+        fv[, jay] <- fv[, jay-1] * (1 - cap.probs)
+      fv <- (fv + prr) / AAA
     }
-    fv
+
+
+
+    ans <- fv
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
   }, list( .link = link,
-           .fit.type = fit.type,
+           .type.fitted = type.fitted,
            .earg = earg ))),
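
For type.fitted = "mean.uncond" the recursion above has a closed form: the capture probability prc applies up to and including the first capture and prr afterwards, so P(captured at occasion j | captured at least once) = (prr + (prc - prr) * (1 - prc)^(j - 1)) / (1 - (1 - prc)^tau). A scalar sketch (not part of the commit):

prc <- 0.3; prr <- 0.5; tau <- 4    # hypothetical capture / recapture probabilities
AAA <- 1 - (1 - prc)^tau            # P(captured at least once)
jay <- 1:tau
(prr + (prc - prr) * (1 - prc)^(jay - 1)) / AAA    # fitted means for occasions 1..tau
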
   last = eval(substitute(expression({
 
@@ -1582,51 +1774,52 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
     misc$expected    <- TRUE
     misc$mv          <- TRUE
-    misc$icap.prob   <- .icap.prob
-    misc$irecap.prob <- .irecap.prob
-    misc$parallel.b  <- .parallel.b
-    misc$fit.type    <- .fit.type
+    misc$ipcapture   <- .ipcapture
+    misc$iprecapture <- .iprecapture
+    misc$drop.b      <- .drop.b
     misc$multipleResponses <- FALSE
-    if (tau >= 4) {
-      misc$pbd       <- pbd  # Needed for vcov() post-analysis.
-    }
-    misc$apply.parint <- .apply.parint
+    misc$apply.parint.b <- .apply.parint.b
 
 
 
-    R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+    R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
     R[lower.tri(R)] <- 0
     tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
                                R = R, w = w,
-                               X_vlm = X_vlm_save, Hlist = constraints,
+                               X.vlm = X.vlm.save, Hlist = constraints,
                                extra = extra, model.type = "b")
     extra$N.hat    <- tmp6$N.hat
     extra$SE.N.hat <- tmp6$SE.N.hat
 
 
   }), list( .link = link, .earg = earg,
-            .fit.type = fit.type,
-            .parallel.b = parallel.b,
-            .icap.prob =   icap.prob,
-            .irecap.prob = irecap.prob,
-            .apply.parint = apply.parint
+            .drop.b = drop.b,
+            .ipcapture =   ipcapture,
+            .iprecapture = iprecapture,
+            .apply.parint.b = apply.parint.b
           ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
 
+    tau <- extra$ncoly
     ycounts <- y
     use.orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
 
     cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
     rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
-    cap.probs <- matrix(cap.probs, nrow(eta), extra$tau)
-    rec.probs <- matrix(rec.probs, nrow(eta), extra$tau)
+    prc <- matrix(cap.probs, nrow(eta), tau)
+    prr <- matrix(rec.probs, nrow(eta), tau)
 
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
+      probs.numer <- prr 
+      mat.index <- cbind(1:nrow(prc), extra$cap1)
+      probs.numer[mat.index] <- prc[mat.index]
+      probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
+
       sum(use.orig.w *
           dposbern(x = ycounts,  # Bernoulli trials
-                   prob = mu, prob0 = cap.probs, log = TRUE))
+                   prob = probs.numer, prob0 = prc, log = TRUE))
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("posbernoulli.b"),
@@ -1658,310 +1851,196 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
   weight = eval(substitute(expression({
 
-    wz <- matrix(0, n, M) # Diagonal EIM
-
-
-    if (tau == 2)
-      wz[, iam(2, 2, M = M)] <- (cap.probs / (rec.probs * (1 - rec.probs) *
-                                 (1 - QQQ))) * drecprobs.deta^2
-    if (tau == 3)
-      wz[, iam(2, 2, M = M)] <- (cap.probs * (3 - cap.probs) / (
-                                 rec.probs * (1 - rec.probs) *
-                                 (1 - QQQ))) * drecprobs.deta^2
-
-
-    if (tau >= 4) {
-                                   # rec.probs = rec.probs)
-      eim.rec.tot <- 0
-      for (ii in 1:nrow(pbd$part1.rec)) {
-        if (pbd$ml..konst.rec[ii, 1] != 0)
-          eim.rec.tot <- eim.rec.tot +
-          pbd$ml..konst.rec[ii, 1] * ((  cap.probs)^pbd$part1.rec[ii, 1] *
-                                      (1-cap.probs)^pbd$part1.rec[ii, 2] *
-                                      (  rec.probs)^pbd$part1.rec[ii, 3] *
-                                      (1-rec.probs)^pbd$part1.rec[ii, 4])
-        if (pbd$ml..konst.rec[ii, 2] != 0)
-          eim.rec.tot <- eim.rec.tot +
-          pbd$ml..konst.rec[ii, 2] * ((  cap.probs)^pbd$part2.rec[ii, 1] *
-                                      (1-cap.probs)^pbd$part2.rec[ii, 2] *
-                                      (  rec.probs)^pbd$part2.rec[ii, 3] *
-                                      (1-rec.probs)^pbd$part2.rec[ii, 4])
-        if (pbd$ml..konst.rec[ii, 3] != 0)
-          eim.rec.tot <- eim.rec.tot +
-          pbd$ml..konst.rec[ii, 3] * ((  cap.probs)^pbd$part3.rec[ii, 1] *
-                                      (1-cap.probs)^pbd$part3.rec[ii, 2] *
-                                      (  rec.probs)^pbd$part3.rec[ii, 3] *
-                                      (1-rec.probs)^pbd$part3.rec[ii, 4])
-        if (pbd$ml..konst.rec[ii, 4] != 0)
-          eim.rec.tot <- eim.rec.tot +
-          pbd$ml..konst.rec[ii, 4] * ((  cap.probs)^pbd$part4.rec[ii, 1] *
-                                      (1-cap.probs)^pbd$part4.rec[ii, 2] *
-                                      (  rec.probs)^pbd$part4.rec[ii, 3] *
-                                      (1-rec.probs)^pbd$part4.rec[ii, 4])
-      }
-      eim.rec.tot <- (eim.rec.tot / (1 - QQQ)) * drecprobs.deta^2
-      wz[, iam(2, 2, M = M)] <- eim.rec.tot
-    }
-
-
+    wz <- matrix(0, n, M)  # Diagonal EIM
 
 
 
     dA.dcapprobs <- -tau * ((1 - QQQ) * (tau-1) * (1 - cap.probs)^(tau-2) +
-                            tau * (1 - cap.probs)^(2*tau -2)) / (1 - QQQ)^2
-
-    if (tau == 2)
-      wz[, iam(1, 1, M = M)] <-
-        ((2 - 3 * cap.probs + 2 * cap.probs^2) / ((1 - QQQ) *
-        cap.probs * (1 - cap.probs)) + dA.dcapprobs) *
-        dcapprobs.deta^2
-    if (tau == 3)
-      wz[, iam(1, 1, M = M)] <-
-        ((3 + cap.probs * (-6 + cap.probs * (7 + cap.probs * (-3)))) / (
-         (1 - QQQ) * cap.probs * (1 - cap.probs)) + dA.dcapprobs) *
-        dcapprobs.deta^2
-
-
-    if (tau >= 4) {
-
-      eim.cap.tot <- 0
-      for (ii in 1:nrow(pbd$part1.cap)) {
-        if (pbd$ml..konst.cap[ii, 1] != 0)
-          eim.cap.tot <- eim.cap.tot +
-          pbd$ml..konst.cap[ii, 1] * ((  cap.probs)^pbd$part1.cap[ii, 1] *
-                                      (1-cap.probs)^pbd$part1.cap[ii, 2] *
-                                      (  rec.probs)^pbd$part1.cap[ii, 3] *
-                                      (1-rec.probs)^pbd$part1.cap[ii, 4])
-        if (pbd$ml..konst.cap[ii, 2] != 0)
-          eim.cap.tot <- eim.cap.tot +
-          pbd$ml..konst.cap[ii, 2] * ((  cap.probs)^pbd$part2.cap[ii, 1] *
-                                      (1-cap.probs)^pbd$part2.cap[ii, 2] *
-                                      (  rec.probs)^pbd$part2.cap[ii, 3] *
-                                      (1-rec.probs)^pbd$part2.cap[ii, 4])
-        if (pbd$ml..konst.cap[ii, 3] != 0)
-          eim.cap.tot <- eim.cap.tot +
-          pbd$ml..konst.cap[ii, 3] * ((  cap.probs)^pbd$part3.cap[ii, 1] *
-                                      (1-cap.probs)^pbd$part3.cap[ii, 2] *
-                                      (  rec.probs)^pbd$part3.cap[ii, 3] *
-                                      (1-rec.probs)^pbd$part3.cap[ii, 4])
-        if (pbd$ml..konst.cap[ii, 4] != 0)
-          eim.cap.tot <- eim.cap.tot +
-          pbd$ml..konst.cap[ii, 4] * ((  cap.probs)^pbd$part4.cap[ii, 1] *
-                                      (1-cap.probs)^pbd$part4.cap[ii, 2] *
-                                      (  rec.probs)^pbd$part4.cap[ii, 3] *
-                                      (1-rec.probs)^pbd$part4.cap[ii, 4])
-      }
-      eim.cap.tot <- (eim.cap.tot / (1 - QQQ) + dA.dcapprobs) *
-                     dcapprobs.deta^2
-      wz[, iam(1, 1, M = M)] <- eim.cap.tot
-    }
-
-
-    wz <- c(w) * wz
-    wz
-  }), list( .link = link, .earg = earg ))))
-}
-
-
-
-posbern.aux <- function(tau) {
-
-  y.all <- matrix(0, 2^tau - 0, tau)
-  for (jlocal in 1:tau)
-    y.all[, jlocal] <- c(rep(0, len = 2^(tau-jlocal)),
-                         rep(1, len = 2^(tau-jlocal)))
-  y.all <- y.all[-1, ]
-
-  aux <- aux.posbernoulli(y.all, check.y = FALSE)
-
-
-  nstar <- nrow(y.all)
-    l.power.cap <- matrix(0, nstar, 4)
-    l.konst.cap <- matrix(0, nstar, 4)
-  ml..power.cap <- matrix(0, nstar, 4)
-  ml..konst.cap <- matrix(0, nstar, 4)
-    l.power.rec <- matrix(0, nstar, 4)
-    l.konst.rec <- matrix(0, nstar, 4)
-  ml..power.rec <- matrix(0, nstar, 4)
-  ml..konst.rec <- matrix(0, nstar, 4)
-
-
-
-  l.power.rec[, 3] <- -1
-  l.power.rec[, 4] <- -1
-  for (jlocal in 1:tau) {
-    l.konst.rec[, 3] <-
-    l.konst.rec[, 3] + ifelse(y.all[, jlocal] >  0 & jlocal > aux$cap1, 1, 0)
-    l.konst.rec[, 4] <-
-    l.konst.rec[, 4] - ifelse(y.all[, jlocal] == 0 & jlocal > aux$cap1, 1, 0)
-  }
-
-
-
-  ml..power.rec[, 3] <- -2
-  ml..power.rec[, 4] <- -2
-  ml..konst.rec[, 3] <-  l.konst.rec[, 3]
-  ml..konst.rec[, 4] <- -l.konst.rec[, 4]
-
+                     tau * (1 - cap.probs)^(2*tau -2)) / (1 - QQQ)^2
 
 
-  mux.mat <- cbind(1, aux$y0i, aux$yr1i, aux$yr0i)
-  part1.rec <- mux.mat + cbind(ml..power.rec[, 1], 0, 0, 0)
-  part2.rec <- mux.mat + cbind(0, ml..power.rec[, 2], 0, 0)
-  part3.rec <- mux.mat + cbind(0, 0, ml..power.rec[, 3], 0)
-  part4.rec <- mux.mat + cbind(0, 0, 0, ml..power.rec[, 4])
 
 
 
+    prc <- matrix(cap.probs, n, tau)
+    prr <- matrix(rec.probs, n, tau)
 
+    dQ.dprc   <- -QQQ / (1 - prc)
+    QQQcummat <- exp(t( apply(log1p(-prc), 1, cumsum)))
 
 
 
-  l.power.cap[, 1] <-  1
-  l.power.cap[, 2] <- -1
-  l.konst.cap[, 1] <-  1
-  l.konst.cap[, 2] <- -aux$y0i
-
-
-
-  ml..power.cap[, 1] <- -2
-  ml..power.cap[, 2] <- -2
-  ml..konst.cap[, 1] <-  1
-  ml..konst.cap[, 2] <-  aux$y0i
+    GGG <- (1 - QQQ - cap.probs * (1 + (tau-1) * QQQ)) / (
+            cap.probs * (1-cap.probs)^2)
+    wz.pc <- GGG / (1 - QQQ) + 1 / cap.probs^2 + dA.dcapprobs
+    wz[, iam(1, 1, M = M)] <- wz.pc * dcapprobs.deta^2  # Efficient
 
 
 
-  mux.mat <- cbind(1, aux$y0i, aux$yr1i, aux$yr0i)
-  part1.cap <- mux.mat + cbind(ml..power.cap[, 1], 0, 0, 0)
-  part2.cap <- mux.mat + cbind(0, ml..power.cap[, 2], 0, 0)
-  part3.cap <- mux.mat + cbind(0, 0, ml..power.cap[, 3], 0)
-  part4.cap <- mux.mat + cbind(0, 0, 0, ml..power.cap[, 4])
 
 
+    wz.pr <- (tau - (1 - QQQ) / cap.probs) / (
+              rec.probs * (1 - rec.probs) * (1 - QQQ))
+    wz[, iam(2, 2, M = M)] <- wz.pr * drecprobs.deta^2
 
+  
 
-  list(   y.all       =  y.all,
-          part1.cap   =  part1.cap,
-          part2.cap   =  part2.cap,
-          part3.cap   =  part3.cap,
-          part4.cap   =  part4.cap,
 
-          part1.rec   =  part1.rec,
-          part2.rec   =  part2.rec,
-          part3.rec   =  part3.rec,
-          part4.rec   =  part4.rec,
-          l.konst.cap =    l.konst.cap,
-          l.power.cap =    l.power.cap,
-        ml..konst.cap =  ml..konst.cap,
-        ml..power.cap =  ml..power.cap,
-          l.konst.rec =    l.konst.rec,
-          l.power.rec =    l.power.rec,
-        ml..konst.rec =  ml..konst.rec,
-        ml..power.rec =  ml..power.rec)
+    wz <- c(w) * wz
+    wz
+  }), list( .link = link, .earg = earg ))))
 }
 
 
 
 
+
  posbernoulli.tb <-
   function(link = "logit",
-           parallel.t = FALSE,
-           parallel.b = FALSE,
-           apply.parint = FALSE,
+           parallel.t = FALSE ~  1,
+           parallel.b = FALSE ~  0,
+           drop.b     = FALSE ~  1,
+           type.fitted = c("likelihood.cond", "mean.uncond"),
            imethod = 1,
            iprob = NULL,
-           dconst = 0.1,
-           dpower = -2) {
-
+           p.small = 1e-4, no.warning = FALSE,  
+           ridge.constant = 0.01,
+           ridge.power = -4) {
 
 
 
+  apply.parint.t <- FALSE
+  apply.parint.b <- TRUE
+  apply.parint.d <- FALSE  # For 'drop.b' actually.
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  type.fitted <- match.arg(type.fitted,
+                           c("likelihood.cond", "mean.uncond"))[1]
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
+      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
+
+  if (!is.Numeric(ridge.constant) ||
+      ridge.constant < 0)
+    warning("argument 'ridge.constant' should be non-negative")
+  if (!is.Numeric(ridge.power) ||
+      ridge.power > 0)
+    warning("argument 'ridge.power' should be non-positive")
+
+
   if (length(iprob))
     if (!is.Numeric(iprob, positive = TRUE) ||
           max(iprob) >= 1)
       stop("argument 'iprob' must have values in (0, 1)")
 
-  if (!is.logical(parallel.t) ||
-      length(parallel.t) != 1)
-    stop("argument 'parallel.t' must be a single logical")
 
-  if (!is.logical(parallel.b) ||
-      length(parallel.b) != 1)
-    stop("argument 'parallel.b' must be a single logical")
-
-  if (!is.logical(apply.parint) ||
-      length(apply.parint) != 1)
-    stop("argument 'apply.parint' must be a single logical")
+  if (!is.Numeric(p.small, positive = TRUE, length.arg = 1))
+    stop("bad input for argument 'p.small'")
 
 
+  
   new("vglmff",
-  blurb = c("(Multiple) positive-Bernoulli (capture-recapture) model\n",
-            "with temporal and behavioural effects (M_{tb})\n\n",
+  blurb = c("Positive-Bernoulli (capture-recapture) model\n",
+            "with temporal and behavioural effects (M_{tb}/M_{tbh})\n\n",
             "Links:    ",
-            namesof("cap.prob.1",     link, earg = earg, tag = FALSE), ", ",
-            namesof("cap.prob.2",     link, earg = earg, tag = FALSE), ", ",
-            ", ...,\n",
-            namesof("cap.prob.tau",   link, earg = earg, tag = FALSE), ", ",
-            namesof("recap.prob.2",   link, earg = earg, tag = FALSE),
-            ", ...,\n",
-            namesof("recap.prob.tau", link, earg = earg, tag = FALSE),
-            "\n"),
+            namesof("pcapture.1",     link, earg = earg, tag = FALSE),
+            ", ..., ",
+            namesof("pcapture.tau",   link, earg = earg, tag = FALSE), ", ",
+            namesof("precapture.2",   link, earg = earg, tag = FALSE),
+            ", ..., ",
+            namesof("precapture.tau", link, earg = earg, tag = FALSE)),
   constraints = eval(substitute(expression({
+ 
 
-    tmp8.mat <- cbind(c(1, rep(0, len = 2*(tau-1))),
-                      rbind(rep(0, len = tau-1), diag(tau-1), diag(tau-1)))
-    tmp9.mat <- cbind(c(rep(0, len = tau), rep(1, len = tau-1)))
-
-    cmk_tb <- if ( .parallel.t ) matrix(1, M, 1) else tmp8.mat
-
-    cm1_tb <-
-      if ( ( .parallel.t ) &&  ( .parallel.b )) matrix(1, M, 1) else
-      if ( ( .parallel.t ) && !( .parallel.b )) cbind(1, tmp9.mat) else
-      if (!( .parallel.t ) &&  ( .parallel.b )) tmp8.mat else
-      if (!( .parallel.t ) && !( .parallel.b )) cbind(tmp8.mat, tmp9.mat)
-
-
-    constraints <- cm.vgam(cmk_tb, x = x,
+    constraints.orig <- constraints
+    cm1.d <-
+    cmk.d <- matrix(0, M, 1)  # All 0s inside
+    con.d <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .drop.b ,
+                           constraints = constraints.orig,
+                           apply.int = .apply.parint.d ,  # FALSE,  
+                           cm.default           = cmk.d,
+                           cm.intercept.default = cm1.d)
+   
+
+
+    cm1.t <-
+    cmk.t <- rbind(diag(tau), diag(tau)[-1, ])  # More readable
+    con.t <- cm.vgam(matrix(1, M, 1), x = x,
                            bool = .parallel.t ,  # Same as .parallel.b
-                           constraints = constraints,
-                           apply.int = .apply.parint ,  # FALSE,  
-                           cm.default = cmk_tb,
-                           cm.intercept.default = cm1_tb)
+                           constraints = constraints.orig,
+                           apply.int = .apply.parint.t ,  # FALSE,  
+                           cm.default           = cmk.t,
+                           cm.intercept.default = cm1.t)
+   
+    
+
+    cm1.b <-
+    cmk.b <- rbind(matrix(0, tau, tau-1), diag(tau-1))
+    con.b <- cm.vgam(matrix(c(rep(0, len = tau  ),
+                              rep(1, len = tau-1)), M, 1), x = x,
+                           bool = .parallel.b ,  # Same as .parallel.b
+                           constraints = constraints.orig,
+                           apply.int = .apply.parint.b ,  # FALSE,  
+                           cm.default           = cmk.b,
+                           cm.intercept.default = cm1.b)
+   
+    con.use <- con.b
+    con.names <- names(con.use)
+    for (klocal in 1:length(con.b)) {
+      con.use[[klocal]] <-
+        cbind(if (any(con.d[[klocal]] == 1)) NULL else con.b[[klocal]],
+              con.t[[klocal]])
+
+    }
 
+    
+    constraints <- con.use
+    
   }), list( .parallel.t = parallel.t,
             .parallel.b = parallel.b,
-            .apply.parint = apply.parint ))),
+            .drop.b     = drop.b,
+            .apply.parint.b = apply.parint.b,
+            .type.fitted    = type.fitted,
+            .apply.parint.d = apply.parint.d,
+            .apply.parint.t = apply.parint.t ))),
   infos = eval(substitute(function(...) {
     list(Musual = 2,
-         multipleResponses = TRUE,
-         imethod = .imethod ,
-         dconst  = .dconst ,
-         dpower  = .dpower ,
-         apply.parint = .apply.parint ,
-         parallel.t = .parallel.t ,
-         parallel.b = .parallel.b )
-  }, list( .parallel.t = parallel.t,
-           .parallel.b = parallel.b,
-           .imethod = imethod,
-           .dconst = dconst,
-           .dpower = dpower,
-           .apply.parint = apply.parint ))),
+         multipleResponses  = TRUE,
+         ridge.constant     = .ridge.constant ,
+         ridge.power        = .ridge.power ,
+         drop.b             = .drop.b,
+         imethod            = .imethod ,
+         type.fitted        = .type.fitted ,
+         p.small    = .p.small ,
+         no.warning = .no.warning ,
+         apply.parint.b     = .apply.parint.b ,
+         apply.parint.t     = .apply.parint.t ,
+         parallel.t         = .parallel.t ,
+         parallel.b         = .parallel.b )
+  }, list( .parallel.t         = parallel.t,
+           .parallel.b         = parallel.b,
+           .drop.b             = drop.b,
+           .type.fitted        = type.fitted,
+           .p.small    = p.small,
+           .no.warning = no.warning,
+           .imethod            = imethod,
+           .ridge.constant     = ridge.constant,
+           .ridge.power        = ridge.power,
+           .apply.parint.b     = apply.parint.b,
+           .apply.parint.t     = apply.parint.t ))),
 
   initialize = eval(substitute(expression({
     Musual <- 2  # Not quite true
 
 
-
     if (ncol(cbind(w)) > 1)
       stop("variable 'w' should be a vector or one-column matrix")
     w <- c(w)  # Make it a vector
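
The constraints code in the hunk above assembles the default constraint matrices for the M_{tb} model from a temporal block (cmk.t) and a behavioural block (cmk.b), dropping the behavioural block when 'drop.b' applies, and then column-binds the pieces per term. A minimal sketch of those defaults, assuming tau = 3 sampling occasions so that M = 2*tau - 1 = 5 (the value of tau is chosen only for illustration):

    tau <- 3
    cmk.t <- rbind(diag(tau), diag(tau)[-1, ])              # temporal block, 5 x 3
    cmk.b <- rbind(matrix(0, tau, tau - 1), diag(tau - 1))  # behavioural block, 5 x 2
    cbind(cmk.b, cmk.t)   # one way the blocks are combined, cf. con.use above
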
@@ -1972,22 +2051,25 @@ posbern.aux <- function(tau) {
     extra$ncoly   <- ncoly <- ncol(y)
     extra$orig.w  <- w
     extra$ycounts <- y
+    extra$type.fitted <- .type.fitted
+    extra$dimnamesy <- dimnames(y)
     M <- Musual * tau - 1  # recap.prob.1 is unused
 
 
-    if (!(ncoly %in% 2:3))
-      stop("the response currently must be a two- or three-column matrix")
+    mustart <- (y + matrix(apply(y, 2, weighted.mean, w = w),
+                           n, tau, byrow = TRUE)) / 2
+    mustart[mustart < 0.01] <- 0.01
+    mustart[mustart > 0.99] <- 0.99
 
+    mustart <- cbind(mustart, mustart[, -1])
 
 
-    mustart <- matrix(c(weighted.mean(y[, 1], w),
-                        weighted.mean(y[, 2], w),
-                        if (tau == 3) weighted.mean(y[, 3], w) else NULL),
-                      n, tau, byrow = TRUE)
-    mustart[mustart == 0] <- 0.05
-    mustart[mustart == 1] <- 0.95
 
+ 
+    extra$p.small    <- .p.small
+    extra$no.warning <- .no.warning
 
+   
 
 
 
@@ -1995,63 +2077,104 @@ posbern.aux <- function(tau) {
       stop("response must contain 0s and 1s only")
 
 
-    tmp3 <- aux.posbernoulli(y)
+    tmp3 <- aux.posbernoulli.t(y)
     cap.hist1  <- extra$cap.hist1  <- tmp3$cap.hist1
-    if (tau > 2) {
-      yindex <- 4 * y[, 1] + 2 * y[, 2] + 1 * y[, 3]
-      if (length(table(yindex)) != 2^tau - 1)
-        warning("there should be ", 2^tau - 1, " patterns of 0s and 1s ",
-                "in the response matrix. May crash.")
-
-    }
+    
 
-
-    dn2.cap   <- paste("cap.prob.",   1:ncoly, sep = "")
-    dn2.recap <- paste("recap.prob.", 2:ncoly, sep = "")
+    dn2.cap   <- paste("pcapture.",   1:ncoly, sep = "")
+    dn2.recap <- paste("precapture.", 2:ncoly, sep = "")
 
     predictors.names <- c(
       namesof(dn2.cap,   .link , earg = .earg, short = TRUE),
       namesof(dn2.recap, .link , earg = .earg, short = TRUE))
 
+
     if (length(extra)) extra$w <- w else extra <- list(w = w)
 
     if (!length(etastart)) {
-      if ( .imethod == 1) {
-
-
-        mu.init <- if (length( .iprob ))
-                     matrix( .iprob , n, M, byrow = TRUE) else
-                   if (length(mustart.orig))
-                     matrix(rep(mustart.orig, length = n * M), n, M) else
-                     matrix(rep(mustart, length = n * M), n, M)
-        etastart <- theta2eta(mu.init, .link , earg = .earg ) # n x M
-      } else {
-        mu.init <- matrix(runif(n * M), n, M)
-        etastart <- theta2eta(mu.init, .link , earg = .earg ) # n x M
-      }
+      mu.init <-
+        if ( .imethod == 1) {
+          if (length( .iprob ))
+            matrix( .iprob , n, M, byrow = TRUE) else
+          if (length(mustart.orig))
+            matrix(rep(mustart.orig, length = n * M), n, M) else
+            mustart  # Already n x M
+        } else {
+          matrix(runif(n * M), n, M)
+        }
+      etastart <- theta2eta(mu.init, .link , earg = .earg )  # n x M
     }
     mustart <- NULL
   }), list( .link = link, .earg = earg,
+            .type.fitted = type.fitted,
+            .p.small    = p.small,
+            .no.warning = no.warning,
             .iprob = iprob,
             .imethod = imethod ))),
+
   linkinv = eval(substitute(function(eta, extra = NULL) {
     tau <- extra$ncoly
+    taup1 <- tau + 1
     probs <- eta2theta(eta, .link , earg = .earg )
     prc <- probs[, 1:tau]
-    prr <- cbind(0, probs[, (1+tau):ncol(probs)])  # 1st coln ignored
-
-    probs.numer <- cbind(probs[, 1],
-                         ifelse(extra$cap.hist1[, 2] == 1, prr[, 2], prc[, 2]))
-
-    if (tau == 3)
-      probs.numer <- cbind(probs.numer,
-                           ifelse(extra$cap.hist1[, 3] == 1, prr[, 3], prc[, 3]))
+    prr <- cbind(0,  # == pr1.ignored
+                 probs[, taup1:ncol(probs)])  # 1st coln ignored
 
     logQQQ <- rowSums(log1p(-prc))
     QQQ <- exp(logQQQ)
     AAA <- exp(log1p(-QQQ))  # 1 - QQQ
-    probs.numer / AAA
-  }, list( .link = link, .earg = earg ))),
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning 'likelihood.cond'.")
+                     "likelihood.cond"
+                   }
+
+  type.fitted <- match.arg(type.fitted,
+                           c("likelihood.cond", "mean.uncond"))[1]
+
+
+
+    if ( type.fitted == "likelihood.cond") {
+      probs.numer <- prr 
+      mat.index <- cbind(1:nrow(prc), extra$cap1)
+      probs.numer[mat.index] <- prc[mat.index]
+      probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
+      fv <- probs.numer / AAA
+    } else {
+      fv <- matrix(prc[, 1] / AAA, nrow(prc), ncol(prc))
+
+      fv[, 2] <- (prc[, 2] + prc[, 1] * (prr[, 2] - prc[, 2])) / AAA
+
+      if (tau >= 3) {
+        QQQcummat <- exp(t( apply(log1p(-prc), 1, cumsum)))
+        for (jay in 3:tau) {
+          sum1 <- prc[, 1]
+          for (kay in 2:(jay-1))
+            sum1 <- sum1 + prc[, kay] * QQQcummat[, kay-1]
+          fv[, jay] <- prc[, jay] * QQQcummat[, jay-1] +
+                       prr[, jay] * sum1
+        }
+        fv[, 3:tau] <- fv[, 3:tau] / AAA
+      }
+    }
+
+
+
+    ans <- fv
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .link = link,
+           .earg = earg ))),
   last = eval(substitute(expression({
     extra$w   <- NULL   # Kill it off 
 
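
The new "mean.uncond" branch of linkinv above returns, for each occasion j, the expected capture indicator for an animal caught at least once, averaging over its possible earlier capture history rather than conditioning on the observed one. A worked sketch for a single animal with tau = 3 and made-up probabilities (prc and prr are assumed values, not fitted ones):

    prc <- c(0.3, 0.4, 0.5)   # first-capture probabilities
    prr <- c(NA, 0.6, 0.7)    # recapture probabilities; prr[1] is unused
    QQQ <- prod(1 - prc)      # probability of never being captured
    AAA <- 1 - QQQ
    fv <- c(prc[1],
            prc[2] + prc[1] * (prr[2] - prc[2]),
            prc[3] * (1 - prc[1]) * (1 - prc[2]) +
            prr[3] * (prc[1] + prc[2] * (1 - prc[1]))) / AAA
    fv   # matches fv[, 1:3] as computed by the loop above
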
@@ -2061,84 +2184,86 @@ posbern.aux <- function(tau) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
 
-    misc$mv       <- TRUE
-    misc$iprob    <- .iprob
+    misc$mv           <- TRUE
+    misc$iprob        <- .iprob
+
 
 
-    R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+    R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
     R[lower.tri(R)] <- 0
     tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
                                R = R, w = w,
-                               X_vlm = X_vlm_save, Hlist = constraints,
+                               X.vlm = X.vlm.save, Hlist = constraints,
                                extra = extra, model.type = "tb")
     extra$N.hat    <- tmp6$N.hat
     extra$SE.N.hat <- tmp6$SE.N.hat
 
 
-    misc$parallel.t   <- .parallel.t
-    misc$parallel.b   <- .parallel.b
-
-
-    misc$dconst <- .dconst
-    misc$dpower <- .dpower
-    misc$working.ridge  <- c(rep(adjustment.posbern_tb, length = tau),
-                             rep(0,                     length = tau-1))
-
-    misc$apply.parint <- .apply.parint
+    misc$drop.b             <- .drop.b
+    misc$parallel.t         <- .parallel.t
+    misc$parallel.b         <- .parallel.b
+    misc$apply.parint.b     <- .apply.parint.b
+    misc$apply.parint.t     <- .apply.parint.t
+    misc$ridge.constant <- .ridge.constant
+    misc$ridge.power    <- .ridge.power
 
   }), list( .link = link, .earg = earg,
-            .apply.parint = apply.parint,
+            .apply.parint.b = apply.parint.b,
+            .apply.parint.t = apply.parint.t,
             .parallel.t = parallel.t,
             .parallel.b = parallel.b,
-            .dconst = dconst,
-            .dpower = dpower,
+            .drop.b     = drop.b,
+            .ridge.constant = ridge.constant,
+            .ridge.power = ridge.power,
             .iprob = iprob ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
 
     tau <- extra$ncoly
+    taup1 <- tau + 1
     ycounts <- y
     use.orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
 
     probs <- eta2theta(eta, .link , earg = .earg )
     prc <- probs[, 1:tau]
-    prr <- cbind(0, probs[, (1+tau):ncol(probs)])  # 1st coln ignored
+
+    prr <- cbind(0,  # pr1.ignored
+                 probs[, taup1:ncol(probs)])  # 1st coln ignored
+
 
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
 
-    probs.numer <- cbind(probs[, 1],
-                         ifelse(extra$cap.hist1[, 2] == 1, prr[, 2], prc[, 2]))
-    if (tau == 3)
-      probs.numer <- cbind(probs.numer,
-                           ifelse(extra$cap.hist1[, 3] == 1, prr[, 3], prc[, 3]))
+      probs.numer <- prr 
+      mat.index <- cbind(1:nrow(prc), extra$cap1)
+      probs.numer[mat.index] <- prc[mat.index]
+      probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
 
       sum(use.orig.w *
-          dposbern(x = ycounts, # size = 1, # Bernoulli trials
-                   prob = probs.numer, prob0 = prc, log = TRUE))
+            dposbern(x = ycounts,  # size = 1,  # Bernoulli trials
+                     prob = probs.numer, prob0 = prc, log = TRUE))
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("posbernoulli.tb"),
   deriv = eval(substitute(expression({
     tau <- extra$ncoly
+    taup1 <- tau + 1
     probs <- eta2theta(eta, .link , earg = .earg )
-    prc <- probs[, 1:tau]
-    prr <- cbind(0, probs[, (1+tau):ncol(probs)])  # 1st coln ignored
 
+    prc <- probs[, 1:tau]
+    prr <- cbind(pr1.ignored = 0,
+                 probs[, taup1:ncol(probs)])  # 1st coln ignored
     logQQQ <- rowSums(log1p(-prc))
     QQQ <- exp(logQQQ)
-
+ 
+    
+    
     dprobs.deta <- dtheta.deta(probs, .link , earg = .earg )
-    dprc.deta <- dprobs.deta[, 1:tau]
-    dprr.deta <- cbind(0, dprobs.deta[, (1+tau):ncol(probs)])  # 1st coln ignored
-
     dQ.dprc   <- -QQQ / (1 - prc)
-
-
     d2Q.dprc <- array(0, c(n, tau, tau))
     for (jay in 1:(tau-1))
       for (kay in (jay+1):tau)
@@ -2146,122 +2271,64 @@ posbern.aux <- function(tau) {
         d2Q.dprc[, kay, jay] <-  QQQ / ((1 - prc[, jay]) *
                                         (1 - prc[, kay]))
 
-    if (tau == 2)
-    dl.dpr <-  cbind(y[, 1] / prc[, 1] - (1 - y[, 1]) / (1 - prc[, 1]) +
-                     dQ.dprc[, 1] / (1 - QQQ),
-                     (1 - y[, 1]) *
-                    (y[, 2] / prc[, 2] - (1 - y[, 2]) / (1 - prc[, 2])) +
-                     dQ.dprc[, 2] / (1 - QQQ),
-                          y[, 1]  *
-                    (y[, 2] / prr[, 2] - (1 - y[, 2]) / (1 - prr[, 2])))
-
-    if (tau == 3)
-    dl.dpr <-  cbind(y[, 1] / prc[, 1] - (1 - y[, 1]) / (1 - prc[, 1]) +
-                     dQ.dprc[, 1] / (1 - QQQ),
-
-                     (1 - extra$cap.hist1[, 2]) *  # (1 - y[, 1]) *
-                    (y[, 2] / prc[, 2] - (1 - y[, 2]) / (1 - prc[, 2])) +
-                     dQ.dprc[, 2] / (1 - QQQ),
-
-                     (1 - extra$cap.hist1[, 3]) *  # (1 - y[, 1]) * (1 - y[, 2]) *
-                     y[, 3] / prc[, 3] +
-                     dQ.dprc[, 3] / (1 - QQQ),
-
-                     extra$cap.hist1[, 2] *  # y[, 1]  *
-                    (y[, 2] / prr[, 2] - (1 - y[, 2]) / (1 - prr[, 2])),
-
-                     extra$cap.hist1[, 3] *
-                    (y[, 3] / prr[, 3] - (1 - y[, 3]) / (1 - prr[, 3]))
-                    )
-
-    deriv.ans <- c(w) * dl.dpr * dprobs.deta
+    dl.dpc <- dl.dpr <- matrix(0, n, tau)  # First coln of dl.dpr is ignored
+    for (jay in 1:tau) {
+      dl.dpc[, jay] <- (1 - extra$cap.hist1[, jay]) *
+        (    y[, jay]  /      prc[, jay]   -
+        (1 - y[, jay]) / (1 - prc[, jay])) +
+            dQ.dprc[, jay] / (1 - QQQ)
+    }
+    for (jay in 2:tau) {
+      dl.dpr[, jay] <- extra$cap.hist1[, jay] *
+        (    y[, jay]  /      prr[, jay] -
+        (1 - y[, jay]) / (1 - prr[, jay]))
+    }
 
+    deriv.ans <- c(w) * cbind(dl.dpc, dl.dpr[, -1]) * dprobs.deta
     deriv.ans
-  }), list( .link = link, .earg = earg ))),
+  }), list( .link = link,
+            .earg = earg ))),
 
   weight = eval(substitute(expression({
     wz <- matrix(0, n, sum(M:(M - (tau - 1))))
 
-    cindex <- iam(NA, NA, M = M, both = TRUE)
-    cindex$row.index <- rep(cindex$row.index, length = ncol(wz))
-    cindex$col.index <- rep(cindex$col.index, length = ncol(wz))
 
 
-    if (tau == 2) {
-      wz[, iam(1, 1, M = M)] <-
-               (1 - prc[, 1] * (1 - prc[, 2])) / (prc[, 1] * (1 - prc[, 1]) *
-               (1 - QQQ)) -
-              ((1 - prc[, 2]) / (1 - QQQ))^2
-      wz[, iam(1, 1, M = M)] <- wz[, iam(1, 1, M = M)] * dprc.deta[, 1]^2
-
-      wz[, iam(2, 2, M = M)] <- 
-              (prc[, 1] * (1 - prc[, 1]) / (prc[, 2] * (1 - QQQ)^2)) *
-               dprc.deta[, 2]^2
-
-      wz[, iam(3, 3, M = M)] <-
-              (prc[, 1] / (prr[, 2] * (1 - prr[, 2]) * (1 - QQQ))) *
-               dprr.deta[, 2]^2
+    QQQcummat <- exp(t( apply(log1p(-prc), 1, cumsum)))
+    wz.pc <- (QQQcummat / prc - QQQ / (1 - QQQ)) / ((1 - QQQ) *
+              (1 - prc)^2)
+    wz[, 1:tau] <- wz.pc
   
-      wz[, iam(1, 2, M = M)] <- -dprc.deta[, 1] * dprc.deta[, 2] / (1 - QQQ)^2
-    } else if (tau == 3) {
-
-      wz[, iam(1, 1, M = M)] <-
-        ((1 - prc[, 2]) * prc[, 3] + prc[, 2]) / ((1 - prc[, 1]) * (1 - QQQ)) +
-         1 / (prc[, 1] * (1 - QQQ)) -
-        (dQ.dprc[, 1] / (1 - QQQ))^2
-
 
-      wz[, iam(2, 2, M = M)] <- 
-        (1 - prc[, 1]) * (1 - prc[, 2] * (1 - prc[, 3])) / (
-         prc[, 2] * (1 - prc[, 2]) * (1 - QQQ)) -
-        (dQ.dprc[, 2] / (1 - QQQ))^2
-
-
-      wz[, iam(3, 3, M = M)] <-
-        (1 - prc[, 1]) * (1 - prc[, 2]) / (prc[, 3] * (1 - QQQ)) -
-        (dQ.dprc[, 3] / (1 - QQQ))^2
-
-
-      wz[, iam(4, 4, M = M)] <-
-        prc[, 1] / (prr[, 2] * (1 - prr[, 2]) * (1 - QQQ))
+    wz.pr <- as.matrix((1 - QQQcummat / (1 - prc)) / (
+                        prr * (1 - prr) * (1 - QQQ)))
+    wz[, taup1:M] <- wz.pr[, -1]
   
 
-      wz[, iam(5, 5, M = M)] <-
-        (prc[, 1] + prc[, 2] * (1 - prc[, 1])) / (
-         prr[, 3] * (1 - prr[, 3]) * (1 - QQQ))
-
-
-      for (jay in 1:(tau-1))
-        for (kay in (jay+1):tau)
-          wz[, iam(jay, kay, M = M)] <-
-            -(d2Q.dprc[, jay, kay] +
-               dQ.dprc[, jay] *
-               dQ.dprc[, kay] / (1 - QQQ)) / (1 - QQQ)
-
-
-      wz <- wz * dprobs.deta[, cindex$row.index] *
-                 dprobs.deta[, cindex$col.index]
-
-
-    } else {
-      stop("tau must equal 2 or 3")
-    }
-
+    for (jay in 1:(tau-1))
+      for (kay in (jay+1):tau)
+        wz[, iam(jay, kay, M = M)] <-
+          -(d2Q.dprc[, jay, kay] +
+             dQ.dprc[, jay] *
+             dQ.dprc[, kay] / (1 - QQQ)) / (1 - QQQ)
 
-    adjustment.posbern_tb <- .dconst * iter^( .dpower )
 
+    cindex <- iam(NA, NA, M = M, both = TRUE)
+    cindex$row.index <- cindex$row.index[1:ncol(wz)]
+    cindex$col.index <- cindex$col.index[1:ncol(wz)]
 
-     for (jay in 1:tau)
-      wz[, iam(jay, jay, M = M)] <- wz[, iam(jay, jay, M = M)] +
-                                    adjustment.posbern_tb
+    wz <- wz * dprobs.deta[, cindex$row.index] *
+               dprobs.deta[, cindex$col.index]
 
 
+      wz.mean <- mean(wz[, 1:tau])
+      wz.adjustment <- wz.mean * .ridge.constant * iter^( .ridge.power )
+      wz[, 1:tau] <- wz[, 1:tau] + wz.adjustment
 
     c(w) * wz
   }), list( .link = link, .earg = earg,
-            .dconst = dconst,
-            .dpower = dpower
-          ))))
+            .ridge.constant = ridge.constant,
+            .ridge.power = ridge.power ))))
 }
 
 
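
In the weight slot above, the fixed 'dconst'/'dpower' ridge has been replaced by an adjustment scaled by the mean of the first tau diagonal working weights and decaying with the iteration number, controlled by the new 'ridge.constant' and 'ridge.power' arguments. A toy sketch of the decay schedule, using the new defaults 0.01 and -4 and an assumed value of wz.mean:

    ridge.constant <- 0.01
    ridge.power    <- -4
    wz.mean <- 0.2   # hypothetical mean of wz[, 1:tau]
    sapply(1:5, function(iter) wz.mean * ridge.constant * iter^ridge.power)
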
@@ -2272,3 +2339,4 @@ posbern.aux <- function(tau) {
 
 
 
+
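
Putting the posbernoulli.tb() changes together: 'parallel.t', 'parallel.b' and 'drop.b' now take formulas rather than single logicals, the working-weight ridge is controlled by 'ridge.constant' and 'ridge.power', and 'type.fitted' selects the fitted-value definition. A hedged usage sketch on simulated capture histories (not a documented example; it assumes this version of VGAM is installed and that an intercept-only fit converges on such data):

    library(VGAM)
    set.seed(123)
    n <- 500; tau <- 4
    ymat <- matrix(rbinom(n * tau, size = 1, prob = 0.3), n, tau)
    ymat <- ymat[rowSums(ymat) > 0, ]   # keep animals captured at least once
    fit <- vglm(ymat ~ 1,
                posbernoulli.tb(parallel.t = FALSE ~ 1, parallel.b = FALSE ~ 0,
                                ridge.constant = 0.01, ridge.power = -4,
                                type.fitted = "likelihood.cond"))
    fit@extra$N.hat   # estimated population size
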
diff --git a/R/family.qreg.R b/R/family.qreg.R
index d7e1279..f4ebc85 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -51,7 +51,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
   lsigma <- attr(esigma, "function.name")
 
 
-  if (!is.Numeric(tol0, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(tol0, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'tol0'")
 
   if (!is.Numeric(ilambda))
@@ -122,9 +122,9 @@ lms.yjn.control <- function(trace = TRUE, ...)
       eta[, 2] <- eta2theta(eta[, 2], .lmu,     earg = .emu)
       eta[, 3] <- eta2theta(eta[, 3], .lsigma,  earg = .esigma)
       if ( .expectiles ) {
-        explot.lms.bcn(percentiles= .percentiles, eta = eta)
+        explot.lms.bcn(percentiles = .percentiles, eta = eta)
       } else {
-        qtplot.lms.bcn(percentiles= .percentiles, eta = eta)
+        qtplot.lms.bcn(percentiles = .percentiles, eta = eta)
       }
   }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
            .elambda = elambda, .emu = emu, .esigma = esigma, 
@@ -291,7 +291,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
     eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
     eta[, 2] <- eta2theta(eta[, 2], .lmu,     earg = .emu)
     eta[, 3] <- eta2theta(eta[, 3], .lsigma,  earg = .esigma)
-    qtplot.lms.bcg(percentiles= .percentiles, eta = eta)
+    qtplot.lms.bcg(percentiles = .percentiles, eta = eta)
   }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
            .elambda = elambda, .emu = emu, .esigma = esigma, 
            .percentiles = percentiles ))),
@@ -380,8 +380,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
 dy.dpsi.yeojohnson <- function(psi, lambda) {
 
     L <- max(length(psi), length(lambda))
-    psi <- rep(psi, length.out = L);
-    lambda <- rep(lambda, length.out = L);
+    if (length(psi)    != L) psi    <- rep(psi,    length.out = L)
+    if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
 
     ifelse(psi > 0, (1 + psi * lambda)^(1/lambda - 1),
                     (1 - (2-lambda) * psi)^((lambda - 1) / (2-lambda)))
@@ -390,8 +390,8 @@ dy.dpsi.yeojohnson <- function(psi, lambda) {
 
 dyj.dy.yeojohnson <- function(y, lambda) {
     L <- max(length(y), length(lambda))
-    y <- rep(y, length.out = L);
-    lambda <- rep(lambda, length.out = L);
+    if (length(y)      != L) y      <- rep(y,      length.out = L)
+    if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
 
     ifelse(y>0, (1 + y)^(lambda - 1), (1 - y)^(1 - lambda))
 }
@@ -401,19 +401,19 @@ dyj.dy.yeojohnson <- function(y, lambda) {
                         epsilon = sqrt(.Machine$double.eps),
                         inverse = FALSE) {
 
-    if (!is.Numeric(derivative, allowable.length = 1,
+    if (!is.Numeric(derivative, length.arg = 1,
                     integer.valued = TRUE) ||
         derivative < 0)
       stop("argument 'derivative' must be a non-negative integer")
 
     ans <- y
-    if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE))
+    if (!is.Numeric(epsilon, length.arg = 1, positive = TRUE))
       stop("argument 'epsilon' must be a single positive number")
     L <- max(length(lambda), length(y))
     if (length(y) != L)
       y <- rep(y, length.out = L)
     if (length(lambda) != L)
-      lambda <- rep(lambda, length.out = L) # lambda may be of length 1
+      lambda <- rep(lambda, length.out = L)  # lambda may be of length 1
 
     if (inverse) {
         if (derivative != 0)
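
The transformation function edited above (yeo.johnson() in the package) now validates 'derivative' and 'epsilon' with the renamed 'length.arg' argument and recycles 'lambda' only when its length differs; the transformation itself is unchanged. A quick round-trip check, assuming the package is attached:

    library(VGAM)
    y <- c(-2, -0.5, 0, 1, 3)
    psi <- yeo.johnson(y, lambda = 0.5)                               # forward
    all.equal(y, yeo.johnson(psi, lambda = 0.5, inverse = TRUE))      # TRUE
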
@@ -462,18 +462,18 @@ dyj.dy.yeojohnson <- function(y, lambda) {
 dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
                             derivative = 0, smallno = 1.0e-8) {
 
-    if (!is.Numeric(derivative, allowable.length = 1,
+    if (!is.Numeric(derivative, length.arg = 1,
                     integer.valued = TRUE) ||
         derivative < 0)
       stop("argument 'derivative' must be a non-negative integer")
-    if (!is.Numeric(smallno, allowable.length = 1, positive = TRUE))
+    if (!is.Numeric(smallno, length.arg = 1, positive = TRUE))
       stop("argument 'smallno' must be a single positive number")
 
     L <- max(length(psi), length(lambda), length(mymu), length(sigma))
-    if (length(psi) != L) psi <- rep(psi, length.out = L)
+    if (length(psi)    != L) psi    <- rep(psi,    length.out = L)
     if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
-    if (length(mymu) != L) mymu <- rep(mymu, length.out = L)
-    if (length(sigma) != L) sigma <- rep(sigma, length.out = L)
+    if (length(mymu)   != L) mymu   <- rep(mymu,   length.out = L)
+    if (length(sigma)  != L) sigma  <- rep(sigma,  length.out = L)
 
     answer <- matrix(as.numeric(NA), L, derivative+1)
     CC <- psi >= 0
@@ -801,7 +801,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
 
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
         psi <- rnorm(n, mymu, sigma)
         ysim <- yeo.johnson(y=psi, lam=lambda, inv = TRUE)
         d1 <- yeo.johnson(ysim, lambda, deriv = 1)
@@ -855,7 +855,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
 
 
 
-  rule <- rule[1] # Number of points (common) for all the quadrature schemes
+  rule <- rule[1]  # Number of points (common) for all the quadrature schemes
   if (rule != 5 && rule != 10)
     stop("only rule=5 or 10 is supported")
 
@@ -1068,53 +1068,53 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
     LL <- pmin(discontinuity, 0)
     UU <- pmax(discontinuity, 0)
     if (FALSE) {
-        AA <- (UU-LL)/2
-        for(kk in 1:length(gleg.wts)) {
-          temp1 <- AA * gleg.wts[kk] 
-          abscissae <- (UU+LL)/2 + AA * gleg.abs[kk]
-          psi <- mymu + sqrt(2) * sigma * abscissae
-          temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
-                                   derivative = 2)
-          temp9 <- cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
-
-          wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + temp1 *
+      AA <- (UU-LL)/2
+      for (kk in 1:length(gleg.wts)) {
+        temp1 <- AA * gleg.wts[kk] 
+        abscissae <- (UU+LL)/2 + AA * gleg.abs[kk]
+        psi <- mymu + sqrt(2) * sigma * abscissae
+        temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
+                                  derivative = 2)
+        temp9 <- cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
+
+        wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + temp1 *
               gleg.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
-          wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + temp1 *
+        wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + temp1 *
               gleg.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
-          wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + temp1 *
+        wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + temp1 *
               gleg.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
-        }
-        } else {
-        temp9 = dotFortran(name = "yjngintf", as.double(LL),
+      }
+    } else {
+      temp9 <- .Fortran("yjngintf", as.double(LL),
                  as.double(UU),
                  as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
                  as.integer(length(gleg.abs)), as.double(lambda),
-                 as.double(mymu), as.double(sigma), answer=double(3*n),
-                     eps=as.double(1.0e-5))$ans
-            dim(temp9) <- c(3,n)
-            wz[,iam(1, 1, M)] <- temp9[1,]
-            wz[,iam(1, 2, M)] <- temp9[2,]
-            wz[,iam(1, 3, M)] <- temp9[3,]
-        }
+                 as.double(mymu), as.double(sigma), answer = double(3*n),
+                     eps=as.double(1.0e-5), PACKAGE = "VGAM")$ans
+      dim(temp9) <- c(3,n)
+      wz[,iam(1, 1, M)] <- temp9[1,]
+      wz[,iam(1, 2, M)] <- temp9[2,]
+      wz[,iam(1, 3, M)] <- temp9[3,]
+    }
 
 
 
-    for(kk in 1:length(sgh.wts)) {
+    for (kk in 1:length(sgh.wts)) {
 
-        abscissae <- sign(-discontinuity) * sgh.abs[kk]
-        psi <- mymu + sqrt(2) * sigma * abscissae   # abscissae = z
-        temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
+      abscissae <- sign(-discontinuity) * sgh.abs[kk]
+      psi <- mymu + sqrt(2) * sigma * abscissae   # abscissae = z
+      temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
                                  derivative = 2)
-        wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + sgh.wts[kk] * 
+      wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + sgh.wts[kk] * 
             gh.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
-        wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + sgh.wts[kk] * 
+      wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + sgh.wts[kk] * 
             gh.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
-        wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + sgh.wts[kk] * 
+      wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + sgh.wts[kk] * 
             gh.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
     }
 
     temp1 <- exp(-discontinuity^2)
-    for(kk in 1:length(glag.wts)) {
+    for (kk in 1:length(glag.wts)) {
       abscissae <- sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
       psi <- mymu + sqrt(2) * sigma * abscissae
       temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
@@ -1131,21 +1131,21 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
           glag.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
     }
 
-    wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] * dlambda.deta^2
-    wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] * dlambda.deta
-    wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] * dsigma.deta * dlambda.deta
+    wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dlambda.deta^2
+    wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dlambda.deta
+    wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dsigma.deta * dlambda.deta
     if ( .diagW && iter <= .iters.diagW) {
-        wz[,iam(1, 2, M)] <- wz[,iam(1, 3, M)] <- 0
+      wz[,iam(1, 2, M)] <- wz[, iam(1, 3, M)] <- 0
     }
-    wz[,iam(2, 3, M)] <- wz[,iam(2, 3, M)] * dsigma.deta
-    wz[,iam(3, 3, M)] <- wz[,iam(3, 3, M)] * dsigma.deta^2
+    wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsigma.deta
+    wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dsigma.deta^2
 
         c(w) * wz
   }), list(.lsigma = lsigma,
            .esigma = esigma, .elambda = elambda,
-           .rule=rule,
-           .diagW=diagW,
-           .iters.diagW=iters.diagW,
+           .rule = rule,
+           .diagW = diagW,
+           .iters.diagW = iters.diagW,
            .llambda = llambda))))
 }
 
@@ -1185,7 +1185,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
   } else {
     all.deviances <- numeric(M)
     myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
-    for(ii in 1:M)
+    for (ii in 1:M)
         all.deviances[ii] <- sum(c(w) * devi[, ii] *
                                  Wr1(myresid[, ii], w=extra$w.aml[ii]))
   }
@@ -1201,9 +1201,11 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
                        imethod = 1, digw = 4) {
 
 
+
+
   if (!is.Numeric(w.aml, positive = TRUE))
     stop("argument 'w.aml' must be a vector of positive values")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1, 2 or 3")
@@ -1223,7 +1225,9 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
             "Links:    ",
             namesof("expectile", link = lexpectile, earg = eexpectile)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
   }), list( .parallel = parallel ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     amlnormal.deviance(mu = mu, y = y, w = w, residuals = residuals,
@@ -1253,13 +1257,20 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
                earg = .eexpectile, tag = FALSE))
 
     if (!length(etastart)) {
-        mean.init <- if ( .imethod == 1)
-              rep(median(y), length = n) else
-            if ( .imethod == 2)
-              rep(weighted.mean(y, w), length = n) else {
-                  junk <- lm.wfit(x = x, y = c(y), w = c(w))
-                  junk$fitted
-            }
+      mean.init <-
+        if ( .imethod == 1)
+          rep(median(y), length = n) else
+        if ( .imethod == 2 || .imethod == 3)
+          rep(weighted.mean(y, w), length = n) else {
+              junk <- lm.wfit(x = x, y = c(y), w = c(w))
+              junk$fitted
+        }
+
+
+        if ( .imethod == 3)
+          mean.init <- abs(mean.init) + 0.01
+
+
         if (length( .iexpectile))
           mean.init <- matrix( .iexpectile, n, M, byrow = TRUE)
         etastart <-
@@ -1271,7 +1282,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
             .imethod = imethod, .digw = digw, .w.aml = w.aml ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     ans <- eta <- as.matrix(eta)
-    for(ii in 1:ncol(eta))
+    for (ii in 1:ncol(eta))
       ans[, ii] <- eta2theta(eta[, ii], .lexpectile, earg = .eexpectile)
     dimnames(ans) <- list(dimnames(eta)[[1]], extra$y.names)
     ans
@@ -1291,7 +1302,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
     misc$multipleResponses <- TRUE
 
 
-    for(ii in 1:M) {
+    for (ii in 1:M) {
         use.w <- if (M > 1 && ncol(cbind(w)) == M) w[, ii] else w
         extra$percentile[ii] <- 100 *
           weighted.mean(myresid[, ii] <= 0, use.w)
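
The loop above records, for each asymmetry weight, the sample percentile at which the fitted expectile curve sits. A hedged sketch of fitting several expectiles at once with amlnormal(), on simulated data (not taken from the package examples):

    library(VGAM)
    set.seed(1)
    adata <- data.frame(x2 = runif(200))
    adata <- transform(adata, y = 1 + 2 * x2 + rnorm(200, sd = 0.5))
    fit <- vglm(y ~ x2, amlnormal(w.aml = c(0.1, 1, 10), parallel = TRUE),
                data = adata)
    fit@extra$percentile   # approximate percentile attained by each expectile
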
@@ -1351,7 +1362,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
     } else {
         all.deviances <- numeric(M)
         myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
-        for(ii in 1:M) all.deviances[ii] <- 2 * sum(c(w) * devi[, ii] *
+        for (ii in 1:M) all.deviances[ii] <- 2 * sum(c(w) * devi[, ii] *
                                Wr1(myresid[, ii], w=extra$w.aml[ii]))
     }
     if (is.logical(extra$individual) && extra$individual)
@@ -1375,7 +1386,9 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
             " asymmetric maximum likelihood estimation\n\n",
             "Link:     ", namesof("expectile", link, earg = earg)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
   }), list( .parallel = parallel ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     amlpoisson.deviance(mu = mu, y = y, w = w, residuals = residuals,
@@ -1418,7 +1431,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
             .digw = digw, .w.aml = w.aml ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     mu.ans <- eta <- as.matrix(eta)
-    for(ii in 1:ncol(eta))
+    for (ii in 1:ncol(eta))
       mu.ans[, ii] <- eta2theta(eta[, ii], .link , earg = .earg )
     dimnames(mu.ans) <- list(dimnames(eta)[[1]], extra$y.names)
     mu.ans
@@ -1438,7 +1451,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
     names(misc$earg) <- names(misc$link)
 
     extra$percentile <- numeric(M)
-    for(ii in 1:M)
+    for (ii in 1:M)
       extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
     names(extra$percentile) <- names(misc$link)
 
@@ -1504,7 +1517,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
     } else {
       all.deviances <- numeric(M)
       myresid <- matrix(y,extra$n,extra$M) - matrix(mu,extra$n,extra$M)
-      for(ii in 1:M) all.deviances[ii] <- sum(c(w) * devi[, ii] *
+      for (ii in 1:M) all.deviances[ii] <- sum(c(w) * devi[, ii] *
                              Wr1(myresid[, ii], w=extra$w.aml[ii]))
     }
     if (is.logical(extra$individual) && extra$individual)
@@ -1529,7 +1542,9 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
             "asymmetric maximum likelihood estimation\n\n",
             "Link:     ", namesof("expectile", link, earg = earg)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
   }), list( .parallel = parallel ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     amlbinomial.deviance(mu = mu, y = y, w = w, residuals = residuals,
@@ -1587,7 +1602,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
             .digw = digw, .w.aml = w.aml ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     mu.ans <- eta <- as.matrix(eta)
-    for(ii in 1:ncol(eta))
+    for (ii in 1:ncol(eta))
       mu.ans[, ii] <- eta2theta(eta[, ii], .link , earg = .earg )
     dimnames(mu.ans) <- list(dimnames(eta)[[1]], extra$y.names)
     mu.ans
@@ -1605,7 +1620,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
     misc$expected <- TRUE
 
     extra$percentile <- numeric(M)
-    for(ii in 1:M)
+    for (ii in 1:M)
       extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
     names(extra$percentile) <- names(misc$link)
 
@@ -1659,7 +1674,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
   } else {
     all.deviances <- numeric(M)
     myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
-    for(ii in 1:M) all.deviances[ii] = 2 * sum(c(w) *
+    for (ii in 1:M) all.deviances[ii] = 2 * sum(c(w) *
                            (devy[, ii] - devi[, ii]) *
                            Wr1(myresid[, ii], w=extra$w.aml[ii]))
   }
@@ -1674,7 +1689,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
                             digw = 4, link = "loge") {
   if (!is.Numeric(w.aml, positive = TRUE))
     stop("'w.aml' must be a vector of positive values")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1, 2 or 3")
@@ -1696,7 +1711,9 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
             " asymmetric maximum likelihood estimation\n\n",
             "Link:     ", predictors.names),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
   }), list( .parallel = parallel ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     amlexponential.deviance(mu = mu, y = y, w = w,
@@ -1743,7 +1760,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
             .digw = digw, .w.aml = w.aml ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     mu.ans <- eta <- as.matrix(eta)
-    for(ii in 1:ncol(eta))
+    for (ii in 1:ncol(eta))
       mu.ans[, ii] <- eta2theta(eta[, ii], .link , earg = .earg )
     dimnames(mu.ans) <- list(dimnames(eta)[[1]], extra$y.names)
     mu.ans
@@ -1763,7 +1780,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
 
 
     extra$percentile <- numeric(M)
-    for(ii in 1:M)
+    for (ii in 1:M)
       extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
     names(extra$percentile) <- names(misc$link)
 
@@ -1815,18 +1832,19 @@ dalap <- function(x, location = 0, scale = 1, tau = 0.5,
 
 
 
-  NN <- max(length(x), length(location), length(scale), length(kappa))
-  location <- rep(location, length.out = NN);
-  scale <- rep(scale, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
-  x <- rep(x, length.out = NN)
-  tau <- rep(tau, length.out = NN)
+  NN <- max(length(x), length(location), length(scale), length(kappa),
+            length(tau))
+  if (length(x)        != NN) x        <- rep(x,        length.out = NN)
+  if (length(location) != NN) location <- rep(location, length.out = NN)
+  if (length(scale)    != NN) scale    <- rep(scale,    length.out = NN)
+  if (length(kappa)    != NN) kappa    <- rep(kappa,    length.out = NN)
+  if (length(tau)      != NN) tau      <- rep(tau,      length.out = NN)
 
   logconst <- 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
   exponent <- -(sqrt(2) / scale) * abs(x - location) *
              ifelse(x >= location, kappa, 1/kappa)
 
-  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   logconst[!indexTF] <- NaN
 
   if (log.arg) logconst + exponent else exp(logconst + exponent)
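
dalap() now recycles all five arguments, including 'tau', to a common length before evaluating the log-density, and flags invalid parameter combinations with NaN. A small sketch of the vectorised behaviour (the argument values are arbitrary):

    library(VGAM)
    dalap(x = c(-1, 0, 2), location = 0, scale = c(1, 2, 4), tau = 0.25)
    dalap(x = 0, scale = -1, tau = 0.25)   # invalid scale, so NaN
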
@@ -1837,7 +1855,7 @@ ralap <- function(n, location = 0, scale = 1, tau = 0.5,
                  kappa = sqrt(tau/(1-tau))) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   location <- rep(location, length.out = use.n);
@@ -1846,7 +1864,7 @@ ralap <- function(n, location = 0, scale = 1, tau = 0.5,
   kappa    <- rep(kappa,    length.out = use.n);
   ans <- location + scale *
         log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
-  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
   ans
 }
@@ -1854,12 +1872,14 @@ ralap <- function(n, location = 0, scale = 1, tau = 0.5,
 
 palap <- function(q, location = 0, scale = 1, tau = 0.5,
                  kappa = sqrt(tau/(1-tau))) {
-  NN <- max(length(q), length(location), length(scale), length(kappa))
-  location <- rep(location, length.out = NN);
-  scale <- rep(scale, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
-  q <- rep(q, length.out = NN)
-  tau <- rep(tau, length.out = NN);
+
+  NN <- max(length(q), length(location), length(scale), length(kappa),
+            length(tau))
+  if (length(q)        != NN) q        <- rep(q,        length.out = NN)
+  if (length(location) != NN) location <- rep(location, length.out = NN)
+  if (length(scale)    != NN) scale    <- rep(scale,    length.out = NN)
+  if (length(kappa)    != NN) kappa    <- rep(kappa,    length.out = NN)
+  if (length(tau)      != NN) tau      <- rep(tau,      length.out = NN)
 
   exponent <- -(sqrt(2) / scale) * abs(q - location) *
              ifelse(q >= location, kappa, 1/kappa)
@@ -1868,7 +1888,7 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
   index1 <- (q < location)
   ans[index1] <- (kappa[index1])^2 * temp5[index1]
 
-  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
   ans
 }
@@ -1876,12 +1896,15 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
 
 qalap <- function(p, location = 0, scale = 1, tau = 0.5,
                  kappa = sqrt(tau / (1 - tau))) {
-  NN <- max(length(p), length(location), length(scale), length(kappa))
-  location <- rep(location, length.out = NN);
-  scale <- rep(scale, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
-  p <- rep(p, length.out = NN)
-  tau <- rep(tau, length.out = NN)
+
+  NN <- max(length(p), length(location), length(scale), length(kappa),
+            length(tau))
+  if (length(p)        != NN) p        <- rep(p,        length.out = NN)
+  if (length(location) != NN) location <- rep(location, length.out = NN)
+  if (length(scale)    != NN) scale    <- rep(scale,    length.out = NN)
+  if (length(kappa)    != NN) kappa    <- rep(kappa,    length.out = NN)
+  if (length(tau)      != NN) tau      <- rep(tau,      length.out = NN)
+
 
   ans <- p
   temp5 <- kappa^2 / (1 + kappa^2)
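
qalap() receives the same argument recycling as palap() above. A quick consistency check between the quantile and distribution functions (the probabilities are arbitrary):

    library(VGAM)
    p <- c(0.05, 0.5, 0.95)
    q <- qalap(p, location = 1, scale = 2, tau = 0.25)
    palap(q, location = 1, scale = 2, tau = 0.25)   # recovers p
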
@@ -1910,7 +1933,7 @@ rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
                    kappa = sqrt(tau/(1-tau))) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
             stop("bad input for argument 'n'") else n
   location.ald <- rep(location.ald, length.out = use.n);
   scale.ald    <- rep(scale.ald,    length.out = use.n);
@@ -1918,7 +1941,7 @@ rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
   kappa        <- rep(kappa,        length.out = use.n);
   ans <- exp(location.ald) *
      (runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
-  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
   ans
 }
@@ -1931,14 +1954,17 @@ dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
   rm(log)
 
 
+  scale    <- scale.ald
+  location <- location.ald
+  NN <- max(length(x), length(location),
+           length(scale), length(kappa), length(tau))
+
+  if (length(x)        != NN) x        <- rep(x,        length.out = NN)
+  if (length(location) != NN) location <- rep(location, length.out = NN)
+  if (length(scale)    != NN) scale    <- rep(scale,    length.out = NN)
+  if (length(kappa)    != NN) kappa    <- rep(kappa,    length.out = NN)
+  if (length(tau)      != NN) tau      <- rep(tau,      length.out = NN)
 
-  NN <- max(length(x), length(location.ald),
-           length(scale.ald), length(kappa))
-  location <- rep(location.ald, length.out = NN);
-  scale <- rep(scale.ald, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
-  x <- rep(x, length.out = NN)
-  tau <- rep(tau, length.out = NN)
 
   Alpha <- sqrt(2) * kappa / scale.ald
   Beta  <- sqrt(2) / (scale.ald * kappa)
@@ -1947,22 +1973,25 @@ dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
              (log(x) - location.ald)
   logdensity <- -location.ald + log(Alpha) + log(Beta) -
                log(Alpha + Beta) + exponent
-  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   logdensity[!indexTF] <- NaN
   logdensity[x <  0 & indexTF] <- -Inf
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
+
+
 qloglap <- function(p, location.ald = 0, scale.ald = 1,
                    tau = 0.5, kappa = sqrt(tau/(1-tau))) {
   NN <- max(length(p), length(location.ald), length(scale.ald),
             length(kappa))
+  p        <- rep(p,            length.out = NN)
   location <- rep(location.ald, length.out = NN);
-  scale <- rep(scale.ald, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
-  p <- rep(p, length.out = NN)
-  tau <- rep(tau, length.out = NN)
+  scale    <- rep(scale.ald,    length.out = NN)
+  kappa    <- rep(kappa,        length.out = NN);
+  tau      <- rep(tau,          length.out = NN)
+
 
   Alpha <- sqrt(2) * kappa / scale.ald
   Beta  <- sqrt(2) / (scale.ald * kappa)
@@ -1976,7 +2005,7 @@ qloglap <- function(p, location.ald = 0, scale.ald = 1,
   ans[p == 1] <- Inf
 
   indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
-            (p >= 0) & (p <= 1) # &
+            (p >= 0) & (p <= 1)  # &
   ans[!indexTF] <- NaN
   ans
 }
@@ -2003,7 +2032,7 @@ ploglap <- function(q, location.ald = 0, scale.ald = 1,
   index1 <- (q >= Delta)
   ans[index1] <- (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
 
-  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
   ans
 }
@@ -2015,7 +2044,7 @@ rlogitlap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
                       kappa = sqrt(tau/(1-tau))) {
   logit(ralap(n = n, location = location.ald, scale = scale.ald,
               tau = tau, kappa = kappa),
-        inverse = TRUE) # earg = earg
+        inverse = TRUE)  # earg = earg
 }
 
 
@@ -2037,14 +2066,14 @@ dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
 
   Alpha <- sqrt(2) * kappa / scale.ald
   Beta  <- sqrt(2) / (scale.ald * kappa)
-  Delta <- logit(location.ald, inverse = TRUE) # earg = earg
+  Delta <- logit(location.ald, inverse = TRUE)  # earg = earg
 
   exponent <- ifelse(x >= Delta, -Alpha, Beta) *
              (logit(x) - # earg = earg
               location.ald)
   logdensity <- log(Alpha) + log(Beta) - log(Alpha + Beta) -
                log(x) - log1p(-x) + exponent
-  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   logdensity[!indexTF] <- NaN
   logdensity[x <  0 & indexTF] <- -Inf
   logdensity[x >  1 & indexTF] <- -Inf
@@ -2056,7 +2085,7 @@ qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
                      tau = 0.5, kappa = sqrt(tau/(1-tau))) {
   qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
               tau = tau, kappa = kappa)
-  ans <- logit(qqq, inverse = TRUE) # earg = earg
+  ans <- logit(qqq, inverse = TRUE)  # earg = earg
   ans[(p < 0) | (p > 1)] <- NaN
   ans[p == 0] <- 0
   ans[p == 1] <- 1
@@ -2075,7 +2104,7 @@ plogitlap <- function(q, location.ald = 0, scale.ald = 1,
   tau <- rep(tau, length.out = NN);
 
   indexTF <- (q > 0) & (q < 1)
-  qqq <- logit(q[indexTF]) # earg = earg
+  qqq <- logit(q[indexTF])  # earg = earg
   ans <- q
   ans[indexTF] <- palap(q = qqq, location = location.ald[indexTF],
                        scale = scale.ald[indexTF],
@@ -2118,10 +2147,10 @@ dprobitlap <-
 
   logdensity <- x * NaN
   index1 <- (x > 0) & (x < 1)
-  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   if (meth2) {
     dx.dy <- x
-    use.x <- probit(x[index1]) # earg = earg
+    use.x <- probit(x[index1])  # earg = earg
     logdensity[index1] =
       dalap(x = use.x, location = location.ald[index1],
             scale = scale.ald[index1], tau = tau[index1],
@@ -2130,7 +2159,7 @@ dprobitlap <-
     Alpha <- sqrt(2) * kappa / scale.ald
     Beta  <- sqrt(2) / (scale.ald * kappa)
     Delta <- pnorm(location.ald)
-    use.x  <- qnorm(x) # qnorm(x[index1])
+    use.x  <- qnorm(x)  # qnorm(x[index1])
     log.dy.dw <- dnorm(use.x, log = TRUE)
 
     exponent <- ifelse(x >= Delta, -Alpha, Beta) *
@@ -2144,7 +2173,7 @@ dprobitlap <-
   logdensity[x >  1 & indexTF] <- -Inf
 
   if (meth2) {
-    dx.dy[index1] <- probit(x[index1], # earg = earg,
+    dx.dy[index1] <- probit(x[index1],  # earg = earg,
                            inverse = FALSE, deriv = 1)
     dx.dy[!index1] <- 0
     dx.dy[!indexTF] <- NaN
@@ -2160,7 +2189,7 @@ qprobitlap <- function(p, location.ald = 0, scale.ald = 1,
                        tau = 0.5, kappa = sqrt(tau/(1-tau))) {
   qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
               tau = tau, kappa = kappa)
-  ans <- probit(qqq, inverse = TRUE) # , earg = earg
+  ans <- probit(qqq, inverse = TRUE)  # , earg = earg
   ans[(p < 0) | (p > 1)] = NaN
   ans[p == 0] <- 0
   ans[p == 1] <- 1
@@ -2180,7 +2209,7 @@ pprobitlap <- function(q, location.ald = 0, scale.ald = 1,
   tau <- rep(tau, length.out = NN);
 
   indexTF <- (q > 0) & (q < 1)
-  qqq <- probit(q[indexTF]) # earg = earg
+  qqq <- probit(q[indexTF])  # earg = earg
   ans <- q
   ans[indexTF] <- palap(q = qqq, location = location.ald[indexTF],
                        scale = scale.ald[indexTF],
@@ -2196,7 +2225,7 @@ pprobitlap <- function(q, location.ald = 0, scale.ald = 1,
 rclogloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
                         kappa = sqrt(tau/(1-tau))) {
   cloglog(ralap(n = n, location = location.ald, scale = scale.ald,
-                tau = tau, kappa = kappa), # earg = earg,
+                tau = tau, kappa = kappa),  # earg = earg,
           inverse = TRUE)
 }
 
@@ -2220,10 +2249,10 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
 
   logdensity <- x * NaN
   index1 <- (x > 0) & (x < 1)
-  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+  indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   if (meth2) {
     dx.dy <- x
-    use.w <- cloglog(x[index1]) # earg = earg
+    use.w <- cloglog(x[index1])  # earg = earg
     logdensity[index1] <-
       dalap(x = use.w, location = location.ald[index1],
             scale = scale.ald[index1],
@@ -2245,7 +2274,7 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
   logdensity[x >  1 & indexTF] <- -Inf
 
   if (meth2) {
-    dx.dy[index1] <- cloglog(x[index1], # earg = earg,
+    dx.dy[index1] <- cloglog(x[index1],  # earg = earg,
                             inverse = FALSE, deriv = 1)
     dx.dy[!index1] <- 0
     dx.dy[!indexTF] <- NaN
@@ -2262,7 +2291,7 @@ qclogloglap <- function(p, location.ald = 0, scale.ald = 1,
                        tau = 0.5, kappa = sqrt(tau/(1-tau))) {
   qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
               tau = tau, kappa = kappa)
-  ans <- cloglog(qqq, inverse = TRUE) # , earg = earg
+  ans <- cloglog(qqq, inverse = TRUE)  # , earg = earg
   ans[(p < 0) | (p > 1)] <- NaN
   ans[p == 0] <- 0
   ans[p == 1] <- 1
@@ -2282,7 +2311,7 @@ pclogloglap <- function(q, location.ald = 0, scale.ald = 1,
   tau <- rep(tau, length.out = NN);
 
   indexTF <- (q > 0) & (q < 1)
-  qqq <- cloglog(q[indexTF]) # earg = earg
+  qqq <- cloglog(q[indexTF])  # earg = earg
   ans <- q
   ans[indexTF] <- palap(q = qqq, location = location.ald[indexTF],
                        scale = scale.ald[indexTF],
@@ -2333,14 +2362,14 @@ alaplace2.control <- function(maxit = 100, ...) {
 
   if (!is.Numeric(kappa, positive = TRUE))
     stop("bad input for argument 'kappa'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 4)
     stop("argument 'imethod' must be 1, 2 or ... 4")
   if (length(iscale) &&
       !is.Numeric(iscale, positive = TRUE))
     stop("bad input for argument 'iscale'")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
     shrinkage.init < 0 ||
     shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -2397,7 +2426,9 @@ alaplace2.control <- function(maxit = 100, ...) {
 
 
     constraints <- cm.vgam(cbind(locatHmatk, scaleHmatk),
-                           x, .PARALLEL , constraints,
+                           x = x,
+                           bool = .PARALLEL , 
+                           constraints = constraints,
                            apply.int = FALSE)
 
       if (names(constraints)[1] == "(Intercept)") {
@@ -2408,7 +2439,7 @@ alaplace2.control <- function(maxit = 100, ...) {
       dotzero <- .zero
       Musual <- 2
       eval(negzero.expression)
-      constraints <- cm.zero.vgam(constraints, x, z_Index, M)
+      constraints <- cm.zero.vgam(constraints, x, z.Index, M)
 
 
 
@@ -2484,7 +2515,7 @@ alaplace2.control <- function(maxit = 100, ...) {
 
     locat.init <- scale.init <- matrix(0, n, Mdiv2)
     if (!length(etastart)) {
-      for(jay in 1:Mdiv2) {
+      for (jay in 1:Mdiv2) {
         y.use <- if (ncoly > 1) y[, jay] else y
         if ( .imethod == 1) {
           locat.init[, jay] <- weighted.mean(y.use, w[, jay])
@@ -2561,7 +2592,7 @@ alaplace2.control <- function(maxit = 100, ...) {
 
     misc$earg <- vector("list", M)
     misc$Musual <- Musual
-    for(ii in 1:Mdiv2) {
+    for (ii in 1:Mdiv2) {
       misc$earg[[Musual * ii - 1]] <- .elocat
       misc$earg[[Musual * ii    ]] <- .escale
     }
@@ -2577,7 +2608,7 @@ alaplace2.control <- function(maxit = 100, ...) {
 
     extra$percentile <- numeric(Mdiv2)  # length(misc$kappa)
     locat <- as.matrix(locat)
-    for(ii in 1:Mdiv2) {
+    for (ii in 1:Mdiv2) {
       y.use <- if (ncoly > 1) y[, ii] else y
       extra$percentile[ii] <- 100 * weighted.mean(y.use <= locat[, ii],
                                                  w[, ii])
@@ -2689,7 +2720,7 @@ alaplace1.control <- function(maxit = 100, ...) {
       max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
     stop("arguments 'kappa' and 'tau' do not match")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 4)
     stop("argument 'imethod' must be 1, 2 or ... 4")
@@ -2703,7 +2734,7 @@ alaplace1.control <- function(maxit = 100, ...) {
   ilocat <- ilocation
 
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -2745,7 +2776,9 @@ alaplace1.control <- function(maxit = 100, ...) {
     locatHmat1 <- if ( .intparloc ) onemat else diag(M)
     locatHmatk <- if ( .parallelLocation ) onemat else diag(M)
 
-      constraints <- cm.vgam(locatHmatk, x, .parallelLocation, constraints,
+      constraints <- cm.vgam(locatHmatk, x = x,
+                             bool = .parallelLocation, 
+                             constraints = constraints,
                              apply.int = FALSE)
 
       if (names(constraints)[1] == "(Intercept)") {
@@ -2799,7 +2832,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
         extra$M <- M <- max(length( .Scale.arg ),
                           ncoly,
-                          length( .kappa )) # Recycle
+                          length( .kappa ))  # Recycle
         extra$Scale <- rep( .Scale.arg, length = M)
         extra$kappa <- rep( .kappa, length = M)
         extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -2826,7 +2859,7 @@ alaplace1.control <- function(maxit = 100, ...) {
     locat.init <- matrix(0, n, M)
     if (!length(etastart)) {
 
-      for(jay in 1:M) {
+      for (jay in 1:M) {
         y.use <- if (ncoly > 1) y[, jay] else y
         if ( .imethod == 1) {
           locat.init[, jay] <- weighted.mean(y.use, w)
@@ -2883,7 +2916,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M) {
+    for (ii in 1:M) {
       misc$earg[[ii]] <- .elocat
     }
 
@@ -2895,7 +2928,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
     extra$percentile <- numeric(M)
     locat <- as.matrix(locat)
-    for(ii in 1:M) {
+    for (ii in 1:M) {
       y.use <- if (ncoly > 1) y[, ii] else y
       extra$percentile[ii] =
         100 * weighted.mean(y.use <= locat[, ii], w)
@@ -2984,7 +3017,7 @@ alaplace3.control <- function(maxit = 100, ...) {
   lkappa <- attr(ekappa, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -3073,7 +3106,7 @@ alaplace3.control <- function(maxit = 100, ...) {
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
-    kappa <- eta2theta(eta[, 3], .lkappa , earg = .ekappa ) # a matrix
+    kappa <- eta2theta(eta[, 3], .lkappa , earg = .ekappa )  # a matrix
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -3131,13 +3164,14 @@ alaplace3.control <- function(maxit = 100, ...) {
 
 
 
+
+
 dlaplace <- function(x, location = 0, scale = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-
   logdensity <- (-abs(x-location)/scale) - log(2*scale)
   if (log.arg) logdensity else exp(logdensity)
 }
@@ -3147,37 +3181,47 @@ plaplace <- function(q, location = 0, scale = 1) {
   if (!is.Numeric(scale, positive = TRUE)) 
     stop("argument 'scale' must be positive")
   zedd <- (q-location) / scale
+
   L <- max(length(q), length(location), length(scale))
-  q <- rep(q, length.out = L);
-  location <- rep(location, length.out = L);
-  scale <- rep(scale, length.out = L)
+  if (length(q)        != L) q        <- rep(q,        length.out = L)
+  if (length(location) != L) location <- rep(location, length.out = L)
+  if (length(scale)    != L) scale    <- rep(scale,    length.out = L)
 
-  ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
+  ifelse(q < location, 0.5 * exp(zedd), 1 - 0.5 * exp(-zedd))
 }
 
 
 qlaplace <- function(p, location = 0, scale = 1) {
   if (!is.Numeric(scale, positive = TRUE)) 
     stop("argument 'scale' must be positive")
+
   L <- max(length(p), length(location), length(scale))
-  p <- rep(p, length.out = L);
-  location <- rep(location, length.out = L);
-  scale <- rep(scale, length.out = L)
+  if (length(p)        != L) p        <- rep(p,        length.out = L)
+  if (length(location) != L) location <- rep(location, length.out = L)
+  if (length(scale)    != L) scale    <- rep(scale,    length.out = L)
 
-  location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
+  location - sign(p-0.5) * scale * log(2 * ifelse(p < 0.5, p, 1-p))
 }
 
 
 rlaplace <- function(n, location = 0, scale = 1) {
-  if (!is.Numeric(n, positive = TRUE,
-                  integer.valued = TRUE, allowable.length = 1))
-    stop("bad input for argument 'n'")
+
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+              stop("bad input for argument 'n'") else n
+
   if (!is.Numeric(scale, positive = TRUE))
     stop("'scale' must be positive")
-  location <- rep(location, length.out = n);
-  scale <- rep(scale, length.out = n)
-  r <- runif(n)
-  location - sign(r-0.5) * scale * log(2 * ifelse(r < 0.5, r, 1-r))
+
+  location <- rep(location, length.out = use.n)
+  scale    <- rep(scale,    length.out = use.n)
+  rrrr     <- runif(use.n)
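+  # Inverse-CDF sampling: the expression below is the Laplace quantile function evaluated at rrrr.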
+
+
+
+  location - sign(rrrr - 0.5) * scale *
+  (log(2) + ifelse(rrrr < 0.5, log(rrrr), log1p(-rrrr)))
 }
 
 
@@ -3196,7 +3240,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3302,7 +3346,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
             .elocat = elocat, .llocat = llocat ))),
   weight = eval(substitute(expression({
     d2l.dLocat2 <- d2l.dscale2 <- 1 / Scale^2
-    wz <- matrix(0, nrow = n, ncol = M) # diagonal
+    wz <- matrix(0, nrow = n, ncol = M)  # diagonal
     wz[,iam(1, 1, M)] <- d2l.dLocat2 * dLocat.deta^2
     wz[,iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
     c(w) * wz
@@ -3318,14 +3362,14 @@ fff.control <- function(save.weight = TRUE, ...) {
 
 
  fff <- function(link = "loge",
-                 idf1 = NULL, idf2 = NULL, nsimEIM = 100, # ncp = 0,
+                 idf1 = NULL, idf2 = NULL, nsimEIM = 100,  # ncp = 0,
                  imethod = 1, zero = NULL) {
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -3334,7 +3378,7 @@ fff.control <- function(save.weight = TRUE, ...) {
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10)
     stop("argument 'nsimEIM' should be an integer greater than 10")
@@ -3441,7 +3485,7 @@ fff.control <- function(save.weight = TRUE, ...) {
   weight = eval(substitute(expression({
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- rf(n = n, df1=df1, df2=df2)
       dl.ddf1 <- 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
                 0.5*log(ysim) - 0.5*digamma(0.5*df1) -
@@ -3628,9 +3672,9 @@ dbenini <- function(x, shape, y0, log = FALSE) {
 
 
   N <- max(length(x), length(shape), length(y0))
-  x <- rep(x, length.out = N);
-  shape <- rep(shape, length.out = N);
-  y0 <- rep(y0, length.out = N); 
+  if (length(x)        != N) x        <- rep(x,        length.out = N)
+  if (length(shape)    != N) shape    <- rep(shape,    length.out = N)
+  if (length(y0)       != N) y0       <- rep(y0,       length.out = N)
 
   logdensity <- rep(log(0), length.out = N)
   xok <- (x > y0)
@@ -3649,9 +3693,9 @@ pbenini <- function(q, shape, y0) {
   if (!is.Numeric(y0, positive = TRUE))
     stop("bad input for argument 'y0'")
   N <- max(length(q), length(shape), length(y0))
-  q <- rep(q, length.out = N);
-  shape <- rep(shape, length.out = N);
-  y0 <- rep(y0, length.out = N); 
+  if (length(q)        != N) q        <- rep(q,        length.out = N)
+  if (length(shape)    != N) shape    <- rep(shape,    length.out = N)
+  if (length(y0)       != N) y0       <- rep(y0,       length.out = N)
 
   ans <- y0 * 0
   ok <- q > y0
@@ -3688,7 +3732,7 @@ rbenini <- function(n, shape, y0) {
   lshape <- attr(eshape, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -3779,7 +3823,7 @@ rbenini <- function(n, shape, y0) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .eshape
     }
 
@@ -3884,6 +3928,9 @@ rpolono <- function(n, meanlog = 0, sdlog = 1) {
 
 
 
+
+
+
 dtriangle <- function(x, theta, lower = 0, upper = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -3891,10 +3938,10 @@ dtriangle <- function(x, theta, lower = 0, upper = 1, log = FALSE) {
 
 
   N <- max(length(x), length(theta), length(lower), length(upper))
-  x <- rep(x, length.out = N);
-  lower <- rep(lower, length.out = N);
-  upper <- rep(upper, length.out = N);
-  theta <- rep(theta, length.out = N)
+  if (length(x)     != N) x     <- rep(x,     length.out = N)
+  if (length(theta) != N) theta <- rep(theta, length.out = N)
+  if (length(lower) != N) lower <- rep(lower, length.out = N)
+  if (length(upper) != N) upper <- rep(upper, length.out = N)
 
   denom1 <- ((upper-lower)*(theta-lower))
   denom2 <- ((upper-lower)*(upper-theta))
@@ -3913,8 +3960,14 @@ dtriangle <- function(x, theta, lower = 0, upper = 1, log = FALSE) {
 
 
 rtriangle <- function(n, theta, lower = 0, upper = 1) {
-  if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
-    stop("bad input for argument 'n'")
+
+
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+              stop("bad input for argument 'n'") else n
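+  # As in base R's r* functions, a vector 'n' is taken to mean that length(n) values are required.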
+
+
   if (!is.Numeric(theta))
     stop("bad input for argument 'theta'")
   if (!is.Numeric(lower))
@@ -3924,9 +3977,9 @@ rtriangle <- function(n, theta, lower = 0, upper = 1) {
   if (!all(lower < theta & theta < upper))
     stop("lower < theta < upper values are required")
 
-  N <- n
-  lower <- rep(lower, length.out = N);
-  upper <- rep(upper, length.out = N);
+  N <- use.n
+  lower <- rep(lower, length.out = N)
+  upper <- rep(upper, length.out = N)
   theta <- rep(theta, length.out = N)
   t1 <- sqrt(runif(n))
   t2 <- sqrt(runif(n))
@@ -3949,10 +4002,10 @@ qtriangle <- function(p, theta, lower = 0, upper = 1) {
     stop("lower < theta < upper values are required")
 
   N <- max(length(p), length(theta), length(lower), length(upper))
-  p <- rep(p, length.out = N);
-  lower <- rep(lower, length.out = N);
-  upper <- rep(upper, length.out = N);
-  theta <- rep(theta, length.out = N)
+  if (length(p)     != N) p     <- rep(p,     length.out = N)
+  if (length(theta) != N) theta <- rep(theta, length.out = N)
+  if (length(lower) != N) lower <- rep(lower, length.out = N)
+  if (length(upper) != N) upper <- rep(upper, length.out = N)
 
   bad <- (p < 0) | (p > 1)
   if (any(bad))
@@ -3991,10 +4044,11 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
     stop("lower < theta < upper values are required")
 
   N <- max(length(q), length(theta), length(lower), length(upper))
-  q <- rep(q, length.out = N);
-  lower <- rep(lower, length.out = N);
-  upper <- rep(upper, length.out = N);
-  theta <- rep(theta, length.out = N)
+  if (length(q)     != N) q     <- rep(q,     length.out = N)
+  if (length(theta) != N) theta <- rep(theta, length.out = N)
+  if (length(lower) != N) lower <- rep(lower, length.out = N)
+  if (length(upper) != N) upper <- rep(upper, length.out = N)
+
   ans <- q * 0
 
   qstar <- (q - lower)^2 / ((upper-lower) * (theta-lower))
@@ -4011,9 +4065,20 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
 
 
 
- triangle <- function(lower = 0, upper = 1,
-                      link = elogit(min = lower, max = upper),
-                      itheta = NULL) {
+
+
+ triangle <-
+  function(lower = 0, upper = 1,
+           link = elogit(min = 0, max = 1),
+           itheta = NULL) {
+
+
+
+
+
+
+
+
   if (!is.Numeric(lower))
     stop("bad input for argument 'lower'")
   if (!is.Numeric(upper))
@@ -4025,11 +4090,20 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
     stop("bad input for 'itheta'")
 
 
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
+  if (length(earg$min) && any(earg$min != lower))
+    stop("argument 'lower' does not match the 'link'")
+  if (length(earg$max) && any(earg$max != upper))
+    stop("argument 'upper' does not match the 'link'")
+
+
+
   new("vglmff",
   blurb = c("Triangle distribution\n\n",
             "Link:    ",
@@ -4047,6 +4121,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
 
 
 
+
     extra$lower <- rep( .lower, length.out = n)
     extra$upper <- rep( .upper, length.out = n)
 
@@ -4070,11 +4145,10 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
     Theta <- eta2theta(eta, .link , earg = .earg )
     lower <- extra$lower
     upper <- extra$upper
-    mu <-  ((Theta^3 / 3 - lower * Theta^2 / 2 +
-          lower^3 / 6) / (Theta - lower) + 
-           ((Theta^3 / 3 - upper * Theta^2 / 2 +
-          upper^3 / 6) / (upper - Theta))) * 2  / (upper-lower)
-    mu
+
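+    # The mean of a triangular distribution on (lower, upper) with mode Theta is (lower + upper + Theta) / 3.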
+    mu1 <- (lower + upper + Theta) / 3
+
+    mu1
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c(theta = .link )
@@ -4144,7 +4218,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
                      shrinkage.init = 0.95,
                      parallelLocation = FALSE, digt = 4,
                      dfmu.init = 3,
-                     rep0 = 0.5, # 0.0001,
+                     rep0 = 0.5,  # 0.0001,
                      minquantile = 0, maxquantile = Inf,
                      imethod = 1, zero = NULL) {
 
@@ -4154,7 +4228,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
     stop("bad input for argument 'maxquantile'")
 
 
-  if (!is.Numeric(rep0, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(rep0, positive = TRUE, length.arg = 1) ||
       rep0 > 1)
     stop("bad input for argument 'rep0'")
   if (!is.Numeric(kappa, positive = TRUE))
@@ -4175,13 +4249,13 @@ loglaplace1.control <- function(maxit = 300, ...) {
   llocat.identity <- attr(elocat.identity, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
     stop("argument 'imethod' must be 1, 2 or ... 4")
 
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -4218,13 +4292,14 @@ loglaplace1.control <- function(maxit = 300, ...) {
             "Links:      ", mystring0, "\n", "\n",
           "Quantiles:  ", mystring1),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallelLocation,
-                           constraints, apply.int = FALSE)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallelLocation ,
+                           constraints = constraints, apply.int = FALSE)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .parallelLocation = parallelLocation,
             .Scale.arg = Scale.arg, .zero = zero ))),
   initialize = eval(substitute(expression({
-    extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
+    extra$M <- M <- max(length( .Scale.arg ), length( .kappa ))  # Recycle
     extra$Scale <- rep( .Scale.arg, length = M)
     extra$kappa <- rep( .kappa, length = M)
     extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -4333,7 +4408,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
 
     extra$percentile <- numeric(length(misc$kappa))
     locat.y <- as.matrix(locat.y)
-    for(ii in 1:length(misc$kappa))
+    for (ii in 1:length(misc$kappa))
       extra$percentile[ii] <- 100 * weighted.mean(y <= locat.y[, ii], w)
   }), list( .elocat = elocat, .llocat = llocat,
             .Scale.arg = Scale.arg, .fittedMean = fittedMean,
@@ -4348,7 +4423,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
 
     if ( .llocat == "loge")
       ymat <- adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
-        w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logoff()
+        w.mat <- theta2eta(ymat, .llocat , earg = .elocat )  # e.g., logoff()
         if (residuals) {
           stop("loglikelihood residuals not implemented yet")
         } else {
@@ -4369,7 +4444,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
     kappamat <- matrix(extra$kappa, n, M, byrow = TRUE)
 
     ymat <- adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
-    w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+    w.mat <- theta2eta(ymat, .llocat , earg = .elocat )  # e.g., logit()
     zedd <- abs(w.mat-locat.w) / Scale.w
     dl.dlocat <- ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
                    sqrt(2) * sign(w.mat-locat.w) / Scale.w
@@ -4416,11 +4491,11 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
  warning("it is best to use loglaplace1()")
 
   if (length(nsimEIM) &&
-     (!is.Numeric(nsimEIM, allowable.length = 1,
+     (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE) ||
       nsimEIM <= 10))
     stop("argument 'nsimEIM' should be an integer greater than 10")
-  if (!is.Numeric(rep0, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(rep0, positive = TRUE, length.arg = 1) ||
       rep0 > 1)
     stop("bad input for argument 'rep0'")
   if (!is.Numeric(kappa, positive = TRUE))
@@ -4441,7 +4516,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
     stop("argument 'imethod' must be 1, 2 or ... 4")
@@ -4449,7 +4524,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
     stop("bad input for argument 'iscale'")
 
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -4490,7 +4565,9 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
                   matrix(1, M/2, 1) else diag(M/2)
       mycmatrix <- cbind(rbind(  parelHmat, 0*parelHmat),
                         rbind(0*scaleHmat,   scaleHmat))
-      constraints <- cm.vgam(mycmatrix, x, .PARALLEL, constraints,
+      constraints <- cm.vgam(mycmatrix, x = x,
+                             bool = .PARALLEL ,
+                             constraints = constraints,
                              apply.int = FALSE)
       constraints <- cm.zero.vgam(constraints, x, .ZERO, M)
 
@@ -4607,7 +4684,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
     misc$rep0 <- .rep0
         extra$percentile <- numeric(length(misc$kappa))
         locat <- as.matrix(locat.y)
-        for(ii in 1:length(misc$kappa))
+        for (ii in 1:length(misc$kappa))
           extra$percentile[ii] <- 100 *
                                  weighted.mean(y <= locat.y[, ii], w)
   }), list( .elocat = elocat, .llocat = llocat,
@@ -4644,8 +4721,8 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
     locat.y <- eta2theta(locat.w, .llocat , earg = .elocat )
     kappamat <- matrix(extra$kappa, n, M/2, byrow = TRUE)
     w.mat <- ymat
-    w.mat[w.mat <= 0] <- min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
-    w.mat <- theta2eta(w.mat, .llocat , earg = .elocat ) # w.mat=log(w.mat)
+    w.mat[w.mat <= 0] <- min(min(w.mat[w.mat > 0]), .rep0)  # Adjust for 0s
+    w.mat <- theta2eta(w.mat, .llocat , earg = .elocat )  # w.mat=log(w.mat)
     zedd <- abs(w.mat-locat.w) / Scale.w
     dl.dlocat <- sqrt(2) *
                    ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
@@ -4665,7 +4742,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
     dthetas.detas <- cbind(dlocat.deta, dscale.deta)
     if (length( .nsimEIM )) {
-        for(ii in 1:( .nsimEIM )) {
+        for (ii in 1:( .nsimEIM )) {
             wsim <- matrix(rloglap(n*M/2, loc = c(locat.w),
                                   sca = c(Scale.w),
                                   kappa = c(kappamat)), n, M/2)
@@ -4732,7 +4809,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
         rep01 = 0.5,
         imethod = 1, zero = NULL) {
 
-  if (!is.Numeric(rep01, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(rep01, positive = TRUE, length.arg = 1) ||
       rep01 > 0.5)
     stop("bad input for argument 'rep01'")
   if (!is.Numeric(kappa, positive = TRUE))
@@ -4755,12 +4832,12 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
     stop("argument 'imethod' must be 1, 2 or ... 4")
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -4794,13 +4871,14 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
             "Links:      ", mystring0, "\n", "\n",
           "Quantiles:  ", mystring1),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallelLocation,
-                           constraints, apply.int = FALSE)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallelLocation ,
+                           constraints = constraints, apply.int = FALSE)
     constraints <- cm.zero.vgam(constraints, x, .zero, M)
   }), list( .parallelLocation = parallelLocation,
             .Scale.arg = Scale.arg, .zero = zero ))),
   initialize = eval(substitute(expression({
-    extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
+    extra$M <- M <- max(length( .Scale.arg ), length( .kappa ))  # Recycle
     extra$Scale <- rep( .Scale.arg, length = M)
     extra$kappa <- rep( .kappa, length = M)
     extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -4900,7 +4978,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
     extra$percentile <- numeric(length(misc$kappa))
     locat.y <- eta2theta(eta, .llocat , earg = .elocat )
     locat.y <- as.matrix(locat.y)
-    for(ii in 1:length(misc$kappa))
+    for (ii in 1:length(misc$kappa))
       extra$percentile[ii] <- 100 *
                              weighted.mean(y <= locat.y[, ii], w)
 
@@ -4915,7 +4993,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
     ymat <- matrix(y, extra$n, extra$M)
     ymat <- adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
                                   rep01 = .rep01)
-    w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+    w.mat <- theta2eta(ymat, .llocat , earg = .elocat )  # e.g., logit()
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -4936,7 +5014,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
     kappamat <- matrix(extra$kappa, n, M, byrow = TRUE)
     ymat <- adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
                                   rep01 = .rep01)
-    w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+    w.mat <- theta2eta(ymat, .llocat , earg = .elocat )  # e.g., logit()
     zedd <- abs(w.mat-locat.w) / Scale.w
     dl.dlocat <- ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
                    sqrt(2) * sign(w.mat-locat.w) / Scale.w
diff --git a/R/family.quantal.R b/R/family.quantal.R
index 5836683..704f147 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -19,12 +19,12 @@
  abbott <- function(link0 = "logit",
                     link1 = "logit",
                     iprob0 = NULL, iprob1 = NULL,
-                    fitted.type = c("observed", "treatment", "control"),
+                    type.fitted = c("observed", "treatment", "control"),
                     mux.offdiagonal = 0.98,
                     zero = 1) {
 
 
-  fitted.type <- match.arg(fitted.type,
+  type.fitted <- match.arg(type.fitted,
                            c("observed", "treatment", "control"),
                            several.ok = TRUE)
 
@@ -40,7 +40,7 @@
 
 
 
-  if (!is.Numeric(mux.offdiagonal, allowable.length = 1) ||
+  if (!is.Numeric(mux.offdiagonal, length.arg = 1) ||
       mux.offdiagonal >= 1 ||
       mux.offdiagonal <  0)
     stop("argument 'mux.offdiagonal' must be in the interval [0, 1)")
@@ -61,7 +61,7 @@
            ))),
 
   initialize = eval(substitute(expression({
-    eval(binomialff(link = .link0 )@initialize) # w, y, mustart are assigned
+    eval(binomialff(link = .link0 )@initialize)  # w, y, mustart are assigned
 
 
     predictors.names <-
@@ -109,10 +109,10 @@
                 "control"   = con.fv)
 
                    
-    ans[, .fitted.type , drop = FALSE]
+    ans[, .type.fitted , drop = FALSE]
   }, list( .link0 = link0, .earg0 = earg0,
            .link1 = link1, .earg1 = earg1,
-           .fitted.type = fitted.type ))),
+           .type.fitted = type.fitted ))),
 
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -147,14 +147,14 @@
     misc$earg <- list(prob0 = .earg0 , prob1 = .earg1 )
 
     misc$mux.offdiagonal <- .mux.offdiagonal
-    misc$fitted.type <- .fitted.type
-    misc$true.mu <- ( .fitted.type == "observed")
+    misc$type.fitted <- .type.fitted
+    misc$true.mu <- ( .type.fitted == "observed")
 
 
   }), list( .link0 = link0, .earg0 = earg0,
             .link1 = link1, .earg1 = earg1,
             .mux.offdiagonal = mux.offdiagonal,
-            .fitted.type = fitted.type
+            .type.fitted = type.fitted
           ))),
   vfamily = c("abbott", "vquantal"),
   deriv = eval(substitute(expression({
@@ -184,9 +184,9 @@
     ned2l.dmu2 <- 1 / (mymu * (1-mymu))
     ned2l.dprob02     <- ned2l.dmu2 * dmu.dprob0^2
     ned2l.dprob12     <- ned2l.dmu2 * dmu.dprob1^2
-    ned2l.dprob1prob2 <-              ( 1) # seems sort of ok but slow cvgc
-    ned2l.dprob1prob2 <-              ( 0) # kill it
-    ned2l.dprob1prob2 <- ned2l.dmu2 * ( 1) # dont seem to work
+    ned2l.dprob1prob2 <-              ( 1)  # seems sort of ok but slow cvgc
+    ned2l.dprob1prob2 <-              ( 0)  # kill it
+    ned2l.dprob1prob2 <- ned2l.dmu2 * ( 1)  # dont seem to work
 
     ned2l.dprob1prob2 <- ned2l.dmu2 * dmu.dprob1 * dmu.dprob0 *
                          .mux.offdiagonal
@@ -218,10 +218,10 @@
 
 
 if (FALSE)
- Abbott <- function(lprob1 = elogit(min = 0, max = 1), # For now, that is
+ Abbott <- function(lprob1 = elogit(min = 0, max = 1),  # For now, that is
                    lprob0 = "logit",
                    iprob0 = NULL, iprob1 = NULL,
-                   nointercept = 2, # NULL,
+                   nointercept = 2,  # NULL,
                    zero = 1) {
 
 
@@ -258,7 +258,7 @@ if (FALSE)
   initialize = eval(substitute(expression({
  print("here1")
 
-    eval(binomialff(link = .lprob1)@initialize) # w, y, mustart are assigned
+    eval(binomialff(link = .lprob1)@initialize)  # w, y, mustart are assigned
 
  print("here2")
  print("summary(mustart)")
@@ -388,19 +388,19 @@ abbott.EM.control <- function(maxit = 1000, ...) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(b1.arg, # allowable.length = 1,
+  if (!is.Numeric(b1.arg,  # length.arg = 1,
                   integer.valued = TRUE) ||
       b1.arg < 0)
     stop("argument 'b1.arg' must be a vector of non-negative integers")
 
 
-  if (!is.Numeric(b2.arg, # allowable.length = 1,
+  if (!is.Numeric(b2.arg,  # length.arg = 1,
                   integer.valued = TRUE) ||
       b2.arg < 0)
     stop("argument 'b2.arg' must be a vector of non-negative integers")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -496,7 +496,7 @@ abbott.EM.control <- function(maxit = 1000, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 53d0bb5..2ea2863 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -16,36 +16,58 @@
 
 
 
- rcim <- function(y,
-         family = poissonff,
-         Rank = 0,
-         Musual = NULL,
-         weights = NULL,
-         which.lp = 1,
-         Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
-         rprefix = "Row.",
-         cprefix = "Col.",
-         offset = 0,
-         szero = if (!Rank) NULL else
-                           { if (Musual == 1) 1 else
-                                  setdiff(1:(Musual*ncol(y)),
-                                          c( # 1:Musual,
-                                            1 + (1:ncol(y)) * Musual,
-                                            Index.corner))},
-         summary.arg = FALSE, h.step = 0.0001,
-         rbaseline = 1, cbaseline = 1,
-         ...) {
+ rcim <-
+  function(y,
+           family = poissonff,
+           Rank = 0,
+           Musual = NULL,
+           weights = NULL,
+           which.linpred = 1,
+           Index.corner = ifelse(is.null(str0), 0, max(str0)) + 1:Rank,
+           rprefix = "Row.",
+           cprefix = "Col.",
+           iprefix = "X2.",
+           offset = 0,
+
+           str0 = if (Rank) 1 else NULL,  # Ignored if Rank == 0
+           summary.arg = FALSE, h.step = 0.0001,
+           rbaseline = 1, cbaseline = 1,
+
+           has.intercept = TRUE,
+
+           M = NULL,
+
+           rindex = 2:nrow(y),  # Row index
+           cindex = 2:ncol(y),  # Col index
+           iindex = 2:nrow(y),  # Interaction index
+
+
+
+           ...) {
                            
 
 
 
 
+  rindex <- unique(sort(rindex))
+  cindex <- unique(sort(cindex))
+  iindex <- unique(sort(iindex))
+
+
+  if (Rank == 0 && !has.intercept)
+    warning("probably 'has.intercept == TRUE' is better for a rank-0 model")
+
+
+
+  ncoly <- ncol(y)
+
+
   noroweffects <- FALSE
   nocoleffects <- FALSE
 
-  if (!is.Numeric(which.lp, allowable.length = 1,
+  if (!is.Numeric(which.linpred, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'which.lp'")
+    stop("bad input for argument 'which.linpred'")
 
   if (!is.character(rprefix))
     stop("argument 'rprefix' must be character")
@@ -64,19 +86,30 @@
 
   if (!is.Numeric(Musual)) {
     iefamily <- efamily@infos
+
     if (is.function(iefamily))
       Musual <- (iefamily())$Musual
+      if (is.Numeric(Musual))
+        Musual <- abs(Musual)
   }
   if (!is.Numeric(Musual)) {
-    warning("cannot determine the value of 'Musual'.",
-            "Assuming the value one.")
+    if (!is.Numeric(M))
+      warning("cannot determine the value of 'Musual'.",
+              "Assuming the value one.")
     Musual <- 1
   }
 
 
+
+  M <- if (is.null(M)) Musual * ncol(y) else M
+
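+  # 'special': several responses that each contribute a single linear predictor (Musual == 1 but M > 1).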
+  special <- (M > 1) && (Musual == 1)
+
+
+
   object.save <- y
   y <- if (is(y, "rrvglm")) {
-    object.save@y
+    depvar(object.save)
   } else {
     as(as.matrix(y), "matrix")
   }
@@ -85,23 +118,30 @@
          "a rrvglm() object")
 
 
-
-  eifun <- function(i, n) diag(n)[, i, drop = FALSE]
-
   .rcim.df <-
-    if (!noroweffects) data.frame("Row.2" = eifun(2, nrow(y))) else
-    if (!nocoleffects) data.frame("Col.2" = eifun(2, nrow(y))) else
+    if (!noroweffects) data.frame("Row.2" = I.col(2, nrow(y))) else  # See below
+    if (!nocoleffects) data.frame("Col.2" = I.col(2, nrow(y))) else  # See below
     stop("at least one of 'noroweffects' and 'nocoleffects' must be FALSE")
-  colnames( .rcim.df ) <- paste(rprefix, "2", sep = "") # Overwrite "Row.2"
+
+
+  min.row.val <- rindex[1]  # == min(rindex) since it is sorted # Usually 2
+  min.col.val <- cindex[1]  # == min(cindex) since it is sorted # Usually 2
+  if (!noroweffects) {
+    colnames( .rcim.df ) <- paste(rprefix, as.character(min.row.val),  # "2",
+                                  sep = "")  # Overwrite "Row.2"
+  } else if (!nocoleffects) {
+    colnames( .rcim.df ) <- paste(cprefix, as.character(min.col.val),  # "2",
+                                  sep = "")  # Overwrite "Col.2"
+  }
 
 
 
   yn1 <- if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
-            paste("X2.", 1:nrow(y), sep = "")
+             paste(iprefix, 1:nrow(y), sep = "")
   warn.save <- options()$warn
-  options(warn = -3)    # Suppress the warnings (hopefully, temporarily)
+  options(warn = -3)  # Suppress the warnings (hopefully, temporarily)
   if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
-    yn1 <- paste("X2.", 1:nrow(y), sep = "")
+    yn1 <- paste(iprefix, 1:nrow(y), sep = "")
   options(warn = warn.save)
 
 
@@ -112,7 +152,11 @@
   assign(rprefix, factor(1:nrow(y)))
   modmat.row <- substitute(
            model.matrix( ~ .rprefix ), list( .rprefix = nrprefix ))
-  assign(cprefix, factor(1:ncol(y)))
+
+  LLL <- ifelse(special, M, ncol(y))
+  assign(cprefix, factor(1:LLL))
+
+
   modmat.col <- substitute(
            model.matrix( ~ .cprefix ), list( .cprefix = ncprefix ))
   modmat.row <- eval( modmat.row )
@@ -121,70 +165,80 @@
 
 
 
+  Hlist <-
+    if (has.intercept) {
+      list("(Intercept)" = matrix(1, LLL, 1))
+    } else {
+      temp <- list("Row.2" = matrix(1, LLL, 1))  # Overwrite this name:
+      names(temp) <- paste(rprefix, as.character(min.row.val), sep = "")
+      temp
+    }
 
 
-
-
-  Hlist <- list("(Intercept)" = matrix(1, ncol(y), 1))
-
   if (!noroweffects)
-    for(ii in 2:nrow(y)) {
-      Hlist[[paste(rprefix, ii, sep = "")]] <- matrix(1, ncol(y), 1)
-
-
+    for (ii in rindex) {
+         Hlist[[paste(rprefix, ii, sep = "")]] <- matrix(1, LLL, 1)
       .rcim.df[[paste(rprefix, ii, sep = "")]] <- modmat.row[, ii]
     }
 
 
   if (!nocoleffects)
-    for(ii in 2:ncol(y)) {
-
-
-      Hlist[[   paste(cprefix, ii, sep = "")]] <- modmat.col[, ii, drop = FALSE]
+    for (ii in cindex) {
+      temp6.mat <- modmat.col[, ii, drop = FALSE]
+         Hlist[[paste(cprefix, ii, sep = "")]] <- temp6.mat
       .rcim.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
     }
 
+
   if (Rank > 0) {
-    for(ii in 2:nrow(y)) {
-      Hlist[[yn1[ii]]] <- diag(ncol(y))
-      .rcim.df[[yn1[ii]]] <- eifun(ii, nrow(y))
+    for (ii in iindex) {
+
+      Hlist[[yn1[ii]]] <- diag(LLL)
+
+      .rcim.df[[yn1[ii]]] <- I.col(ii, nrow(y))
     }
   }
 
 
   dimnames(.rcim.df) <- list(if (length(dimnames(y)[[1]]))
-                             dimnames(y)[[1]] else
-                             as.character(1:nrow(y)),
-                             dimnames(.rcim.df)[[2]])
+                               dimnames(y)[[1]] else
+                               as.character(iindex),
+                             dimnames( .rcim.df )[[2]])
 
-  str1 <- paste("~ ", rprefix, "2", sep = "")
+  str1 <- paste(if (has.intercept) "~ 1 + " else "~ -1 + ", rprefix,
+                as.character(min.row.val),  # "2",
+                sep = "")
+  
 
   if (nrow(y) > 2) 
-    for(ii in 3:nrow(y)) {
-      str1 <- paste(str1, paste(rprefix, ii, sep = ""), sep = " + ")
-    }
+    str1 <- paste(str1,
+                  paste(rprefix, rindex[-1], sep = "", collapse = " + "),
+                  sep = " + ")
 
 
+    str1 <- paste(str1,
+                  paste(cprefix, cindex, sep = "", collapse = " + "),
+                  sep = " + ")
 
-  for(ii in 2:ncol(y)) {
-    str1 <- paste(str1, paste(cprefix, ii, sep = ""), sep = " + ")
-  }
 
 
-  str2 <- paste("y ", str1)
+
+  str2 <- paste("y", str1)
   if (Rank > 0) {
-    for(ii in 2:nrow(y))
-      str2 <- paste(str2, yn1[ii], sep = " + ")
+    str2 <- paste(str2,
+                  paste(yn1[iindex], sep = "", collapse = " + "),
+                  sep = " + ")
   }
 
 
-  controlfun <- if (Rank == 0) rrvglm.control else rrvglm.control
+
+
   controlfun <- if (Rank == 0)   vglm.control else rrvglm.control  # orig.
 
 
   mycontrol <- controlfun(Rank = Rank,
                           Index.corner = Index.corner,
-                          szero = szero, ...)
+                          str0 = str0, ...)
 
   if (mycontrol$trace) {
   }
@@ -212,34 +266,31 @@
 
   if (Musual > 1) {
     orig.Hlist <- Hlist
-    kmat1 <- rbind(1, 0)
-    kmat0 <- rbind(0, 1)
 
     kmat1 <- matrix(0, nrow = Musual, ncol = 1)
-    kmat1[which.lp, 1] <- 1
-    kmat0 <- matrix(1, nrow = Musual, ncol = 1)
-    kmat0[which.lp, 1] <- 0
+    kmat1[which.linpred, 1] <- 1
+    kmat0 <- (diag(Musual))[, -which.linpred, drop = FALSE]
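+    # kmat1 picks out the chosen linear predictor; kmat0 spans the remaining Musual - 1 predictors.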
 
     for (ii in 1:length(Hlist)) {
-      Hlist[[ii]] <- kronecker(Hlist[[ii]],
-                               kmat1)
+      Hlist[[ii]] <- kronecker(Hlist[[ii]], kmat1)
     }
-    Hlist[["(Intercept)"]] <-
-      cbind(Hlist[["(Intercept)"]],
-            kronecker(matrix(1, nrow(orig.Hlist[[1]]), 1),
-                      kmat0))
-
+    if (has.intercept)
+      Hlist[["(Intercept)"]] <- cbind(Hlist[["(Intercept)"]],
+                                      kronecker(matrix(1, ncoly, 1),
+                                                kmat0))
 
 
     if (mycontrol$trace) {
     }
-
   }
 
 
 
-  offset.matrix <- matrix(offset, nrow = nrow(y),
-                                 ncol = ncol(y) * Musual) # byrow = TRUE
+  offset.matrix <-
+    matrix(offset, nrow = nrow(y),
+                   ncol = M)  # byrow = TRUE
+
+
 
   answer <- if (Rank > 0) {
     if (is(object.save, "rrvglm")) object.save else
@@ -250,7 +301,7 @@
              weights = if (length(weights))
                        weights else rep(1, length = nrow(y)),
              ...,
-             control = mycontrol, data = .rcim.df)
+             control = mycontrol, data = .rcim.df )
   } else {
     if (is(object.save, "vglm")) object.save else
         vglm(as.formula(str2),
@@ -260,10 +311,10 @@
              weights = if (length(weights))
                        weights else rep(1, length = nrow(y)),
              ...,
-             control = mycontrol, data = .rcim.df)
+             control = mycontrol, data = .rcim.df )
   }
 
-  options(warn = warn.save)
+  options(warn = warn.save)  # Restore warnings back to prior state
 
 
   answer <- if (summary.arg) {
@@ -273,14 +324,14 @@
       summary(answer)
     }
   } else {
-    as(answer, ifelse(Rank > 0, "rcim", "rcim0"))
+    as(answer, ifelse(Rank > 0, "rcim",  "rcim0"))
   }
 
 
-  answer@misc$rbaseline <- rbaseline
-  answer@misc$cbaseline <- cbaseline
-  answer@misc$which.lp  <- which.lp
-  answer@misc$offset    <- offset.matrix
+  answer@misc$rbaseline     <- rbaseline
+  answer@misc$cbaseline     <- cbaseline
+  answer@misc$which.linpred <- which.linpred
+  answer@misc$offset        <- offset.matrix
 
   answer
 }
@@ -293,7 +344,7 @@
 
 
 summaryrcim <- function(object, ...) {
-    rcim(object, summary.arg = TRUE, ...)
+  rcim(depvar(object), summary.arg = TRUE, ...)
 }
 
 
@@ -394,19 +445,20 @@ setMethod("summary", "rcim",
 
 
  plotrcim0  <- function(object,
-     centered = TRUE, whichplots = c(1, 2),
+     centered = TRUE, which.plots = c(1, 2),
      hline0 = TRUE, hlty = "dashed", hcol = par()$col, hlwd = par()$lwd,
      rfirst = 1, cfirst = 1,
      rtype = "h", ctype = "h",
-     rcex.lab = 1, rcex.axis = 1, # rlabels = FALSE,
+     rcex.lab = 1, rcex.axis = 1,  # rlabels = FALSE,
      rtick = FALSE,
-     ccex.lab = 1, ccex.axis = 1, # clabels = FALSE,
+     ccex.lab = 1, ccex.axis = 1,  # clabels = FALSE,
      ctick = FALSE,
      rmain = "Row effects", rsub = "",
      rxlab = "", rylab = "Row effects",
      cmain = "Column effects", csub = "",
      cxlab = "", cylab = "Column effects",
      rcol = par()$col, ccol = par()$col,
+     no.warning = FALSE,
      ...) {
 
  
@@ -418,11 +470,13 @@ setMethod("summary", "rcim",
 
 
 
-  if (is.numeric(object@control$Rank) && object@control$Rank != 0)
+  if (!no.warning &&
+      is.numeric(object@control$Rank) &&
+      object@control$Rank != 0)
     warning("argument 'object' is not Rank-0")
 
 
-  n_lm  <- nrow(object@y)
+  n.lm  <- nrow(object@y)
 
   cobj <- coefficients(object)
 
@@ -430,8 +484,8 @@ setMethod("summary", "rcim",
                    object@control$Rank == 0) length(cobj) else
                length(object@control$colx1.index)
 
-  orig.roweff <- c("Row.1" = 0, cobj[(nparff + 1) : (nparff + n_lm - 1)])
-  orig.coleff <- c("Col.1" = 0, cobj[(nparff + n_lm) : upperbound])
+  orig.roweff <- c("Row.1" = 0, cobj[(nparff + 1) : (nparff + n.lm - 1)])
+  orig.coleff <- c("Col.1" = 0, cobj[(nparff + n.lm) : upperbound])
   last.r <- length(orig.roweff)
   last.c <- length(orig.coleff)
 
@@ -462,7 +516,7 @@ setMethod("summary", "rcim",
                           if (cfirst > 1) 1:(cfirst-1) else NULL)]
 
 
-  if (any(whichplots == 1, na.rm = TRUE)) {
+  if (any(which.plots == 1, na.rm = TRUE)) {
     plot(roweff, type = rtype, 
          axes = FALSE, col = rcol, main = rmain,
          sub  = rsub, xlab = rxlab, ylab = rylab, ...)
@@ -478,9 +532,9 @@ setMethod("summary", "rcim",
   }
 
 
-  if (any(whichplots == 2, na.rm = TRUE)) {
+  if (any(which.plots == 2, na.rm = TRUE)) {
     plot(coleff, type = ctype, 
-         axes = FALSE, col = ccol, main = cmain, # lwd = 2, xpd = FALSE,
+         axes = FALSE, col = ccol, main = cmain,  # lwd = 2, xpd = FALSE,
          sub  = csub, xlab = cxlab, ylab = cylab, ...)
 
     axis(1, at = 1:length(caxisl),
@@ -529,7 +583,11 @@ setMethod("plot", "rcim",
 
 
 
-moffset <- function(mat, roffset = 0, coffset = 0, postfix = "") {
+ moffset <-
+  function(mat, roffset = 0, coffset = 0, postfix = "",
+           rprefix = "Row.",
+           cprefix = "Col."
+          ) {
 
 
 
@@ -557,9 +615,9 @@ moffset <- function(mat, roffset = 0, coffset = 0, postfix = "") {
                            "column names of the response")
 
   if (!is.Numeric(ind1, positive = TRUE,
-                  integer.valued = TRUE, allowable.length = 1) ||
+                  integer.valued = TRUE, length.arg = 1) ||
       !is.Numeric(ind2, positive = TRUE,
-                  integer.valued = TRUE, allowable.length = 1))
+                  integer.valued = TRUE, length.arg = 1))
     stop("bad input for arguments 'roffset' and/or 'coffset'")
   if (ind1 > nrow(mat))
     stop("too large a value for argument 'roffset'")
@@ -575,11 +633,11 @@ moffset <- function(mat, roffset = 0, coffset = 0, postfix = "") {
 
   rownames.mat <- rownames(mat)
   if (length(rownames.mat) != nrow(mat))
-    rownames.mat <- paste("Row.", 1:nrow(mat), sep = "")
+    rownames.mat <- paste(rprefix, 1:nrow(mat), sep = "")
 
   colnames.mat <- colnames(mat)
   if (length(colnames.mat) != ncol(mat))
-    colnames.mat <- paste("Col.", 1:ncol(mat), sep = "")
+    colnames.mat <- paste(cprefix, 1:ncol(mat), sep = "")
 
 
   newrn <- if (roffset > 0)
@@ -637,11 +695,11 @@ Confint.rrnb <- function(rrnb2, level = 0.95) {
   delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat)
   delta2.hat <- 2 - a21.hat
 
-  SE.a21.hat <- sqrt(vcovrrvglm(rrnb2)["I(lv.mat)", "I(lv.mat)"])
+  SE.a21.hat <- sqrt(vcovrrvglm(rrnb2)["I(latvar.mat)", "I(latvar.mat)"])
 
 
   ci.a21 <- a21.hat +  c(-1, 1) * qnorm(1 - (1 - level)/2) * SE.a21.hat
-  (ci.delta2 <- 2 - rev(ci.a21)) # e.g., the 95 percent CI
+  (ci.delta2 <- 2 - rev(ci.a21))  # e.g., the 95 percent CI
 
   list(a21.hat    = a21.hat,
        beta11.hat = beta11.hat,
@@ -706,8 +764,9 @@ Confint.nb1 <- function(nb1, level = 0.95) {
 
 
 
-plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
-                    se.eachway = c(5, 5), # == c(LHS, RHS),
+
+plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
+                    se.eachway = c(5, 5),  # == c(LHS, RHS),
                     trace.arg = TRUE, ...) {
 
 
@@ -725,13 +784,13 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
 
   loglik.orig <- logLik(rrvglm2)
-  temp1 <- Confint.rrnb(rrvglm2) # zz
+  temp1 <- Confint.rrnb(rrvglm2)  # zz
 
   a21.hat <- (Coef(rrvglm2)@A)[2, 1]
   SE.a21.hat <- temp1$SE.a21.hat
 
 
-  SE.a21.hat <- sqrt(vcov(rrvglm2)["I(lv.mat)", "I(lv.mat)"])
+  SE.a21.hat <- sqrt(vcov(rrvglm2)["I(latvar.mat)", "I(latvar.mat)"])
 
 
   big.ci.a21 <- a21.hat +  c(-1, 1) * se.eachway * SE.a21.hat
@@ -744,7 +803,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
   a21.matrix <- if (alreadyComputed) rrvglm2@post$a21.matrix else
                 cbind(a21 = seq.a21, loglikelihood = 0)
-  prev.etastart <- predict(rrvglm2) # Halves the computing time
+  prev.etastart <- predict(rrvglm2)  # Halves the computing time
   funname <- "vglm"
   listcall <- as.list(rrvglm2@call)
 
@@ -752,7 +811,6 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
   if (!alreadyComputed)
   for (ii in 1:nseq.a21) {
     if (trace.arg)
-      print(ii)
 
        argslist <- vector("list", length(listcall) - 1)
        for (kay in 2:(length(listcall)))
@@ -779,10 +837,10 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
 
 
-  if (plot.it) {
+  if (show.plot) {
     plot(a21.matrix[ ,1], a21.matrix[ ,2], type = "l",
             col = "blue",
-            xlab = expression(a[21]), ylab = "Log-likelihood") # ...
+            xlab = expression(a[21]), ylab = "Log-likelihood")  # ...
 
     abline(v = (Hlist.orig[[length(Hlist.orig)]])[2, 1],
            col = "darkorange", lty = "dashed")
@@ -796,7 +854,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
     abline(v = a21.hat +  c(-1, 1) * 1.96 * SE.a21.hat,
            col = "gray50", lty = "dashed", lwd = 2.0)
 
-  } # End of (plot.it)
+  }  # End of (show.plot)
 
   rrvglm2@post <- list(a21.matrix = a21.matrix)
   invisible(rrvglm2)
@@ -817,7 +875,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
  Qvar <- function(object,
                   factorname = NULL,
-                  which.eta = 1,
+                  which.linpred = 1,
                   coef.indices = NULL,
                   labels = NULL, dispersion = NULL,
                   reference.name = "(reference)",
@@ -831,9 +889,9 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
 
 
-  if (!is.Numeric(which.eta, allowable.length = 1, integer.valued = TRUE,
-                  positive = TRUE))
-    stop("argument 'which.eta' must be a positive integer")
+  if (!is.Numeric(which.linpred, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE))
+    stop("argument 'which.linpred' must be a positive integer")
 
 
 
@@ -850,8 +908,9 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
     if (is.null(coef.indices)) {
 
       M <- npred(model)
-      if (M < which.eta)
-        stop("argument 'which.eta' must be a value from the set 1:", M)
+      if (M < which.linpred)
+        stop("argument 'which.linpred' must be a value from the set 1:",
+             M)
 
 
       newfactorname <- if (M > 1) {
@@ -859,7 +918,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
         Hk <- clist[[factorname]]
         Mdot <- ncol(Hk)
-        Hk.row <- Hk[which.eta, ]
+        Hk.row <- Hk[which.linpred, ]
         if (sum(Hk.row != 0) > 1)
           stop("cannot handle rows of constraint matrices with more ",
                "than one nonzero value")
@@ -869,7 +928,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
                  paste(ii, "th", sep = ""))
         if (sum(Hk.row != 0) == 0)
           stop("factor '", factorname, "' is not used the ",
-               foo(which.eta), " eta (linear predictor)")
+               foo(which.linpred), " eta (linear predictor)")
 
         row.index <- (1:Mdot)[Hk.row != 0]
 
@@ -952,7 +1011,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
     return(Recall(covmat,
                   factorname = factorname,
-                  which.eta = which.eta,
+                  which.linpred = which.linpred,
                   coef.indices = coef.indices.saved,
                   labels = labels,
                   dispersion = dispersion,
@@ -993,7 +1052,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
   attr(logAllvcov, "coef.indices")  <- coef.indices
   attr(logAllvcov, "factorname")    <- factorname
   attr(logAllvcov, "regularVar")    <- diag(covmat)
-  attr(logAllvcov, "which.eta")     <- which.eta
+  attr(logAllvcov, "which.linpred") <- which.linpred
 
   logAllvcov
 }
@@ -1004,6 +1063,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
 
 
 
+
 WorstErrors <- function(qv.object) {
   stop("20110729; does not work")
 
@@ -1057,14 +1117,15 @@ Print.qv <- function(x, ...) {
 
 
 
+
 summary.qvar <- function(object, ...) {
 
 
   relerrs <- 1 - sqrt(exp(residuals(object, type = "response")))
   diag(relerrs) <- NA
 
-    minErrSimple <- round(100 * min(relerrs, na.rm = TRUE), 1)
-    maxErrSimple <- round(100 * max(relerrs, na.rm = TRUE), 1)
+  minErrSimple <- round(100 * min(relerrs, na.rm = TRUE), 1)
+  maxErrSimple <- round(100 * max(relerrs, na.rm = TRUE), 1)
 
 
 
@@ -1105,26 +1166,26 @@ print.summary.qvar <- function(x, ...) {
   x$object <- NULL
 
 
-    if (length(cl <- object@call)) {
-        cat("Call:\n")
-        dput(cl)
-    }
+  if (length(cl <- object@call)) {
+      cat("Call:\n")
+      dput(cl)
+  }
 
 
-    facname <- c(object@extra$attributes.y$factorname)
-    if (length(facname))
-      cat("Factor name: ", facname, "\n")
+  facname <- c(object@extra$attributes.y$factorname)
+  if (length(facname))
+    cat("Factor name: ", facname, "\n")
 
 
-    if (length(object@dispersion))
-        cat("\nDispersion: ", object@dispersion, "\n\n")
+  if (length(object@dispersion))
+    cat("\nDispersion: ", object@dispersion, "\n\n")
 
   x <- as.data.frame(c(x))
   print.data.frame(x)
 
 
-        cat("\nWorst relative errors in SEs of simple contrasts (%): ",
-            minErrSimple, ", ", maxErrSimple, "\n")
+    cat("\nWorst relative errors in SEs of simple contrasts (%): ",
+        minErrSimple, ", ", maxErrSimple, "\n")
 
   invisible(x)
 }
@@ -1132,18 +1193,46 @@ print.summary.qvar <- function(x, ...) {
 
 
 
+qvar <- function(object, se = FALSE, ...) {
+
+
+
+  if (!inherits(object, "rcim") && !inherits(object, "rcim0"))
+    warning("argument 'object' should be an 'rcim' or 'rcim0' object. ",
+            "This call may fail.")
+
+  if (!(object@family@vfamily %in% c("uninormal", "normal1")))
+    warning("argument 'object' does not seem to have used ",
+            "a 'uninormal' family.")
+
+  if (!any(object@misc$link == "explink"))
+    warning("argument 'object' does not seem to have used ",
+            "a 'explink' link function.")
+
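+  # Odd columns of predict() hold the first linear predictor; half its diagonal gives the quasi-variances.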
+  quasiVar <- diag(predict(object)[, c(TRUE, FALSE)]) / 2
+  if (se) sqrt(quasiVar) else quasiVar
+}
+
+
+
+
+
+
+
+
 plotqvar <- function(object,
-                     intervalWidth = 2,
+                     interval.width = 2,
                      ylab = "Estimate",
-                     xlab = NULL, # x$factorname,
+                     xlab = NULL,  # x$factorname,
                      ylim = NULL,
                      main = "",
-                     levelNames = NULL,
+                     level.names = NULL,
                      conf.level = 0.95,
                      warn.ratio = 10,
                      border = "transparent",  # None
                      points.arg = TRUE,
                      length.arrows = 0.25, angle = 30,
+                     lwd = par()$lwd,
                      scol = par()$col,
                      slwd = par()$lwd,
                      slty = par()$lty,
@@ -1152,25 +1241,29 @@ plotqvar <- function(object,
 
 
 
-    if (!is.numeric(intervalWidth) &&
-        !is.numeric(conf.level))
-      stop("at least one of arguments 'intervalWidth' and 'conf.level' ",
-            "should be numeric")
+  if (!is.numeric(interval.width) &&
+      !is.numeric(conf.level))
+    stop("at least one of arguments 'interval.width' and 'conf.level' ",
+          "should be numeric")
 
 
 
 
 
-  if (!any("normal1" %in% object at family@vfamily))
+  if (!any("uninormal" %in% object at family@vfamily))
     stop("argument 'object' dos not appear to be a ",
-         "rcim(, normal1) object")
+         "rcim(, uninormal) object")
 
   estimates <- c(object@extra$attributes.y$estimates)
   if (!length(names(estimates)) &&
       is.matrix(object@extra$attributes.y$estimates))
-    names( estimates) <- rownames(object@extra$attributes.y$estimates)
-  if (!length(names(estimates)))
-    names( estimates) <- paste("Level", 1:length(estimates),
+    names(estimates) <- rownames(object@extra$attributes.y$estimates)
+
+
+  if (length(level.names) == length(estimates)) {
+    names(estimates) <- level.names
+  } else if (!length(names(estimates)))
+    names(estimates) <- paste("Level", 1:length(estimates),
                               sep = "")
 
 
@@ -1179,88 +1272,89 @@ plotqvar <- function(object,
   QuasiVar <- exp(diag(fitted(object))) / 2
   QuasiSE  <- sqrt(QuasiVar)
 
-    if (!is.numeric(estimates))
-      stop("Cannot plot, because there are no 'proper' ",
-            "parameter estimates")
-    if (!is.numeric(QuasiSE))
-      stop("Cannot plot, because there are no ",
-           "quasi standard errors")
+  if (!is.numeric(estimates))
+    stop("Cannot plot, because there are no 'proper' ",
+          "parameter estimates")
+  if (!is.numeric(QuasiSE))
+    stop("Cannot plot, because there are no ",
+         "quasi standard errors")
 
 
 
-    faclevels <- factor(names(estimates), levels = names(estimates))
+  faclevels <- factor(names(estimates), levels = names(estimates))
 
 
-    xvalues <- seq(along = faclevels)
-    tops  <- estimates + intervalWidth * QuasiSE
-    tails <- estimates - intervalWidth * QuasiSE
+  xvalues <- seq(along = faclevels)
+  tops  <- estimates + interval.width * QuasiSE
+  tails <- estimates - interval.width * QuasiSE
 
 
 
 
-    if (is.numeric(conf.level)) {
-      zedd <- abs(qnorm((1 - conf.level) / 2))
-      lsd.tops  <- estimates + zedd * QuasiSE / sqrt(2)
-      lsd.tails <- estimates - zedd * QuasiSE / sqrt(2)
-      if (max(QuasiSE) / min(QuasiSE) > warn.ratio)
-        warning("Quasi SEs appear to be quite different... the ",
-                "LSD intervals may not be very accurate")
-    } else {
-      lsd.tops  <- NULL
-      lsd.tails <- NULL
-    }
+  if (is.numeric(conf.level)) {
+    zedd <- abs(qnorm((1 - conf.level) / 2))
+    lsd.tops  <- estimates + zedd * QuasiSE / sqrt(2)
+    lsd.tails <- estimates - zedd * QuasiSE / sqrt(2)
+    if (max(QuasiSE) / min(QuasiSE) > warn.ratio)
+      warning("Quasi SEs appear to be quite different... the ",
+              "LSD intervals may not be very accurate")
+  } else {
+    lsd.tops  <- NULL
+    lsd.tails <- NULL
+  }
 
 
 
 
-    if (is.null(ylim))
-      ylim <- range(c(tails, tops, lsd.tails, lsd.tops),
-                    na.rm = TRUE)
+  if (is.null(ylim))
+    ylim <- range(c(tails, tops, lsd.tails, lsd.tops),
+                  na.rm = TRUE)
 
-    if (is.null(xlab))
-      xlab <- "Factor level"
+  if (is.null(xlab))
+    xlab <- "Factor level"
 
-    plot(faclevels, estimates,
-         border = border,
-         ylim = ylim, xlab = xlab, ylab = ylab,
-         main = main, ...)
+  plot(faclevels, estimates,
+       border = border,
+       ylim = ylim, xlab = xlab, ylab = ylab,
+       lwd = lwd,
+       main = main, ...)
 
 
-    if (points.arg)
-      points(estimates, ...)
+  if (points.arg)
+    points(estimates, ...)
 
 
-    if (is.numeric(intervalWidth)) {
-      segments(xvalues, tails, xvalues, tops,
-               col = scol, lty = slty, lwd = slwd)
-    }
+  if (is.numeric(interval.width)) {
+    segments(xvalues, tails, xvalues, tops,
+             col = scol, lty = slty, lwd = slwd)
+  }
 
 
-    if (is.numeric(conf.level)) {
-      arrows(xvalues, lsd.tails, xvalues, lsd.tops,
-             col = scol, lty = slty, lwd = slwd, code = 3,
-             length = length.arrows, angle = angle)
+  if (is.numeric(conf.level)) {
+    arrows(xvalues, lsd.tails, xvalues, lsd.tops,
+           col = scol, lty = slty, lwd = slwd, code = 3,
+           length = length.arrows, angle = angle)
 
-    }
+  }
 
 
 
 
   if (any(slotNames(object) == "post")) {
-    object@post$estimates  = estimates 
-    object@post$xvalues    = xvalues  
-    if (is.numeric(intervalWidth)) {
-      object at post$tails = tails
-      object at post$tops  = tops
+    object@post$estimates  <- estimates 
+    object@post$xvalues    <- xvalues  
+    if (is.numeric(interval.width)) {
+      object at post$tails <- tails
+      object at post$tops  <- tops
     }
     if (is.numeric(conf.level)) {
-      object@post$lsd.tails = lsd.tails
-      object@post$lsd.tops  = lsd.tops
+      object@post$lsd.tails <- lsd.tails
+      object@post$lsd.tops  <- lsd.tops
     }
   }
 
 
-    invisible(object)
+  invisible(object)
 }
 
 
@@ -1272,3 +1366,7 @@ plotqvar <- function(object,
 
 
 
+
+
+
+
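
As a usage sketch only (it is not part of the committed diff), the new qvar() accessor and the renamed plotqvar() arguments can be exercised roughly as follows; this assumes the Qvar() and rcim() machinery found elsewhere in VGAM, with MASS supplying the 'ships' data:

    library(VGAM)
    data("ships", package = "MASS")
    fit1 <- vglm(incidents ~ type + year + period, poissonff,
                 offset = log(service), data = ships, subset = (service > 0))
    ## Quasi-variances for the 'type' factor: an rcim() fit with
    ## uninormal("explink"), which is what qvar() checks for above.
    fit2 <- rcim(Qvar(fit1, "type"), uninormal("explink"), maxit = 99)
    qvar(fit2)               # quasi-variances (half the 'explink' predictors)
    qvar(fit2, se = TRUE)    # quasi-standard errors
    plotqvar(fit2, interval.width = 2, conf.level = 0.95)
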
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index c3269bf..e648772 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -9,291 +9,311 @@
 
 rcqo <- function(n, p, S,
                  Rank = 1,
-                 family = c("poisson", "negbinomial", "binomial-poisson", 
+                 family = c("poisson", "negbinomial", "binomial-poisson",
                             "Binomial-negbinomial", "ordinal-poisson",
-                            "Ordinal-negbinomial","gamma2"),
-                 EqualMaxima = FALSE,
-                 EqualTolerances = TRUE,
-                 ESOptima = FALSE,
-                 loabundance = if (EqualMaxima) hiabundance else 10,
-                 hiabundance = 100,
-                 sdlv = head(1.5/2^(0:3), Rank),
-                 sdOptima = ifelse(ESOptima, 1.5/Rank, 1) *
-                            ifelse(scalelv, sdlv, 1),
-                 sdTolerances = 0.25,
+                            "Ordinal-negbinomial", "gamma2"),
+                 eq.maxima = FALSE,
+                 eq.tolerances = TRUE,
+                 es.optima = FALSE,
+                 lo.abundance = if (eq.maxima) hi.abundance else 10,
+                 hi.abundance = 100,
+                 sd.latvar = head(1.5/2^(0:3), Rank),
+                 sd.optima = ifelse(es.optima, 1.5/Rank, 1) *
+                            ifelse(scale.latvar, sd.latvar, 1),
+                 sd.tolerances = 0.25,
                  Kvector = 1,
                  Shape = 1,
-                 sqrt = FALSE,
+                 sqrt.arg = FALSE,
                  Log = FALSE,
                  rhox = 0.5,
-                 breaks = 4, # ignored unless family="ordinal"
+                 breaks = 4,  # ignored unless family = "ordinal"
                  seed = NULL,
+                 optima1.arg = NULL,
                  Crow1positive = TRUE,
-                 xmat = NULL, # Can be input
-                 scalelv = TRUE 
-                 ) {
-    family = match.arg(family, c("poisson","negbinomial", "binomial-poisson",
-         "Binomial-negbinomial", "ordinal-poisson",
-         "Ordinal-negbinomial","gamma2"))[1]
-
-    if (!is.Numeric(n, integer.valued = TRUE,
-                    positive = TRUE, allowable.length = 1))
-      stop("bad input for argument 'n'")
-    if (!is.Numeric(p, integer.valued = TRUE,
-                    positive = TRUE, allowable.length = 1) ||
-        p < 1 + Rank)
-      stop("bad input for argument 'p'")
-    if (!is.Numeric(S, integer.valued = TRUE,
-                    positive = TRUE, allowable.length = 1))
-      stop("bad input for argument 'S'")
-    if (!is.Numeric(Rank, integer.valued = TRUE,
-                    positive = TRUE, allowable.length = 1) ||
-        Rank > 4)
-      stop("bad input for argument 'Rank'")
-    if (!is.Numeric(Kvector, positive = TRUE))
-      stop("bad input for argument 'Kvector'")
-    if (!is.Numeric(rhox) || abs(rhox) >= 1)
-      stop("bad input for argument 'rhox'")
-    if (length(seed) &&
-        !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
-      stop("bad input for argument 'seed'")
-    if (!is.logical(EqualTolerances) ||
-        length(EqualTolerances) > 1)
-      stop("bad input for argument 'EqualTolerances)'")
-    if (!is.logical(sqrt) || length(sqrt)>1)
-      stop("bad input for argument 'sqrt)'")
-    if (family != "negbinomial" && sqrt)
-        warning("argument 'sqrt' is used only with family='negbinomial'")
-    if (!EqualTolerances && !is.Numeric(sdTolerances, positive = TRUE))
-        stop("bad input for argument 'sdTolerances'")
-    if (!is.Numeric(loabundance, positive = TRUE))
-        stop("bad input for argument 'loabundance'")
-    if (!is.Numeric(sdlv, positive = TRUE))
-        stop("bad input for argument 'sdlv'")
-    if (!is.Numeric(sdOptima, positive = TRUE))
-        stop("bad input for argument 'sdOptima'")
-    if (EqualMaxima && loabundance != hiabundance)
-        stop("arguments 'loabundance' and 'hiabundance' must ",
-                   "be equal when 'EqualTolerances = TRUE'")
-    if (any(loabundance > hiabundance))
-        stop("loabundance > hiabundance is not allowed")
-    if (!is.logical(Crow1positive)) {
-        stop("bad input for argument 'Crow1positive)'")
-    } else {
-        Crow1positive <- rep(Crow1positive, len=Rank)
+                 xmat = NULL,  # Can be input
+                 scale.latvar = TRUE) {
+  family <- match.arg(family,
+                      c("poisson", "negbinomial", "binomial-poisson",
+                        "Binomial-negbinomial", "ordinal-poisson",
+                        "Ordinal-negbinomial", "gamma2"))[1]
+
+  if (!is.Numeric(n, integer.valued = TRUE,
+                  positive = TRUE, length.arg = 1))
+    stop("bad input for argument 'n'")
+  if (!is.Numeric(p, integer.valued = TRUE,
+                  positive = TRUE, length.arg = 1) ||
+      p < 1 + Rank)
+    stop("bad input for argument 'p'")
+  if (!is.Numeric(S, integer.valued = TRUE,
+                  positive = TRUE, length.arg = 1))
+    stop("bad input for argument 'S'")
+  if (!is.Numeric(Rank, integer.valued = TRUE,
+                  positive = TRUE, length.arg = 1) ||
+      Rank > 4)
+    stop("bad input for argument 'Rank'")
+  if (!is.Numeric(Kvector, positive = TRUE))
+    stop("bad input for argument 'Kvector'")
+  if (!is.Numeric(rhox) || abs(rhox) >= 1)
+    stop("bad input for argument 'rhox'")
+  if (length(seed) &&
+      !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'seed'")
+  if (!is.logical(eq.tolerances) ||
+      length(eq.tolerances) > 1)
+    stop("bad input for argument 'eq.tolerances)'")
+  if (!is.logical(sqrt.arg) || length(sqrt.arg) > 1)
+    stop("bad input for argument 'sqrt.arg)'")
+  if (family != "negbinomial" && sqrt.arg)
+    warning("argument 'sqrt.arg' is used only with family='negbinomial'")
+  if (!eq.tolerances && !is.Numeric(sd.tolerances, positive = TRUE))
+    stop("bad input for argument 'sd.tolerances'")
+  if (!is.Numeric(lo.abundance, positive = TRUE))
+    stop("bad input for argument 'lo.abundance'")
+  if (!is.Numeric(sd.latvar, positive = TRUE))
+    stop("bad input for argument 'sd.latvar'")
+  if (!is.Numeric(sd.optima, positive = TRUE))
+    stop("bad input for argument 'sd.optima'")
+  if (eq.maxima && lo.abundance != hi.abundance)
+    stop("arguments 'lo.abundance' and 'hi.abundance' must ",
+         "be equal when 'eq.tolerances = TRUE'")
+  if (any(lo.abundance > hi.abundance))
+    stop("lo.abundance > hi.abundance is not allowed")
+  if (!is.logical(Crow1positive)) {
+    stop("bad input for argument 'Crow1positive)'")
+  } else {
+    Crow1positive <- rep(Crow1positive, len = Rank)
+  }
+  Shape <- rep(Shape, len = S)
+  sd.latvar <- rep(sd.latvar, len = Rank)
+  sd.optima <- rep(sd.optima, len = Rank)
+  sd.tolerances <- rep(sd.tolerances, len = Rank)
+  AA <- sd.optima / 3^0.5
+  if (Rank > 1 && any(diff(sd.latvar) > 0))
+   stop("argument 'sd.latvar)' must be a vector with decreasing values")
+
+  if (FALSE)
+  change.seed.expression <- expression({
+    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) {
+      runif(1)  # initialize the RNG if necessary
     }
-    Shape <- rep(Shape, len=S)
-    sdlv <- rep(sdlv, len=Rank)
-    sdOptima <- rep(sdOptima, len=Rank)
-    sdTolerances <- rep(sdTolerances, len=Rank)
-    AA <- sdOptima / 3^0.5
-    if (Rank > 1 && any(diff(sdlv) > 0))
-     stop("argument 'sdlv)' must be a vector with decreasing values")
-
-    if (FALSE)
-    change.seed.expression <- expression({
-        if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) {
-            runif(1)                       # initialize the RNG if necessary
-        }
-        if (is.null(seed)) {
-            RNGstate <- get(".Random.seed", envir = .GlobalEnv)
-        } else {
-            R.seed <- get(".Random.seed", envir = .GlobalEnv)
-            set.seed(seed)
-            RNGstate <- structure(seed, kind = as.list(RNGkind()))
-            on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
-        }
-    })
-    change.seed.expression <- expression({
-        if (length(seed)) set.seed(seed)
-    })
-    eval(change.seed.expression)
-
-    V <- matrix(rhox, p-1, p-1)
-    diag(V) <- 1
-    L <- chol(V)
-    if (length(xmat)) {
-        xnames <- colnames(xmat)
+    if (is.null(seed)) {
+      RNGstate <- get(".Random.seed", envir = .GlobalEnv)
     } else {
-        eval(change.seed.expression)
-        xmat <- matrix(rnorm(n*(p-1)), n, p-1) %*% L
-        xmat <- scale(xmat, center = TRUE)
-        xnames <- paste("x", 2:p, sep="")
-        dimnames(xmat) <- list(as.character(1:n), xnames)
+      R.seed <- get(".Random.seed", envir = .GlobalEnv)
+      set.seed(seed)
+      RNGstate <- structure(seed, kind = as.list(RNGkind()))
+      on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
     }
+  })
+  change.seed.expression <- expression({
+      if (length(seed)) set.seed(seed)
+  })
+  eval(change.seed.expression)
+
+  V <- matrix(rhox, p-1, p-1)
+  diag(V) <- 1
+  L <- chol(V)
+  if (length(xmat)) {
+    xnames <- colnames(xmat)
+  } else {
     eval(change.seed.expression)
-    ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
-    lvmat <- cbind(xmat %*% ccoefs)
-    if (Rank > 1) {
-        Rmat <- chol(var(lvmat))
-        iRmat <- solve(Rmat)
-        lvmat <- lvmat %*% iRmat  # var(lvmat) == diag(Rank)
-        ccoefs <- ccoefs %*% iRmat
+    xmat <- matrix(rnorm(n*(p-1)), n, p-1) %*% L
+    xmat <- scale(xmat, center = TRUE)
+    xnames <- paste("x", 2:p, sep = "")
+    dimnames(xmat) <- list(as.character(1:n), xnames)
+  }
+  eval(change.seed.expression)
+  ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
+  latvarmat <- cbind(xmat %*% ccoefs)
+  if (Rank > 1) {
+    Rmat <- chol(var(latvarmat))
+    iRmat <- solve(Rmat)
+    latvarmat <- latvarmat %*% iRmat  # var(latvarmat) == diag(Rank)
+    ccoefs <- ccoefs %*% iRmat
+  }
+  for (r in 1:Rank)
+    if (( Crow1positive[r] && ccoefs[1, r] < 0) ||
+        (!Crow1positive[r] && ccoefs[1, r] > 0)) {
+      ccoefs[ , r] <- -ccoefs[ , r]
+      latvarmat[ , r] <- -latvarmat[ , r]
     }
-    for(r in 1:Rank)
-        if (( Crow1positive[r] && ccoefs[1,r] < 0) ||
-           (!Crow1positive[r] && ccoefs[1,r] > 0)) {
-                ccoefs[,r] <- -ccoefs[,r]
-                lvmat[,r] <- -lvmat[,r]
-        }
-
-    if (scalelv) {
-        for(r in 1:Rank) {
-            sdlvr <- sd(lvmat[,r])
-            lvmat[,r] <- lvmat[,r] * sdlv[r] / sdlvr
-            ccoefs[,r]  <- ccoefs[,r] * sdlv[r] / sdlvr
-        }
-    } else {
-        sdlvr <- NULL
-        for(r in 1:Rank) {
-            sdlvr <- c(sdlvr, sd(lvmat[,r]))
-        }
+
+  if (scale.latvar) {
+    for (r in 1:Rank) {
+      sd.latvarr <- sd(latvarmat[, r])
+      latvarmat[, r] <- latvarmat[, r] * sd.latvar[r] / sd.latvarr
+      ccoefs[, r]  <- ccoefs[, r] * sd.latvar[r] / sd.latvarr
     }
-    if (ESOptima) {
-      if (!is.Numeric(S^(1/Rank), integer.valued = TRUE) ||
-          S^(1/Rank) < 2)
-          stop("S^(1/Rank) must be an integer greater or equal to 2")
-      if (Rank == 1) {
-        optima <- matrix(as.numeric(NA), S, Rank)
-        for(r in 1:Rank) {
-          optima[,r] <- seq(-AA, AA, len=S^(1/Rank))
-        }
-      } else if (Rank == 2) {
-        optima <- expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
-                             lv2=seq(-AA[2], AA[2], len=S^(1/Rank)))
-      } else if (Rank == 3) {
-        optima <- expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
-                             lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
-                             lv3=seq(-AA[3], AA[3], len=S^(1/Rank)))
-      } else {
-        optima <- expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
-                             lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
-                             lv3=seq(-AA[3], AA[3], len=S^(1/Rank)),
-                             lv4=seq(-AA[4], AA[4], len=S^(1/Rank)))
+  } else {
+    sd.latvarr <- NULL
+    for (r in 1:Rank) {
+      sd.latvarr <- c(sd.latvarr, sd(latvarmat[, r]))
+    }
+  }
+  if (es.optima) {
+    if (!is.Numeric(S^(1/Rank), integer.valued = TRUE) ||
+        S^(1/Rank) < 2)
+      stop("S^(1/Rank) must be an integer greater or equal to 2")
+    if (Rank == 1) {
+      optima <- matrix(as.numeric(NA), S, Rank)
+      for (r in 1:Rank) {
+        optima[, r] <- seq(-AA, AA, len = S^(1/Rank))
       }
-      if (Rank > 1)
-        optima <- matrix(unlist(optima), S, Rank) # Make sure its a matrix
+    } else if (Rank == 2) {
+      optima <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
+                            latvar2 = seq(-AA[2], AA[2], len = S^(1/Rank)))
+    } else if (Rank == 3) {
+      optima <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
+                            latvar2 = seq(-AA[2], AA[2], len = S^(1/Rank)),
+                            latvar3 = seq(-AA[3], AA[3], len = S^(1/Rank)))
     } else {
-        optima <- matrix(1, S, Rank)
-        eval(change.seed.expression)
-        for(r in 1:Rank) {
-            optima[,r] <- rnorm(n=S, sd=sdOptima[r])
-        }
+      optima <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
+                            latvar2 = seq(-AA[2], AA[2], len = S^(1/Rank)),
+                            latvar3 = seq(-AA[3], AA[3], len = S^(1/Rank)),
+                            latvar4 = seq(-AA[4], AA[4], len = S^(1/Rank)))
     }
-    for(r in 1:Rank)
-        optima[,r] <- optima[,r] * sdOptima[r] / sd(optima[,r])
-
-    ynames <- paste("y", 1:S, sep="")
-    Kvector <- rep(Kvector, len=S)
-    names(Kvector) <- ynames
-    lvnames <- if (Rank==1) "lv" else paste("lv", 1:Rank, sep="")
-    Tols <- if (EqualTolerances) matrix(1, S, Rank) else {
-               eval(change.seed.expression)
-               temp <- matrix(1, S, Rank)
-               if (S > 1)
-               for(r in 1:Rank) {
-                   temp[-1,r] <- rnorm(S-1, mean=1, sd=sdTolerances[r])
-                   if (any(temp[,r] <= 0)) stop("negative tolerances!")
-                   temp[,r] <- temp[,r]^2 # Tolerance matrix  = var-cov matrix)
-               }
-               temp
-           }
-
-    dimnames(Tols) <- list(ynames, lvnames)
-    dimnames(ccoefs) <- list(xnames, lvnames)
-    dimnames(optima) <- list(ynames, lvnames)
-    loeta <- log(loabundance)  # May be a vector
-    hieta <- log(hiabundance)
+    if (Rank > 1)
+      optima <- matrix(unlist(optima), S, Rank)  # Make sure it is a matrix
+  } else {
+    optima <- matrix(1, S, Rank)
     eval(change.seed.expression)
-    logmaxima <- runif(S, min=loeta, max=hieta)  # loeta and hieta may be vector
-    names(logmaxima) <- ynames
-    etamat <- matrix(logmaxima, n, S, byrow = TRUE)
-    for(jay in 1:S) {
-        optmat <- matrix(optima[jay,], nrow=n, ncol=Rank, byrow = TRUE)
-        tolmat <- matrix(Tols[jay,], nrow=n, ncol=Rank, byrow = TRUE)
-        temp <- cbind((lvmat - optmat) / tolmat)
-        for(r in 1:Rank)
-            etamat[,jay]=etamat[,jay]-0.5*(lvmat[,r] - optmat[jay,r])*temp[,r]
+    for (r in 1:Rank) {
+      optima[, r] <- rnorm(n = S, sd = sd.optima[r])
     }
+  }
+  for (r in 1:Rank)
+    optima[, r] <- optima[, r] * sd.optima[r] / sd(optima[, r])
+
+
+  if (length(optima1.arg) && Rank == 1)
+  for (r in 1:Rank)
+    optima[, r] <- optima1.arg
+
+
 
-    rootdist <- switch(family,
-        "poisson"=1, "binomial-poisson"=1, "ordinal-poisson"=1,
-        "negbinomial"=2, "Binomial-negbinomial"=2, "Ordinal-negbinomial"=2,
-        "gamma2"=3)
+  ynames <- paste("y", 1:S, sep = "")
+  Kvector <- rep(Kvector, len = S)
+  names(Kvector) <- ynames
+  latvarnames <- if (Rank == 1) "latvar" else paste("latvar", 1:Rank, sep = "")
+  Tols <- if (eq.tolerances) {
+    matrix(1, S, Rank)
+  } else {
     eval(change.seed.expression)
-    if (rootdist == 1) {
-        ymat <- matrix(rpois(n*S, lambda = exp(etamat)), n, S)
-    } else if (rootdist == 2) {
-        mKvector <- matrix(Kvector, n, S, byrow = TRUE)
-        ymat <- matrix(rnbinom(n=n*S, mu=exp(etamat), size=mKvector),n,S)
-        if (sqrt) ymat <- ymat^0.5
-    } else if (rootdist == 3) {
-        Shape <- matrix(Shape, n, S, byrow = TRUE)
-        ymat <- matrix(rgamma(n*S, shape=Shape, scale=exp(etamat)/Shape),n,S)
-        if (Log) ymat <- log(ymat)
-    } else stop("argument 'rootdist' unmatched")
-
-    tmp1 <- NULL
-    if (any(family == c("ordinal-poisson","Ordinal-negbinomial"))) {
-        tmp1 <- cut(c(ymat), breaks=breaks, labels=NULL) #To get attributes(tmp1)
-        ymat <- cut(c(ymat), breaks=breaks, labels=FALSE)
-        dim(ymat) <- c(n,S)
+    temp <- matrix(1, S, Rank)
+    if (S > 1)
+      for (r in 1:Rank) {
+        temp[-1, r] <- rnorm(S-1, mean = 1, sd = sd.tolerances[r])
+        if (any(temp[, r] <= 0))
+          stop("negative tolerances!")
+        temp[, r] <- temp[, r]^2  # Tolerance matrix = var-cov matrix
+      }
+    temp
+  }
+
+  dimnames(Tols)   <- list(ynames, latvarnames)
+  dimnames(ccoefs) <- list(xnames, latvarnames)
+  dimnames(optima) <- list(ynames, latvarnames)
+  loeta <- log(lo.abundance)   # May be a vector
+  hieta <- log(hi.abundance)
+  eval(change.seed.expression)
+  logmaxima <- runif(S, min = loeta, max = hieta)
+  names(logmaxima) <- ynames
+  etamat <- matrix(logmaxima, n, S, byrow = TRUE)
+  for (jay in 1:S) {
+      optmat <- matrix(optima[jay, ], nrow = n, ncol = Rank, byrow = TRUE)
+      tolmat <- matrix(  Tols[jay, ], nrow = n, ncol = Rank, byrow = TRUE)
+      temp <- cbind((latvarmat - optmat) / tolmat)
+      for (r in 1:Rank)
+        etamat[, jay] <- etamat[, jay] -
+                         0.5 * (latvarmat[, r] - optmat[jay, r]) * temp[, r]
+  }
+
+  rootdist <- switch(family,
+    "poisson" = 1, "binomial-poisson" = 1, "ordinal-poisson" = 1,
+    "negbinomial" = 2, "Binomial-negbinomial" = 2,
+    "Ordinal-negbinomial" = 2,
+    "gamma2" = 3)
+  eval(change.seed.expression)
+  if (rootdist == 1) {
+    ymat <- matrix(rpois(n * S, lambda = exp(etamat)), n, S)
+  } else if (rootdist == 2) {
+    mKvector <- matrix(Kvector, n, S, byrow = TRUE)
+    ymat <- matrix(rnbinom(n = n * S, mu = exp(etamat), size = mKvector),
+                   n, S)
+    if (sqrt.arg)
+      ymat <- ymat^0.5
+  } else if (rootdist == 3) {
+    Shape <- matrix(Shape, n, S, byrow = TRUE)
+    ymat <- matrix(rgamma(n * S, shape = Shape,
+                                 scale = exp(etamat) / Shape),
+                   n, S)
+    if (Log) ymat <- log(ymat)
+  } else {
+    stop("argument 'rootdist' unmatched")
+  }
+
+  tmp1 <- NULL
+  if (any(family == c("ordinal-poisson", "Ordinal-negbinomial"))) {
+    tmp1 <- cut(c(ymat), breaks = breaks, labels = NULL)
+    ymat <- cut(c(ymat), breaks = breaks, labels = FALSE)
+    dim(ymat) <- c(n,S)
     }
-    if (any(family == c("binomial-poisson","Binomial-negbinomial")))
-        ymat <- 0 + (ymat > 0)
+    if (any(family == c("binomial-poisson", "Binomial-negbinomial")))
+      ymat <- 0 + (ymat > 0)
 
     myform <- as.formula(paste(paste("cbind(",
-             paste(paste("y",1:S,sep=""), collapse=","),
-             ") ~ ", sep=""),
-             paste(paste("x",2:p,sep=""), collapse="+"), sep=""))
-
-    dimnames(ymat) <- list(as.character(1:n), ynames)
-    ans <- data.frame(xmat, ymat)
-    attr(ans, "ccoefficients") <- ccoefs
-    attr(ans, "Crow1positive") <- Crow1positive
-    attr(ans, "family") <- family
-    attr(ans, "formula") <- myform # Useful for running cqo() on the data
-    attr(ans, "Rank") <- Rank
-    attr(ans, "family") <- family
-    attr(ans, "Kvector") <- Kvector
-    attr(ans, "logmaxima") <- logmaxima
-    attr(ans, "loabundance") <- loabundance
-    attr(ans, "hiabundance") <- hiabundance
-    attr(ans, "optima") <- optima
-    attr(ans, "Log") <- Log
-    attr(ans, "lv") <- lvmat
-    attr(ans, "eta") <- etamat
-    attr(ans, "EqualTolerances") <- EqualTolerances
-    attr(ans, "EqualMaxima") <- EqualMaxima || all(loabundance == hiabundance)
-    attr(ans, "ESOptima") <- ESOptima
-    attr(ans, "seed") <- seed # RNGstate
-    attr(ans, "sdTolerances") <- sdTolerances
-    attr(ans, "sdlv") <-  if (scalelv) sdlv else sdlvr
-    attr(ans, "sdOptima") <- sdOptima
-    attr(ans, "Shape") <- Shape
-    attr(ans, "sqrt") <- sqrt
-    attr(ans, "tolerances") <- Tols^0.5  # Like a standard deviation
-        attr(ans, "breaks") <- if (length(tmp1)) attributes(tmp1) else breaks
-    ans
+             paste(paste("y", 1:S, sep = ""), collapse = ", "),
+             ") ~ ", sep = ""),
+             paste(paste("x", 2:p, sep = ""), collapse = "+"), sep = ""))
+
+  dimnames(ymat) <- list(as.character(1:n), ynames)
+  ans <- data.frame(xmat, ymat)
+  attr(ans, "concoefficients") <- ccoefs
+  attr(ans, "Crow1positive") <- Crow1positive
+  attr(ans, "family") <- family
+  attr(ans, "formula") <- myform # Useful for running cqo() on the data
+  attr(ans, "Rank") <- Rank
+  attr(ans, "family") <- family
+  attr(ans, "Kvector") <- Kvector
+  attr(ans, "logmaxima") <- logmaxima
+  attr(ans, "lo.abundance") <- lo.abundance
+  attr(ans, "hi.abundance") <- hi.abundance
+  attr(ans, "optima") <- optima
+  attr(ans, "Log") <- Log
+  attr(ans, "latvar") <- latvarmat
+  attr(ans, "eta") <- etamat
+  attr(ans, "eq.tolerances") <- eq.tolerances
+  attr(ans, "eq.maxima") <- eq.maxima ||
+                              all(lo.abundance == hi.abundance)
+  attr(ans, "es.optima") <- es.optima
+  attr(ans, "seed") <- seed # RNGstate
+  attr(ans, "sd.tolerances") <- sd.tolerances
+  attr(ans, "sd.latvar") <- if (scale.latvar) sd.latvar else sd.latvarr
+  attr(ans, "sd.optima") <- sd.optima
+  attr(ans, "Shape") <- Shape
+  attr(ans, "sqrt") <- sqrt.arg
+  attr(ans, "tolerances") <- Tols^0.5  # Like a standard deviation
+  attr(ans, "breaks") <- if (length(tmp1)) attributes(tmp1) else breaks
+  ans
 }
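
A small sketch (again editorial, not part of the diff) of the renamed rcqo() interface, using the new argument and attribute spellings introduced above:

    ## VGAM attached as before; p counts the intercept, so x2,...,x5 are generated.
    set.seed(123)
    mydata <- rcqo(n = 100, p = 5, S = 5, family = "poisson",
                   es.optima = TRUE, eq.maxima = TRUE)
    attr(mydata, "concoefficients")  # true constrained (canonical) coefficients
    attr(mydata, "optima")           # species optima on the latent variable
    attr(mydata, "formula")          # ready to pass to cqo() with this data frame
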
 
 
 
 
  if (FALSE)
-dcqo <- function(x, p, S,
-                 family = c("poisson", "binomial", "negbinomial", "ordinal"),
-                 Rank = 1,
-                 EqualTolerances = TRUE,
-                 EqualMaxima = FALSE,
-                 EquallySpacedOptima = FALSE,
-                 loabundance = if (EqualMaxima) 100 else 10,
-                 hiabundance = 100,
-                 sdTolerances = 1,
-                 sdOptima = 1,
-                 nlevels = 4, # ignored unless family="ordinal"
-                 seed = NULL
-                 ) {
+dcqo <-
+  function(x, p, S,
+           family = c("poisson", "binomial", "negbinomial", "ordinal"),
+           Rank = 1,
+           eq.tolerances = TRUE,
+           eq.maxima = FALSE,
+           EquallySpacedOptima = FALSE,
+           lo.abundance = if (eq.maxima) 100 else 10,
+           hi.abundance = 100,
+           sd.tolerances = 1,
+           sd.optima = 1,
+           nlevels = 4,  # ignored unless family = "ordinal"
+           seed = NULL) {
  warning("12/6/06; needs a lot of work based on rcqo()")
 
 
@@ -304,44 +324,45 @@ dcqo <- function(x, p, S,
 
 
   if (!is.Numeric(p, integer.valued = TRUE,
-                  positive = TRUE, allowable.length = 1) ||
+                  positive = TRUE, length.arg = 1) ||
       p < 2)
     stop("bad input for argument 'p'")
   if (!is.Numeric(S, integer.valued = TRUE,
-                  positive = TRUE, allowable.length = 1))
+                  positive = TRUE, length.arg = 1))
     stop("bad input for argument 'S'")
   if (!is.Numeric(Rank, integer.valued = TRUE,
-                  positive = TRUE, allowable.length = 1))
+                  positive = TRUE, length.arg = 1))
     stop("bad input for argument 'Rank'")
   if (length(seed) &&
       !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'seed'")
-  if (!is.logical(EqualTolerances) || length(EqualTolerances)>1)
-    stop("bad input for argument 'EqualTolerances)'")
-  if (EqualMaxima && loabundance != hiabundance)
-    stop("'loabundance' and 'hiabundance' must ",
-         "be equal when 'EqualTolerances = TRUE'")
+  if (!is.logical(eq.tolerances) || length(eq.tolerances)>1)
+    stop("bad input for argument 'eq.tolerances)'")
+  if (eq.maxima && lo.abundance != hi.abundance)
+    stop("'lo.abundance' and 'hi.abundance' must ",
+         "be equal when 'eq.tolerances = TRUE'")
   if (length(seed)) set.seed(seed)
 
-  xmat <- matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
-                paste("x", 2:p, sep="")))
+  xmat <- matrix(rnorm(n*(p-1)), n, p-1,
+                 dimnames = list(as.character(1:n),
+                                 paste("x", 2:p, sep = "")))
   ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
-  lvmat <- xmat %*% ccoefs
-  optima <- matrix(rnorm(Rank*S, sd=sdOptima), S, Rank)
-  Tols <- if (EqualTolerances) matrix(1, S, Rank) else
-         matrix(rnorm(Rank*S, mean=1, sd=1), S, Rank)
-  loeta <- log(loabundance)
-  hieta <- log(hiabundance)
-  logmaxima <- runif(S, min=loeta, max=hieta)
-
-  etamat <- matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
-  for(jay in 1:S) {
-    optmat <- matrix(optima[jay,], n, Rank, byrow = TRUE)
-    tolmat <- matrix(Tols[jay,], n, Rank, byrow = TRUE)
-    temp <- cbind((lvmat - optmat) * tolmat)
-    for(r in 1:Rank)
-        etamat[,jay] <- etamat[,jay] - 0.5 * temp[,r] *
-                       (lvmat[,r] - optmat[jay,r])
+  latvarmat <- xmat %*% ccoefs
+  optima <- matrix(rnorm(Rank*S, sd = sd.optima), S, Rank)
+  Tols <- if (eq.tolerances) matrix(1, S, Rank) else
+         matrix(rnorm(Rank*S, mean = 1, sd = 1), S, Rank)
+  loeta <- log(lo.abundance)
+  hieta <- log(hi.abundance)
+  logmaxima <- runif(S, min = loeta, max = hieta)
+
+  etamat <- matrix(logmaxima, n, S, byrow = TRUE)
+  for (jay in 1:S) {
+    optmat <- matrix(optima[jay, ], n, Rank, byrow = TRUE)
+    tolmat <- matrix(  Tols[jay, ], n, Rank, byrow = TRUE)
+    temp <- cbind((latvarmat - optmat) * tolmat)
+    for (r in 1:Rank)
+      etamat[, jay] <- etamat[, jay] - 0.5 * temp[, r] *
+                       (latvarmat[, r] - optmat[jay, r])
   }
 
   ymat <- if (family == "negbinomial") {
@@ -349,14 +370,15 @@ dcqo <- function(x, p, S,
 
 
   } else {
-     matrix(rpois(n*S, lambda = exp(etamat)), n, S)
+     matrix(rpois(n * S, lambda = exp(etamat)), n, S)
   }
   if (family == "binomial")
     ymat <- 0 + (ymat > 0)
 
-  dimnames(ymat) <- list(as.character(1:n), paste("y", 1:S, sep=""))
+  dimnames(ymat) <- list(as.character(1:n),
+                         paste("y", 1:S, sep = ""))
   ans <- data.frame(xmat, ymat)
-  attr(ans, "ccoefficients") <- ccoefs
+  attr(ans, "concoefficients") <- ccoefs
   attr(ans, "family") <- family
   ans
 }
@@ -366,13 +388,13 @@ dcqo <- function(x, p, S,
 
 
 getInitVals <- function(gvals, llfun, ...) {
-    LLFUN <- match.fun(llfun)
-    ff <- function(myx, ...) LLFUN(myx, ...)
-    objFun <- gvals
-    for(ii in 1:length(gvals))
-        objFun[ii] <- ff(myx=gvals[ii], ...) 
-    try.this <- gvals[objFun == max(objFun)]  # Usually scalar, maybe vector
-    try.this
+  LLFUN <- match.fun(llfun)
+  ff <- function(myx, ...) LLFUN(myx, ...)
+  objFun <- gvals
+  for (ii in 1:length(gvals))
+    objFun[ii] <- ff(myx = gvals[ii], ...) 
+  try.this <- gvals[objFun == max(objFun)]  # Usually scalar, maybe vector
+  try.this
 }
 
 
@@ -384,14 +406,16 @@ getInitVals <- function(gvals, llfun, ...) {
 
 
 campp <- function(q, size, prob, mu) {
-    if (!missing(mu)) {
-        if (!missing(prob))
-            stop("'prob' and 'mu' both specified")
-        prob <- size/(size + mu)
-    }
-    K <- (1/3) * ((9*q+8)/(q+1) - ((9*size-1)/size) * (mu/(q+1))^(1/3)) /
-        sqrt( (1/size) * (mu/(q+1))^(2/3) + 1 / (q+1)) # Note the +, not -
-    pnorm(K)
+  if (!missing(mu)) {
+    if (!missing(prob))
+      stop("arguments 'prob' and 'mu' both specified")
+    prob <- size/(size + mu)
+  }
+  K <- (1/3) * ((9*q+8) / (q+1) -
+       ((9*size-1)/size) *
+       (mu/(q+1))^(1/3)) / sqrt( (1/size) *
+                   (mu/(q+1))^(2/3) + 1 / (q+1))  # Note the +, not -
+  pnorm(K)
 }
 
 
diff --git a/R/family.robust.R b/R/family.robust.R
index e7370e9..fc2689b 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -12,6 +12,10 @@
 
 
 
+
+
+
+
 edhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -48,7 +52,7 @@ dhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
 rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   myl <- rep(0.0, len = use.n)
@@ -134,12 +138,12 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
   A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
   eps <- A1 / (1 + A1)
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 4)
        stop("argument 'imethod' must be 1 or 2 or 3 or 4")
 
-  if (!is.Numeric(k, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(k, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'k'")
 
   if (length(zero) &&
@@ -261,7 +265,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
             .elocat = elocat, .escale = escale,
             .eps    = eps,       .k      = k ))),
   weight = eval(substitute(expression({
-    wz   <- matrix(as.numeric(NA), n, 2) # diag matrix; y is one-col too
+    wz   <- matrix(as.numeric(NA), n, 2)  # diag matrix; y is one-col too
 
 
 
@@ -290,12 +294,12 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
   A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
   eps <- A1 / (1 + A1)
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 4)
     stop("argument 'imethod' must be 1 or 2 or 3 or 4")
 
-  if (!is.Numeric(k, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(k, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'k'")
 
 
@@ -376,7 +380,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
     mylocat <- eta2theta(eta, .llocat,  earg = .elocat)
     myk     <- .k
 
-    zedd <- (y - mylocat) # / myscale
+    zedd <- (y - mylocat)  # / myscale
     cond2 <- (abs(zedd) <=  myk)
     cond3 <-     (zedd  >   myk)
 
@@ -400,13 +404,13 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
             .elocat = elocat,
             .eps    = eps,       .k      = k ))),
   weight = eval(substitute(expression({
-    wz   <- matrix(as.numeric(NA), n, 1) # diag matrix; y is one-col too
+    wz   <- matrix(as.numeric(NA), n, 1)  # diag matrix; y is one-col too
 
 
 
 
     temp4 <- erf(myk / sqrt(2))
-    ned2l.dlocat2 <- temp4 * (1 - .eps) # / myscale^2
+    ned2l.dlocat2 <- temp4 * (1 - .eps)  # / myscale^2
 
 
     wz[, iam(1,1,M)] <- ned2l.dlocat2 * dlocat.deta^2
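
The user-facing Huber calls are untouched by these hunks (the edits above are mainly the rename of is.Numeric()'s 'allowable.length' argument to 'length.arg', plus comment spacing), so the usual d/p/r interface still applies; a quick sketch:

    ## VGAM attached as before.
    set.seed(1)
    x <- rhuber(5, k = 0.862)   # draws from Huber's least favourable distribution
    dhuber(x, k = 0.862)        # density at those draws
    phuber(0, k = 0.862)        # cdf at 0; 0.5 by symmetry, since mu = 0 by default
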
diff --git a/R/family.rrr.R b/R/family.rrr.R
index ce3e5c6..759a571 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -6,6 +6,8 @@
 
 
 
+
+
 replace.constraints <- function(Blist, cm, index) {
 
   for (iii in index)
@@ -14,17 +16,20 @@ replace.constraints <- function(Blist, cm, index) {
 }
 
 
+
+
  valt.control <- function(
                  Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
                               60, 80, 100, 125, 2^(8:12)),
-                 Criterion = c("rss", "coefficients"),
-                 Linesearch = FALSE, Maxit = 7,
+                 Criterion = c("res.ss", "coefficients"),
+                 Linesearch = FALSE,
+                 Maxit = 7,
                  Suppress.warning = TRUE,
                  Tolerance = 1e-7, ...) {
 
   if (mode(Criterion) != "character" && mode(Criterion) != "name")
     Criterion <- as.character(substitute(Criterion))
-  Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
+  Criterion <- match.arg(Criterion, c("res.ss", "coefficients"))[1]
 
   list(Alphavec = Alphavec,
        Criterion = Criterion, 
@@ -35,6 +40,8 @@ replace.constraints <- function(Blist, cm, index) {
 }
 
 
+
+
 qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
   Rank <- ncol(numat)
   moff <- NULL
@@ -44,32 +51,30 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
            if (ITolerances) {
              moff <- 0
              for (ii in 1:Rank)
-               moff <- moff - 0.5 * temp1[,ii]
+               moff <- moff - 0.5 * temp1[, ii]
            }
            cbind(numat, if (ITolerances) NULL else temp1)
   } else {
     as.matrix(numat)
   }
-  list(matrix = if (Aoffset>0) ans else ans[, -(1:Rank), drop = FALSE],
+  list(matrix = if (Aoffset > 0) ans else ans[, -(1:Rank), drop = FALSE],
        offset = moff)
 }
 
 
 
-
-
  valt <- function(x, z, U, Rank = 1,
                   Blist = NULL, 
                   Cinit = NULL,
                   Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
                                60, 80, 100, 125, 2^(8:12)),
-                  Criterion = c("rss", "coefficients"),
+                  Criterion = c("res.ss", "coefficients"),
                   Crow1positive = rep(TRUE, length.out = Rank),
                   colx1.index,
                   Linesearch = FALSE,
                   Maxit = 20, 
-                  szero = NULL,
-                  SD.Cinit = 0.02,
+                  str0 = NULL,
+                  sd.Cinit = 0.02,
                   Suppress.warning = FALSE,
                   Tolerance = 1e-6, 
                   trace = FALSE,
@@ -81,151 +86,165 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
                  
 
 
-    if (mode(Criterion) != "character" && mode(Criterion) != "name")
-        Criterion <- as.character(substitute(Criterion))
-    Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
-
-    if (any(diff(Alphavec) <= 0))
-        stop("'Alphavec' must be an increasing sequence") 
-
-    if (!is.matrix(z))
-        z <- as.matrix(z)
-    n <- nrow(z)
-    M <- ncol(z)
-    if (!is.matrix(x))
-        x <- as.matrix(x)
-
-    colx2.index <- if (is.null(colx1.index)) 1:ncol(x) else
-                  (1:ncol(x))[-colx1.index]
-
-    p1 <- length(colx1.index)
-    p2 <- length(colx2.index)
-    p  <- p1 + p2
-    if (!p2) stop("'p2', the number of variables for the ",
-                  "reduced-rank regression, must be > 0")
-
-    if (!length(Blist)) {
-        Blist <- replace.constraints(vector("list", p), diag(M), 1:p)
-    }
-
-    dU <- dim(U)
-    if (dU[2] != n)
-        stop("input unconformable")
-
-    clist2 <- replace.constraints(vector("list", Rank+p1),
-               if (length(szero))
-               diag(M)[, -szero, drop = FALSE] else diag(M), 1:Rank)
-    if (p1) {
-      for (kk in 1:p1)
-        clist2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
-    }
-
-    if (is.null(Cinit))
-        Cinit <- matrix(rnorm(p2*Rank, sd = SD.Cinit), p2, Rank)
+  if (mode(Criterion) != "character" && mode(Criterion) != "name")
+    Criterion <- as.character(substitute(Criterion))
+  Criterion <- match.arg(Criterion, c("res.ss", "coefficients"))[1]
+
+  if (any(diff(Alphavec) <= 0))
+    stop("'Alphavec' must be an increasing sequence") 
+
+  if (!is.matrix(z))
+    z <- as.matrix(z)
+  n <- nrow(z)
+  M <- ncol(z)
+  if (!is.matrix(x))
+    x <- as.matrix(x)
+
+  colx2.index <- if (is.null(colx1.index)) 1:ncol(x) else
+                 (1:ncol(x))[-colx1.index]
+
+  p1 <- length(colx1.index)
+  p2 <- length(colx2.index)
+  p  <- p1 + p2
+  if (!p2)
+    stop("'p2', the number of variables for the ",
+         "reduced-rank regression, must be > 0")
+
+  if (!length(Blist)) {
+    Blist <- replace.constraints(vector("list", p), diag(M), 1:p)
+  }
 
-    fit <- list(rss = 0)  # Only for initial old.crit below
+  dU <- dim(U)
+  if (dU[2] != n)
+    stop("input unconformable")
+
+  clist2 <- replace.constraints(vector("list", Rank+p1),
+                                if (length(str0))
+                                diag(M)[, -str0, drop = FALSE] else
+                                diag(M), 1:Rank)
+  if (p1) {
+    for (kk in 1:p1)
+      clist2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
+  }
 
-    C <- Cinit # This is input for the main iter loop
-    old.crit <- switch(Criterion, coefficients=C, rss=fit$rss)
+  if (is.null(Cinit))
+    Cinit <- matrix(rnorm(p2*Rank, sd = sd.Cinit), p2, Rank)
 
-    recover <- 0  # Allow a few iterations between different line searches 
-    for (iter in 1:Maxit) {
-      iter.save <- iter
+  fit <- list(res.ss = 0)  # Only for initial old.crit below
 
-        lv.mat <- x[, colx2.index, drop = FALSE] %*% C
-        new.lv.model.matrix <- cbind(lv.mat,
-                                    if (p1) x[, colx1.index] else NULL)
-        fit <- vlm.wfit(xmat = new.lv.model.matrix, z, Blist = clist2,
-                       U = U, matrix.out = TRUE, is.vlmX = FALSE,
-                       rss = FALSE, qr = FALSE, xij = xij)
-        A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
+  C <- Cinit # This is input for the main iter loop
+  old.crit <- switch(Criterion, coefficients = C, res.ss = fit$res.ss)
 
-        clist1 <- replace.constraints(Blist, A, colx2.index)
-        fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U,
-                       matrix.out = TRUE, is.vlmX = FALSE,
-                       rss = TRUE, qr = FALSE, xij = xij)
-        C <- fit$mat.coef[colx2.index, , drop = FALSE] %*% A %*%
-             solve(t(A) %*% A)
+  recover <- 0  # Allow a few iterations between different line searches 
+  for (iter in 1:Maxit) {
+    iter.save <- iter
 
-        numat <- x[, colx2.index, drop = FALSE] %*% C
-        evnu <- eigen(var(numat))
-        temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
-                evnu$vector %*% evnu$value^(-0.5)
-        C <- C %*% temp7
-        A <- A %*% t(solve(temp7))
-        temp8 <- crow1C(cmat = C, Crow1positive, amat = A)
-        C <- temp8$cmat
-        A <- temp8$amat
+      latvar.mat <- x[, colx2.index, drop = FALSE] %*% C
+      new.latvar.model.matrix <- cbind(latvar.mat,
+                                       if (p1) x[, colx1.index] else NULL)
+      fit <- vlm.wfit(xmat = new.latvar.model.matrix, z, Blist = clist2,
+                      U = U, matrix.out = TRUE, is.vlmX = FALSE,
+                      res.ss = FALSE, qr = FALSE, xij = xij)
+      A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
 
+      clist1 <- replace.constraints(Blist, A, colx2.index)
+      fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U,
+                      matrix.out = TRUE, is.vlmX = FALSE,
+                      res.ss = TRUE, qr = FALSE, xij = xij)
+      C <- fit$mat.coef[colx2.index, , drop = FALSE] %*% A %*%
+           solve(t(A) %*% A)
 
-        ratio <- switch(Criterion,
-                coefficients = max(abs(C - old.crit) / (Tolerance+abs(C))),
-                rss = max(abs(fit$rss - old.crit) / (Tolerance+fit$rss)))
+      numat <- x[, colx2.index, drop = FALSE] %*% C
+      evnu <- eigen(var(numat))
+      temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+                 evnu$vector %*% evnu$value^(-0.5)
+      C <- C %*% temp7
+      A <- A %*% t(solve(temp7))
+      temp8 <- crow1C(cmat = C, Crow1positive, amat = A)
+      C <- temp8$cmat
+      A <- temp8$amat
+
+
+      ratio <-
+          switch(Criterion,
+                 coefficients = max(abs(C - old.crit) / (
+                                    Tolerance + abs(C))),
+                 res.ss = max(abs(fit$res.ss - old.crit) / (
+                              Tolerance + fit$res.ss)))
 
         if (trace) {
-            cat("   Alternating iteration", iter,
-                ",   Convergence criterion  = ", format(ratio), "\n")
-            if (!is.null(fit$rss))
-                cat("    ResSS  = ", fit$rss, "\n")
-            flush.console()
-        }
+          cat("   Alternating iteration", iter,
+              ",   Convergence criterion  = ", format(ratio), "\n")
+          if (!is.null(fit$res.ss))
+              cat("    ResSS  = ", fit$res.ss, "\n")
+          flush.console()
+      }
 
-        if (ratio < Tolerance) {
-            if (!Linesearch || (Linesearch && iter >= 3)) break
-        } else if (iter == Maxit && !Suppress.warning) {
-            warning("did not converge")
-        }
+      if (ratio < Tolerance) {
+        if (!Linesearch || (Linesearch && iter >= 3))
+          break
+      } else if (iter == Maxit && !Suppress.warning) {
+        warning("did not converge")
+      }
 
-        fini.linesearch <- FALSE
-        if (Linesearch && iter - recover >= 2) {
-            xnew <- C
-
-            direction1 <- (xnew-xold) # / sqrt(1 + sum((xnew-xold)^2))
-            ftemp <- fit$rss  # Most recent objective function 
-            use.alpha <- 0   # The current step relative to (xold, yold)
-            for (itter in 1:length(Alphavec)) {
-                CC <- xold + Alphavec[itter] * direction1
-
-                try.lv.mat <- x[, colx2.index, drop = FALSE] %*% CC
-                try.new.lv.model.matrix = cbind(try.lv.mat,
-                                   if (p1) x[,colx1.index] else NULL)
-
-                try <- vlm.wfit(xmat = try.new.lv.model.matrix, z,
-                               Blist = clist2, U = U, matrix.out = TRUE,
-                               is.vlmX = FALSE, rss = TRUE, qr = FALSE,
-                               xij = xij)
-                if (try$rss < ftemp) {
-                    use.alpha <- Alphavec[itter]
-                    fit <- try 
-                    ftemp <- try$rss
-                    C <- CC 
-                    A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
-                    lv.mat <- x[, colx2.index, drop = FALSE] %*% C
-                    recover <- iter # Give it some altg iters to recover
-                } else {
-                    if (trace && use.alpha > 0) {
-                        cat("    Finished line search using Alpha  = ",
-                            use.alpha, "\n")
-                        flush.console()
-                    }
-                    fini.linesearch <- TRUE
-                }
-                if (fini.linesearch) break 
-            } # End of itter loop 
-        }
+      fini.linesearch <- FALSE
+      if (Linesearch && iter - recover >= 2) {
+          xnew <- C
+
+          direction1 <- (xnew - xold)  # / sqrt(1 + sum((xnew-xold)^2))
+          ftemp <- fit$res.ss  # Most recent objective function 
+          use.alpha <- 0  # The current step relative to (xold, yold)
+          for (itter in 1:length(Alphavec)) {
+            CC <- xold + Alphavec[itter] * direction1
+
+            try.latvar.mat <- x[, colx2.index, drop = FALSE] %*% CC
+            try.new.latvar.model.matrix <-
+              cbind(try.latvar.mat,
+                    if (p1) x[, colx1.index] else NULL)
+
+            try <- vlm.wfit(xmat = try.new.latvar.model.matrix, z,
+                            Blist = clist2, U = U, matrix.out = TRUE,
+                            is.vlmX = FALSE, res.ss = TRUE, qr = FALSE,
+                            xij = xij)
+            if (try$res.ss < ftemp) {
+              use.alpha <- Alphavec[itter]
+              fit <- try 
+              ftemp <- try$res.ss
+              C <- CC 
+              A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
+              latvar.mat <- x[, colx2.index, drop = FALSE] %*% C
+              recover <- iter  # Give it some altg iters to recover
+            } else {
+              if (trace && use.alpha > 0) {
+                cat("    Finished line search using Alpha  = ",
+                    use.alpha, "\n")
+                flush.console()
+              }
+              fini.linesearch <- TRUE
+            }
+          if (fini.linesearch) break 
+        }  # End of itter loop 
+    }
 
-        xold <- C # Do not take care of drift
-        old.crit <- switch(Criterion, coefficients = C, rss = fit$rss)
-    } # End of iter loop
+    xold <- C # Do not take care of drift
+    old.crit <- switch(Criterion,
+                       coefficients = C,
+                       res.ss = fit$res.ss)
+  }  # End of iter loop
 
-    list(A = A, C = C, fitted = fit$fitted, new.coeffs = fit$coef,
-         rss = fit$rss)
+  list(A = A,
+       C = C,
+       fitted = fit$fitted,
+       new.coeffs = fit$coef,
+       res.ss = fit$res.ss)
 }
 
 
 
-lm2qrrvlm.model.matrix <- function(x, Blist, C, control, assign = TRUE,
-                                  no.thrills = FALSE) {
+
+ lm2qrrvlm.model.matrix <-
+  function(x, Blist, C, control, assign = TRUE,
+           no.thrills = FALSE) {
 
     Rank <- control$Rank
     colx1.index <- control$colx1.index
@@ -236,8 +255,8 @@ lm2qrrvlm.model.matrix <- function(x, Blist, C, control, assign = TRUE,
 
     M <- nrow(Blist[[1]])
     p1 <- length(colx1.index)
-    combine2 <- c(control$szero,
-                 if (Corner) control$Index.corner else NULL)
+    combine2 <- c(control$str0,
+                  if (Corner) control$Index.corner else NULL)
 
     Qoffset <- if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
     NoA <- length(combine2) == M    # No unknown parameters in A
@@ -245,57 +264,62 @@ lm2qrrvlm.model.matrix <- function(x, Blist, C, control, assign = TRUE,
         Aoffset <- 0
         vector("list", Aoffset+Qoffset+p1)
     } else {
-        Aoffset <- Rank
-        replace.constraints(vector("list", Aoffset+Qoffset+p1),
-           if (length(combine2)) diag(M)[,-combine2,drop = FALSE] else diag(M),
-           1:Rank) # If Corner then does not contain \bI_{Rank}
+      Aoffset <- Rank
+      replace.constraints(vector("list", Aoffset+Qoffset+p1),
+           if (length(combine2)) diag(M)[, -combine2, drop = FALSE] else diag(M),
+           1:Rank)  # If Corner then does not contain \bI_{Rank}
     }
     if (Quadratic && !ITolerances)
-        clist2 <- replace.constraints(clist2,
-            if (control$EqualTolerances)
-                matrix(1, M, 1) - eijfun(Dzero, M) else {
-            if (length(Dzero)) diag(M)[,-Dzero,drop = FALSE] else diag(M)},
-            Aoffset + (1:Qoffset))
+      clist2 <- replace.constraints(clist2,
+          if (control$EqualTolerances)
+              matrix(1, M, 1) - eijfun(Dzero, M) else {
+          if (length(Dzero)) diag(M)[,-Dzero, drop = FALSE] else diag(M)},
+          Aoffset + (1:Qoffset))
     if (p1)
       for (kk in 1:p1)
         clist2[[Aoffset+Qoffset+kk]] <- Blist[[colx1.index[kk]]]
     if (!no.thrills) {
-        i63 <- iam(NA, NA, M=Rank, both = TRUE)
-        names(clist2) <- c(
-               if (NoA) NULL else paste("(lv", 1:Rank, ")", sep = ""), 
-               if (Quadratic && Rank == 1 && !ITolerances)
-                   "(lv^2)" else 
-               if (Quadratic && Rank>1 && !ITolerances)
-                   paste("(lv", i63$row, ifelse(i63$row == i63$col, "^2",
-                   paste("*lv", i63$col, sep = "")), ")", sep = "") else NULL,
-               if (p1) names(colx1.index) else NULL)
+      i63 <- iam(NA, NA, M=Rank, both = TRUE)
+      names(clist2) <- c(
+             if (NoA) NULL else paste("(latvar", 1:Rank, ")", sep = ""), 
+             if (Quadratic && Rank == 1 && !ITolerances)
+                 "(latvar^2)" else 
+             if (Quadratic && Rank>1 && !ITolerances)
+                 paste("(latvar", i63$row, ifelse(i63$row == i63$col, "^2",
+                 paste("*latvar", i63$col, sep = "")), ")", sep = "") else NULL,
+             if (p1) names(colx1.index) else NULL)
     }
 
-    lv.mat <- x[,control$colx2.index,drop = FALSE] %*% C
+    latvar.mat <- x[, control$colx2.index, drop = FALSE] %*% C
 
 
-    tmp900 <- qrrvglm.xprod(lv.mat, Aoffset, Quadratic, ITolerances)
-    new.lv.model.matrix <- cbind(tmp900$matrix,
-                                if (p1) x[,colx1.index] else NULL)
+    tmp900 <- qrrvglm.xprod(latvar.mat, Aoffset, Quadratic, ITolerances)
+    new.latvar.model.matrix <- cbind(tmp900$matrix,
+                                     if (p1) x[,colx1.index] else NULL)
     if (!no.thrills)
-        dimnames(new.lv.model.matrix) <- list(dimnames(x)[[1]], names(clist2))
+      dimnames(new.latvar.model.matrix) <- list(dimnames(x)[[1]],
+                                                names(clist2))
 
     if (assign) {
-        asx <- attr(x, "assign")
-        asx <- vector("list", ncol(new.lv.model.matrix))
-        names(asx) <- names(clist2)
-        for (ii in 1:length(names(asx))) {
-          asx[[ii]] <- ii
-        }
-        attr(new.lv.model.matrix, "assign") <- asx
+      asx <- attr(x, "assign")
+      asx <- vector("list", ncol(new.latvar.model.matrix))
+      names(asx) <- names(clist2)
+      for (ii in 1:length(names(asx))) {
+        asx[[ii]] <- ii
+      }
+      attr(new.latvar.model.matrix, "assign") <- asx
     }
 
     if (no.thrills)
-        list(new.lv.model.matrix = new.lv.model.matrix, constraints = clist2,
-             offset = tmp900$offset) else
-        list(new.lv.model.matrix = new.lv.model.matrix, constraints = clist2,
-             NoA = NoA, Aoffset = Aoffset, lv.mat = lv.mat,
-             offset = tmp900$offset)
+      list(new.latvar.model.matrix = new.latvar.model.matrix,
+           constraints = clist2,
+           offset = tmp900$offset) else
+      list(new.latvar.model.matrix = new.latvar.model.matrix,
+           constraints = clist2,
+           NoA = NoA,
+           Aoffset = Aoffset,
+           latvar.mat = latvar.mat,
+           offset = tmp900$offset)
 }
 
 
@@ -305,13 +329,13 @@ valt.2iter <- function(x, z, U, Blist, A, control) {
 
   clist1 <- replace.constraints(Blist, A, control$colx2.index)
   fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U, matrix.out = TRUE, 
-                  is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij = control$xij)
+                  is.vlmX = FALSE, res.ss = TRUE, qr = FALSE, xij = control$xij)
   C <- fit$mat.coef[control$colx2.index, , drop = FALSE] %*%
        A %*% solve(t(A) %*% A)
 
   list(A = A, C = C,
        fitted = fit$fitted, new.coeffs = fit$coef,
-       Blist = clist1, rss = fit$rss)
+       Blist = clist1, res.ss = fit$res.ss)
 }
 
 
@@ -331,14 +355,14 @@ valt.1iter <- function(x, z, U, Blist, C, control,
 
     Qoffset <- if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
     tmp833 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist, C=C, control=control)
-    new.lv.model.matrix <- tmp833$new.lv.model.matrix 
+    new.latvar.model.matrix <- tmp833$new.latvar.model.matrix 
     clist2 <- tmp833$constraints # Does not contain \bI_{Rank}
-    lv.mat <- tmp833$lv.mat
+    latvar.mat <- tmp833$latvar.mat
     if (Corner)
-        zedd[,Index.corner] <- zedd[,Index.corner] - lv.mat
+        zedd[,Index.corner] <- zedd[,Index.corner] - latvar.mat
 
     if (nice31 && MSratio == 1) {
-        fit <- list(mat.coef = NULL, fitted.values = NULL, rss = 0)
+        fit <- list(mat.coef = NULL, fitted.values = NULL, res.ss = 0)
 
         clist2 <- NULL # for vlm.wfit
 
@@ -346,40 +370,40 @@ valt.1iter <- function(x, z, U, Blist, C, control,
         for (ii in 1:NOS) {
           i5 <- i5 + 1:MSratio
 
-            tmp100 <- vlm.wfit(xmat = new.lv.model.matrix,
+            tmp100 <- vlm.wfit(xmat = new.latvar.model.matrix,
                                zedd[, i5, drop = FALSE],
                                Blist = clist2,
-                               U = U[i5,,drop = FALSE],
+                               U = U[i5,, drop = FALSE],
                                matrix.out = TRUE,
-                               is.vlmX = FALSE, rss = TRUE,
+                               is.vlmX = FALSE, res.ss = TRUE,
                                qr = FALSE,
                                Eta.range = control$Eta.range,
                                xij = control$xij,
                                lp.names = lp.names[i5])
-            fit$rss <- fit$rss + tmp100$rss
+            fit$res.ss <- fit$res.ss + tmp100$res.ss
             fit$mat.coef <- cbind(fit$mat.coef, tmp100$mat.coef)
             fit$fitted.values <- cbind(fit$fitted.values,
                                        tmp100$fitted.values)
         }
     } else {
-        fit <- vlm.wfit(xmat = new.lv.model.matrix,
+        fit <- vlm.wfit(xmat = new.latvar.model.matrix,
                        zedd, Blist = clist2, U = U,
                        matrix.out = TRUE,
-                       is.vlmX = FALSE, rss = TRUE, qr = FALSE,
+                       is.vlmX = FALSE, res.ss = TRUE, qr = FALSE,
                        Eta.range = control$Eta.range,
                        xij = control$xij, lp.names = lp.names)
     }
     A <- if (tmp833$NoA) matrix(0, M, Rank) else
-        t(fit$mat.coef[1:Rank,,drop = FALSE])
+        t(fit$mat.coef[1:Rank,, drop = FALSE])
     if (Corner)
         A[Index.corner,] <- diag(Rank)     
 
     B1 <- if (p1)
-      fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),,drop = FALSE] else
+      fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),, drop = FALSE] else
       NULL
     fv <- as.matrix(fit$fitted.values)
     if (Corner)
-        fv[,Index.corner] <- fv[,Index.corner] + lv.mat
+        fv[,Index.corner] <- fv[,Index.corner] + latvar.mat
     Dmat <- if (Quadratic) {
             if (ITolerances) {
                 tmp800 <- matrix(0, M, Rank*(Rank+1)/2)
@@ -388,13 +412,13 @@ valt.1iter <- function(x, z, U, Blist, C, control,
                 tmp800
             } else 
                 t(fit$mat.coef[(tmp833$Aoffset+1):
-                  (tmp833$Aoffset+Qoffset),,drop = FALSE])
+                  (tmp833$Aoffset+Qoffset),, drop = FALSE])
     } else
         NULL
 
     list(Amat = A, B1 = B1, Cmat = C, Dmat = Dmat,
          fitted = if (M == 1) c(fv) else fv,
-         new.coeffs = fit$coef, constraints = clist2, rss = fit$rss,
+         new.coeffs = fit$coef, constraints = clist2, res.ss = fit$res.ss,
          offset = if (length(tmp833$offset)) tmp833$offset else NULL)
 }
 
@@ -404,7 +428,7 @@ valt.1iter <- function(x, z, U, Blist, C, control,
 
 rrr.init.expression <- expression({
     if (length(control$Quadratic) && control$Quadratic)
-        copy_X_vlm <- TRUE
+      copy.X.vlm <- TRUE
 
 
 
@@ -417,16 +441,16 @@ rrr.init.expression <- expression({
               "quasibinomial" = 1, "negbinomial" = 3,
               "gamma2" = 5, "gaussianff" = 8,
               0)  # stop("cannot fit this model using fast algorithm")
-    if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
+    if (modelno == 1) modelno = get("modelno", envir = VGAMenv)
     rrcontrol$modelno = control$modelno = modelno
     if (modelno == 3 || modelno == 5) {
 
 
         M <- 2 * ifelse(is.matrix(y), ncol(y), 1)
-          control$szero <-
-        rrcontrol$szero <- seq(from = 2, to=M, by = 2)  # Handles A
+          control$str0 <-
+        rrcontrol$str0 <- seq(from = 2, to = M, by = 2)  # Handles A
           control$Dzero <-
-        rrcontrol$Dzero <- seq(from = 2, to=M, by = 2)  # Handles D
+        rrcontrol$Dzero <- seq(from = 2, to = M, by = 2)  # Handles D
 
 
     }
@@ -441,19 +465,19 @@ rrr.init.expression <- expression({
 
 rrr.alternating.expression <- expression({
 
-    alt <- valt(x, z, U, Rank=Rank,
+    alt <- valt(x, z, U, Rank = Rank,
                 Blist = Blist,
                 Cinit = rrcontrol$Cinit,
                 Criterion = rrcontrol$Criterion,
                 colx1.index = rrcontrol$colx1.index,
                 Linesearch = rrcontrol$Linesearch,
                 Maxit = rrcontrol$Maxit,
-                szero = rrcontrol$szero,
-                SD.Cinit = rrcontrol$SD.Cinit,
+                str0 = rrcontrol$str0,
+                sd.Cinit = rrcontrol$sd.Cinit,
                 Suppress.warning = rrcontrol$Suppress.warning,
                 Tolerance = rrcontrol$Tolerance,
                 trace = trace,
-                xij = control$xij) # This is subject to drift in A and C
+                xij = control$xij)  # This is subject to drift in A and C
 
     ans2 <- rrr.normalize(rrcontrol = rrcontrol, A=alt$A, C=alt$C, x = x)
 
@@ -472,7 +496,7 @@ rrr.alternating.expression <- expression({
     if (length(Dmat)) {
       ind0 <- iam(NA, NA, both = TRUE, M = Rank)
       for (kay in 1:M) {
-        elts <- Dmat[kay, , drop = FALSE] # Manual recycling
+        elts <- Dmat[kay, , drop = FALSE]  # Manual recycling
         if (length(elts) < Rank)
           elts <- matrix(elts, 1, Rank)
         Dk <- m2adefault(elts, M = Rank)[, , 1]
@@ -498,8 +522,8 @@ rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
     C.old <- C
 
     if (rrcontrol$Corner) {
-      tmp87 <- A[Index.corner,,drop = FALSE]
-      Mmat <- solve(tmp87) # The normalizing matrix
+      tmp87 <- A[Index.corner,, drop = FALSE]
+      Mmat <- solve(tmp87)  # The normalizing matrix
       C <- C %*% t(tmp87)
       A <- A %*% Mmat
       A[Index.corner,] <- diag(Rank)  # Make sure 
@@ -524,10 +548,10 @@ rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
                                      Dmat = Dmat, M = M)
     }
 
-    if (rrcontrol$Uncorrelated.lv) {
-        lv.mat <- x[, colx2.index, drop = FALSE] %*% C
-        var.lv.mat <- var(lv.mat)
-        UU <- chol(var.lv.mat)
+    if (rrcontrol$Uncorrelated.latvar) {
+        latvar.mat <- x[, colx2.index, drop = FALSE] %*% C
+        var.latvar.mat <- var(latvar.mat)
+        UU <- chol(var.latvar.mat)
         Ut <- solve(UU)
         Mmat <- t(UU)
         C <- C %*% Ut
@@ -557,7 +581,7 @@ rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
     }
 
 
-    list(Amat=A, Cmat=C, Dmat=Dmat)
+    list(Amat = A, Cmat = C, Dmat = Dmat)
 }
 
 
@@ -566,8 +590,8 @@ rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
 
 rrr.end.expression <- expression({
 
-  if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
-    rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
+  if (exists(".VGAM.etamat", envir = VGAMenv))
+    rm(".VGAM.etamat", envir = VGAMenv)
 
 
   if (control$Quadratic) {
@@ -580,12 +604,12 @@ rrr.end.expression <- expression({
     Blist <- replace.constraints(Blist.save, Amat, colx2.index)
   }
 
-    X_vlm_save <- if (control$Quadratic) {
+    X.vlm.save <- if (control$Quadratic) {
         tmp300 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist.save,
                                         C = Cmat, control=control)
-        lv.mat <- tmp300$lv.mat  # Needed at the top of new.s.call
+        latvar.mat <- tmp300$latvar.mat  # Needed at the top of new.s.call
 
-        lm2vlm.model.matrix(tmp300$new.lv.model.matrix,
+        lm2vlm.model.matrix(tmp300$new.latvar.model.matrix,
                             B.list,
                             xij = control$xij)
     } else {
@@ -596,7 +620,7 @@ rrr.end.expression <- expression({
     fv <- tmp.fitted            # Contains \bI \bnu
     eta <- fv + offset
     if (FALSE && control$Rank == 1) {
-        ooo <- order(lv.mat[,1])
+      ooo <- order(latvar.mat[, 1])
     }
     mu <- family@linkinv(eta, extra)
 
@@ -641,18 +665,18 @@ rrr.derivative.expression <- expression({
              all(trivial.constraints(constraints) == 1)
 
     theta0 <- c(Cmat)
-    assign(".VGAM.dot.counter", 0, envir = VGAM:::VGAMenv)
+    assign(".VGAM.dot.counter", 0, envir = VGAMenv)
     if (control$OptimizeWrtC) {
         if (control$Quadratic && control$FastAlgorithm) {
             if (iter == 2) {
-                if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
-                    rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
+                if (exists(".VGAM.etamat", envir = VGAMenv))
+                    rm(".VGAM.etamat", envir = VGAMenv)
             }
             if (iter > 2 && !quasi.newton$convergence) {
-                if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
-                    ..VGAM.z <- get(".VGAM.z", envir = VGAM:::VGAMenv)
-                    ..VGAM.U <- get(".VGAM.U", envir = VGAM:::VGAMenv)
-                    ..VGAM.beta <- get(".VGAM.beta", envir = VGAM:::VGAMenv)
+                if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+                    ..VGAM.z <- get(".VGAM.z", envir = VGAMenv)
+                    ..VGAM.U <- get(".VGAM.U", envir = VGAMenv)
+                    ..VGAM.beta <- get(".VGAM.beta", envir = VGAMenv)
                 }
                 if (zthere) {
                     z <- matrix(..VGAM.z, n, M)  # minus any offset
@@ -665,18 +689,18 @@ rrr.derivative.expression <- expression({
                 NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
 
                 canfitok <-
-                  (exists("CQO.FastAlgorithm", envir=VGAM:::VGAMenv) &&
-                  get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+                  (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
+                  get("CQO.FastAlgorithm", envir = VGAMenv))
                 if (!canfitok)
                   stop("cannot fit this model using fast algorithm")
                 p2star <- if (nice31) 
        ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
-       (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
+       (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol, 1,NOS))
                 p1star <- if (nice31) p1 *
                   ifelse(modelno == 3 || modelno == 5, 2, 1) else
-                  (ncol(X_vlm_save) - p2star)
-                X_vlm_1save <- if (p1star > 0)
-                        X_vlm_save[,-(1:p2star)] else NULL
+                  (ncol(X.vlm.save) - p2star)
+                X.vlm.1save <- if (p1star > 0)
+                        X.vlm.save[,-(1:p2star)] else NULL
                 quasi.newton <- optim(par=Cmat, fn=callcqof, 
                   gr <- if (control$GradientFunction) calldcqo else NULL,
                   method=which.optimizer,
@@ -686,38 +710,38 @@ rrr.derivative.expression <- expression({
                                  length.out = length(Cmat)),
                   maxit = 250),
                   etamat=eta, xmat=x, ymat=y, wvec=w,
-                  X_vlm_1save = if (nice31) NULL else X_vlm_1save,
+                  X.vlm.1save = if (nice31) NULL else X.vlm.1save,
                   modelno=modelno, Control=control,
                   n = n, M = M, p1star=p1star,
                   p2star=p2star, nice31 = nice31)
 
 
-                if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
-                    ..VGAM.z <- get(".VGAM.z", envir = VGAM:::VGAMenv)
-                    ..VGAM.U <- get(".VGAM.U", envir = VGAM:::VGAMenv)
-                    ..VGAM.beta <- get(".VGAM.beta", envir = VGAM:::VGAMenv)
+                if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+                    ..VGAM.z <- get(".VGAM.z", envir = VGAMenv)
+                    ..VGAM.U <- get(".VGAM.U", envir = VGAMenv)
+                    ..VGAM.beta <- get(".VGAM.beta", envir = VGAMenv)
                 }
                 if (zthere) {
                     z <- matrix(..VGAM.z, n, M)  # minus any offset
                     U <- matrix(..VGAM.U, M, n)
                 }
             } else {
-                if (exists(".VGAM.offset", envir = VGAM:::VGAMenv))
-                    rm(".VGAM.offset", envir = VGAM:::VGAMenv)
+                if (exists(".VGAM.offset", envir = VGAMenv))
+                    rm(".VGAM.offset", envir = VGAMenv)
             }
         } else {
             use.reltol <- if (length(rrcontrol$Reltol) >= iter) 
-                rrcontrol$Reltol[iter] else rev(rrcontrol$Reltol)[1]
+              rrcontrol$Reltol[iter] else rev(rrcontrol$Reltol)[1]
             quasi.newton <-
-            optim(par=theta0,
-                  fn=rrr.derivC.rss, 
-                  method=which.optimizer,
-                  control=list(fnscale=rrcontrol$Fnscale, 
-                               maxit=rrcontrol$Maxit,
-                               abstol=rrcontrol$Abstol,
-                               reltol=use.reltol),
-                  U = U, z= if (control$ITolerances) z+offset else z,
-                  M = M, xmat=x,    # varbix2=varbix2,
+            optim(par = theta0,
+                  fn = rrr.derivC.res.ss, 
+                  method = which.optimizer,
+                  control = list(fnscale = rrcontrol$Fnscale, 
+                                 maxit = rrcontrol$Maxit,
+                                 abstol = rrcontrol$Abstol,
+                                 reltol = use.reltol),
+                  U = U, z = if (control$ITolerances) z + offset else z,
+                  M = M, xmat = x,  # varbix2 = varbix2,
                   Blist = Blist, rrcontrol = rrcontrol)
         }
 
@@ -727,10 +751,10 @@ rrr.derivative.expression <- expression({
     Cmat <- matrix(quasi.newton$par, p2, Rank, byrow = FALSE)
 
     if (Rank > 1 && rrcontrol$ITolerances) {
-            numat <- x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
+            numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
             evnu <- eigen(var(numat))
             Cmat <- Cmat %*% evnu$vector
-            numat <- x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
+            numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
             offset <- if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
     }
 }
@@ -778,25 +802,25 @@ rrr.derivative.expression <- expression({
 
 
 
-rrr.derivC.rss <- function(theta, U, z, M, xmat, Blist, rrcontrol,
+rrr.derivC.res.ss <- function(theta, U, z, M, xmat, Blist, rrcontrol,
                           omit.these = NULL) {
 
     if (rrcontrol$trace) {
         cat(".")
         flush.console()
     }
-    alreadyThere <- exists(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
+    alreadyThere <- exists(".VGAM.dot.counter", envir = VGAMenv)
     if (alreadyThere) {
-        VGAM.dot.counter <- get(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
+        VGAM.dot.counter <- get(".VGAM.dot.counter", envir = VGAMenv)
         VGAM.dot.counter <- VGAM.dot.counter + 1 
         assign(".VGAM.dot.counter", VGAM.dot.counter,
-               envir = VGAM:::VGAMenv)
+               envir = VGAMenv)
         if (VGAM.dot.counter > max(50, options()$width - 5)) {
             if (rrcontrol$trace) {
                 cat("\n")
                 flush.console()
             }
-            assign(".VGAM.dot.counter", 0, envir = VGAM:::VGAMenv)
+            assign(".VGAM.dot.counter", 0, envir = VGAMenv)
         }
     }
 
@@ -806,22 +830,22 @@ rrr.derivC.rss <- function(theta, U, z, M, xmat, Blist, rrcontrol,
     tmp700 <- lm2qrrvlm.model.matrix(x = xmat, Blist = Blist,
                    no.thrills = !rrcontrol$Corner,
                    C = Cmat, control = rrcontrol, assign = FALSE)
-    Blist <- tmp700$constraints # Does not contain \bI_{Rank} \bnu
+    Blist <- tmp700$constraints  # Does not contain \bI_{Rank} \bnu
 
     if (rrcontrol$Corner) {
-        z <- as.matrix(z) # should actually call this zedd
-        z[,rrcontrol$Index.corner] <- z[,rrcontrol$Index.corner] -
-                                     tmp700$lv.mat
+        z <- as.matrix(z)  # should actually call this zedd
+        z[, rrcontrol$Index.corner] <- z[, rrcontrol$Index.corner] -
+                                       tmp700$latvar.mat
     }
 
     if (length(tmp700$offset)) z <- z - tmp700$offset
 
 
-    vlm.wfit(xmat=tmp700$new.lv.model.matrix, zmat=z,
-             Blist = Blist, ncolx=ncol(xmat), U = U, only.rss = TRUE,
-             matrix.out = FALSE, is.vlmX = FALSE, rss= TRUE, qr = FALSE,
+    vlm.wfit(xmat=tmp700$new.latvar.model.matrix, zmat=z,
+             Blist = Blist, ncolx=ncol(xmat), U = U, only.res.ss = TRUE,
+             matrix.out = FALSE, is.vlmX = FALSE, res.ss= TRUE, qr = FALSE,
              Eta.range = rrcontrol$Eta.range,
-             xij = rrcontrol$xij)$rss
+             xij = rrcontrol$xij)$res.ss
 }
 
 
@@ -867,16 +891,16 @@ nlminbcontrol <- function(Abs.tol = 10^(-6),
 
 
 
-Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
+Coef.qrrvglm <- function(object, varI.latvar = FALSE, reference = NULL, ...) {
 
 
-  if (length(varlvI) != 1 || !is.logical(varlvI)) 
-    stop("'varlvI' must be TRUE or FALSE")
+  if (length(varI.latvar) != 1 || !is.logical(varI.latvar)) 
+    stop("'varI.latvar' must be TRUE or FALSE")
   if (length(reference) > 1)
     stop("'reference' must be of length 0 or 1")
   if (length(reference) &&
       is.Numeric(reference))
-      if (!is.Numeric(reference, allowable.length = 1,
+      if (!is.Numeric(reference, length.arg = 1,
                       integer.valued = TRUE))
         stop("bad input for argument 'reference'")
   if (!is.logical(ConstrainedQO <- object@control$ConstrainedQO))
@@ -887,21 +911,21 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
   Rank <- ocontrol$Rank 
   M <- object@misc$M
   NOS <- if (length(object@y)) ncol(object@y) else M
-  MSratio <- M / NOS  # First value is g(mean) = quadratic form in lv
+  MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
   Quadratic <- if (ConstrainedQO) ocontrol$Quadratic else TRUE
   if (!Quadratic) stop("object is not a quadratic ordination object")
   p1 <- length(ocontrol$colx1.index)
   p2 <- length(ocontrol$colx2.index)
   Index.corner <- ocontrol$Index.corner
-  szero <- ocontrol$szero
+  str0 <- ocontrol$str0
   EqualTolerances <- ocontrol$EqualTolerances
   Dzero <- ocontrol$Dzero
   Corner <- if (ConstrainedQO) ocontrol$Corner else FALSE
 
   estITol <- if (ConstrainedQO) object@control$ITolerances else FALSE
-  modelno <- object@control$modelno  # 1,2,3,4,5,6,7 or 0
-  combine2 <- c(szero, if (Corner) Index.corner else NULL)
-  NoA <- length(combine2) == M # A is fully known.
+  modelno <- object@control$modelno  # 1, 2, 3, 4, 5, 6, 7 or 0
+  combine2 <- c(str0, if (Corner) Index.corner else NULL)
+  NoA <- length(combine2) == M  # A is fully known.
 
   Qoffset <- if (Quadratic) ifelse(estITol, 0, sum(1:Rank)) else 0
 
@@ -914,9 +938,11 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
 
   dzero.vector <- rep(FALSE, length = M)
   if (length(Dzero))
-      dzero.vector[Dzero] <- TRUE
+    dzero.vector[Dzero] <- TRUE
   names(dzero.vector) <- ynames 
-  lv.names <- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
+  latvar.names <- if (Rank == 1)
+    "latvar" else
+    paste("latvar", 1:Rank, sep = "")
 
 
 
@@ -965,10 +991,10 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
     if (ptr1 > 0) {
       this.spp <- candidates[ptr1]
     }
-  elts <- Dmat[this.spp,,drop = FALSE]
+  elts <- Dmat[this.spp,, drop = FALSE]
       if (length(elts) < Rank)
         elts <- matrix(elts, 1, Rank)
-      Dk <- m2adefault(elts, M = Rank)[,,1]    # Hopefully negative-def 
+      Dk <- m2adefault(elts, M = Rank)[,, 1]    # Hopefully negative-def 
       temp400 <- eigen(Dk)
       ptr1 <- ptr1 + 1 
       if (all(temp400$value < 0))
@@ -1022,7 +1048,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
     if (Rank > 1) {
       if (!length(xmat <- object@x))
         stop("cannot obtain the model matrix")
-      numat <- xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
+      numat <- xmat[,ocontrol$colx2.index, drop = FALSE] %*% Cmat
       evnu <- eigen(var(numat))
       Mmat <- solve(t(evnu$vector))
       Cmat <- Cmat %*% evnu$vector  # == Cmat %*% solve(t(Mmat))
@@ -1048,10 +1074,10 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
 
 
   if (ConstrainedQO)
-    if (varlvI) {
+    if (varI.latvar) {
+      if (!length(xmat <- object@x))
         stop("cannot obtain the model matrix")
-      numat <- xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
+      numat <- xmat[,ocontrol$colx2.index, drop = FALSE] %*% Cmat
       sdnumat <- apply(cbind(numat), 2, sd)
       Mmat <- if (Rank > 1) diag(sdnumat) else matrix(sdnumat, 1, 1)
       Cmat <- Cmat %*% solve(t(Mmat))
@@ -1059,6 +1085,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
       temp800 <- crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
       Cmat <- temp800$cmat
       Amat <- temp800$amat
+               Cmat # Not needed
 
 
 
@@ -1092,26 +1119,26 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
   }
   names(maximum) <- ynames
     
-  lv.mat <- if (ConstrainedQO) {
-    object@x[,ocontrol$colx2.index,drop = FALSE] %*% Cmat 
+  latvar.mat <- if (ConstrainedQO) {
+    object@x[, ocontrol$colx2.index, drop = FALSE] %*% Cmat 
   } else {
-    object@lv
+    object@latvar
   }
 
-  dimnames(Amat) <- list(lp.names, lv.names)
+  dimnames(Amat) <- list(lp.names, latvar.names)
   if (ConstrainedQO)
-    dimnames(Cmat) <- list(names(ocontrol$colx2.index), lv.names)
+    dimnames(Cmat) <- list(names(ocontrol$colx2.index), latvar.names)
   if (!length(xmat <- object@x)) stop("cannot obtain the model matrix")
-  dimnames(lv.mat) <- list(dimnames(xmat)[[1]], lv.names)
+  dimnames(latvar.mat) <- list(dimnames(xmat)[[1]], latvar.names)
 
   ans <- 
   new(Class <- if (ConstrainedQO) "Coef.qrrvglm" else "Coef.uqo",
        A = Amat, B1 = B1, Constrained = ConstrainedQO, D = Darray,
        NOS = NOS, Rank = Rank,
-       lv = lv.mat,
-       lvOrder = lv.mat,
+       latvar = latvar.mat,
+       latvar.order = latvar.mat,
        Optimum = optimum, 
-       OptimumOrder = optimum, 
+       Optimum.order = optimum, 
        bellshaped = bellshaped,
        Dzero = dzero.vector,
        Maximum = maximum,
@@ -1119,9 +1146,9 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
   if (ConstrainedQO) {ans@C <- Cmat} else {Cmat <- NULL}
 
   for (rrr in 1:Rank)
-    ans@OptimumOrder[rrr,] <- order(ans@Optimum[rrr,])
+    ans@Optimum.order[rrr, ] <- order(ans@Optimum[rrr, ])
   for (rrr in 1:Rank)
-    ans@lvOrder[,rrr] <- order(ans@lv[,rrr])
+    ans@latvar.order[, rrr] <- order(ans@latvar[, rrr])
 
   if (length(object@misc$estimated.dispersion) &&
       object@misc$estimated.dispersion) {
@@ -1139,97 +1166,100 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
   if (MSratio > 1) {
     keepIndex <- seq(from = 1, to = M, by = MSratio)
     ans@Dzero <- ans@Dzero[keepIndex]
-    ans@Optimum <- ans@Optimum[,keepIndex,drop = FALSE]
-    ans@Tolerance <- ans@Tolerance[,,keepIndex,drop = FALSE]
+    ans@Optimum <- ans@Optimum[,keepIndex, drop = FALSE]
+    ans@Tolerance <- ans@Tolerance[,,keepIndex, drop = FALSE]
     ans@bellshaped <- ans@bellshaped[keepIndex]
     names(ans@Dzero) <- ynames
   } else {
-    dimnames(ans@D) <- list(lv.names, lv.names, ynames)
+    dimnames(ans@D) <- list(latvar.names, latvar.names, ynames)
   }
   names(ans@bellshaped) <- ynames 
-  dimnames(ans@Optimum) <- list(lv.names, ynames)
-  dimnames(ans@Tolerance) <- list(lv.names, lv.names, ynames)
+  dimnames(ans@Optimum) <- list(latvar.names, ynames)
+  dimnames(ans@Tolerance) <- list(latvar.names, latvar.names, ynames)
   ans 
 }
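For orientation, a minimal usage sketch of the renamed 'varI.latvar' argument (illustrative only: it assumes VGAM 0.9-3 is attached and borrows the package's hspider example data; the particular species, covariates and rank are arbitrary):

    fit1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
                  WaterCon + BareSand + FallTwig + CoveMoss,
                family = poissonff, data = hspider, Rank = 1)
    Coef(fit1, varI.latvar = TRUE)  # site scores rescaled to unit variance
    Coef(fit1)                      # default scaling (varI.latvar = FALSE)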
 
 
 setClass(Class = "Coef.rrvglm", representation(
-      "A"            = "matrix",
-      "B1"           = "matrix",  # This may be unassigned if p1 = 0.
-      "C"            = "matrix",
-      "Rank"         = "numeric",
-      "colx1.index"  = "numeric",
-      "colx2.index"  = "numeric",
-      "Atilde"       = "matrix"))
+      "A"             = "matrix",
+      "B1"            = "matrix",  # This may be unassigned if p1 = 0.
+      "C"             = "matrix",
+      "Rank"          = "numeric",
+      "colx1.index"   = "numeric",
+      "colx2.index"   = "numeric",
+      "Atilde"        = "matrix"))
 
 setClass(Class = "Coef.uqo", representation(
-      "A"            = "matrix",
-      "B1"           = "matrix",
-      "Constrained"  = "logical",
-      "D"            = "array",
-      "NOS"          = "numeric",
-      "Rank"         = "numeric",
-      "lv"           = "matrix",
-      "lvOrder"      = "matrix",
-      "Maximum"      = "numeric",
-      "Optimum"      = "matrix",
-      "OptimumOrder" = "matrix",
-      "bellshaped"   = "logical",
-      "dispersion"   = "numeric",
-      "Dzero"        = "logical",
-      "Tolerance"    = "array"))
+      "A"             = "matrix",
+      "B1"            = "matrix",
+      "Constrained"   = "logical",
+      "D"             = "array",
+      "NOS"           = "numeric",
+      "Rank"          = "numeric",
+      "latvar"        = "matrix",
+      "latvar.order"  = "matrix",
+      "Maximum"       = "numeric",
+      "Optimum"       = "matrix",
+      "Optimum.order" = "matrix",
+      "bellshaped"    = "logical",
+      "dispersion"    = "numeric",
+      "Dzero"         = "logical",
+      "Tolerance"     = "array"))
 
 setClass(Class = "Coef.qrrvglm", representation(
       "C"            = "matrix"),
     contains = "Coef.uqo")
 
+
 show.Coef.qrrvglm <- function(x, ...) {
 
-    object <- x 
-    Rank <- object@Rank
-    M <- nrow(object@A)
-    NOS <- object@NOS
-    mymat <- matrix(as.numeric(NA), NOS, Rank)
-    if (Rank == 1) {  # || object@Diagonal
-        for (ii in 1:NOS) {
-            fred <- if (Rank>1) diag(object@Tolerance[,,ii,drop = FALSE]) else
-                   object@Tolerance[,,ii]
-            if (all(fred > 0))
-                mymat[ii,] <- sqrt(fred)
-        }
-        dimnames(mymat) <- list(dimnames(object@Tolerance)[[3]],
-                             if (Rank == 1) "lv" else
-                             paste("Tolerance", dimnames(mymat)[[2]],
-                                   sep = ""))
+  object <- x 
+  Rank <- object@Rank
+  M <- nrow(object@A)
+  NOS <- object@NOS
+  mymat <- matrix(as.numeric(NA), NOS, Rank)
+  if (Rank == 1) {  # || object@Diagonal
+    for (ii in 1:NOS) {
+      fred <- if (Rank > 1)
+                diag(object@Tolerance[, , ii, drop = FALSE]) else
+                object@Tolerance[, , ii]
+      if (all(fred > 0))
+        mymat[ii,] <- sqrt(fred)
+    }
+    dimnames(mymat) <- list(dimnames(object@Tolerance)[[3]],
+                            if (Rank == 1) "latvar" else
+                            paste("Tolerance", dimnames(mymat)[[2]],
+                                  sep = ""))
     } else {
-        for (ii in 1:NOS) {
-            fred <- eigen(object@Tolerance[,,ii])
-            if (all(fred$value > 0))
-                mymat[ii,] <- sqrt(fred$value)
-        }
-        dimnames(mymat) <- list(dimnames(object@Tolerance)[[3]],
-                               paste("tol", 1:Rank, sep = ""))
+      for (ii in 1:NOS) {
+        fred <- eigen(object@Tolerance[, , ii])
+          if (all(fred$value > 0))
+              mymat[ii, ] <- sqrt(fred$value)
+      }
+      dimnames(mymat) <- list(dimnames(object@Tolerance)[[3]],
+                              paste("tol", 1:Rank, sep = ""))
     }
 
     dimnames(object@A) <- list(dimnames(object@A)[[1]],
-        if (Rank > 1) paste("A", dimnames(object@A)[[2]], sep = ".") else
-                            "A")
+      if (Rank > 1) paste("A", dimnames(object@A)[[2]], sep = ".") else
+                          "A")
 
     Maximum <- if (length(object@Maximum))
-              cbind(Maximum = object@Maximum) else NULL
+               cbind(Maximum = object@Maximum) else NULL
     if (length(Maximum) && length(mymat) && Rank == 1)
-        Maximum[is.na(mymat),] <- NA
+      Maximum[is.na(mymat),] <- NA
 
-    optmat <- cbind(t(object@Optimum))
+   optmat <- cbind(t(object@Optimum))
     dimnames(optmat) <- list(dimnames(optmat)[[1]],
-        if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep = ".")
-        else "Optimum")
+        if (Rank > 1)
+          paste("Optimum", dimnames(optmat)[[2]], sep = ".") else
+          "Optimum")
     if (length(optmat) && length(mymat) && Rank == 1)
-        optmat[is.na(mymat),] <- NA
+        optmat[is.na(mymat), ] <- NA
 
     if ( object@Constrained ) {
-        cat("\nC matrix (constrained/canonical coefficients)\n")
-        print(object@C, ...)
+      cat("\nC matrix (constrained/canonical coefficients)\n")
+      print(object@C, ...)
     }
     cat("\nB1 and A matrices\n")
     print(cbind(t(object@B1),
@@ -1237,13 +1267,15 @@ show.Coef.qrrvglm <- function(x, ...) {
     cat("\nOptima and maxima\n")
     print(cbind(Optimum = optmat,
                 Maximum), ...)
-    if (Rank > 1) { # !object at Diagonal && Rank > 1
-        cat("\nTolerances\n") } else
-        cat("\nTolerance\n")
+    if (Rank > 1) {  # !object at Diagonal && Rank > 1
+      cat("\nTolerances\n")
+    } else {
+      cat("\nTolerance\n")
+    }
     print(mymat, ...)
 
     cat("\nStandard deviation of the latent variables (site scores)\n")
-    print(apply(cbind(object@lv), 2, sd))
+    print(apply(cbind(object@latvar), 2, sd))
     invisible(object)
 }
 
@@ -1264,133 +1296,136 @@ setMethod("summary", "qrrvglm", function(object, ...)
     summary.qrrvglm(object, ...))
 
 
-predictqrrvglm <- function(object,
-                         newdata = NULL,
-                         type = c("link", "response", "lv", "terms"),
-                         se.fit = FALSE,
-                         deriv = 0,
-                         dispersion = NULL,
-                         extra = object@extra, 
-                         varlvI = FALSE, reference = NULL, ...) {
-    if (se.fit)
-        stop("cannot handle se.fit == TRUE yet")
-    if (deriv != 0)
-        stop("derivative is not equal to 0")
-
-    if (mode(type) != "character" && mode(type) != "name")
-        type <- as.character(substitute(type))
-    type <- match.arg(type, c("link", "response", "lv", "terms"))[1]
-    if (type == "lv")
-        stop("cannot handle type='lv' yet")
-    if (type == "terms")
-        stop("cannot handle type='terms' yet")
+predictqrrvglm <-
+  function(object,
+           newdata = NULL,
+           type = c("link", "response", "latvar", "terms"),
+           se.fit = FALSE,
+           deriv = 0,
+           dispersion = NULL,
+           extra = object@extra, 
+           varI.latvar = FALSE, reference = NULL, ...) {
+  if (se.fit)
+    stop("cannot handle se.fit == TRUE yet")
+  if (deriv != 0)
+    stop("derivative is not equal to 0")
 
-    M <- object@misc$M
-    Rank  <- object@control$Rank
+  if (mode(type) != "character" && mode(type) != "name")
+    type <- as.character(substitute(type))
+  type <- match.arg(type, c("link", "response", "latvar", "terms"))[1]
+  if (type == "latvar")
+    stop("cannot handle type='latvar' yet")
+  if (type == "terms")
+    stop("cannot handle type='terms' yet")
 
-    na.act <- object@na.action
-    object@na.action <- list()
+  M <- object@misc$M
+  Rank  <- object@control$Rank
 
-    if (!length(newdata) && type == "response" && length(object@fitted.values)) {
-        if (length(na.act)) {
-            return(napredict(na.act[[1]], object@fitted.values))
-        } else {
-            return(object@fitted.values)
-        }
-    }
+  na.act <- object@na.action
+  object@na.action <- list()
 
-    if (!length(newdata)) {
-        X <- model.matrixvlm(object, type = "lm", ...)
-        offset <- object@offset
-        tt <- object@terms$terms   # terms(object)
-        if (!length(object@x))
-            attr(X, "assign") <- attrassignlm(X, tt)
+  if (!length(newdata) && type == "response" && length(object@fitted.values)) {
+    if (length(na.act)) {
+      return(napredict(na.act[[1]], object@fitted.values))
     } else {
-        if (is.smart(object) && length(object@smart.prediction)) {
-            setup.smart("read", smart.prediction = object@smart.prediction)
-        }
+      return(object@fitted.values)
+    }
+  }
 
-        tt <- object@terms$terms # terms(object) # 11/8/03; object@terms$terms
-        X <- model.matrix(delete.response(tt), newdata, contrasts =
-                      if (length(object@contrasts)) object@contrasts else NULL,
-                      xlev = object@xlevels)
+  if (!length(newdata)) {
+    X <- model.matrixvlm(object, type = "lm", ...)
+    offset <- object@offset
+    tt <- object@terms$terms   # terms(object)
+    if (!length(object@x))
+      attr(X, "assign") <- attrassignlm(X, tt)
+  } else {
+    if (is.smart(object) && length(object@smart.prediction)) {
+      setup.smart("read", smart.prediction = object@smart.prediction)
+    }
 
-        if (nrow(X) != nrow(newdata)) {
-            as.save <- attr(X, "assign")
-            X <- X[rep(1, nrow(newdata)),,drop = FALSE]
-            dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
-            attr(X, "assign") <- as.save  # Restored
-        }
+    tt <- object@terms$terms # terms(object)  # 11/8/03; object@terms$terms
+    X <- model.matrix(delete.response(tt), newdata, contrasts =
+                  if (length(object@contrasts)) object@contrasts else NULL,
+                  xlev = object@xlevels)
 
-        offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
-            eval(attr(tt,"variables")[[off.num+1]], newdata)
-        } else if (!is.null(object@offset))
-            eval(object@call$offset, newdata)
+    if (nrow(X) != nrow(newdata)) {
+      as.save <- attr(X, "assign")
+      X <- X[rep(1, nrow(newdata)),, drop = FALSE]
+      dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
+      attr(X, "assign") <- as.save  # Restored
+    }
 
-        if (any(c(offset) != 0)) stop("currently cannot handle nonzero offsets")
+    offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
+      eval(attr(tt,"variables")[[off.num+1]], newdata)
+    } else if (!is.null(object@offset))
+      eval(object@call$offset, newdata)
 
-        if (is.smart(object) && length(object@smart.prediction)) {
-            wrapup.smart()
-        }
+      if (any(c(offset) != 0))
+        stop("currently cannot handle nonzero offsets")
 
-        attr(X, "assign") <- attrassigndefault(X, tt)
+      if (is.smart(object) && length(object@smart.prediction)) {
+        wrapup.smart()
     }
 
+    attr(X, "assign") <- attrassigndefault(X, tt)
+  }
+
     ocontrol <- object@control
 
     Rank <- ocontrol$Rank
     NOS <- ncol(object@y)
     sppnames <- dimnames(object@y)[[2]]
-    modelno <- ocontrol$modelno  # 1,2,3,5 or 0
+    modelno <- ocontrol$modelno  # 1, 2, 3, 5 or 0
     M <- if (any(slotNames(object) == "predictors") &&
              is.matrix(object@predictors))
            ncol(object@predictors) else
            object@misc$M
-    MSratio <- M / NOS  # First value is g(mean) = quadratic form in lv
+    MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
     if (MSratio != 1) stop("can only handle MSratio == 1 for now")
 
 
     if (length(newdata)) {
-        Coefs <- Coef(object, varlvI = varlvI, reference = reference)
-        X1mat <- X[,ocontrol$colx1.index,drop = FALSE]
-        X2mat <- X[,ocontrol$colx2.index,drop = FALSE]
-        lvmat <- as.matrix(X2mat %*% Coefs@C) # n x Rank
-
-        etamat <- as.matrix(X1mat %*% Coefs@B1 + lvmat %*% t(Coefs@A))
-        whichSpecies <- 1:NOS  # Do it all for all species
-        for (sppno in 1:length(whichSpecies)) {
-            thisSpecies <- whichSpecies[sppno]
-            Dmat <- matrix(Coefs@D[,,thisSpecies], Rank, Rank)
-            etamat[,thisSpecies] <- etamat[,thisSpecies] +
-                                   mux34(lvmat, Dmat, symmetric = TRUE)
-        }
+      Coefs <- Coef(object, varI.latvar = varI.latvar, reference = reference)
+      X1mat <- X[,ocontrol$colx1.index, drop = FALSE]
+      X2mat <- X[,ocontrol$colx2.index, drop = FALSE]
+      latvarmat <- as.matrix(X2mat %*% Coefs@C)  # n x Rank
+
+      etamat <- as.matrix(X1mat %*% Coefs@B1 + latvarmat %*% t(Coefs@A))
+      which.species <- 1:NOS  # Do it all for all species
+      for (sppno in 1:length(which.species)) {
+        thisSpecies <- which.species[sppno]
+        Dmat <- matrix(Coefs@D[,,thisSpecies], Rank, Rank)
+        etamat[,thisSpecies] <- etamat[,thisSpecies] +
+                               mux34(latvarmat, Dmat, symmetric = TRUE)
+      }
     } else {
-        etamat <-  object at predictors
-    }
+      etamat <-  object at predictors
+  }
 
-    pred <- switch(type,
-    response = {
-        fv = if (length(newdata)) object@family@linkinv(etamat, extra) else
-                    fitted(object)
-        if (M > 1 && is.matrix(fv)) {
-            dimnames(fv) <- list(dimnames(fv)[[1]],
-                                 dimnames(object@fitted.values)[[2]])
-        }
-        fv
-    },
-    link = etamat,
-    lv = stop("failure here"),
-    terms = stop("failure here"))
-
-    if (!length(newdata) && length(na.act)) {
-        if (se.fit) {
-          pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
-          pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
-        } else {
-            pred <- napredict(na.act[[1]], pred)
-        }
+  pred <- switch(type,
+  response = {
+    fv = if (length(newdata))
+           object@family@linkinv(etamat, extra) else
+           fitted(object)
+    if (M > 1 && is.matrix(fv)) {
+      dimnames(fv) <- list(dimnames(fv)[[1]],
+                           dimnames(object@fitted.values)[[2]])
+    }
+    fv
+  },
+  link = etamat,
+  latvar = stop("failure here"),
+  terms = stop("failure here"))
+
+  if (!length(newdata) && length(na.act)) {
+    if (se.fit) {
+      pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
+      pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
+    } else {
+        pred <- napredict(na.act[[1]], pred)
     }
-    pred
+  }
+  pred
 }
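A companion sketch for prediction from the same hypothetical fit (again illustrative only, and assuming the predict generic dispatches to predictqrrvglm for "qrrvglm" objects as elsewhere in the package):

    eta <- predict(fit1, type = "link")      # matrix of linear predictors
    mu  <- predict(fit1, type = "response")  # fitted means
    # type = "latvar" and type = "terms" are accepted by match.arg() but,
    # as coded above, still stop() rather than return anything.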
 
 
@@ -1407,9 +1442,10 @@ coefqrrvglm <- function(object, matrix.out = FALSE,
 
 
 
-residualsqrrvglm  <- function(object,
-      type = c("deviance", "pearson", "working", "response", "ldot"),
-      matrix.arg = TRUE) {
+residualsqrrvglm  <-
+  function(object,
+           type = c("deviance", "pearson", "working", "response", "ldot"),
+           matrix.arg = TRUE) {
   stop("this function has not been written yet")
 }
 
@@ -1422,49 +1458,49 @@ setMethod("residuals",  "qrrvglm",
 
 
 show.rrvglm <- function(x, ...) {
-    if (!is.null(cl <- x@call)) {
-      cat("Call:\n")
-      dput(cl)
-    }
-    vecOfBetas <- x@coefficients
-    if (any(nas <- is.na(vecOfBetas))) {
-      if (is.null(names(vecOfBetas)))
-        names(vecOfBetas) <- paste("b",
-              1:length(vecOfBetas), sep = "")
-      cat("\nCoefficients: (", sum(nas),
-          " not defined because of singularities)\n", sep = "")
-    } else 
-        cat("\nCoefficients:\n")
-    print.default(vecOfBetas, ...)    # used to be print()
-
-    if (FALSE) {
-      Rank <- x@Rank
-      if (!length(Rank))
-        Rank <- sum(!nas)
-    }
-
-    if (FALSE) {
-      nobs <- if (length(x@df.total)) x@df.total else length(x@residuals)
-      rdf <- x@df.residual
-      if (!length(rdf))
-        rdf <- nobs - Rank
-    }
-    cat("\n")
+  if (!is.null(cl <- x@call)) {
+    cat("Call:\n")
+    dput(cl)
+  }
+  vecOfBetas <- x@coefficients
+  if (any(nas <- is.na(vecOfBetas))) {
+    if (is.null(names(vecOfBetas)))
+      names(vecOfBetas) <- paste("b",
+            1:length(vecOfBetas), sep = "")
+    cat("\nCoefficients: (", sum(nas),
+        " not defined because of singularities)\n", sep = "")
+  } else 
+      cat("\nCoefficients:\n")
+  print.default(vecOfBetas, ...)    # used to be print()
+
+  if (FALSE) {
+    Rank <- x@Rank
+    if (!length(Rank))
+      Rank <- sum(!nas)
+  }
 
-    if (length(deviance(x)))
-        cat("Residual deviance:", format(deviance(x)), "\n")
-    if (length(vll <- logLik.vlm(x)))
-        cat("Log-likelihood:", format(vll), "\n")
-
-    if (length(x@criterion)) {
-        ncrit <- names(x@criterion)
-        for (iii in ncrit)
-            if (iii != "loglikelihood" && iii != "deviance")
-              cat(paste(iii, ":", sep = ""),
-                  format(x@criterion[[iii]]), "\n")
-    }
+  if (FALSE) {
+    nobs <- if (length(x@df.total)) x@df.total else length(x@residuals)
+    rdf <- x@df.residual
+    if (!length(rdf))
+      rdf <- nobs - Rank
+  }
+  cat("\n")
+
+  if (length(deviance(x)))
+    cat("Residual deviance:", format(deviance(x)), "\n")
+  if (length(vll <- logLik.vlm(x)))
+    cat("Log-likelihood:", format(vll), "\n")
+
+  if (length(x@criterion)) {
+    ncrit <- names(x@criterion)
+    for (iii in ncrit)
+      if (iii != "loglikelihood" && iii != "deviance")
+        cat(paste(iii, ":", sep = ""),
+            format(x@criterion[[iii]]), "\n")
+  }
 
-    invisible(x)
+  invisible(x)
 }
 
 
@@ -1477,27 +1513,27 @@ setMethod("show", "rrvglm", function(object) show.rrvglm(object))
 
 
 
-rrvglm.control.Gaussian <- function(half.stepsizing = FALSE,
-                                    save.weight = TRUE, ...) {
 
-    list(half.stepsizing = FALSE,
-         save.weight = as.logical(save.weight)[1])
-}
 
 
 
 summary.rrvglm <- function(object, correlation = FALSE,
                            dispersion = NULL, digits = NULL, 
-                           numerical= TRUE,
+                           numerical = TRUE,
                            h.step = 0.0001, 
                            kill.all = FALSE, omit13 = FALSE,
-                           fixA = FALSE, ...) {
+                           fixA = FALSE, 
+                           presid = TRUE, ...) {
+
 
 
 
 
 
-    if (!is.Numeric(h.step, allowable.length = 1) ||
+
+
+
+    if (!is.Numeric(h.step, length.arg = 1) ||
         abs(h.step) > 1)
       stop("bad input for 'h.step'")
 
@@ -1510,8 +1546,10 @@ summary.rrvglm <- function(object, correlation = FALSE,
     newobject <- as(object, "vglm")
 
 
-    stuff <- summaryvglm(newobject, correlation=correlation,
-                          dispersion=dispersion)
+    stuff <- summaryvglm(newobject,
+                         correlation = correlation,
+                         dispersion = dispersion,
+                         presid = presid)
 
     answer <-
     new(Class = "summary.rrvglm",
@@ -1521,13 +1559,15 @@ summary.rrvglm <- function(object, correlation = FALSE,
         cov.unscaled = stuff@cov.unscaled,
         correlation = stuff@correlation,
         df = stuff@df,
-        pearson.resid = stuff@pearson.resid,
         sigma = stuff@sigma)
 
 
     if (is.numeric(stuff@dispersion))
       slot(answer, "dispersion") <- stuff@dispersion
 
+    if (presid && length(stuff@pearson.resid))
+      slot(answer, "pearson.resid") <- stuff@pearson.resid
+
 
 
     tmp5 <- get.rrvglm.se1(object, omit13 = omit13,
@@ -1552,11 +1592,11 @@ summary.rrvglm <- function(object, correlation = FALSE,
     }
 
     tmp8 <- object@misc$M - object@control$Rank - 
-           length(object@control$szero)
+            length(object@control$str0)
     answer@df[1] <- answer@df[1] + tmp8 * object@control$Rank
     answer@df[2] <- answer@df[2] - tmp8 * object@control$Rank
     if (dispersion == 0) {
-        dispersion <- tmp5$rss / answer@df[2]  # Estimate 
+      dispersion <- tmp5$res.ss / answer@df[2]  # Estimate 
     }
 
     answer@coef3 <- get.rrvglm.se2(answer@cov.unscaled,
@@ -1586,7 +1626,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     if (length(fit@control$Nested) && fit@control$Nested)
         stop("sorry, cannot handle nested models yet")
 
-    szero <- fit@control$szero
+    str0 <- fit@control$str0
 
 
     if (!length(fit@x))
@@ -1597,7 +1637,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     Blist <- fit@constraints
     ncolBlist <- unlist(lapply(Blist, ncol))
 
-    p1 <- length(colx1.index) # May be 0
+    p1 <- length(colx1.index)  # May be 0
     p2 <- length(colx2.index)
 
     Rank <- fit@control$Rank  # fit@misc$Nested.Rank   
@@ -1612,7 +1652,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     x1mat <- if (p1) fit@x[, colx1.index, drop = FALSE] else NULL
     x2mat <- fit@x[, colx2.index, drop = FALSE]
  
-    wz <- weights(fit, type = "work") # old: wweights(fit)  #fit@weights
+    wz <- weights(fit, type = "work")  # old: wweights(fit)  #fit@weights
     if (!length(wz))
         stop("cannot get fit@weights")
 
@@ -1620,7 +1660,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     n <- fit@misc$n
     Index.corner <- fit@control$Index.corner   # used to be (1:Rank);
     zmat <- fit@predictors + fit@residuals
-    theta <- c(Amat[-c(Index.corner,szero),])
+    theta <- c(Amat[-c(Index.corner,str0), ])
     if (fit@control$checkwz)
       wz <- checkwz(wz, M = M, trace = trace,
                    wzepsilon = fit@control$wzepsilon)
@@ -1628,22 +1668,22 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
     if (numerical) {
         delct.da <- num.deriv.rrr(fit, M = M, r = Rank,
-                        x1mat=x1mat, x2mat=x2mat, p2 = p2, 
-                        Index.corner, Aimat=Amat,
-                        B1mat=B1mat, Cimat=Cmat,
+                        x1mat = x1mat, x2mat = x2mat, p2 = p2, 
+                        Index.corner, Aimat = Amat,
+                        B1mat = B1mat, Cimat = Cmat,
                         h.step = h.step,
-                        colx2.index=colx2.index,
+                        colx2.index = colx2.index,
                         xij = fit@control$xij,
-                        szero = szero)
+                        str0 = str0)
     } else {
-        delct.da <- dctda.fast.only(theta=theta, wz = wz,
+        delct.da <- dctda.fast.only(theta = theta, wz = wz,
                         U = U, zmat,
-                        M = M, r = Rank, x1mat=x1mat,
-                        x2mat=x2mat, p2 = p2,
-                        Index.corner, Aimat=Amat,
-                        B1mat=B1mat, Cimat=Cmat,
+                        M = M, r = Rank, x1mat = x1mat,
+                        x2mat = x2mat, p2 = p2,
+                        Index.corner, Aimat = Amat,
+                        B1mat = B1mat, Cimat = Cmat,
                         xij = fit@control$xij,
-                        szero = szero)
+                        str0 = str0)
     }
 
 
@@ -1656,7 +1696,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
     sfit2233 <- summaryvglm(newobject) 
     d8 <-  dimnames(sfit2233@cov.unscaled)[[1]]
-    cov2233 <- solve(sfit2233@cov.unscaled) # Includes any intercepts
+    cov2233 <- solve(sfit2233@cov.unscaled)  # Includes any intercepts
     dimnames(cov2233) <- list(d8, d8)
 
     log.vec33 <- NULL 
@@ -1666,24 +1706,24 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
       if (any(ii == names(colx2.index))) {
         log.vec33 <- c(log.vec33, choose.from[[ii]])
       }
-    cov33 <- cov2233[ log.vec33, log.vec33, drop = FALSE] # r*p2 by r*p2
+    cov33 <- cov2233[ log.vec33, log.vec33, drop = FALSE]  # r*p2 by r*p2
     cov23 <- cov2233[-log.vec33, log.vec33, drop = FALSE]
     cov22 <- cov2233[-log.vec33,-log.vec33, drop = FALSE]
 
 
-    lv.mat <- x2mat %*% Cmat
-    offs <- matrix(0, n, M)     # The "0" handles szero's 
-    offs[,Index.corner] <- lv.mat
-    if (M == (Rank + length(szero)))
+    latvar.mat <- x2mat %*% Cmat
+    offs <- matrix(0, n, M)  # The "0" handles str0's 
+    offs[, Index.corner] <- latvar.mat
+    if (M == (Rank + length(str0)))
       stop("cannot handle full-rank models yet")
-    cm <- matrix(0, M, M - Rank - length(szero))
-    cm[-c(Index.corner, szero),] <- diag(M - Rank - length(szero))
+    cm <- matrix(0, M, M - Rank - length(str0))
+    cm[-c(Index.corner, str0), ] <- diag(M - Rank - length(str0))
 
     Blist <- vector("list", length(colx1.index)+1) 
-    names(Blist) <- c(names(colx1.index), "I(lv.mat)")
+    names(Blist) <- c(names(colx1.index), "I(latvar.mat)")
     for (ii in names(colx1.index))
       Blist[[ii]] <- fit@constraints[[ii]]
-    Blist[["I(lv.mat)"]] <- cm
+    Blist[["I(latvar.mat)"]] <- cm
 
 
     if (p1) {
@@ -1696,29 +1736,29 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
       has.intercept <- any(bb == "(Intercept)")
       bb[bb == "(Intercept)"] <- "1"
-      if (p1>1)
+      if (p1 > 1)
         bb <- paste(bb, collapse = "+")
       if (has.intercept) {
-        bb <- paste("zmat - offs ~ ", bb, " + I(lv.mat)", collapse = " ")
+        bb <- paste("zmat - offs ~ ", bb, " + I(latvar.mat)", collapse = " ")
       } else {
-        bb <- paste("zmat - offs ~ -1 + ", bb, " + I(lv.mat)", collapse = " ")
+        bb <- paste("zmat - offs ~ -1 + ", bb, " + I(latvar.mat)", collapse = " ")
       }
       bb <- as.formula(bb)
     } else {
-      bb <- as.formula("zmat - offs ~ -1 + I(lv.mat)")
+      bb <- as.formula("zmat - offs ~ -1 + I(latvar.mat)")
     }
 
 
     if (fit@misc$dataname == "list") {
-        dspec <- FALSE
+      dspec <- FALSE
     } else {
-        mytext1 <- "exists(x=fit@misc$dataname, envir = VGAM:::VGAMenv)"
-        myexp1 <- parse(text=mytext1)
-        is.there <- eval(myexp1)
-        bbdata <- if (is.there)
-                get(fit@misc$dataname, envir=VGAM:::VGAMenv) else
+      mytext1 <- "exists(x=fit@misc$dataname, envir = VGAMenv)"
+      myexp1 <- parse(text=mytext1)
+      is.there <- eval(myexp1)
+      bbdata <- if (is.there)
+                get(fit@misc$dataname, envir = VGAMenv) else
                 get(fit@misc$dataname)
-        dspec <- TRUE
+      dspec <- TRUE
     }
 
 
@@ -1741,7 +1781,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     dimnames(cov1122) <- list(d8, d8)
 
     lcs <- length(coefvlm(sfit1122))
-    log.vec11 <- (lcs-(M-Rank-length(szero))*Rank+1):lcs
+    log.vec11 <- (lcs-(M-Rank-length(str0))*Rank+1):lcs
     cov11 <- cov1122[log.vec11,  log.vec11, drop = FALSE]
     cov12 <- cov1122[ log.vec11, -log.vec11, drop = FALSE]
     cov22 <- cov1122[-log.vec11, -log.vec11, drop = FALSE]
@@ -1777,7 +1817,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     dimnames(ans) <- list(names(acoefs), names(acoefs))
     list(cov.unscaled = ans,
          coefficients = acoefs,
-         rss = sfit1122@rss)
+         res.ss = sfit1122@res.ss)
 }
 
 
@@ -1786,8 +1826,8 @@ get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
 
     d8 <-  dimnames(cov.unscaled)[[1]]
     ans <- matrix(coefficients, length(coefficients), 3) 
-    ans[,2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
-    ans[,3] <- ans[,1] / ans[,2]
+    ans[, 2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
+    ans[, 3] <- ans[, 1] / ans[, 2]
     dimnames(ans) <- list(d8, c("Estimate", "Std. Error", "z value"))
     ans
 }
@@ -1797,18 +1837,18 @@ get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
 num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
                           p2, Index.corner, Aimat, B1mat, Cimat, 
                           h.step = 0.0001, colx2.index,
-                          xij = NULL, szero = NULL) {
+                          xij = NULL, str0 = NULL) {
 
 
     nn <- nrow(x2mat)
     if (nrow(Cimat) != p2 || ncol(Cimat) != r)
         stop("'Cimat' wrong shape")
 
-    dct.da <- matrix(as.numeric(NA), (M-r-length(szero))*r, r*p2)
+    dct.da <- matrix(as.numeric(NA), (M-r-length(str0))*r, r*p2)
 
-    if ((length(Index.corner) + length(szero)) == M)
+    if ((length(Index.corner) + length(str0)) == M)
         stop("cannot handle full rank models yet")
-    cbindex <- (1:M)[-c(Index.corner, szero)]
+    cbindex <- (1:M)[-c(Index.corner, str0)]
 
     ptr <- 1
     for (sss in 1:r)
@@ -1830,9 +1870,9 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
             newmu <- fit@family@linkinv(neweta, fit@extra) 
             fit@fitted.values <- as.matrix(newmu)  # 20100909
 
-            fred <- weights(fit, type = "w", deriv= TRUE, ignore.slot= TRUE)
+            fred <- weights(fit, type = "w", deriv = TRUE, ignore.slot = TRUE)
             if (!length(fred))
-                stop("cannot get @weights and $deriv from object")
+              stop("cannot get @weights and @deriv from object")
             wz <- fred$weights
             deriv.mu <- fred$deriv
 
@@ -1843,10 +1883,10 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
               newzmat <- newzmat - x1mat %*% B1mat
 
             newfit <- vlm.wfit(xmat = x2mat, zmat = newzmat,
-                              Blist = small.Blist, U = U,
-                              matrix.out = FALSE, is.vlmX = FALSE,
-                              rss = TRUE, qr = FALSE, x.ret = FALSE,
-                              offset = NULL, xij = xij)
+                               Blist = small.Blist, U = U,
+                               matrix.out = FALSE, is.vlmX = FALSE,
+                               res.ss = TRUE, qr = FALSE, x.ret = FALSE,
+                               offset = NULL, xij = xij)
             dct.da[ptr,] <- (newfit$coef - t(Cimat)) / h.step
             ptr <- ptr + 1
         }
@@ -1860,63 +1900,63 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
 dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
                            p2, Index.corner, Aimat, B1mat, Cimat,
                            xij = NULL,
-                           szero = NULL) {
+                           str0 = NULL) {
 
 
-    if (length(szero))
-        stop("cannot handle 'szero' in dctda.fast.only()")
+  if (length(str0))
+    stop("cannot handle 'str0' in dctda.fast.only()")
 
-    nn <- nrow(x2mat)
-    if (nrow(Cimat) != p2 || ncol(Cimat) != r)
-        stop("Cimat wrong shape")
-
-    fred <- kronecker(matrix(1,1,r), x2mat)
-    fred <- kronecker(fred, matrix(1,M,1))
-    barney <- kronecker(Aimat, matrix(1,1,p2))
-    barney <- kronecker(matrix(1, nn, 1), barney)
-
-    temp <- array(t(barney*fred), c(p2*r, M, nn))
-    temp <- aperm(temp, c(2,1,3))     # M by p2*r by nn
-    temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
-    temp <- m2adefault(temp, M=p2*r)         # Note M != M here!
-    G <- solve(rowSums(temp, dims = 2))   # p2*r by p2*r 
+  nn <- nrow(x2mat)
+  if (nrow(Cimat) != p2 || ncol(Cimat) != r)
+    stop("Cimat wrong shape")
 
-    dc.da <- array(NA, c(p2, r, M, r))  # different from other functions
-    if (length(Index.corner) == M)
-        stop("cannot handle full rank models yet")
-    cbindex <- (1:M)[-Index.corner]    # complement of Index.corner 
-    resid2 <- if (length(x1mat))
-     mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
-           upper = FALSE, as.matrix = TRUE) else
-     mux22(t(wz), zmat                  , M = M,
-           upper = FALSE, as.matrix = TRUE)
+  fred <- kronecker(matrix(1, 1,r), x2mat)
+  fred <- kronecker(fred, matrix(1,M, 1))
+  barney <- kronecker(Aimat, matrix(1, 1,p2))
+  barney <- kronecker(matrix(1, nn, 1), barney)
 
-    for (sss in 1:r)
-        for (ttt in cbindex) {
-            fred <- t(x2mat) *
-                   matrix(resid2[, ttt], p2, nn, byrow = TRUE) # p2 * nn
-            temp2 <- kronecker(eifun(sss,r), rowSums(fred))
-            for (kkk in 1:r) {
-                Wiak <- mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow = TRUE),
-                              M = M, upper = FALSE,
-                              as.matrix = TRUE) # nn * M
-                wxx <- Wiak[,ttt] * x2mat
-                blocki <- t(x2mat) %*% wxx 
-                temp4a <- blocki %*% Cimat[,kkk]
-                if (kkk == 1) {
-                    temp4b <- blocki %*% Cimat[,sss]
-                }
-                temp2 <- temp2 - kronecker(eifun(sss,r), temp4a) -
-                                kronecker(eifun(kkk,r), temp4b)
-            }
-            dc.da[,,ttt,sss] <- G %*% temp2 
+  temp <- array(t(barney*fred), c(p2*r, M, nn))
+  temp <- aperm(temp, c(2, 1, 3))     # M by p2*r by nn
+  temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
+  temp <- m2adefault(temp, M=p2*r)         # Note M != M here!
+  G <- solve(rowSums(temp, dims = 2))   # p2*r by p2*r 
+
+  dc.da <- array(NA, c(p2, r, M, r))  # different from other functions
+  if (length(Index.corner) == M)
+      stop("cannot handle full rank models yet")
+  cbindex <- (1:M)[-Index.corner]    # complement of Index.corner 
+  resid2 <- if (length(x1mat))
+   mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
+         upper = FALSE, as.matrix = TRUE) else
+   mux22(t(wz), zmat                  , M = M,
+         upper = FALSE, as.matrix = TRUE)
+
+  for (sss in 1:r)
+    for (ttt in cbindex) {
+      fred <- t(x2mat) *
+              matrix(resid2[, ttt], p2, nn, byrow = TRUE)  # p2 * nn
+      temp2 <- kronecker(I.col(sss, r), rowSums(fred))
+      for (kkk in 1:r) {
+        Wiak <- mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow = TRUE),
+                      M = M, upper = FALSE,
+                      as.matrix = TRUE)  # nn * M
+        wxx <- Wiak[,ttt] * x2mat
+        blocki <- t(x2mat) %*% wxx 
+        temp4a <- blocki %*% Cimat[,kkk]
+        if (kkk == 1) {
+            temp4b <- blocki %*% Cimat[,sss]
         }
-    ans1 <- dc.da[,,cbindex,,drop = FALSE]  # p2 x r x (M-r) x r 
-    ans1 <- aperm(ans1, c(2,1,3,4))   # r x p2 x (M-r) x r 
+        temp2 <- temp2 - kronecker(I.col(sss, r), temp4a) -
+                         kronecker(I.col(kkk, r), temp4b)
+      }
+      dc.da[,,ttt,sss] <- G %*% temp2 
+    }
+  ans1 <- dc.da[,,cbindex,, drop = FALSE]  # p2 x r x (M-r) x r 
+  ans1 <- aperm(ans1, c(2, 1, 3, 4))  # r x p2 x (M-r) x r 
 
-    ans1 <- matrix(c(ans1), r*p2, (M-r)*r)
-    ans1 <- t(ans1)
-    ans1
+  ans1 <- matrix(c(ans1), r*p2, (M-r)*r)
+  ans1 <- t(ans1)
+  ans1
 }
 
 
@@ -1926,114 +1966,114 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
 
 
 
-    nn <- nrow(xmat)
+  nn <- nrow(xmat)
 
-    Aimat <- matrix(as.numeric(NA), M, r)
-    Aimat[Index.corner,] <- diag(r)
-    Aimat[-Index.corner,] <- theta    # [-(1:M)]
+  Aimat <- matrix(as.numeric(NA), M, r)
+  Aimat[Index.corner,] <- diag(r)
+  Aimat[-Index.corner,] <- theta    # [-(1:M)]
 
-    if (intercept) {
-        Blist <- vector("list", pp+1)
-        Blist[[1]] <- diag(M)
-        for (ii in 2:(pp+1))
-            Blist[[ii]] <- Aimat
-    } else {
-        Blist <- vector("list", pp)
-        for (ii in 1:pp)
-            Blist[[ii]] <- Aimat
-    }
+  if (intercept) {
+    Blist <- vector("list", pp+1)
+    Blist[[1]] <- diag(M)
+    for (ii in 2:(pp+1))
+      Blist[[ii]] <- Aimat
+  } else {
+    Blist <- vector("list", pp)
+    for (ii in 1:pp)
+      Blist[[ii]] <- Aimat
+  }
 
-    coeffs <- vlm.wfit(xmat=xmat, z, Blist, U = U, matrix.out = TRUE,
-                      xij = xij)$mat.coef
-    c3 <- coeffs <- t(coeffs)  # transpose to make M x (pp+1)
+  coeffs <- vlm.wfit(xmat = xmat, z, Blist, U = U, matrix.out = TRUE,
+                     xij = xij)$mat.coef
+  c3 <- coeffs <- t(coeffs)  # transpose to make M x (pp+1)
 
 
-    int.vec <- if (intercept) c3[,1] else 0  # \boldeta_0
-    Cimat <- if (intercept) t(c3[Index.corner,-1,drop = FALSE]) else 
-             t(c3[Index.corner,,drop = FALSE])
-    if (nrow(Cimat)!=pp || ncol(Cimat)!=r)
-        stop("Cimat wrong shape")
+  int.vec <- if (intercept) c3[, 1] else 0  # \boldeta_0
+  Cimat <- if (intercept) t(c3[Index.corner,-1, drop = FALSE]) else 
+           t(c3[Index.corner,, drop = FALSE])
+  if (nrow(Cimat)!=pp || ncol(Cimat)!=r)
+    stop("Cimat wrong shape")
 
-    fred <- kronecker(matrix(1,1,r),
-                      if (intercept) xmat[,-1,drop = FALSE] else xmat)
-    fred <- kronecker(fred, matrix(1,M,1))
-    barney <- kronecker(Aimat, matrix(1,1,pp))
-    barney <- kronecker(matrix(1, nn, 1), barney)
+  fred <- kronecker(matrix(1, 1,r),
+                    if (intercept) xmat[,-1, drop = FALSE] else xmat)
+  fred <- kronecker(fred, matrix(1,M, 1))
+  barney <- kronecker(Aimat, matrix(1, 1,pp))
+  barney <- kronecker(matrix(1, nn, 1), barney)
 
-    temp <- array(t(barney*fred), c(r*pp,M,nn))
-    temp <- aperm(temp, c(2,1,3))
-    temp <- mux5(wz, temp, M = M, matrix.arg = TRUE)
-    temp <- m2adefault(temp, M = r*pp)     # Note M != M here!
-    G <- solve(rowSums(temp, dims = 2))
+  temp <- array(t(barney*fred), c(r*pp,M,nn))
+  temp <- aperm(temp, c(2, 1, 3))
+  temp <- mux5(wz, temp, M = M, matrix.arg = TRUE)
+  temp <- m2adefault(temp, M = r*pp)     # Note M != M here!
+  G <- solve(rowSums(temp, dims = 2))
 
-    dc.da <- array(NA, c(pp,r,M,r))  # different from other functions
-    cbindex <- (1:M)[-Index.corner]
-    resid2 <- mux22(t(wz),
-                    z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
-                    upper = FALSE, as.matrix = TRUE)  # mat= TRUE,
+  dc.da <- array(NA, c(pp,r,M,r))  # different from other functions
+  cbindex <- (1:M)[-Index.corner]
+  resid2 <- mux22(t(wz),
+                  z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
+                  upper = FALSE, as.matrix = TRUE)  # mat= TRUE,
 
-    for (s in 1:r)
-        for (tt in cbindex) {
-            fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
-                     t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
-            temp2 <- kronecker(eifun(s,r), rowSums(fred))
-
-            temp4 <- rep(0,pp)
-            for (k in 1:r) {
-                Wiak <- mux22(t(wz),
-                              matrix(Aimat[, k], nn, M, byrow = TRUE),
-                              M = M, upper = FALSE, as.matrix = TRUE)
-                wxx <- Wiak[,tt] * (if (intercept)
-                                    xmat[, -1, drop = FALSE] else
-                                    xmat)
-                blocki <- (if (intercept)
-                          t(xmat[, -1, drop = FALSE]) else
-                          t(xmat)) %*% wxx
-                temp4 <- temp4 + blocki %*% Cimat[, k]
-            }
-            dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(eifun(s,r),temp4))
-        }
-    ans1 <- dc.da[,,cbindex,,drop = FALSE]  # pp x r x (M-r) x r 
-    ans1 <- aperm(ans1, c(2,1,3,4))   # r x pp x (M-r) x r 
+  for (s in 1:r)
+      for (tt in cbindex) {
+          fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
+                   t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
+          temp2 <- kronecker(I.col(s, r), rowSums(fred))
+
+          temp4 <- rep(0,pp)
+          for (k in 1:r) {
+              Wiak <- mux22(t(wz),
+                            matrix(Aimat[, k], nn, M, byrow = TRUE),
+                            M = M, upper = FALSE, as.matrix = TRUE)
+              wxx <- Wiak[,tt] * (if (intercept)
+                                  xmat[, -1, drop = FALSE] else
+                                  xmat)
+              blocki <- (if (intercept)
+                        t(xmat[, -1, drop = FALSE]) else
+                        t(xmat)) %*% wxx
+              temp4 <- temp4 + blocki %*% Cimat[, k]
+          }
+          dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
+      }
+  ans1 <- dc.da[,,cbindex,, drop = FALSE]  # pp x r x (M-r) x r 
+  ans1 <- aperm(ans1, c(2, 1, 3, 4))   # r x pp x (M-r) x r 
 
-    ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow = TRUE)
+  ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow = TRUE)
 
 
-    detastar.da <- array(0,c(M,r,r,nn))
-    for (s in 1:r)
-        for (j in 1:r) {
-            t1 <- t(dc.da[,j,,s])
-            t1 <- matrix(t1, M, pp)
-            detastar.da[,j,s,] <- t1 %*% (if (intercept)
-                                  t(xmat[,-1,drop = FALSE]) else t(xmat))
-        }
+  detastar.da <- array(0,c(M,r,r,nn))
+  for (s in 1:r)
+      for (j in 1:r) {
+          t1 <- t(dc.da[,j,,s])
+          t1 <- matrix(t1, M, pp)
+          detastar.da[,j,s,] <- t1 %*% (if (intercept)
+                                t(xmat[,-1, drop = FALSE]) else t(xmat))
+      }
 
-    etastar <- (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
-    eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
-
-    sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[,,1])
-
-    deta0.da <- array(0,c(M,M,r))
-    AtWi <- kronecker(matrix(1, nn, 1), Aimat)
-    AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE)  # matrix.arg= TRUE, 
-    AtWi <- array(t(AtWi), c(r,M,nn))
-    for (ss in 1:r) {
-        temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1] #MxM
-        temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
-        temp93 <- mux7(temp92, AtWi)
-        temp91 <- rowSums(temp93, dims = 2)    # M x M
-        deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
-    }
-    ans2 <- deta0.da[-(1:r),,,drop = FALSE]   # (M-r) x M x r
-    ans2 <- aperm(ans2, c(1,3,2))       # (M-r) x r x M
-    ans2 <- matrix(c(ans2), (M-r)*r, M) 
+  etastar <- (if (intercept) xmat[,-1, drop = FALSE] else xmat) %*% Cimat
+  eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
 
-    list(dc.da = ans1, dint.da = ans2)
+  sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[,, 1])
+
+  deta0.da <- array(0,c(M,M,r))
+  AtWi <- kronecker(matrix(1, nn, 1), Aimat)
+  AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE)  # matrix.arg= TRUE, 
+  AtWi <- array(t(AtWi), c(r, M, nn))
+  for (ss in 1:r) {
+    temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,, 1]  # MxM
+    temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
+    temp93 <- mux7(temp92, AtWi)
+    temp91 <- rowSums(temp93, dims = 2)  # M x M
+    deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
+  }
+  ans2 <- deta0.da[-(1:r), , , drop = FALSE]  # (M-r) x M x r
+  ans2 <- aperm(ans2, c(1, 3, 2))  # (M-r) x r x M
+  ans2 <- matrix(c(ans2), (M-r)*r, M) 
+
+  list(dc.da = ans1, dint.da = ans2)
 }
 
 
 
-rrr.deriv.rss <- function(theta, wz, U, z, M, r, xmat,
+rrr.deriv.res.ss <- function(theta, wz, U, z, M, r, xmat,
                          pp, Index.corner, intercept = TRUE,
                          xij = NULL) {
 
@@ -2053,7 +2093,7 @@ rrr.deriv.rss <- function(theta, wz, U, z, M, r, xmat,
   }
 
   vlm.wfit(xmat = xmat, z, Blist, U = U, matrix.out = FALSE,
-           rss = TRUE, xij = xij)$rss
+           res.ss = TRUE, xij = xij)$res.ss
 }
 
 
@@ -2088,13 +2128,13 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
   c3 <- coeffs <- t(coeffs)  # transpose to make M x (pp+1)
 
 
-  int.vec <- if (intercept) c3[,1] else 0  # \boldeta_0
+  int.vec <- if (intercept) c3[, 1] else 0  # \boldeta_0
   Cimat <- if (intercept) t(c3[Index.corner, -1, drop = FALSE]) else
-           t(c3[Index.corner,,drop = FALSE])
+           t(c3[Index.corner,, drop = FALSE])
   if (nrow(Cimat) != pp || ncol(Cimat) != r)
       stop("Cimat wrong shape")
 
-  fred <- kronecker(matrix(1,1,r),
+  fred <- kronecker(matrix(1, 1,r),
                    if (intercept) xmat[, -1, drop = FALSE] else xmat)
   fred <- kronecker(fred, matrix(1, M, 1))
   barney <- kronecker(Aimat, matrix(1, 1, pp))
@@ -2114,9 +2154,9 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
 
   for (s in 1:r)
     for (tt in cbindex) {
-      fred <- (if (intercept) t(xmat[,-1,drop = FALSE]) else
+      fred <- (if (intercept) t(xmat[,-1, drop = FALSE]) else
                t(xmat)) * matrix(resid2[,tt],pp,nn,byrow = TRUE) 
-      temp2 <- kronecker(eifun(s,r), rowSums(fred))
+      temp2 <- kronecker(I.col(s, r), rowSums(fred))
 
       temp4 <- rep(0,pp)
       for (k in 1:r) {
@@ -2129,7 +2169,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
                   t(xmat)) %*% wxx 
         temp4 <- temp4 + blocki %*% Cimat[,k]
       }
-      dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(eifun(s,r),temp4))
+      dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
     }
 
   detastar.da <- array(0,c(M,r,r,nn))
@@ -2138,26 +2178,26 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
       t1 <- t(dc.da[,j,s,])
       t1 <- matrix(t1, M, pp)
       detastar.da[,j,s,] <- t1 %*% (if (intercept)
-                            t(xmat[,-1,drop = FALSE]) else t(xmat))
+                            t(xmat[, -1, drop = FALSE]) else t(xmat))
     }
 
-  etastar <- (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
+  etastar <- (if (intercept) xmat[, -1, drop = FALSE] else xmat) %*% Cimat
   eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
 
-  sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[,,1])
+  sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[, , 1])
 
-  deta0.da <- array(0,c(M,M,r))
+  deta0.da <- array(0, c(M, M, r))
 
   AtWi <- kronecker(matrix(1, nn, 1), Aimat)
-  AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE)  # matrix.arg= TRUE, 
-  AtWi <- array(t(AtWi), c(r,M,nn))
+  AtWi <- mux111(t(wz), AtWi, M = M, upper = FALSE)  # matrix.arg= TRUE, 
+  AtWi <- array(t(AtWi), c(r, M, nn))
 
   for (ss in 1:r) {
-    temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1]
-    temp92 <- array(detastar.da[,,ss,],c(M,r,nn))
+    temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,, 1]
+    temp92 <- array(detastar.da[, , ss, ], c(M, r, nn))
     temp93 <- mux7(temp92,AtWi)
-    temp91 <- apply(temp93,1:2,sum)     # M x M
-    temp91 <- rowSums(temp93, dims = 2)   # M x M
+    temp91 <- apply(temp93, 1:2,sum)  # M x M
+    temp91 <- rowSums(temp93, dims = 2)  # M x M
     deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
   }
 
@@ -2184,15 +2224,17 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
 
 
 
+
+
 vellipse <- function(R, ratio = 1, orientation = 0,
-                    center = c(0,0), N=300) {
-    if (length(center) != 2) stop("center must be of length 2")
-    theta <-       2*pi*(0:N)/N
-    x1 <-       R*cos(theta)
-    y1 <- ratio*R*sin(theta)
-    x <- center[1] + cos(orientation)*x1 - sin(orientation)*y1
-    y <- center[2] + sin(orientation)*x1 + cos(orientation)*y1
-    cbind(x, y)
+                    center = c(0, 0), N = 300) {
+  if (length(center) != 2) stop("center must be of length 2")
+  theta <-       2*pi*(0:N)/N
+  x1 <-       R*cos(theta)
+  y1 <- ratio*R*sin(theta)
+  x <- center[1] + cos(orientation)*x1 - sin(orientation)*y1
+  y <- center[2] + sin(orientation)*x1 + cos(orientation)*y1
+  cbind(x, y)
 }
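
A minimal usage sketch for vellipse() above, which returns an (N+1) x 2 matrix of points on an ellipse of radius R, aspect ratio 'ratio', rotated by 'orientation' radians about 'center'; the VGAM::: prefix is an assumption in case the helper is not exported:

circ <- VGAM:::vellipse(R = 1)                # unit circle centred at the origin
flat <- VGAM:::vellipse(R = 1, ratio = 0.5,   # half as tall, rotated 45 degrees
                        orientation = pi/4, center = c(2, 3))
plot(rbind(circ, flat), type = "n", xlab = "x", ylab = "y")
lines(circ)
lines(flat)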
 
 
@@ -2201,19 +2243,20 @@ biplot.qrrvglm <- function(x, ...) {
 }
 
 
-lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
-          add = FALSE, plot.it= TRUE, rug= TRUE, y = FALSE, 
-          type = c("fitted.values", "predictors"),
-          xlab=paste("Latent Variable",
-                     if (Rank == 1) "" else " 1", sep = ""),
-          ylab= if (Rank == 1) switch(type, predictors = "Predictors", 
+ lvplot.qrrvglm <-
+  function(object, varI.latvar = FALSE, reference = NULL,
+           add = FALSE, show.plot = TRUE, rug = TRUE, y = FALSE, 
+           type = c("fitted.values", "predictors"),
+           xlab = paste("Latent Variable",
+                        if (Rank == 1) "" else " 1", sep = ""),
+           ylab = if (Rank == 1) switch(type, predictors = "Predictors", 
               fitted.values = "Fitted values") else "Latent Variable 2",
-          pcex=par()$cex, pcol=par()$col, pch=par()$pch, 
-          llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
+          pcex = par()$cex, pcol = par()$col, pch = par()$pch, 
+          llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
           label.arg = FALSE, adj.arg = -0.1, 
           ellipse = 0.95, Absolute = FALSE, 
               elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200,
-          chull.arg = FALSE, clty = 2, ccol=par()$col, clwd=par()$lwd,
+          chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd,
               cpch = "   ",
           C = FALSE,
               OriginC = c("origin","mean"),
@@ -2242,7 +2285,7 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
         stop("can only handle rank 1 or 2 models")
     M <- object@misc$M
     NOS <- ncol(object@y)
-    MSratio <- M / NOS  # First value is g(mean) = quadratic form in lv
+    MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
     n <- object@misc$n
     colx2.index <- object@control$colx2.index
     cx1i <- object@control$colx1.index  # May be NULL
@@ -2251,11 +2294,11 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
         stop("latent variable plots allowable only for ",
              "noRRR = ~ 1 models")
 
-    Coef.list <- Coef(object, varlvI = varlvI, reference = reference)
+    Coef.list <- Coef(object, varI.latvar = varI.latvar, reference = reference)
     if ( C) Cmat <- Coef.list@C
-    nustar <- Coef.list@lv # n x Rank 
+    nustar <- Coef.list@latvar  # n x Rank 
 
-    if (!plot.it) return(nustar)
+    if (!show.plot) return(nustar)
 
     r.curves <- slot(object, type)   # n times M (\boldeta or \boldmu) 
     if (!add) {
@@ -2265,8 +2308,8 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
                     object@y else r.curves,
                     type = "n", xlab=xlab, ylab=ylab, ...)
         } else { # Rank == 2
-            matplot(c(Coef.list@Optimum[1,], nustar[,1]),
-                    c(Coef.list@Optimum[2,], nustar[,2]),
+            matplot(c(Coef.list@Optimum[1,], nustar[, 1]),
+                    c(Coef.list@Optimum[2,], nustar[, 2]),
                     type = "n", xlab=xlab, ylab=ylab, ...)
         }
     }
@@ -2301,35 +2344,35 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
             o <- sort.list(xx)
             xx <- xx[o]
             yy <- yy[o]
-            lines(xx, yy, col=lcol[i], lwd=llwd[i], lty=llty[i])
+            lines(xx, yy, col =lcol[i], lwd = llwd[i], lty = llty[i])
             if ( y && type == "fitted.values") {
                 ypts <- object@y
                 if (ncol(as.matrix(ypts)) == ncol(r.curves))
-                    points(xx, ypts[o,i], col=pcol[i],
-                           cex=pcex[i], pch=pch[i])
+                    points(xx, ypts[o,i], col =pcol[i],
+                           cex = pcex[i], pch=pch[i])
             } 
         } 
         if (rug) rug(xx) 
     } else {
         for (i in 1:ncol(r.curves))
                    points(Coef.list@Optimum[1,i], Coef.list@Optimum[2,i],
-                   col=pcol[i], cex=pcex[i], pch=pch[i])
+                   col =pcol[i], cex = pcex[i], pch=pch[i])
         if (label.arg) {
             for (i in 1:ncol(r.curves))
                 text(Coef.list@Optimum[1,i], Coef.list@Optimum[2,i],
                      labels=(dimnames(Coef.list@Optimum)[[2]])[i], 
-                     adj=adj.arg[i], col=pcol[i], cex=pcex[i])
+                     adj=adj.arg[i], col =pcol[i], cex = pcex[i])
         }
         if (chull.arg) {
-            hull <- chull(nustar[,1], nustar[,2])
+            hull <- chull(nustar[, 1], nustar[, 2])
             hull <- c(hull, hull[1])
-            lines(nustar[hull,1], nustar[hull,2], type = "b", pch=cpch,
-                  lty=clty, col=ccol, lwd=clwd)
+            lines(nustar[hull, 1], nustar[hull, 2], type = "b", pch=cpch,
+                  lty = clty, col =ccol, lwd = clwd)
         }
         if (length(ellipse)) {
             ellipse.temp <- if (ellipse > 0) ellipse else 0.95
-            if (ellipse < 0 && (!object@control$EqualTolerances || varlvI))
-              stop("an equal-tolerances assumption and 'varlvI = FALSE' ",
+            if (ellipse < 0 && (!object@control$EqualTolerances || varI.latvar))
+              stop("an equal-tolerances assumption and 'varI.latvar = FALSE' ",
                    "is needed for 'ellipse' < 0")
             if ( check.ok ) {
                 colx1.index <- object@control$colx1.index
@@ -2342,7 +2385,7 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
                                 else Coef.list@Maximum[i] * ellipse.temp,
                                 extra = object@extra)
                 if (MSratio > 1) 
-                    cutpoint <- cutpoint[1,1]
+                    cutpoint <- cutpoint[1, 1]
 
                 cutpoint <- object@family@linkfun(Coef.list@Maximum[i],
                                extra = object@extra) - cutpoint
@@ -2354,7 +2397,7 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
                     B=ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
                     if (ellipse < 0) A <- B <- -ellipse / 2
 
-                    theta.angle <- asin(etoli$vector[2,1]) *
+                    theta.angle <- asin(etoli$vector[2, 1]) *
                         ifelse(object@control$Crow1positive[2], 1, -1)
                     if (object@control$Crow1positive[1])
                         theta.angle <- pi - theta.angle
@@ -2363,33 +2406,33 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
                                        orientation = theta.angle,
                                        center = Coef.list@Optimum[,i],
                                        N = egrid),
-                              lwd=elwd[i], col=ecol[i], lty=elty[i])
+                              lwd = elwd[i], col =ecol[i], lty = elty[i])
                 }
             }
         }
 
         if ( C ) {
             if (is.character(OriginC) && OriginC == "mean")
-                OriginC <- c(mean(nustar[,1]), mean(nustar[,2]))
+                OriginC <- c(mean(nustar[, 1]), mean(nustar[, 2]))
             if (is.character(OriginC) && OriginC == "origin")
                 OriginC <- c(0,0)
             for (i in 1:nrow(Cmat))
                 arrows(x0=OriginC[1], y0=OriginC[2],
-                       x1=OriginC[1] + stretchC*Cmat[i,1],
-                       y1=OriginC[2] + stretchC*Cmat[i,2],
-                       lty=Clty[i], col=Ccol[i], lwd=Clwd[i])
+                       x1=OriginC[1] + stretchC*Cmat[i, 1],
+                       y1=OriginC[2] + stretchC*Cmat[i, 2],
+                       lty = Clty[i], col =Ccol[i], lwd = Clwd[i])
             if (label.arg) {
                 temp200 <- dimnames(Cmat)[[1]]
                 for (i in 1:nrow(Cmat))
-                    text(OriginC[1] + stretchC*Cmat[i,1],
-                         OriginC[2] + stretchC*Cmat[i,2], col=Ccol[i],
-                         labels=temp200[i], adj=Cadj.arg[i], cex=Ccex[i])
+                    text(OriginC[1] + stretchC*Cmat[i, 1],
+                         OriginC[2] + stretchC*Cmat[i, 2], col =Ccol[i],
+                         labels=temp200[i], adj=Cadj.arg[i], cex = Ccex[i])
             }
         }
         if (sites) {
-            text(nustar[,1], nustar[,2], adj = 0.5,
+            text(nustar[, 1], nustar[, 2], adj = 0.5,
                  labels = if (is.null(spch)) dimnames(nustar)[[1]] else 
-                 rep(spch, length = nrow(nustar)), col=scol, cex=scex, font=sfont)
+                 rep(spch, length = nrow(nustar)), col =scol, cex = scex, font=sfont)
         }
     }
     invisible(nustar)
@@ -2400,36 +2443,36 @@ lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
 lvplot.rrvglm <- function(object,
                          A = TRUE,
                          C = TRUE,
-                         scores = FALSE, plot.it= TRUE,
-                         groups=rep(1, n),
-                         gapC=sqrt(sum(par()$cxy^2)), scaleA = 1,
+                         scores = FALSE, show.plot = TRUE,
+                         groups = rep(1, n),
+                         gapC = sqrt(sum(par()$cxy^2)), scaleA = 1,
                          xlab = "Latent Variable 1",
                          ylab = "Latent Variable 2",
          Alabels= if (length(object@misc$predictors.names))
          object@misc$predictors.names else paste("LP", 1:M, sep = ""),
-                         Aadj=par()$adj,
-                         Acex=par()$cex,
-                         Acol=par()$col,
+                         Aadj = par()$adj,
+                         Acex = par()$cex,
+                         Acol = par()$col,
                          Apch = NULL,
                          Clabels=rownames(Cmat),
-                         Cadj=par()$adj,
-                         Ccex=par()$cex,
-                         Ccol=par()$col, 
-                         Clty=par()$lty, 
-                         Clwd=par()$lwd, 
+                         Cadj = par()$adj,
+                         Ccex = par()$cex,
+                         Ccol = par()$col, 
+                         Clty = par()$lty, 
+                         Clwd = par()$lwd, 
                          chull.arg = FALSE,
-                         ccex=par()$cex,
-                         ccol=par()$col,
-                         clty=par()$lty,
-                         clwd=par()$lwd,
+                         ccex = par()$cex,
+                         ccol = par()$col,
+                         clty = par()$lty,
+                         clwd = par()$lwd,
                          spch = NULL,
-                         scex=par()$cex,
-                         scol=par()$col,
+                         scex = par()$cex,
+                         scol = par()$col,
                          slabels=rownames(x2mat),
                          ...) {
 
 
-    if (object@control$Rank != 2 && plot.it)
+    if (object@control$Rank != 2 && show.plot)
         stop("can only handle rank-2 models")
     M <- object@misc$M
     n <- object@misc$n
@@ -2447,15 +2490,15 @@ lvplot.rrvglm <- function(object,
     }
     x2mat <- object@x[, colx2.index, drop = FALSE]
     nuhat <- x2mat %*% Cmat
-    if (!plot.it) return(as.matrix(nuhat))
+    if (!show.plot) return(as.matrix(nuhat))
 
     index.nosz <- 1:M
     allmat <- rbind(if (A) Amat else NULL, 
                    if (C) Cmat else NULL, 
                    if (scores) nuhat else NULL)
 
-    plot(allmat[,1], allmat[,2], type = "n",
-         xlab=xlab, ylab=ylab, ...) # xlim etc. supplied through ...
+    plot(allmat[, 1], allmat[, 2], type = "n",
+         xlab=xlab, ylab=ylab, ...)  # xlim etc. supplied through ...
 
     if (A) {
         Aadj <- rep(Aadj, length.out = length(index.nosz))
@@ -2466,14 +2509,14 @@ lvplot.rrvglm <- function(object,
         if (length(Apch)) {
             Apch <- rep(Apch, length.out = length(index.nosz))
             for (i in index.nosz)
-                points(Amat[i,1],
-                       Amat[i,2],
-                       pch=Apch[i],cex=Acex[i],col=Acol[i])
+                points(Amat[i, 1],
+                       Amat[i, 2],
+                       pch=Apch[i],cex = Acex[i],col=Acol[i])
         } else {
             for (i in index.nosz)
-                text(Amat[i,1], Amat[i,2],
-                     Alabels[i], cex=Acex[i],
-                     col=Acol[i], adj=Aadj[i])
+                text(Amat[i, 1], Amat[i, 2],
+                     Alabels[i], cex = Acex[i],
+                     col =Acol[i], adj=Aadj[i])
         }
     }
 
@@ -2488,12 +2531,12 @@ lvplot.rrvglm <- function(object,
         if (length(Clabels) != p2)
             stop("'length(Clabels)' must be equal to ", p2)
         for (ii in 1:p2) {
-            arrows(0, 0, Cmat[ii,1], Cmat[ii,2],
-                   lwd=Clwd[ii], lty=Clty[ii], col=Ccol[ii])
-            const <- 1 + gapC[ii] / sqrt(Cmat[ii,1]^2 + Cmat[ii,2]^2)
-            text(const*Cmat[ii,1], const*Cmat[ii,2],
-                 Clabels[ii], cex=Ccex[ii],
-                 adj=Cadj[ii], col=Ccol[ii])
+            arrows(0, 0, Cmat[ii, 1], Cmat[ii, 2],
+                   lwd = Clwd[ii], lty = Clty[ii], col =Ccol[ii])
+            const <- 1 + gapC[ii] / sqrt(Cmat[ii, 1]^2 + Cmat[ii, 2]^2)
+            text(const*Cmat[ii, 1], const*Cmat[ii, 2],
+                 Clabels[ii], cex = Ccex[ii],
+                 adj=Cadj[ii], col =Ccol[ii])
         }
     }
 
@@ -2515,20 +2558,20 @@ lvplot.rrvglm <- function(object,
                warning("spch/scol/scex is different for individuals ",
                        "from the same group")
 
-            temp <- nuhat[gp,,drop = FALSE]
+            temp <- nuhat[gp,, drop = FALSE]
             if (length(spch)) {
-                points(temp[,1], temp[,2], cex=scex[gp], pch=spch[gp],
-                       col=scol[gp])
+                points(temp[, 1], temp[, 2], cex = scex[gp], pch=spch[gp],
+                       col =scol[gp])
             } else {
-                text(temp[,1], temp[,2], label=slabels, cex=scex[gp],
-                     col=scol[gp])
+                text(temp[, 1], temp[, 2], label = slabels, cex = scex[gp],
+                     col =scol[gp])
             }
             if (chull.arg) {
-                hull <- chull(temp[,1],temp[,2])
+                hull <- chull(temp[, 1], temp[, 2])
                 hull <- c(hull, hull[1])
-                lines(temp[hull,1], temp[hull,2],
-                      type = "b", lty=clty[ii],
-                      col=ccol[ii], lwd=clwd[ii], pch = "  ")
+                lines(temp[hull, 1], temp[hull, 2],
+                      type = "b", lty = clty[ii],
+                      col = ccol[ii], lwd = clwd[ii], pch = "  ")
             }
         }
     }
@@ -2550,7 +2593,7 @@ lvplot.rrvglm <- function(object,
     Amat <- object@constraints[[colx2.index[1]]]
 
     B1mat <- if (p1)
-      coefvlm(object, matrix.out = TRUE)[colx1.index,,drop = FALSE] else
+      coefvlm(object, matrix.out = TRUE)[colx1.index,, drop = FALSE] else
       NULL
 
 
@@ -2561,9 +2604,9 @@ lvplot.rrvglm <- function(object,
 
 
     Rank <- object@control$Rank
-    lv.names <- if (Rank>1) paste("lv", 1:Rank, sep = "") else "lv"
-    dimnames(Amat) <- list(object@misc$predictors.names, lv.names)
-    dimnames(Cmat) <- list(dimnames(Cmat)[[1]], lv.names)
+    latvar.names <- if (Rank>1) paste("latvar", 1:Rank, sep = "") else "latvar"
+    dimnames(Amat) <- list(object@misc$predictors.names, latvar.names)
+    dimnames(Cmat) <- list(dimnames(Cmat)[[1]], latvar.names)
+    dimnames(Cmat) <- list(dimnames(Cmat)[[1]], latvar.names)
 
     ans <- new(Class = "Coef.rrvglm",
       A            = Amat,
@@ -2578,7 +2621,7 @@ lvplot.rrvglm <- function(object,
 
     if (object@control$Corner)
         ans@Atilde <- Amat[-c(object@control$Index.corner,
-                         object@control$szero),,drop = FALSE]
+                         object@control$str0),, drop = FALSE]
     ans
 }
 
@@ -2643,10 +2686,10 @@ setMethod("biplot",  "rrvglm", function(x, ...)
 
 
 summary.qrrvglm <- function(object,
-                           varlvI = FALSE, reference = NULL, ...) {
+                           varI.latvar = FALSE, reference = NULL, ...) {
     answer <- object
-    answer@post$Coef <- Coef(object, varlvI = varlvI, reference = reference, 
-                            ...) # Store it here; non-elegant
+    answer@post$Coef <- Coef(object, varI.latvar = varI.latvar, reference = reference, 
+                            ...)  # Store it here; non-elegant

     if (length((answer@post$Coef)@dispersion) &&
        length(object@misc$estimated.dispersion) &&
@@ -2666,7 +2709,7 @@ show.summary.qrrvglm <- function(x, ...) {
   cat("\nCall:\n")
   dput(x@call)

-  print(x@post$Coef, ...) # non-elegant programming
+  print(x@post$Coef, ...)  # non-elegant programming

   if (length(x@dispersion) > 1) {
     cat("\nDispersion parameters:\n")
@@ -2713,14 +2756,14 @@ setMethod("show", "Coef.rrvglm", function(object)
 
 
  grc <- function(y, Rank = 1, Index.corner = 2:(1+Rank),
-                 szero = 1,
+                 str0 = 1,
                  summary.arg = FALSE, h.step = 0.0001, ...) {
                            
 
 
     myrrcontrol <- rrvglm.control(Rank = Rank,
                                   Index.corner = Index.corner,
-                                  szero = szero, ...)
+                                  str0 = str0, ...)
     object.save <- y
     if (is(y, "rrvglm")) {
         y <- object.save at y
@@ -2733,12 +2776,12 @@ setMethod("show", "Coef.rrvglm", function(object)
           "or a rrvglm() object")
 
     ei <- function(i, n) diag(n)[, i, drop = FALSE]
-    .grc.df <- data.frame(Row.2 = eifun(2, nrow(y)))
+    .grc.df <- data.frame(Row.2 = I.col(2, nrow(y)))
 
     yn1 <- if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
               paste("X2.", 1:nrow(y), sep = "")
     warn.save <- options()$warn
-    options(warn = -3) # Suppress the warnings (hopefully, temporarily)
+    options(warn = -3)  # Suppress the warnings (hopefully, temporarily)
     if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
         yn1 <- paste("X2.", 1:nrow(y), sep = "")
     options(warn = warn.save)
@@ -2755,12 +2798,12 @@ setMethod("show", "Coef.rrvglm", function(object)
     }
     for (ii in 2:ncol(y)) {
             cms[[paste("Col.", ii, sep = "")]] <-
-               modmat.col[,ii,drop = FALSE]
+               modmat.col[,ii, drop = FALSE]
         .grc.df[[paste("Col.", ii, sep = "")]] <- rep(1, nrow(y))
     }
     for (ii in 2:nrow(y)) {
             cms[[yn1[ii]]] <- diag(ncol(y))
-        .grc.df[[yn1[ii]]] <- eifun(ii, nrow(y))
+        .grc.df[[yn1[ii]]] <- I.col(ii, nrow(y))
     }
 
     dimnames(.grc.df) <- list(if (length(dimnames(y)[[1]]))
@@ -2779,7 +2822,7 @@ setMethod("show", "Coef.rrvglm", function(object)
       str2 <- paste(str2, yn1[ii], sep = " + ")
     myrrcontrol$noRRR <- as.formula(str1)  # Overwrite this
 
-    assign(".grc.df", .grc.df, envir = VGAM:::VGAMenv)
+    assign(".grc.df", .grc.df, envir = VGAMenv)
 
     warn.save <- options()$warn
     options(warn = -3)    # Suppress the warnings (hopefully, temporarily)
@@ -2797,8 +2840,8 @@ setMethod("show", "Coef.rrvglm", function(object)
       answer <- as(answer, "grc")
     }
 
-    if (exists(".grc.df", envir = VGAM:::VGAMenv))
-      rm(".grc.df", envir = VGAM:::VGAMenv)
+    if (exists(".grc.df", envir = VGAMenv))
+      rm(".grc.df", envir = VGAMenv)
 
     answer
 }
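
A minimal sketch of calling grc() as defined above: it fits Goodman's row-column (RC) association model of the given Rank to a matrix of counts, using the Index.corner and str0 defaults shown in the signature; the toy count matrix below is made up for illustration and the package is assumed to be attached:

library(VGAM)
set.seed(123)
counts <- matrix(rpois(20, lambda = 8), nrow = 4, ncol = 5,
                 dimnames = list(paste0("row", 1:4), paste0("col", 1:5)))
fit <- grc(counts, Rank = 1)     # rank-1 RC association model
summary(fit)                     # summary method for 'grc' objects, also in this file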
@@ -2812,17 +2855,17 @@ summary.grc <- function(object, ...) {
 
 
 trplot.qrrvglm <- function(object,
-       whichSpecies = NULL,
-       add = FALSE, plot.it = TRUE,
+       which.species = NULL,
+       add = FALSE, show.plot = TRUE,
        label.sites = FALSE, 
        sitenames = rownames(object at y),
        axes.equal = TRUE,
-       cex=par()$cex,
+       cex = par()$cex,
        col = 1:(nos*(nos-1)/2),
        log = "", 
        lty = rep(par()$lty, length.out = nos*(nos-1)/2),
        lwd = rep(par()$lwd, length.out = nos*(nos-1)/2),
-       tcol= rep(par()$col, length.out = nos*(nos-1)/2),
+       tcol = rep(par()$col, length.out = nos*(nos-1)/2),
        xlab = NULL, ylab = NULL, 
        main = "",   # "Trajectory plot",
        type = "b",
@@ -2831,53 +2874,53 @@ trplot.qrrvglm <- function(object,
   if (coef.obj@Rank != 1)
     stop("object must be a rank-1 model")
   fv <- fitted(object)
-  modelno <- object@control$modelno  # 1,2,3, or 0
+  modelno <- object@control$modelno  # 1, 2, 3, or 0
   NOS <- ncol(fv)   # Number of species
   M <- object@misc$M  #
   nn <- nrow(fv)  # Number of sites 
   if (length(sitenames))
     sitenames <- rep(sitenames, length.out = nn)
   sppNames <- dimnames(object@y)[[2]]
-  if (!length(whichSpecies)) {
-    whichSpecies <- sppNames[1:NOS]
-    whichSpecies.numer <- 1:NOS
+  if (!length(which.species)) {
+    which.species <- sppNames[1:NOS]
+    which.species.numer <- 1:NOS
   } else
-  if (is.numeric(whichSpecies)) {
-    whichSpecies.numer <- whichSpecies
-    whichSpecies <- sppNames[whichSpecies.numer]  # Convert to character
+  if (is.numeric(which.species)) {
+    which.species.numer <- which.species
+    which.species <- sppNames[which.species.numer]  # Convert to character
   } else {
-     whichSpecies.numer <- match(whichSpecies, sppNames)
+     which.species.numer <- match(which.species, sppNames)
   }
-    nos <- length(whichSpecies) # nos = number of species to be plotted
+    nos <- length(which.species)  # nos = number of species to be plotted
 
-  if (length(whichSpecies.numer) <= 1)
+  if (length(which.species.numer) <= 1)
     stop("must have at least 2 species to be plotted")
   cx1i <- object@control$colx1.index
   if (check.ok)
   if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
     stop("trajectory plots allowable only for noRRR = ~ 1 models")
 
-  first.spp  <- iam(1,1,M = M,both = TRUE,diag = FALSE)$row.index
-  second.spp <- iam(1,1,M = M,both = TRUE,diag = FALSE)$col.index
-  myxlab <- if (length(whichSpecies.numer) == 2) {
+  first.spp  <- iam(1, 1,M = M,both = TRUE,diag = FALSE)$row.index
+  second.spp <- iam(1, 1,M = M,both = TRUE,diag = FALSE)$col.index
+  myxlab <- if (length(which.species.numer) == 2) {
               paste("Fitted value for",
-              if (is.character(whichSpecies.numer))
-                  whichSpecies.numer[1] else
-                  sppNames[whichSpecies.numer[1]])
+              if (is.character(which.species.numer))
+                  which.species.numer[1] else
+                  sppNames[which.species.numer[1]])
                } else "Fitted value for 'first' species"
   myxlab <- if (length(xlab)) xlab else myxlab
-  myylab <- if (length(whichSpecies.numer) == 2) {
+  myylab <- if (length(which.species.numer) == 2) {
               paste("Fitted value for",
-              if (is.character(whichSpecies.numer))
-                  whichSpecies.numer[2] else
-                  sppNames[whichSpecies.numer[2]])
+              if (is.character(which.species.numer))
+                  which.species.numer[2] else
+                  sppNames[which.species.numer[2]])
                } else "Fitted value for 'second' species"
   myylab <- if (length(ylab)) ylab else myylab
   if (!add) {
-    xxx <- if (axes.equal) fv[,whichSpecies.numer] else
-           fv[,whichSpecies.numer[first.spp]]
-    yyy <- if (axes.equal) fv[,whichSpecies.numer] else
-           fv[,whichSpecies.numer[second.spp]]
+    xxx <- if (axes.equal) fv[,which.species.numer] else
+           fv[,which.species.numer[first.spp]]
+    yyy <- if (axes.equal) fv[,which.species.numer] else
+           fv[,which.species.numer[second.spp]]
     matplot(xxx, yyy, type = "n", log = log, xlab = myxlab,
             ylab = myylab, main = main, ...)
   }
@@ -2887,25 +2930,25 @@ trplot.qrrvglm <- function(object,
   lty  <- rep(lty,  length.out = nos*(nos-1)/2)
   tcol <- rep(tcol, length.out = nos*(nos-1)/2)
 
-  oo <- order(coef.obj@lv)   # Sort by the latent variable
+  oo <- order(coef.obj@latvar)  # Sort by the latent variable
   ii <- 0
   col <- rep(col, length = nos*(nos-1)/2)
   species.names <- NULL
-  if (plot.it)
-    for (i1 in seq(whichSpecies.numer)) {
-      for (i2 in seq(whichSpecies.numer))
+  if (show.plot)
+    for (i1 in seq(which.species.numer)) {
+      for (i2 in seq(which.species.numer))
         if (i1 < i2) {
           ii <- ii + 1
           species.names <- rbind(species.names,
                                  cbind(sppNames[i1], sppNames[i2]))
-          matplot(fv[oo, whichSpecies.numer[i1]],
-                  fv[oo, whichSpecies.numer[i2]],
+          matplot(fv[oo, which.species.numer[i1]],
+                  fv[oo, which.species.numer[i2]],
                   type = type, add = TRUE,
                   lty = lty[ii], lwd = lwd[ii], col = col[ii],
                   pch = if (label.sites) "   " else "*" )
           if (label.sites && length(sitenames))
-              text(fv[oo, whichSpecies.numer[i1]],
-                   fv[oo, whichSpecies.numer[i2]],
+              text(fv[oo, which.species.numer[i1]],
+                   fv[oo, which.species.numer[i2]],
                    labels = sitenames[oo], cex = cex, col = tcol[ii])
         }
     }
@@ -2967,27 +3010,28 @@ vcovqrrvglm <- function(object,
 
   answer <- NULL
   Cov.unscaled <- array(NA, c(3, 3, M), dimnames = list(
-      c("(Intercept)", "lv", "lv^2"),
-      c("(Intercept)", "lv", "lv^2"), dimnames(cobj@D)[[3]]))
+      c("(Intercept)", "latvar", "latvar^2"),
+      c("(Intercept)", "latvar", "latvar^2"), dimnames(cobj@D)[[3]]))
   for (spp in 1:M) {
     index <- c(M + ifelse(object@control$EqualTolerances, 1, M) + spp,
                spp,
                M + ifelse(object@control$EqualTolerances, 1, spp))
     vcov <- Cov.unscaled[,,spp] <-
         sobj@cov.unscaled[index, index]  # Order is A, D, B1
-    se2Max <- dvecMax[spp,,drop = FALSE] %*% vcov %*% cbind(dvecMax[spp,])
-    se2Tol <- dvecTol[spp,,drop = FALSE] %*% vcov %*% cbind(dvecTol[spp,])
-    se2Opt <- dvecOpt[spp,,drop = FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
+    se2Max <- dvecMax[spp,, drop = FALSE] %*% vcov %*% cbind(dvecMax[spp,])
+    se2Tol <- dvecTol[spp,, drop = FALSE] %*% vcov %*% cbind(dvecTol[spp,])
+    se2Opt <- dvecOpt[spp,, drop = FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
     answer <- rbind(answer, dispersion[spp]^0.5 *
                    c(se2Opt = se2Opt, se2Tol = se2Tol, se2Max = se2Max))
   }
 
   link.function <- if (MaxScale == "predictors")
       remove.arg(object@misc$predictors.names[1]) else ""
-  dimnames(answer) <- list(dimnames(cobj@D)[[3]], c("Optimum", "Tolerance",
-      if (nchar(link.function))
-        paste(link.function, "(Maximum)", sep = "") else
-        "Maximum"))
+  dimnames(answer) <- list(dimnames(cobj@D)[[3]],
+                           c("Optimum", "Tolerance",
+                             if (nchar(link.function))
+                           paste(link.function, "(Maximum)", sep = "") else
+                           "Maximum"))
   NAthere <- is.na(answer %*% rep(1, length.out = 3))
   answer[NAthere,] <- NA  # NA in tolerance means NA everywhere else
   new(Class = "vcov.qrrvglm",
@@ -3013,13 +3057,15 @@ setClass(Class = "vcov.qrrvglm", representation(
 
 
 model.matrix.qrrvglm <- function(object,
-                                 type = c("lv", "vlm"), ...) {
+                                 type = c("latvar", "vlm"), ...) {
 
   if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
-  type <- match.arg(type, c("lv", "vlm"))[1]
+  type <- match.arg(type, c("latvar", "vlm"))[1]
 
-  switch(type, lv = Coef(object, ...)@lv, vlm = object@x) 
+  switch(type,
+         latvar  = Coef(object, ...)@latvar,
+         vlm = object@x) 
 }
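
A minimal sketch of the two 'type' values handled by the switch() above ('fit' stands for some previously fitted cqo() object, not shown here):

site.scores <- model.matrix(fit, type = "latvar")  # latent variable scores, via Coef(fit)@latvar
lm.matrix   <- model.matrix(fit, type = "vlm")     # the model matrix stored in the @x slot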
 
 
@@ -3032,18 +3078,18 @@ setMethod("model.matrix",  "qrrvglm", function(object, ...)
 
 
 
-perspqrrvglm <- function(x, varlvI = FALSE, reference = NULL,
-      plot.it = TRUE,
+perspqrrvglm <- function(x, varI.latvar = FALSE, reference = NULL,
+      show.plot = TRUE,
       xlim = NULL, ylim = NULL,
-      zlim = NULL, # zlim ignored if Rank == 1
-      gridlength = if (Rank == 1) 301 else c(51,51),
-      whichSpecies = NULL,
+      zlim = NULL,  # zlim ignored if Rank == 1
+      gridlength = if (Rank == 1) 301 else c(51, 51),
+      which.species = NULL,
       xlab = if (Rank == 1)
       "Latent Variable" else "Latent Variable 1",
       ylab = if (Rank == 1)
       "Expected Value" else "Latent Variable 2",
       zlab = "Expected value",
-      labelSpecies = FALSE,   # For Rank == 1 only
+      labelSpecies = FALSE,  # For Rank == 1 only
       stretch = 1.05,  # quick and dirty, Rank == 1 only
       main = "",
       ticktype = "detailed", 
@@ -3052,34 +3098,36 @@ perspqrrvglm <- function(x, varlvI = FALSE, reference = NULL,
       add1 = FALSE,
       ...) {
   oylim <- ylim
-  object <- x  # don't like x as the primary argument 
-  coef.obj <- Coef(object, varlvI = varlvI, reference = reference)
+  object <- x  # Do not like x as the primary argument 
+  coef.obj <- Coef(object, varI.latvar = varI.latvar,
+                   reference = reference)
   if ((Rank <- coef.obj@Rank) > 2)
     stop("object must be a rank-1 or rank-2 model")
   fv <- fitted(object)
-  NOS <- ncol(fv)    # Number of species
+  NOS <- ncol(fv)  # Number of species
   M <- object@misc$M # 

   xlim <- rep(if (length(xlim)) xlim else
-             range(coef.obj@lv[,1]), length = 2)
+              range(coef.obj@latvar[, 1]), length = 2)
   if (!length(oylim)) {
-      ylim <- if (Rank == 1) c(0, max(fv)*stretch) else
-          rep(range(coef.obj@lv[,2]), length = 2)
+    ylim <- if (Rank == 1)
+              c(0, max(fv) * stretch) else
+              rep(range(coef.obj@latvar[, 2]), length = 2)
   }
   gridlength <- rep(gridlength, length = Rank)
-  lv1 <- seq(xlim[1], xlim[2], length = gridlength[1])
+  latvar1 <- seq(xlim[1], xlim[2], length = gridlength[1])
   if (Rank == 1) {
-    m <- cbind(lv1)
+    m <- cbind(latvar1)
   } else {
-    lv2 <- seq(ylim[1], ylim[2], length = gridlength[2])
-    m <- expand.grid(lv1,lv2)
+    latvar2 <- seq(ylim[1], ylim[2], length = gridlength[2])
+    m <- expand.grid(latvar1,latvar2)
   }
 
   if (dim(coef.obj@B1)[1] != 1 ||
       dimnames(coef.obj@B1)[[1]] != "(Intercept)")
-      stop("noRRR = ~ 1 is needed")
-  LP <- coef.obj@A %*% t(cbind(m))   # M by n
-  LP <- LP + c(coef.obj@B1) # Assumes \bix_1 = 1 (intercept only)
+    stop("noRRR = ~ 1 is needed")
+  LP <- coef.obj@A %*% t(cbind(m))  # M by n
+  LP <- LP + c(coef.obj@B1)  # Assumes \bix_1 = 1 (intercept only)
 
   mm <- as.matrix(m)
   N <- ncol(LP)
@@ -3091,69 +3139,75 @@ perspqrrvglm <- function(x, varlvI = FALSE, reference = NULL,
                      t(mm[ii, , drop = FALSE])
     }
   }
-  LP <- t(LP)   # n by M
+  LP <- t(LP)  # n by M
 
 
-    fitvals <- object@family@linkinv(LP)   # n by NOS
+    fitvals <- object@family@linkinv(LP)  # n by NOS
     dimnames(fitvals) <- list(NULL, dimnames(fv)[[2]])
     sppNames <- dimnames(object@y)[[2]]
-    if (!length(whichSpecies)) {
-      whichSpecies <- sppNames[1:NOS]
-      whichSpecies.numer <- 1:NOS
+    if (!length(which.species)) {
+      which.species <- sppNames[1:NOS]
+      which.species.numer <- 1:NOS
     } else
-    if (is.numeric(whichSpecies)) {
-      whichSpecies.numer <- whichSpecies
-      whichSpecies <- sppNames[whichSpecies.numer] # Convert to character
+    if (is.numeric(which.species)) {
+      which.species.numer <- which.species
+      which.species <- sppNames[which.species.numer]  # Convert to character
     } else {
-      whichSpecies.numer <- match(whichSpecies, sppNames)
+      which.species.numer <- match(which.species, sppNames)
     }
     if (Rank == 1) {
-      if (plot.it) {
+      if (show.plot) {
         if (!length(oylim))
-          ylim <- c(0, max(fitvals[,whichSpecies.numer]) *
-                    stretch) # A revision
-        col <- rep(col, length.out = length(whichSpecies.numer))
-        llty <- rep(llty, leng = length(whichSpecies.numer))
-        llwd <- rep(llwd, leng = length(whichSpecies.numer))
-            if (!add1)
-            matplot(lv1, fitvals, xlab = xlab, ylab = ylab, type = "n", 
-                    main = main, xlim = xlim, ylim = ylim, ...) 
-            for (j in 1:length(whichSpecies.numer)) {
-                ptr2 <- whichSpecies.numer[j]  # points to species column
-                lines(lv1, fitvals[,ptr2], col=col[j],
-                      lty=llty[j], lwd=llwd[j], ...)
-                if (labelSpecies) {
-                    ptr1 <- (1:nrow(fitvals))[max(fitvals[,ptr2]) ==
-                                                 fitvals[,ptr2]]
-                    ptr1 <- ptr1[1]
-                    text(lv1[ptr1], fitvals[ptr1,ptr2]+
-                         (stretch-1)*diff(range(ylim)),
-                         label=sppNames[j], col=col[j], ...)
-                }
-            }
+          ylim <- c(0, max(fitvals[,which.species.numer]) *
+                       stretch)  # A revision
+        col <- rep(col, length.out = length(which.species.numer))
+        llty <- rep(llty, leng = length(which.species.numer))
+        llwd <- rep(llwd, leng = length(which.species.numer))
+        if (!add1)
+          matplot(latvar1, fitvals, xlab = xlab, ylab = ylab,
+                  type = "n",
+                  main = main, xlim = xlim, ylim = ylim, ...) 
+        for (jloc in 1:length(which.species.numer)) {
+          ptr2 <- which.species.numer[jloc]  # points to species column
+          lines(latvar1, fitvals[, ptr2],
+                col = col[jloc],
+                lty = llty[jloc],
+                lwd = llwd[jloc], ...)
+          if (labelSpecies) {
+            ptr1 <- (1:nrow(fitvals))[max(fitvals[, ptr2]) ==
+                                              fitvals[, ptr2]]
+            ptr1 <- ptr1[1]
+            text(latvar1[ptr1],
+                 fitvals[ptr1, ptr2] + (stretch-1) * diff(range(ylim)),
+                 label = sppNames[jloc], col = col[jloc], ...)
+          }
         }
+      }
     } else {
-        maxfitted <- matrix(fitvals[,whichSpecies[1]],
-                           length(lv1), length(lv2))
-        if (length(whichSpecies) > 1)
-        for (j in whichSpecies[-1]) {
-          maxfitted <- pmax(maxfitted, matrix(fitvals[,j], 
-                                             length(lv1), length(lv2)))
-        }
-        if (!length(zlim))
-          zlim <- range(maxfitted, na.rm = TRUE)
-
-        if (plot.it)
-          graphics:::persp.default(lv1, lv2, maxfitted,
-                zlim = zlim,
-                xlab = xlab, ylab = ylab, zlab = zlab,
-                ticktype = ticktype, col = col, main = main, ...) 
+      max.fitted <- matrix(fitvals[, which.species[1]],
+                           length(latvar1), length(latvar2))
+      if (length(which.species) > 1)
+      for (jlocal in which.species[-1]) {
+        max.fitted <- pmax(max.fitted,
+                           matrix(fitvals[, jlocal],
+                                  length(latvar1), length(latvar2)))
+      }
+      if (!length(zlim))
+        zlim <- range(max.fitted, na.rm = TRUE)
+
+
+    perspdefault <- getS3method("persp", "default")
+      if (show.plot)
+        perspdefault(latvar1, latvar2, max.fitted,
+                     zlim = zlim,
+                     xlab = xlab, ylab = ylab, zlab = zlab,
+                     ticktype = ticktype, col = col, main = main, ...)
     }
 
-    invisible(list(fitted    = fitvals,
-                   lv1grid   = lv1,
-                   lv2grid   = if (Rank == 2) lv2 else NULL,
-                   maxfitted = if (Rank == 2) maxfitted else NULL))
+    invisible(list(fitted       = fitvals,
+                   latvar1grid  = latvar1,
+                   latvar2grid  = if (Rank == 2) latvar2 else NULL,
+                   max.fitted   = if (Rank == 2) max.fitted else NULL))
 }
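
A minimal sketch of perspqrrvglm() in use ('p1' stands for a fitted rank-1 cqo() object; it is assumed here that the persp() generic dispatches to this function for "qrrvglm" objects):

persp(p1, col = 1:4, labelSpecies = TRUE)   # rank-1: fitted curves along the latent variable
res <- persp(p1, show.plot = FALSE)         # invisibly returns the fitted values and grids
str(res$latvar1grid)                        # the latent-variable grid used above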
 
 
@@ -3190,27 +3244,21 @@ Rank.cao <- function(object, ...) {
 
 
 
-ccoef.qrrvglm <- function(object, varlvI = FALSE,
+concoef.qrrvglm <- function(object, varI.latvar = FALSE,
                           reference = NULL, ...) {
-  Coef(object, varlvI = varlvI, reference = reference, ...)@C
+  Coef(object, varI.latvar = varI.latvar, reference = reference, ...)@C
 }
 
 
-ccoef.Coef.qrrvglm <- function(object, ...) {
+concoef.Coef.qrrvglm <- function(object, ...) {
   if (length(list(...)))
     warning("Too late! Ignoring the extra arguments")
   object@C
 }
 
 
-latvar.qrrvglm <- function(object, varlvI = FALSE,
-                       reference = NULL, ...) {
-  Coef(object, varlvI = varlvI, reference = reference, ...)@lv
-}
-
-
-lv.rrvglm <- function(object, ...) {
-  ans <- lvplot(object, plot.it = FALSE)
+latvar.rrvglm <- function(object, ...) {
+  ans <- lvplot(object, show.plot = FALSE)
   if (ncol(ans) == 1)
     dimnames(ans) <- list(dimnames(ans)[[1]], "lv")
   ans
@@ -3218,16 +3266,27 @@ lv.rrvglm <- function(object, ...) {
 
 
 
+latvar.qrrvglm <- function(object,
+                           varI.latvar = FALSE,
+                           reference = NULL, ...) {
+  Coef(object,
+       varI.latvar = varI.latvar,
+       reference = reference, ...)@latvar
+}
+
+
 latvar.Coef.qrrvglm <- function(object, ...) {
   if (length(list(...)))
     warning("Too late! Ignoring the extra arguments")
-  object@lv
+  object@latvar
 }
 
 
-Max.qrrvglm <- function(object, varlvI = FALSE,
-                        reference = NULL, ...) {
-  Coef(object, varlvI = varlvI, reference = reference, ...)@Maximum
+Max.qrrvglm <-
+  function(object, varI.latvar = FALSE,
+           reference = NULL, ...) {
+  Coef(object, varI.latvar = varI.latvar,
+       reference = reference, ...)@Maximum
 }
 
 
@@ -3240,8 +3299,10 @@ Max.Coef.qrrvglm <- function(object, ...) {
 }
 
 
-Opt.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
-  Coef(object, varlvI = varlvI, reference = reference, ...)@Optimum
+Opt.qrrvglm <-
+  function(object, varI.latvar = FALSE, reference = NULL, ...) {
+      Coef(object, varI.latvar = varI.latvar,
+           reference = reference, ...)@Optimum
 }
 
 
@@ -3252,8 +3313,10 @@ Opt.Coef.qrrvglm <- function(object, ...) {
 }
 
 
-Tol.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
-  Coef(object, varlvI = varlvI, reference = reference, ...)@Tolerance
+Tol.qrrvglm <-
+  function(object, varI.latvar = FALSE, reference = NULL, ...) {
+      Coef(object, varI.latvar = varI.latvar,
+           reference = reference, ...)@Tolerance
 }
 
 
@@ -3265,18 +3328,40 @@ Tol.Coef.qrrvglm <- function(object, ...) {
 }
 
 
+
  if (!isGeneric("ccoef"))
-    setGeneric("ccoef", function(object, ...)
-    standardGeneric("ccoef")) 
+    setGeneric("ccoef", function(object, ...) {
+    .Deprecated("concoef")
+    
+    standardGeneric("ccoef")
+    }) 
 
 setMethod("ccoef",  "rrvglm",
-  function(object, ...) ccoef.qrrvglm(object, ...))
+  function(object, ...) concoef.qrrvglm(object, ...))
 setMethod("ccoef", "qrrvglm",
-  function(object, ...) ccoef.qrrvglm(object, ...))
+  function(object, ...) concoef.qrrvglm(object, ...))
 setMethod("ccoef",  "Coef.rrvglm",
-  function(object, ...) ccoef.Coef.qrrvglm(object, ...))
+  function(object, ...) concoef.Coef.qrrvglm(object, ...))
 setMethod("ccoef", "Coef.qrrvglm",
-  function(object, ...) ccoef.Coef.qrrvglm(object, ...))
+  function(object, ...) concoef.Coef.qrrvglm(object, ...))
+ if (!isGeneric("concoef"))
+    setGeneric("concoef", function(object, ...)
+    standardGeneric("concoef")) 
+
+setMethod("concoef",  "rrvglm",
+  function(object, ...) concoef.qrrvglm(object, ...))
+setMethod("concoef", "qrrvglm",
+  function(object, ...) concoef.qrrvglm(object, ...))
+setMethod("concoef",  "Coef.rrvglm",
+  function(object, ...) concoef.Coef.qrrvglm(object, ...))
+setMethod("concoef", "Coef.qrrvglm",
+  function(object, ...) concoef.Coef.qrrvglm(object, ...))
+
+
+
+
+
+
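
The net effect of the block above: the old ccoef() generic still works but now passes through .Deprecated(), while the new concoef() generic reaches the same concoef.qrrvglm()/concoef.Coef.qrrvglm() methods. A minimal sketch ('fit' stands for a fitted rrvglm() or cqo() object):

concoef(fit)   # constrained (canonical) coefficient matrix C, under its new name
ccoef(fit)     # same matrix, but preceded by a deprecation warning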
 
 
 setMethod("coef", "qrrvglm",
@@ -3287,22 +3372,40 @@ setMethod("coefficients", "qrrvglm",
 
  if (!isGeneric("lv"))
     setGeneric("lv",
-  function(object, ...) standardGeneric("lv")) 
+  function(object, ...) {
+    .Deprecated("latvar")
+    
+    standardGeneric("lv")
+  })
+
+
 setMethod("lv",  "rrvglm",
-  function(object, ...) lv.rrvglm(object, ...))
+  function(object, ...) {
+    
+    latvar.rrvglm(object, ...)
+  })
 setMethod("lv", "qrrvglm",
-  function(object, ...) latvar.qrrvglm(object, ...))
+  function(object, ...) {
+    
+    latvar.qrrvglm(object, ...)
+  })
 setMethod("lv",  "Coef.rrvglm",
-  function(object, ...) latvar.Coef.qrrvglm(object, ...))
+  function(object, ...) {
+    
+    latvar.Coef.qrrvglm(object, ...)
+  })
 setMethod("lv", "Coef.qrrvglm",
-  function(object, ...) latvar.Coef.qrrvglm(object, ...))
+  function(object, ...) {
+    
+    latvar.Coef.qrrvglm(object, ...)
+  })
 
 
  if (!isGeneric("latvar"))
-    setGeneric("latvar",
+     setGeneric("latvar",
   function(object, ...) standardGeneric("latvar")) 
 setMethod("latvar",  "rrvglm",
-  function(object, ...) lv.rrvglm(object, ...))
+  function(object, ...) latvar.rrvglm(object, ...))
 setMethod("latvar", "qrrvglm",
   function(object, ...) latvar.qrrvglm(object, ...))
 setMethod("latvar",  "Coef.rrvglm",
@@ -3320,6 +3423,13 @@ setMethod("Max", "Coef.qrrvglm",
   function(object, ...) Max.Coef.qrrvglm(object, ...))
 
 
+
+setMethod("Max", "cao",
+  function(object, ...) Coef(object, ...)@Maximum)
+
+
+
+
  if (!isGeneric("Opt"))
     setGeneric("Opt",
   function(object, ...) standardGeneric("Opt"))
@@ -3329,6 +3439,12 @@ setMethod("Opt", "Coef.qrrvglm",
   function(object, ...) Opt.Coef.qrrvglm(object, ...))
 
 
+setMethod("Opt", "cao",
+  function(object, ...) Coef(object, ...)@Optimum)
+
+
+
+
  if (!isGeneric("Tol"))
     setGeneric("Tol",
   function(object, ...) standardGeneric("Tol")) 
@@ -3369,15 +3485,13 @@ is.bell.qrrvglm <- function(object, ...) {
 
 
 is.bell.cao <- function(object, ...) {
-    NA * Max(object, ...)
+  NA * Max(object, ...)
 }
 
 
  if (!isGeneric("is.bell"))
     setGeneric("is.bell",
   function(object, ...) standardGeneric("is.bell"))
-setMethod("is.bell","uqo",
-  function(object, ...) is.bell.uqo(object, ...))
 setMethod("is.bell","qrrvglm",
   function(object,...) is.bell.qrrvglm(object,...))
 setMethod("is.bell","rrvglm",
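
The family.rrr.R hunks above deprecate the old accessor names: ccoef() and lv() now call .Deprecated() before dispatching to the concoef.*/latvar.* workhorses, and new concoef() and latvar() generics are registered alongside them, plus Max/Opt methods for "cao" objects. A minimal sketch of how calling code migrates; the simulated rank-1 Poisson data and the rrvglm() call below are invented for illustration (rrvglm() and poissonff are the package's standard fitting function and family, not shown in this excerpt):

  library(VGAM)
  set.seed(1)
  n <- 200
  mydata <- data.frame(x2 = rnorm(n), x3 = rnorm(n))
  mydata <- transform(mydata, lv1 = 0.7 * x2 - 0.7 * x3)  # a rank-1 signal
  mydata <- transform(mydata,
                      y1 = rpois(n, exp(1 + 1.0 * lv1)),
                      y2 = rpois(n, exp(1 - 0.5 * lv1)),
                      y3 = rpois(n, exp(1 + 0.5 * lv1)))
  fit <- rrvglm(cbind(y1, y2, y3) ~ x2 + x3, poissonff,
                data = mydata, Rank = 1)
  concoef(fit)  # new name for the constrained (canonical) coefficients
  latvar(fit)   # new name for the latent variable (site score) values
  ccoef(fit)    # still works, but now signals .Deprecated("concoef")
  lv(fit)       # still works, but now signals .Deprecated("latvar")
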
diff --git a/R/family.sur.R b/R/family.sur.R
index 2548341..5a321d3 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -1,61 +1,32 @@
-# These functions are Copyright (C) 1998-2013 T. W. Yee  All rights reserved
+# These functions are
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# All rights reserved.
+
 
-# 26/6/98; family.sur.q
-# 20110406; renamed to family.sur.R
 
-# zz; does or doesn't handle? : 
-# vglm(Sur(mydataframe), sur, ...), i.e., 1st coln
-# of mydataframe is the response. 
 
 
 
-# History
-# 20110406; editing it to bring it up to scratch.
-# 20130125; trying to get SUR() going.
 
 
 
-# --------------------------------------------------------------------
-# Maybe should call this surff()??:
 
 
  SUR <- function(
                  mle.normal = FALSE,
                  divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
-#                estimator = c("classical", "iterative"),
                  parallel = FALSE, 
-                 apply.parint = TRUE,
-#                zero = NULL,
                  Varcov = NULL,
                  matrix.arg = FALSE) {
-# Notes:
-# 1. Varcov may be assigned a solve(wz) (=solve(\bSigma)),
-#    and matrix.arg tells what format it is in.
-# 2. Based a little on normal1().
-# 3. Set maxit = 1   for Zellner's estimator (2-stage).
-#    Set maxit = 111 for iterative GLS === IGLS.
 
 
-# Wrong:
-# 1. "2stage"     == Zellners estimator.
-#    "iterative"  == iterative GLS === IGLS.
-#    "MLE.normal" == not yet done.
-#    Or "maxit.sur = 2"?
 
 
-# Last modified:
-# 20130125; trying to get SUR() going.
-# 20130126; seems to work basically but not the above arguments.
-#   A lot more work needed.
-# 20130130; seems to work.
-#   Removed 'zero' argument.
 
 
-# Yettodo:
-# 2013013 ; argument 'mle.normal' is logical.
 
 
-#print("20130129; in SUR()")
+  apply.parint <- TRUE
 
 
   lmean <- "identity"
@@ -73,17 +44,10 @@
     stop("argument 'apply.parint' must be a single logical")
 
 
-# if(mode(estimator) != "character" && mode(estimator) != "name")
-#   estimator <- as.character(substitute(estimator))
-# estimator <- match.arg(estimator,
-#                      c("classical", "iterative"))[1]
-#print(paste('estimator =', estimator))
 
 
   divisor <- match.arg(divisor,
       c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"))[1]
-#print("divisor")
-#print( divisor )
 
   if (mle.normal && divisor != "n")
     warning("MLE requires 'n' as the value of argument 'divisor'. ",
@@ -94,36 +58,18 @@
   new("vglmff",
   blurb = c("Seemingly unrelated regressions"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x,
-                           .parallel , constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
                            apply.int = .apply.parint )
-#   constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel,
-#           .zero = zero, 
             .apply.parint = apply.parint ))),
 
-# deviance = function(y, mu, w, residuals = FALSE,
-#                     eta = NULL, extra = NULL) {
-# Returns the residual sum of squares
-# Nb. extra$wz is wz
 
-#print("head(y - mu)")
-#print( head(y - mu) )
-#print("head(extra$wz)")
-#print( head(extra$wz) )
 
-#   M <- if (length(extra$M)) extra$M else ifelse(is.matrix(y), ncol(y), 1)
-#   if (residuals) {
-#     if (M > 1) NULL else (y-mu) * sqrt(extra$wz)
-#   } else {
-#     ResSS.vgam(y - mu, extra$wz, M = M)
-#   }
-# },
 
   infos = eval(substitute(function(...) {
     list(Musual = 1,  # zz???
-#        zero = .zero ,
-#        link = .link ,
          parallel = .parallel ,
          multipleResponses = TRUE )
   }, list( .parallel = parallel ))),
@@ -134,8 +80,6 @@
       stop("response must be a matrix with at least 2 columns")
     ncoly <- ncol(y)
 
-#print("constraints")
-#print( constraints )
    if (is.logical( .parallel ) &&
        .parallel &&
        !all(as.logical(trivial.constraints(constraints))))
@@ -144,7 +88,6 @@
 
    temp5 <-
     w.y.check(w = w, y = y,
-#             Is.positive.y = TRUE,
               ncol.w.min = 1,
               ncol.w.max = 1,
               ncol.y.max = Inf,
@@ -155,8 +98,6 @@
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
-#print("head(w)")
-#print( head(w) )
 
     if (!all(w[1, 1] == w))
       stop("all prior 'weights' must currently have equal values")
@@ -173,65 +114,42 @@
         paste("Y", 1:M, sep = "") else ddd
 
 
-#   if ( .estimator == "classical")
-#       maxit <- 1
 
 
-# Iteration may lead to an increase in RSS 
-#   if ( .estimator == "iterative")
-#     half.stepsizing <- FALSE
 
 
-# Assign "extra$wz" something corresponding to the M x M identity matrix.
     extra$wz <- matrix(1, nrow(x), M)
 
 
     if (!length(etastart)) {
-# Note: it is a good idea to start with the OLS estimators here first.
       etastart <- matrix(0, n, M)
 
 
       Blist.early <- process.constraints(constraints, x, M,
                                          specialCM = specialCM)
-#print("Blist.early")
-#print( Blist.early )
-      X_vlm.early  <- lm2vlm.model.matrix(x, Blist.early, xij = control$xij,
+      X.vlm.early  <- lm2vlm.model.matrix(x, Blist.early,
+                                          xij = control$xij,
                                           Xm2 = Xm2)
-#print("head(X_vlm.early)")
-#print( head(X_vlm.early) )
 
       Hmatrices <- matrix(c(unlist(Blist.early)), nrow = M)
       jay.index <- 1:ncol(Hmatrices)
 
 
-      extra$ncols_X_lm <- numeric(ncoly)
+      extra$ncols.X.lm <- numeric(ncoly)
       for (jay in 1:ncoly) {
-# model.matrix(fit, lapred.index = 1, type = "lm")
-#print("Hmatrices")
-#print( Hmatrices )
-# 20121231; this code adapted from model.matrixvlm():
-#       lapred.index <- jay.index[jay]
-#       index0 <- Hmatrices[jay, ] != 0  # Orig.
-#       Index0 <- Hmatrices[lapred.index, ] != 0
-#       X_lm_jay <- X_vlm[(0:(n_lm - 1)) * M + lapred.index, Index0,
-#                         drop = FALSE]
-
-        X_lm_jay <- vlm2lm.model.matrix(x_vlm = X_vlm.early,
+
+        X.lm.jay <- vlm2lm.model.matrix(x.vlm = X.vlm.early,
                                         Blist = Blist.early,
-                                        which.lp = jay, M = M)
-#print("head(X_lm_jay)")
-#print( head(X_lm_jay) )
+                                        which.linpred = jay, M = M)
 
-# This is useful, e.g,. for changing the denominator
-        extra$ncols_X_lm[jay] <- ncol(X_lm_jay)
+        extra$ncols.X.lm[jay] <- ncol(X.lm.jay)
 
         etastart[, jay] <- y[, jay] -
-                           lsfit(x = X_lm_jay, y = y[, jay],
+                           lsfit(x = X.lm.jay, y = y[, jay],
                                  wt = c(w), intercept = FALSE)$residuals
       }  # jay
     }  # !length(etastart)
   }), list(
-#           .estimator = estimator,
             .parallel = parallel 
           ))),
   linkinv = function(eta, extra = NULL) eta, 
@@ -240,14 +158,11 @@
     Musual <- extra$Musual
     misc$link <- c(rep( .lmean , length = ncoly))
     temp.names <- predictors.names
-#   temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
     names(misc$link) <- temp.names
-#print("head(w)")
-#print( head(w) )
 
     misc$earg <- vector("list", Musual * ncoly)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii]] <- .emean
     }
     names(misc$earg) <- temp.names
@@ -262,73 +177,44 @@
             .divisor = divisor
           ))),
 
-# linkfun = function(mu, extra = NULL) mu,
   vfamily = "SUR",
 
 
   deriv = eval(substitute(expression({
-#print("in @deriv of SUR()")
-#print(paste("iter =", iter))
     mymu <- eta
     iam.indices <- iam(NA, NA, M = M, both = TRUE)
-#print("iam.indices")
-#print( iam.indices )
-#print("y")
-#print( y )
-#print("mu")
-#print( mu )
     resmat <- y - mymu
     Sigma.elts <- colMeans(resmat[, iam.indices$row.index] *
                            resmat[, iam.indices$col.index])
 
     if ( .divisor != "n") {
-# Make an adjustment for the denominator (above assumes "n")
-# Here, ratio.df >= 1.
       ratio.df <- n / switch( .divisor ,
-        "n-max(pj,pk)" = n - pmax(extra$ncols_X_lm[iam.indices$row.index],
-                                  extra$ncols_X_lm[iam.indices$col.index]),
+        "n-max(pj,pk)" = n - pmax(extra$ncols.X.lm[iam.indices$row.index],
+                                  extra$ncols.X.lm[iam.indices$col.index]),
         "sqrt((n-pj)*(n-pk))" =
-        sqrt((n - extra$ncols_X_lm[iam.indices$row.index]) *
-             (n - extra$ncols_X_lm[iam.indices$col.index])),
+        sqrt((n - extra$ncols.X.lm[iam.indices$row.index]) *
+             (n - extra$ncols.X.lm[iam.indices$col.index])),
         stop("argument 'divisor' unmatched"))
-#print("ratio.df")
-#print( ratio.df )
       Sigma.elts <- Sigma.elts * ratio.df
     } else {
       ratio.df <- rep(1, length = M*(M+1)/2)
     }
 
-#print("Sigma.elts")
-#print( Sigma.elts )
     Sigma.mat <- matrix(0, M, M)
     Sigma.mat[cbind(iam.indices$row.index,
                     iam.indices$col.index)] <- Sigma.elts
     Sigma.mat[cbind(iam.indices$col.index,
                     iam.indices$row.index)] <- Sigma.elts
 
-#print("Sigma.mat")
-#print( Sigma.mat )
-# Cholesky is more efficient than solve()
     invSigma.mat <- chol2inv(chol(Sigma.mat))
-#   invSigma.mat <- solve(Sigma.mat)  # Inefficient
-#print("invSigma.mat")
-#print( invSigma.mat )
 
 
-# dl.dmu returns \bW_i (\biy_i - \bmu_i)
     temp3 <- matrix(invSigma.mat[cbind(iam.indices$row.index,
                                        iam.indices$col.index)],
                     M*(M+1)/2, n)
     dl.dmu <- mux22(temp3, y - mymu, M = M,
                     upper = FALSE, as.matrix = TRUE)
-#print("dim(dl.dmu)")
-#print( dim(dl.dmu) )
-#print("head(dl.dmu)")
-#print( head(dl.dmu) )
-#   dl.dmu <- (y - mymu) / sdev^2  # For normal1()
     dmu.deta <- dtheta.deta(mymu,   .lmean , earg = .emean )
-#print("head(dmu.deta)")
-#print( head(dmu.deta) )
 
     c(w) * dl.dmu * dmu.deta
   }), list( .lmean = lmean,
@@ -337,10 +223,8 @@
 
 
   weight = eval(substitute(expression({
-#print("in @weight of SUR()")
 
 
-# Overwrite invSigma.mat with the inverse variance, if given.
     if (length( .Varcov )) {
       Sigma.mat <- if ( .matrix.arg ) .Varcov else {
                      temp.vec <- rep( .Varcov , len = M*(M+1)/2)
@@ -349,7 +233,6 @@
                                     iam.indices$row.index)] <- temp.vec
                      temp.mat[cbind(iam.indices$row.index,
                                     iam.indices$col.index)] <- temp.vec
-#                    temp.mat <- chol2inv(chol(temp.mat))
                      temp.mat
                    }
       invSigma.mat <- chol2inv(chol(Sigma.mat))
@@ -363,58 +246,32 @@
     extra$Sigma.mat <- Sigma.mat
     extra$invSigma.mat <- invSigma.mat
 
-#print("head(wz)")
-#print( head(wz) )
     wz
   }), list( .divisor = divisor,
-#           .estimator = estimator,
             .Varcov = Varcov,
             .matrix.arg = matrix.arg ))))
 
 
 
   if (mle.normal) {
-# Add a 'loglikelihood' slot to the object.
-# This code based on normal1().
-# Note wz is retrieved from 'extra', and 'wz' has only
-# one general symmetric pos-definite matrix that is replicated
-# a lot.
 
-# Yettodo: if "all prior 'weights' must currently have equal values" is
-# relaxed then have to do some code changes??
 
     ret.ff@loglikelihood <-
       function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
       M <- if (is.matrix(y)) ncol(y) else 1
       n <- if (is.matrix(y)) nrow(y) else length(y)
 
-# Orig:
-#     wz <- VGAM.weights.function(w = w, M = M, n = n)
-# Now:
       wz <- extra$wz
 
       temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
-# Each row of wz is the same (or should be!!)
       onewz <- if (length(extra$invSigma.mat))
                  extra$invSigma.mat else
                  (m2adefault(wz[1, , drop = FALSE], M = M))[,, 1]  # M x M
-#print("onewz")
-#print( onewz )
-#print("extra$invSigma.mat - onewz")
-#print( extra$invSigma.mat - onewz )
 
 
-# 20130131; done: use det() or determinant():
       logdet <- determinant(onewz)$modulus
-#print("logdet")
-#print( logdet )
-#       logdet <- sum(log(eigen(onewz, symmetric = TRUE,
-#                               only.values = TRUE)$values))
-#print("logdet2")
-#print( logdet )
       logretval <- -0.5 * temp1 + 0.5 * n * logdet -
                    n * (M / 2) * log(2*pi)
-#     logretval <- -(ncol(onewz) * log(2 * pi) + logdet + distval)/2
       logretval
     }
   }
@@ -425,16 +282,9 @@
 
 
 
-# 20130125; Below here is old stuff... i will leave this alone
-# --------------------------------------------------------------------
-# 20110407; Below here is old stuff... i will leave this alone
-# --------------------------------------------------------------------
-# --------------------------------------------------------------------
-# --------------------------------------------------------------------
 
 
 
-# Sur <- function...
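
The rewritten family.sur.R above strips the development notes from SUR() and hard-codes apply.parint to TRUE; the removed comments record that maxit = 1 gives Zellner's two-stage estimator, a large maxit gives iterative GLS, and mle.normal = TRUE attaches a normal log-likelihood slot. A hedged sketch of a seemingly-unrelated-regressions fit; the data frame, coefficients and seed are invented, and vglm() is the package's usual fitting function (not shown in this excerpt):

  library(VGAM)
  set.seed(2)
  n <- 100
  sdata <- data.frame(x2 = rnorm(n), x3 = rnorm(n))
  sdata <- transform(sdata,
                     y1 = 1 + 2 * x2 + rnorm(n, sd = 0.5),
                     y2 = 3 - 1 * x3 + rnorm(n, sd = 0.5))
  # maxit = 1 gives Zellner's two-stage estimator; a larger maxit
  # iterates the GLS step (IGLS), per the comments removed above.
  fit.sur <- vglm(cbind(y1, y2) ~ x2 + x3, SUR(mle.normal = TRUE),
                  data = sdata, maxit = 100, trace = TRUE)
  coef(fit.sur, matrix = TRUE)
  fit.sur@extra$Sigma.mat  # error covariance estimate saved by @weight above
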
 
 
 
diff --git a/R/family.survival.R b/R/family.survival.R
index 4d88af8..e92c1ae 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -10,15 +10,17 @@
 
 
 
- dcennormal1 <- function(r1 = 0, r2 = 0,
-                         lmu = "identity",
-                         lsd = "loge",
-                         imu = NULL, isd = NULL, zero = 2)
-{
-  if (!is.Numeric(r1, allowable.length = 1, integer.valued = TRUE) ||
+
+
+ double.cennormal <-
+  function(r1 = 0, r2 = 0,
+           lmu = "identity",
+           lsd = "loge",
+           imu = NULL, isd = NULL, zero = 2) {
+  if (!is.Numeric(r1, length.arg = 1, integer.valued = TRUE) ||
       r1 < 0)
     stop("bad input for 'r1'")
-  if (!is.Numeric(r2, allowable.length = 1, integer.valued = TRUE) ||
+  if (!is.Numeric(r2, length.arg = 1, integer.valued = TRUE) ||
       r2 < 0)
     stop("bad input for 'r2'")
 
@@ -57,7 +59,7 @@
            "of positive integers")
 
     sumw <- sum(w)
-    extra$bign <- sumw + .r1 + .r2 # Tot num of censored & uncensored obsns
+    extra$bign <- sumw + .r1 + .r2  # Tot num of censored & uncensored obsns
 
     if (!length(etastart)) {
       yyyy.est <- if (length( .imu )) .imu else median(y)
@@ -100,15 +102,15 @@
   } , list( .lmu = lmu, .lsd = lsd,
             .emu = emu, .esd = esd,
             .r1 = r1, .r2 = r2 ))),
-  vfamily = c("dcennormal1"),
+  vfamily = c("double.cennormal"),
   deriv = eval(substitute(expression({
     sd <- eta2theta(eta[, 2], .lsd, earg =.esd)
 
     q1 <- .r1 / extra$bign
     q2 <- .r2 / extra$bign
     pee <- 1 - q1 - q2  # 1 if r1==r2==0
-    z1 <- if ( .r1 == 0) - 100 else min((y - mu) / sd) # 100==Inf
-    z2 <- if ( .r2 == 0) + 100 else max((y - mu) / sd) # 100==Inf
+    z1 <- if ( .r1 == 0) - 100 else min((y - mu) / sd)  # 100==Inf
+    z2 <- if ( .r2 == 0) + 100 else max((y - mu) / sd)  # 100==Inf
     fz1 <- if ( .r1 == 0) 0 else dnorm(z1)
     fz2 <- if ( .r2 == 0) 0 else dnorm(z2)
     Fz1 <- if ( .r1 == 0) 0.02 else pnorm(z1)  # 0/0 undefined
@@ -159,12 +161,17 @@ dbisa <- function(x, shape, scale = 1, log = FALSE) {
 
 
   L <- max(length(x), length(shape), length(scale))
-  x     <- rep(x,     len = L);
-  shape <- rep(shape, len = L);
-  scale <- rep(scale, len = L);
+  if (length(x)     != L) x     <- rep(x,     len = L)
+  if (length(shape) != L) shape <- rep(shape, len = L)
+  if (length(scale) != L) scale <- rep(scale, len = L)
+
   logdensity <- rep(log(0), len = L)
+
   xok <- (x > 0)
-  xifun <- function(x) {temp <- sqrt(x); temp - 1/temp}
+  xifun <- function(x) {
+    temp <- sqrt(x)
+    temp - 1/temp
+  }
   logdensity[xok] <-
     dnorm(xifun(x[xok] / scale[xok]) / shape[xok], log = TRUE) +
     log1p(scale[xok]/x[xok]) - log(2) - log(shape[xok]) -
@@ -175,7 +182,7 @@ dbisa <- function(x, shape, scale = 1, log = FALSE) {
 }
 
 
-pbisa <- function(q, shape, scale=1) {
+pbisa <- function(q, shape, scale = 1) {
   if (!is.Numeric(q))
     stop("bad input for argument 'q'")
   if (!is.Numeric(shape, positive = TRUE))
@@ -189,7 +196,7 @@ pbisa <- function(q, shape, scale=1) {
 }
 
 
-qbisa <- function(p, shape, scale=1) {
+qbisa <- function(p, shape, scale = 1) {
   if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
       stop("argument 'p' must have values inside the interval (0,1)")
   if (!is.Numeric(shape, positive = TRUE))
@@ -213,6 +220,7 @@ rbisa <- function(n, shape, scale = 1) {
   ans2 <- (2 + A^2 * shape^2 - temp1) * scale / 2
 
 
+
   ans <- ifelse(A < 0, pmin(ans1, ans2), pmax(ans1, ans2))
   ans[shape <= 0] <- NaN
   ans[scale <= 0] <- NaN
@@ -228,8 +236,7 @@ rbisa <- function(n, shape, scale = 1) {
 
  bisa <- function(lshape = "loge", lscale = "loge",
                   ishape = NULL,   iscale = 1,
-                  imethod = 1, zero = NULL)
-{
+                  imethod = 1, zero = NULL) {
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
   lshape <- attr(eshape, "function.name")
@@ -243,7 +250,7 @@ rbisa <- function(n, shape, scale = 1) {
       stop("bad input for argument 'ishape'")
   if (!is.Numeric(iscale, positive = TRUE))
       stop("bad input for argument 'iscale'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
       stop("argument 'imethod' must be 1 or 2 or 3")
@@ -270,7 +277,7 @@ rbisa <- function(n, shape, scale = 1) {
       shape.init <- if (is.Numeric( .ishape)) rep( .ishape, len = n) else {
       if ( .imethod == 1) {
         ybar <- rep(weighted.mean(y, w), len = n)
-        ybarr <- rep(1 / weighted.mean(1/y, w), len = n) # Reqrs y > 0
+        ybarr <- rep(1 / weighted.mean(1/y, w), len = n)  # Reqrs y > 0
         sqrt(ybar / scale.init + scale.init / ybarr - 2)
       } else if ( .imethod == 2) {
         sqrt(2*( pmax(y, scale.init+0.1) / scale.init - 1))
@@ -328,7 +335,7 @@ rbisa <- function(n, shape, scale = 1) {
   }) , list( .lshape = lshape, .lscale = lscale,
              .eshape = eshape, .escale = escale ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, M) # Diagonal!!
+    wz <- matrix(as.numeric(NA), n, M)  # Diagonal!!
     wz[,iam(1,1,M)] <- 2 * dsh.deta^2 / sh^2
     hfunction <- function(alpha)
       alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
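
family.survival.R renames dcennormal1() to double.cennormal(), switches is.Numeric()'s allowable.length argument to length.arg, and tidies the Birnbaum-Saunders helpers dbisa()/pbisa()/qbisa()/rbisa() and the bisa() family. A short hedged sketch of those functions; the parameter values, seed and intercept-only fit are illustrative only:

  library(VGAM)
  set.seed(3)
  shape <- 0.8; scale <- 2
  bdata <- data.frame(y = rbisa(500, shape = shape, scale = scale))
  dbisa(1.5, shape, scale)    # density at 1.5
  pbisa(1.5, shape, scale)    # distribution function
  qbisa(0.5, shape, scale)    # the median equals the scale parameter
  bfit <- vglm(y ~ 1, bisa, data = bdata, trace = TRUE)
  Coef(bfit)                  # back-transformed shape and scale estimates
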
diff --git a/R/family.ts.R b/R/family.ts.R
index 40b3b2b..b07ca96 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -20,8 +20,8 @@ rrar.Ci <- function(i, coeffs, aa, Ranks., MM) {
 rrar.Ak1 <- function(MM, coeffs, Ranks., aa) {
   ptr <- 0
   Ak1 <- diag(MM)
-  for(jay in 1:MM) {
-    for(i in 1:MM) {
+  for (jay in 1:MM) {
+    for (i in 1:MM) {
       if (i > jay && (MM+1)-(Ranks.[jay]-1) <= i) {
         ptr <- ptr + 1
         Ak1[i,jay] <- coeffs[ptr]
@@ -45,11 +45,11 @@ rrar.Mi <- function(i, MM, Ranks., ki) {
     return(NULL)
   hi <- Ranks.[ki[i]] - Ranks.[ki[i+1]]
   Ji <- matrix(0, hi, Ranks.[1])
-  for(j in 1:hi) {
+  for (j in 1:hi) {
     Ji[j,j+Ranks.[ki[i+1]]] <- 1
   }
-  Mi <- matrix(0, MM-Ranks.[ki[i]], MM) # dim(Oi) == dim(Ji)
-  for(j in 1:(MM-Ranks.[ki[i]])) {
+  Mi <- matrix(0, MM-Ranks.[ki[i]], MM)  # dim(Oi) == dim(Ji)
+  for (j in 1:(MM-Ranks.[ki[i]])) {
     Mi[j,j+Ranks.[ki[i  ]]] <- 1
   }
   kronecker(Mi, Ji)
@@ -58,7 +58,7 @@ rrar.Mi <- function(i, MM, Ranks., ki) {
 
 rrar.Mmat <- function(MM, uu, Ranks., ki) {
   Mmat <- NULL
-  for(ii in uu:1) {
+  for (ii in uu:1) {
     Mmat <- rbind(Mmat, rrar.Mi(ii, MM, Ranks., ki))
   }
   Mmat
@@ -86,14 +86,14 @@ rrar.Ht <- function(plag, MM, Ranks., coeffs, aa, uu, ki) {
   Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
 
   if (!is.null(Mmat))
-  for(i in 1:plag) {
+  for (i in 1:plag) {
     Di <- rrar.Di(i, Ranks.)
     Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
     temp <- Di %*% t(Ci)
     Htop <- cbind(Htop, Mmat %*% kronecker(diag(MM), temp))
   }
 
-  for(i in 1:plag) {
+  for (i in 1:plag) {
     Di <- rrar.Di(i, Ranks.)
     temp <- kronecker(t(Di) %*% t(Ak1), diag(MM))
     Hbot <- block.diag(Hbot, temp)
@@ -105,7 +105,7 @@ rrar.Ht <- function(plag, MM, Ranks., coeffs, aa, uu, ki) {
 rrar.Ut <- function(y, tt, plag, MM) {
   Ut <- NULL
   if (plag>1)
-  for(i in 1:plag) {
+  for (i in 1:plag) {
     Ut <- rbind(Ut, kronecker(diag(MM), cbind(y[tt-i,])))
   }
   Ut
@@ -114,7 +114,7 @@ rrar.Ut <- function(y, tt, plag, MM) {
 
 rrar.UU <- function(y, plag, MM, n) {
   UU <- NULL
-  for(i in (plag+1):n) {
+  for (i in (plag+1):n) {
     UU <- rbind(UU, t(rrar.Ut(y, i, plag, MM)))
   }
   UU
@@ -152,12 +152,12 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
   initialize = eval(substitute(expression({
       Ranks. <- .Ranks
       plag <- length(Ranks.)
-      nn <- nrow(x)   # original n
+      nn <- nrow(x)  # original n
       indices <- 1:plag
 
-      copy_X_vlm <- TRUE # X_vlm_save matrix changes at each iteration 
+      copy.X.vlm <- TRUE  # X.vlm.save matrix changes at each iteration 
 
-      dsrank <- -sort(-Ranks.) # ==rev(sort(Ranks.))
+      dsrank <- -sort(-Ranks.)  # ==rev(sort(Ranks.))
       if (any(dsrank != Ranks.))
           stop("Ranks must be a non-increasing sequence")
       if (!is.matrix(y) || ncol(y) == 1) {
@@ -166,10 +166,10 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
           MM <- ncol(y)
           ki <- udsrank <- unique(dsrank)
           uu <- length(udsrank)
-          for(i in 1:uu)
+          for (i in 1:uu)
              ki[i] <- max((1:plag)[dsrank == udsrank[i]])
-          ki <- c(ki, plag+1) # For computing a
-          Ranks. <- c(Ranks., 0) # For computing a
+          ki <- c(ki, plag+1)  # For computing a
+          Ranks. <- c(Ranks., 0)  # For computing a
           aa <- sum( (MM-Ranks.[ki[1:uu]]) * (Ranks.[ki[1:uu]]-Ranks.[ki[-1]]) )
       }
       if (!intercept.only)
@@ -187,10 +187,10 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
                         runif(aa+sum(Ranks.)*MM)
       temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
                          aa, uu, nn, new.coeffs)
-      X_vlm_save <- temp8$UU %*% temp8$Ht 
+      X.vlm.save <- temp8$UU %*% temp8$Ht 
 
       if (!length(etastart)) {
-        etastart <- X_vlm_save %*% new.coeffs
+        etastart <- X.vlm.save %*% new.coeffs
         etastart <- matrix(etastart, ncol = ncol(y), byrow = TRUE)
       }
 
@@ -220,7 +220,7 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
     tt <- (1+plag):nn
     mu <- matrix(0, nn-plag, MM)
     Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
-    for(i in 1:plag) {
+    for (i in 1:plag) {
       Di <- rrar.Di(i, Ranks.)
       Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
       mu <- mu + y.save[tt-i, , drop = FALSE] %*%
@@ -237,7 +237,7 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
     misc$Dmatrices <- Dmatrices
     misc$Hmatrix <- temp8$Ht
     misc$Phimatrices <- vector("list", plag)
-    for(ii in 1:plag) {
+    for (ii in 1:plag) {
       misc$Phimatrices[[ii]] <- Ak1 %*% Dmatrices[[ii]] %*%
                                 t(Cmatrices[[ii]])
     }
@@ -246,7 +246,7 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
   vfamily = "rrar",
   deriv = expression({
     temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
-    X_vlm_save <- temp8$UU %*% temp8$Ht 
+    X.vlm.save <- temp8$UU %*% temp8$Ht 
 
     extra$coeffs <- new.coeffs
 
@@ -254,7 +254,7 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
     tt <- (1+plag):nn
     Ak1 <- rrar.Ak1(MM, new.coeffs, Ranks., aa)
     Cmatrices <- Dmatrices <- vector("list", plag)
-    for(ii in 1:plag) {
+    for (ii in 1:plag) {
       Dmatrices[[ii]] <- Di <- rrar.Di(ii, Ranks.)
       Cmatrices[[ii]] <- Ci <- rrar.Ci(ii, new.coeffs, aa, Ranks., MM)
       resmat <- resmat - y.save[tt - ii, , drop = FALSE] %*%
@@ -293,9 +293,9 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
                    coefstart = NULL,
                    step = 1.0) {
 
-  if (!is.Numeric(p.ar.lag, integer.valued = TRUE, allowable.length = 1))
+  if (!is.Numeric(p.ar.lag, integer.valued = TRUE, length.arg = 1))
     stop("bad input for argument 'p.ar.lag'")
-  if (!is.Numeric(q.ma.lag, integer.valued = TRUE, allowable.length = 1))
+  if (!is.Numeric(q.ma.lag, integer.valued = TRUE, length.arg = 1))
     stop("bad input for argument 'q.ma.lag'")
   if (q.ma.lag != 0)
     stop("sorry, only q.ma.lag = 0 is currently implemented")
@@ -317,9 +317,9 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
 
     indices <- 1:plag
     tt.index <- (1 + plag):nrow(x)
-    p_lm <- ncol(x)
+    p.lm <- ncol(x)
 
-    copy_X_vlm <- TRUE   # x matrix changes at each iteration 
+    copy.X.vlm <- TRUE   # x matrix changes at each iteration 
 
     if ( .link == "logit"   || .link == "probit" ||
          .link == "cloglog" || .link == "cauchit") {
@@ -337,14 +337,14 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
 
     new.coeffs <- .coefstart  # Needed for iter = 1 of @weight
     new.coeffs <- if (length(new.coeffs))
-                    rep(new.coeffs, len = p_lm + plag) else
-                    c(rnorm(p_lm, sd = 0.1), rep(0, plag)) 
+                    rep(new.coeffs, len = p.lm + plag) else
+                    c(rnorm(p.lm, sd = 0.1), rep(0, plag)) 
 
     if (!length(etastart)) {
-      etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:p_lm]
+      etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:p.lm]
     }
 
-    x <- cbind(x, matrix(as.numeric(NA), n, plag)) # Right size now
+    x <- cbind(x, matrix(as.numeric(NA), n, plag))  # Right size now
     dx <- dimnames(x.save)
     morenames <- paste("(lag", 1:plag, ")", sep = "") 
     dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames)) 
@@ -357,7 +357,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
 
     more <- vector("list", plag)
     names(more) <- morenames
-    for(ii in 1:plag)
+    for (ii in 1:plag)
       more[[ii]] <- ii + max(unlist(attr(x.save, "assign")))
     attr(x, "assign") <- c(attr(x.save, "assign"), more)
   }), list( .link = link, .p.ar.lag = p.ar.lag,
@@ -389,14 +389,14 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
   }, list( .link = link, .earg = earg ))),
   middle2 = eval(substitute(expression({
     realfv <- fv
-    for(ii in 1:plag) {
-      realfv <- realfv + old.coeffs[ii + p_lm] *
-        (x.save[tt.index-ii, 1:p_lm, drop = FALSE] %*%
-         new.coeffs[1:p_lm]) # +
+    for (ii in 1:plag) {
+      realfv <- realfv + old.coeffs[ii + p.lm] *
+        (x.save[tt.index-ii, 1:p.lm, drop = FALSE] %*%
+         new.coeffs[1:p.lm])  # +
     }
 
     true.eta <- realfv + offset  
-    mu <- family@linkinv(true.eta, extra) # overwrite mu with correct one
+    mu <- family@linkinv(true.eta, extra)  # overwrite mu with correct one
   }), list( .link = link, .earg = earg ))),
   vfamily = c("garma", "vglmgam"),
   deriv = eval(substitute(expression({
@@ -414,23 +414,23 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
             .earg = earg ))),
 
   weight = eval(substitute(expression({
-    x[, 1:p_lm] <- x.save[tt.index, 1:p_lm] # Reinstate 
+    x[, 1:p.lm] <- x.save[tt.index, 1:p.lm]  # Reinstate 
 
-    for(ii in 1:plag) {
+    for (ii in 1:plag) {
         temp <- theta2eta(y.save[tt.index-ii], .link , earg = .earg )
 
 
-        x[, 1:p_lm] <- x[, 1:p_lm] -
-                     x.save[tt.index-ii, 1:p_lm] * new.coeffs[ii + p_lm]
-        x[, p_lm+ii] <- temp - x.save[tt.index-ii, 1:p_lm, drop = FALSE] %*%
-                            new.coeffs[1:p_lm]
+        x[, 1:p.lm] <- x[, 1:p.lm] -
+                       x.save[tt.index-ii, 1:p.lm] * new.coeffs[ii + p.lm]
+        x[, p.lm+ii] <- temp - x.save[tt.index-ii, 1:p.lm, drop = FALSE] %*%
+                            new.coeffs[1:p.lm]
     }
     class(x) <- "matrix" # Added 27/2/02; 26/2/04
 
     if (iter == 1)
       old.coeffs <- new.coeffs 
 
-    X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
+    X.vlm.save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
 
     vary <- switch( .link ,
                    identity = 1,
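
The family.ts.R changes above are largely cosmetic: X_vlm_save/p_lm become X.vlm.save/p.lm, for( gains a space, and the rrar()/garma() fitting code is otherwise unchanged. A hedged sketch of garma() with its identity link, which amounts to fitting an AR(1) regression; the simulated series, seed and starting values are invented for illustration:

  library(VGAM)
  set.seed(4)
  tsdata <- data.frame(y = as.numeric(arima.sim(n = 200, list(ar = 0.6))))
  # coefstart: one value per linear-model column plus one per AR lag.
  tfit <- vglm(y ~ 1, garma("identity", p.ar.lag = 1, coefstart = c(0, 0.1)),
               data = tsdata, trace = TRUE)
  coef(tfit, matrix = TRUE)  # the intercept and the lag-1 coefficient
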
@@ -462,7 +462,7 @@ setClass(Class = "Coef.rrar", representation(
 
 
 
-Coef.rrar = function(object, ...) {
+Coef.rrar <- function(object, ...) {
     result = new(Class = "Coef.rrar",
          "plag"     = object@misc$plag,
          "Ranks"    = object@misc$Ranks,
diff --git a/R/family.univariate.R b/R/family.univariate.R
index de20f9f..1b28310 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -34,7 +34,7 @@
   if (!is.vector(vov))
     stop("'vov' must be a vector")
   objvals <- vov
-  for(ii in 1:length(vov))
+  for (ii in 1:length(vov))
     objvals[ii] <- objfun(vov[ii], y = y, x = x, w = w,
                           extraargs = extraargs)
   try.this <- if (abs.arg) {
@@ -65,11 +65,12 @@
   etheta <- link2list(ltheta)
   ltheta <- attr(etheta, "function.name")
 
-  lnu <- as.list(substitute(lnu))
-  enu <- link2list(lnu)
-  lnu <- attr(enu, "function.name")
+  lnuvec <- as.list(substitute(lnu))
+  enuvec <- link2list(lnuvec)
+  lnuvec <- attr(enuvec, "function.name")
 
 
+  inuvec <- inu
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -83,7 +84,7 @@
             "  -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
             "Links:     ",
             namesof("theta", ltheta, earg = etheta), ", ",
-            namesof("nu",    lnu,    earg = enu),
+            namesof("nu",    lnuvec, earg = enuvec),
             "\n", "\n",
             "Mean:     nu*theta/(1+nu)"),
   constraints = eval(substitute(expression({
@@ -97,81 +98,85 @@
       stop("all y values must be in (-1, 1)")
 
     predictors.names <-
-      c(namesof("theta", .ltheta , earg = .etheta, tag = FALSE),
-        namesof("nu",    .lnu ,    earg = .enu,    tag = FALSE))
+      c(namesof("theta", .ltheta , earg = .etheta , tag = FALSE),
+        namesof("nu",    .lnuvec , earg = .enuvec , tag = FALSE))
 
     if (!length(etastart)) {
-      theta.init <- if (length( .itheta ))
-          rep( .itheta, length = n) else {
-          mccullagh89.aux <- function(thetaval, y, x, w, extraargs)
-          mean((y-thetaval)*(thetaval^2-1)/(1-2*thetaval*y+thetaval^2))
-          theta.grid <- seq(-0.9, 0.9, by=0.05)
-          try.this <- getMaxMin(theta.grid, objfun = mccullagh89.aux,
-                               y = y,  x = x, w = w, maximize = FALSE,
-                               abs.arg = TRUE)
-          try.this <- rep(try.this, length.out = n)
-          try.this
+      theta.init <- if (length( .itheta )) {
+        rep( .itheta , length = n)
+      } else {
+        mccullagh89.aux <- function(thetaval, y, x, w, extraargs)
+          mean((y - thetaval) *
+               (thetaval^2 - 1) / (1 - 2*thetaval*y + thetaval^2))
+        theta.grid <- seq(-0.9, 0.9, by = 0.05)
+        try.this <- getMaxMin(theta.grid, objfun = mccullagh89.aux,
+                              y = y,  x = x, w = w, maximize = FALSE,
+                              abs.arg = TRUE)
+        try.this <- rep(try.this, length.out = n)
+        try.this
       }
       tmp <- y / (theta.init - y)
       tmp[tmp < -0.4] <- -0.4
       tmp[tmp > 10.0] <- 10.0
-      nu.init <- rep(if (length( .inu)) .inu else tmp, length = n)
-      nu.init[!is.finite(nu.init)] <- 0.4
+      nuvec.init <- rep(if (length( .inuvec )) .inuvec else tmp, length = n)
+      nuvec.init[!is.finite(nuvec.init)] <- 0.4
       etastart <-
         cbind(theta2eta(theta.init, .ltheta , earg = .etheta ),
-              theta2eta(nu.init,    .lnu,     earg = .enu ))
+              theta2eta(nuvec.init, .lnuvec , earg = .enuvec ))
     }
-  }), list( .ltheta = ltheta, .lnu = lnu, .inu = inu, .itheta = itheta,
-            .etheta = etheta, .enu = enu ))),
+  }), list( .ltheta = ltheta, .lnuvec = lnuvec,
+            .etheta = etheta, .enuvec = enuvec,
+            .inuvec = inuvec, .itheta = itheta ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
-    nu <- eta2theta(eta[, 2], .lnu, earg = .enu )
-    nu * Theta / (1 + nu)
-  }, list( .ltheta = ltheta, .lnu = lnu,
-           .etheta = etheta, .enu = enu ))),
+    nuvec <- eta2theta(eta[, 2], .lnuvec , earg = .enuvec )
+    nuvec * Theta / (1 + nuvec)
+  }, list( .ltheta = ltheta, .lnuvec = lnuvec,
+           .etheta = etheta, .enuvec = enuvec ))),
   last = eval(substitute(expression({
-    misc$link <-    c("theta" = .ltheta , "nu" = .lnu )
+    misc$link <-    c("theta" = .ltheta , "nu" = .lnuvec )
 
-    misc$earg <- list("theta" = .etheta , "nu" = .enu )
+    misc$earg <- list("theta" = .etheta , "nu" = .enuvec )
 
-  }), list( .ltheta = ltheta, .lnu = lnu, .etheta = etheta, .enu = enu ))),
+  }), list( .ltheta = ltheta, .lnuvec = lnuvec,
+            .etheta = etheta, .enuvec = enuvec ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
-    nu <- eta2theta(eta[, 2], .lnu, earg = .enu )
+    nuvec <- eta2theta(eta[, 2], .lnuvec , earg = .enuvec )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else
-      sum(c(w) * ((nu-0.5)*log1p(-y^2) -
-                nu * log1p(-2*Theta*y + Theta^2) -
-              lbeta(nu + 0.5, 0.5)))
-  }, list( .ltheta = ltheta, .lnu = lnu,
-           .etheta = etheta, .enu = enu ))),
+      sum(c(w) * ((nuvec - 0.5) * log1p(-y^2) -
+                   nuvec * log1p(-2*Theta*y + Theta^2) -
+                  lbeta(nuvec + 0.5, 0.5)))
+  }, list( .ltheta = ltheta, .lnuvec = lnuvec,
+           .etheta = etheta, .enuvec = enuvec ))),
   vfamily = c("mccullagh89"),
   deriv = eval(substitute(expression({
     Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
-    nu    <- eta2theta(eta[, 2], .lnu, earg = .enu )
+    nuvec <- eta2theta(eta[, 2], .lnuvec , earg = .enuvec )
 
     dTheta.deta <- dtheta.deta(Theta, .ltheta , earg = .etheta )
-    dnu.deta <- dtheta.deta(nu, .lnu, earg = .enu )
+    dnuvec.deta <- dtheta.deta(nuvec, .lnuvec , earg = .enuvec )
 
-    dl.dTheta <- 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
-    dl.dnu <- log1p(-y^2) - log1p(-2*Theta*y + Theta^2) -
-             digamma(nu + 0.5) + digamma(nu + 1)
+    dl.dTheta <- 2 * nuvec * (y-Theta) / (1 -2*Theta*y + Theta^2)
+    dl.dnuvec <- log1p(-y^2) - log1p(-2 * Theta * y + Theta^2) -
+                 digamma(nuvec + 0.5) + digamma(nuvec + 1)
 
     c(w) * cbind(dl.dTheta * dTheta.deta,
-                 dl.dnu * dnu.deta)
-  }), list( .ltheta = ltheta, .lnu = lnu,
-            .etheta = etheta, .enu = enu ))),
+                 dl.dnuvec * dnuvec.deta)
+  }), list( .ltheta = ltheta, .lnuvec = lnuvec,
+            .etheta = etheta, .enuvec = enuvec ))),
   weight = eval(substitute(expression({
-    d2l.dTheta2 <- (2 * nu^2 / (1+nu)) / (1-Theta^2)
-    d2l.dnu2 <- trigamma(nu+0.5) - trigamma(nu+1)
+    d2l.dTheta2 <- (2 * nuvec^2 / (1+nuvec)) / (1-Theta^2)
+    d2l.dnuvec2 <- trigamma(nuvec+0.5) - trigamma(nuvec+1)
 
-    wz <- matrix(as.numeric(NA), n, M) # diagonal matrix
+    wz <- matrix(as.numeric(NA), n, M)  # diagonal matrix
     wz[, iam(1, 1, M)] <- d2l.dTheta2 * dTheta.deta^2
-    wz[, iam(2, 2, M)] <- d2l.dnu2 * dnu.deta^2
+    wz[, iam(2, 2, M)] <- d2l.dnuvec2 * dnuvec.deta^2
 
     c(w) * wz
-  }), list( .ltheta = ltheta, .lnu = lnu ))))
+  }), list( .ltheta = ltheta, .lnuvec = lnuvec ))))
 }
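
The mccullagh89() hunks above rename the internal nu/lnu/enu objects to nuvec/lnuvec/enuvec and reflow the initial-value search; the likelihood, score and expected-information code is otherwise unchanged. From the log-likelihood shown, theta = 0 reduces the density to one proportional to (1 - y^2)^(nu - 1/2), so a shifted symmetric beta sample is a convenient test case. A hedged sketch on invented data (vglm() itself is not shown in this excerpt):

  library(VGAM)
  set.seed(5)
  # 2 * Beta(2.5, 2.5) - 1 lies in (-1, 1) and matches theta = 0, nu = 2.
  mdata <- data.frame(y = 2 * rbeta(500, shape1 = 2.5, shape2 = 2.5) - 1)
  mfit <- vglm(y ~ 1, mccullagh89, data = mdata, trace = TRUE)
  coef(mfit, matrix = TRUE)  # the two linear predictors: theta and nu links
  Coef(mfit)                 # theta and nu on their original scales
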
 
 
@@ -208,10 +213,10 @@ hzeta.control <- function(save.weight = TRUE, ...) {
             "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
   initialize = eval(substitute(expression({
 
-    w.y.check(w = w, y = y)
+    w.y.check(w = w, y = y,
+              Is.integer.y = TRUE,
+              Is.positive.y = TRUE)
 
-    if (any(y < 1))
-      stop("all y values must be in 1, 2, 3,....")
 
     predictors.names <-
       namesof("alpha", .link , earg = .earg , tag = FALSE)
@@ -254,7 +259,7 @@ hzeta.control <- function(save.weight = TRUE, ...) {
     dalpha.deta <- dtheta.deta(alpha, .link , earg = .earg )
 
     d3 <- deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
-                "alpha", hessian = FALSE)
+                 "alpha", hessian = FALSE)
     eval.d3 <- eval(d3)
 
     dl.dalpha <-  attr(eval.d3, "gradient")
@@ -263,10 +268,10 @@ hzeta.control <- function(save.weight = TRUE, ...) {
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
     sd3 <- deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
-                 "alpha", hessian = FALSE)
+                  "alpha", hessian = FALSE)
     run.var <- 0
-    for(ii in 1:( .nsimEIM )) {
-      ysim <- rhzeta(n, alpha=alpha)
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rhzeta(n, alpha = alpha)
       eval.sd3 <- eval(sd3)
       dl.dalpha <-  attr(eval.d3, "gradient")
       rm(ysim)
@@ -294,8 +299,8 @@ dhzeta <- function(x, alpha, log = FALSE) {
     stop("'alpha' must be numeric and have positive values")
 
   nn <- max(length(x), length(alpha))
-  x <- rep(x, length.out = nn);
-  alpha <- rep(alpha, length.out = nn)
+  if (length(x)     != nn) x     <- rep(x,     length.out = nn)
+  if (length(alpha) != nn) alpha <- rep(alpha, length.out = nn)
 
   ox <- !is.finite(x)
   zero <- ox | round(x) != x | x < 1
@@ -333,7 +338,7 @@ qhzeta <- function(p, alpha) {
   nn <- max(length(p), length(alpha))
   p <- rep(p, length.out = nn)
   alpha <- rep(alpha, length.out = nn)
-  ans <- (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
+  ans <- (((1 - p)^(-1/alpha) - 1) / 2)  # p is in (0,1)
   ans[alpha <= 0] <- NaN
   floor(ans + 1)
 }
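
dhzeta() above now recycles x and alpha only when their lengths differ, and qhzeta()/rhzeta() get spacing fixes only. A brief hedged check of the three helpers on an arbitrary alpha: the probabilities (2y-1)^(-alpha) - (2y+1)^(-alpha) telescope, so the mass summed over a long support grid should be close to 1.

  library(VGAM)
  set.seed(6)
  alpha <- 1.5
  dhzeta(1:6, alpha)                 # P(Y = 1), ..., P(Y = 6)
  sum(dhzeta(1:10000, alpha))        # telescoping sum; should be close to 1
  qhzeta(c(0.25, 0.5, 0.75), alpha)  # quantiles on the support 1, 2, 3, ...
  table(rhzeta(1000, alpha))[1:5]    # a heavy-tailed simulated sample
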
@@ -393,7 +398,8 @@ rhzeta <- function(n, alpha) {
     } else {
       mycmatrix <- if (M == 1) diag(1) else diag(M)
     }
-    constraints <- cm.vgam(mycmatrix, x, .PARALLEL ,
+    constraints <- cm.vgam(mycmatrix, x = x,
+                           bool = .PARALLEL ,
                            constraints, apply.int = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .ZERO , M)
   }), list( .parallel = parallel, .zero = zero ))),
@@ -454,14 +460,14 @@ rhzeta <- function(n, alpha) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:(M-1))
+    for (ii in 1:(M-1))
       misc$earg[[ii]] <- list()
     misc$earg[[M]] <- .ephi
 
     misc$expected <- TRUE
 
     if (intercept.only) {
-      misc$shape <- probs[1,] * (1/phi[1]-1) # phi & probs computed in @deriv
+      misc$shape <- probs[1,] * (1/phi[1]-1)  # phi & probs computed in @deriv
     }
   }), list( .ephi = ephi, .lphi = lphi ))),
   loglikelihood = eval(substitute(
@@ -479,18 +485,18 @@ rhzeta <- function(n, alpha) {
                         "implemented yet") else {
       ans <- rep(0.0, length.out = n)
       omega <- extra$n2
-      for(jay in 1:M) {
+      for (jay in 1:M) {
         maxyj <- max(ycount[, jay])
         loopOveri <- (n < maxyj)
         if (loopOveri) {
-          for(iii in 1:n) {
-              rrr <- 1:ycount[iii, jay] # a vector
+          for (iii in 1:n) {
+              rrr <- 1:ycount[iii, jay]  # a vector
               if (ycount[iii, jay] > 0)
                 ans[iii] <- ans[iii] + sum(log((1-phi[iii]) *
                             probs[iii, jay] + (rrr-1)*phi[iii]))
           }
         } else {
-          for(rrr in 1:maxyj) {
+          for (rrr in 1:maxyj) {
               index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
               if (any(index))
                   ans[index] <- ans[index] + log((1-phi[index]) *
@@ -502,12 +508,12 @@ rhzeta <- function(n, alpha) {
       maxomega <- max(omega)
       loopOveri <- n < maxomega
       if (loopOveri) {
-        for(iii in 1:n) {
+        for (iii in 1:n) {
           rrr <- 1:omega[iii]
           ans[iii]<- ans[iii] - sum(log1p(-phi[iii] + (rrr-1) * phi[iii]))
         }
       } else {
-        for(rrr in 1:maxomega) {
+        for (rrr in 1:maxomega) {
           ind8 <- rrr <= omega
           ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1) * phi[ind8])
         }
@@ -530,11 +536,11 @@ rhzeta <- function(n, alpha) {
 
     ycount <- round(ycount)
 
-    for(jay in 1:M) {
+    for (jay in 1:M) {
       maxyj <- max(ycount[, jay])
       loopOveri <- n < maxyj
       if (loopOveri) {
-        for(iii in 1:n) {
+        for (iii in 1:n) {
           rrr <- 1:ycount[iii, jay]
           if (ycount[iii, jay] > 0) {
             PHI <- phi[iii]
@@ -545,13 +551,13 @@ rhzeta <- function(n, alpha) {
             if (jay < M) {
               dl.dprobs[iii, jay] <- dl.dprobs[iii, jay] + sum(tmp9)
             } else {
-              for(jay2 in 1:(M-1))
+              for (jay2 in 1:(M-1))
                 dl.dprobs[iii, jay2]<-dl.dprobs[iii, jay2]-sum(tmp9)
             }
           }
         }
       } else {
-        for(rrr in 1:maxyj) {
+        for (rrr in 1:maxyj) {
           index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
           PHI <- phi[index]
           dl.dphi[index] <- dl.dphi[index] +
@@ -561,7 +567,7 @@ rhzeta <- function(n, alpha) {
           if (jay < M) {
               dl.dprobs[index, jay] <- dl.dprobs[index, jay] + tmp9
           } else {
-              for(jay2 in 1:(M-1))
+              for (jay2 in 1:(M-1))
                   dl.dprobs[index, jay2] <- dl.dprobs[index, jay2] - tmp9
           }
         }
@@ -570,19 +576,19 @@ rhzeta <- function(n, alpha) {
     maxomega <- max(omega)
     loopOveri <- n < maxomega
     if (loopOveri) {
-      for(iii in 1:n) {
+      for (iii in 1:n) {
         rrr <- 1:omega[iii]
         dl.dphi[iii]<-dl.dphi[iii] - sum((rrr-2)/(1 + (rrr-2)*phi[iii]))
       }
     } else {
-      for(rrr in 1:maxomega) {
+      for (rrr in 1:maxomega) {
         index <- rrr <= omega
         dl.dphi[index] <-
         dl.dphi[index] - (rrr-2)/(1 + (rrr-2)*phi[index])
       }
     }
 
-    dprobs.deta <- probs[, -M] * (1 - probs[, -M]) # n x (M-1)
+    dprobs.deta <- probs[, -M] * (1 - probs[, -M])  # n x (M-1)
     dphi.deta <- dtheta.deta(phi, .lphi , earg = .ephi )
 
     ans <- cbind(dl.dprobs * dprobs.deta,
@@ -593,7 +599,7 @@ rhzeta <- function(n, alpha) {
       wz <- matrix(0, n, dimm(M))
       loopOveri <- (n < maxomega)
       if (loopOveri) {
-          for(iii in 1:n) {
+          for (iii in 1:n) {
               rrr <- 1:omega[iii]  # A vector
               PHI <- phi[iii]
               pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
@@ -604,7 +610,7 @@ rhzeta <- function(n, alpha) {
               wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
                       sum(probs[iii, M]^2 * pYiM.ge.rrr / denomM) -
                       sum(1 / (1 + (rrr-2)*PHI)^2)
-              for(jay in 1:(M-1)) {
+              for (jay in 1:(M-1)) {
                   denomj <- ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)^2
                   pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
                                                    size = omega[iii],
@@ -613,7 +619,7 @@ rhzeta <- function(n, alpha) {
                   wz[iii, iam(jay, jay, M)] <- wz[iii, iam(jay, jay, M)] + 
                       sum(pYij.ge.rrr / denomj) + 
                       sum(pYiM.ge.rrr / denomM)
-                  for(kay in jay:(M-1)) if (kay > jay) {
+                  for (kay in jay:(M-1)) if (kay > jay) {
                     wz[iii, iam(jay, kay, M)] <- wz[iii, iam(jay, kay, M)] +
                         sum(pYiM.ge.rrr / denomM)
                   }
@@ -625,7 +631,7 @@ rhzeta <- function(n, alpha) {
               } # end of jay loop
           } # end of iii loop
       } else {
-          for(rrr in 1:maxomega) {
+          for (rrr in 1:maxomega) {
               ind5 <- rrr <= omega
               PHI <- phi[ind5]
               pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
@@ -636,7 +642,7 @@ rhzeta <- function(n, alpha) {
               wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
                       probs[ind5, M]^2 * pYiM.ge.rrr / denomM -
                       1 / (1 + (rrr-2)*PHI)^2
-              for(jay in 1:(M-1)) {
+              for (jay in 1:(M-1)) {
                   denomj <- ((1-PHI)*probs[ind5, jay] + (rrr-1)*PHI)^2
                   pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
                                                    size = omega[ind5],
@@ -644,7 +650,7 @@ rhzeta <- function(n, alpha) {
                       shape2<-(1-probs[ind5, jay])*(1/PHI-1))
                   wz[ind5, iam(jay, jay, M)] <- wz[ind5, iam(jay, jay, M)] + 
                       pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM 
-                  for(kay in jay:(M-1)) if (kay > jay) {
+                  for (kay in jay:(M-1)) if (kay > jay) {
                     wz[ind5, iam(jay, kay, M)] <- wz[ind5, iam(jay, kay, M)] +
                         pYiM.ge.rrr / denomM 
                   }
@@ -657,10 +663,10 @@ rhzeta <- function(n, alpha) {
           } # end of rrr loop
       }
 
-      for(jay in 1:(M-1))
-        for(kay in jay:(M-1))
+      for (jay in 1:(M-1))
+        for (kay in jay:(M-1))
           wz[, iam(jay, kay, M)] <- wz[, iam(jay, kay, M)] * (1-phi)^2
-      for(jay in 1:(M-1))
+      for (jay in 1:(M-1))
         wz[, iam(jay, M, M)] <- wz[, iam(jay, M, M)] * (phi-1) / phi
       wz[, iam(M, M, M)] <- wz[, iam(M, M, M)] / phi^2
 
@@ -700,7 +706,8 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
             "Posterior mean:    (n_j + shape_j)/(2*sum(n_j) + ",
                                 "sum(shape_j))\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints, apply.int = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
@@ -713,7 +720,7 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
       predictors.names <- namesof(paste("shape", 1:M, sep = ""),
                                   .link , earg = .earg , short = TRUE)
 
-      extra$n2 <- rowSums(y) # Nb. don't multiply by 2
+      extra$n2 <- rowSums(y)  # Nb. don't multiply by 2
       extra$y  <- y
 
       if (!length(etastart)) {
@@ -735,7 +742,7 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$pooled.weight <- pooled.weight
@@ -772,7 +779,7 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
 
     if (TRUE && intercept.only) {
       sumw <- sum(w)
-      for(ii in 1:ncol(wz))
+      for (ii in 1:ncol(wz))
         wz[, ii] <- sum(wz[, ii]) / sumw
       pooled.weight <- TRUE
       wz <- c(w) * wz # Put back the weights
@@ -792,7 +799,7 @@ rdiric <- function(n, shape, dimension = NULL) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   if (!is.numeric(dimension))
@@ -831,7 +838,8 @@ rdiric <- function(n, shape, dimension = NULL) {
             namesof("shapej", link, earg = earg), "\n\n",
             "Mean:     shape_j/(1 + sum(shape_j)), j = 1,..,ncol(y)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints, apply.int = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
@@ -869,7 +877,7 @@ rdiric <- function(n, shape, dimension = NULL) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
@@ -915,7 +923,7 @@ rdiric <- function(n, shape, dimension = NULL) {
 
   deriv.arg <- deriv
   rm(deriv)
-  if (!is.Numeric(deriv.arg, allowable.length = 1,
+  if (!is.Numeric(deriv.arg, length.arg = 1,
                   integer.valued = TRUE))
     stop("'deriv' must be a single non-negative integer")
   if (deriv.arg < 0 || deriv.arg > 2)
@@ -955,14 +963,14 @@ rdiric <- function(n, shape, dimension = NULL) {
   a <- 12; k <- 8
   B <- c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
   ans <- 0
-  for(ii in 1:(a-1))
+  for (ii in 1:(a-1))
      ans <- ans + 1.0 / ii^x
   ans <- ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
 
   term <- (x/2) / a^(x+1)
   ans <- ans + term * B[1]
 
-  for(mm in 2:k) {
+  for (mm in 2:k) {
     term <- term * (x+2*mm-2) * (x+2*mm-3) / (a * a * 2 * mm * (2*mm-1))
     ans <- ans + term * B[mm]
   }
@@ -974,7 +982,7 @@ rdiric <- function(n, shape, dimension = NULL) {
  Zeta.derivative <- function(x, deriv.arg = 0) {
 
 
-    if (!is.Numeric(deriv.arg, allowable.length = 1,
+    if (!is.Numeric(deriv.arg, length.arg = 1,
                     integer.valued = TRUE))
         stop("'deriv.arg' must be a single non-negative integer")
     if (deriv.arg < 0 || deriv.arg > 2)
@@ -989,8 +997,8 @@ rdiric <- function(n, shape, dimension = NULL) {
     ans <- rep(as.numeric(NA), length(x))
     nn <- sum(ok)  # Effective length (excludes x < 0 and x = 1 values)
     if (nn)
-        ans[ok] <- dotC(name = "vzetawr", as.double(x[ok]), ans = double(nn),
-                  as.integer(deriv.arg), as.integer(nn))$ans
+        ans[ok] <- .C("vzetawr", as.double(x[ok]), ans = double(nn),
+                  as.integer(deriv.arg), as.integer(nn), PACKAGE = "VGAM")$ans
 
 
 
@@ -1008,7 +1016,7 @@ dzeta <- function(x, p, log = FALSE) {
   rm(log)
 
 
-    if (!is.Numeric(p, positive = TRUE)) # || min(p) <= 1
+    if (!is.Numeric(p, positive = TRUE))  # || min(p) <= 1
         stop("'p' must be numeric and > 0")
     LLL <- max(length(p), length(x))
     x <- rep(x, length.out = LLL);
@@ -1121,7 +1129,7 @@ dzeta <- function(x, p, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -1161,7 +1169,7 @@ gharmonic <- function(n, s = 1, lognexponent = 0) {
 
     if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
         stop("bad input for argument 'n'")
-    if (!is.Numeric(lognexponent, allowable.length = 1))
+    if (!is.Numeric(lognexponent, length.arg = 1))
         stop("bad input for argument 'lognexponent'")
     if (length(n) == 1 && length(s) == 1) {
         if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
@@ -1171,10 +1179,10 @@ gharmonic <- function(n, s = 1, lognexponent = 0) {
         n <- rep(n, length.out = LEN)
         ans <- s <- rep(s, length.out = LEN)
         if (lognexponent != 0) {
-            for(ii in 1:LEN)
+            for (ii in 1:LEN)
                 ans[ii] <- sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
         } else
-            for(ii in 1:LEN)
+            for (ii in 1:LEN)
                 ans[ii] <- sum((1:n[ii])^(-s[ii]))
         ans
     }
@@ -1250,7 +1258,7 @@ pzipf <- function(q, N, s) {
  zipf <- function(N = NULL, link = "loge", init.s = NULL) {
   if (length(N) &&
     (!is.Numeric(N, positive = TRUE,
-                 integer.valued = TRUE, allowable.length = 1) ||
+                 integer.valued = TRUE, length.arg = 1) ||
       N <= 1))
     stop("bad input for argument 'N'")
   enteredN <- length(N)
@@ -1281,7 +1289,7 @@ pzipf <- function(q, N, s) {
     predictors.names <- namesof("s", .link , earg = .earg , tag = FALSE)
 
     NN <- .N
-    if (!is.Numeric(NN, allowable.length = 1,
+    if (!is.Numeric(NN, length.arg = 1,
                     positive = TRUE, integer.valued = TRUE))
         NN <- max(y)
     if (max(y) > NN)
@@ -1346,7 +1354,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
 
  cauchy <- function(llocation = "identity", lscale = "loge",
                     ilocation = NULL, iscale = NULL,
-                    iprobs = seq(0.2, 0.8, by=0.2),
+                    iprobs = seq(0.2, 0.8, by = 0.2),
                     imethod = 1, nsimEIM = NULL, zero = 2) {
 
   llocat <- as.list(substitute(llocation))
@@ -1359,7 +1367,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
   lscale <- attr(escale, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -1369,7 +1377,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
       stop("bad input for argument 'zero'")
   if (length(nsimEIM) &&
-     (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
+     (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE) ||
       nsimEIM <= 50))
     stop("argument 'nsimEIM' should be an integer greater than 50")
   if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
@@ -1415,7 +1423,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
                  sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
                                     log = TRUE))
              }
-             loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by=0.05)))
+             loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by = 0.05)))
              try.this <- getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
                                   y = y,  x = x, w = w)
                 try.this <- rep(c(try.this), length.out = n)
@@ -1485,7 +1493,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
         ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
         dthetas.detas = cbind(dlocation.deta, dscale.deta)
         if (length( .nsimEIM )) {
-            for(ii in 1:( .nsimEIM )) {
+            for (ii in 1:( .nsimEIM )) {
                 ysim <- rcauchy(n, loc = location, scale = myscale)
                 Z <- (ysim-location) / myscale
                 dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
@@ -1533,7 +1541,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
 
   if (!is.Numeric(scale.arg, positive = TRUE))
     stop("bad input for 'scale.arg'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -1630,9 +1638,9 @@ cauchy.control <- function(save.weight = TRUE, ...) {
 
  logistic1 <- function(llocation = "identity",
                        scale.arg = 1, imethod = 1) {
-  if (!is.Numeric(scale.arg, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(scale.arg, length.arg = 1, positive = TRUE))
     stop("'scale.arg' must be a single positive number")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -1718,10 +1726,10 @@ cauchy.control <- function(save.weight = TRUE, ...) {
            imethod = 1, zero = NULL)
 {
 
-  if (!is.Numeric(shape.arg, allowable.length = 1,
+  if (!is.Numeric(shape.arg, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
       stop("'shape' must be a positive integer")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
       stop("argument 'imethod' must be 1 or 2 or 3")
@@ -1812,7 +1820,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -1856,7 +1864,7 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
 
   if (!is.Numeric(x))
     stop("bad input for argument 'x'")
-  if (!is.Numeric(Qsize, allowable.length = 1,
+  if (!is.Numeric(Qsize, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'Qsize'")
   if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
@@ -1867,7 +1875,7 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
   a <- rep(a, length.out = N);
 
   xok <- (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
-  ans <- rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+  ans <- rep(if (log.arg) log(0) else 0, length.out = N)  # loglikelihood
   ans[xok] <- lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
              (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
              (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
@@ -1882,9 +1890,9 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
-  if (!is.Numeric(Qsize, allowable.length = 1,
+  if (!is.Numeric(Qsize, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'Qsize'")
   if (!is.Numeric(a, positive = TRUE) ||
@@ -1910,7 +1918,7 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
 
  borel.tanner <- function(Qsize = 1, link = "logit",
                           imethod = 1) {
-  if (!is.Numeric(Qsize, allowable.length = 1,
+  if (!is.Numeric(Qsize, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'Qsize'")
 
@@ -1919,7 +1927,7 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 4)
     stop("argument 'imethod' must be 1 or 2, 3 or 4")
@@ -2009,7 +2017,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   a <- rep(a, length.out = N);
 
   xok <- (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
-  ans <- rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+  ans <- rep(if (log.arg) log(0) else 0, length.out = N)  # loglikelihood
   ans[xok] <- ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
              lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
   if (!log.arg) {
@@ -2027,7 +2035,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 4)
       stop("argument 'imethod' must be 1 or 2, 3 or 4")
@@ -2106,8 +2114,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   function(A = 0, B = 1,
            lmu = "logit",
            lphi = "loge",
-           imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
-{
+           imu = NULL, iphi = NULL, imethod = 1, zero = NULL) {
 
 
   stdbeta <- (A == 0 && B == 1)
@@ -2124,42 +2131,42 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   lphi <- attr(ephi, "function.name")
 
 
-  if (!is.Numeric(A, allowable.length = 1) ||
-      !is.Numeric(B, allowable.length = 1) || A >= B)
+  if (!is.Numeric(A, length.arg = 1) ||
+      !is.Numeric(B, length.arg = 1) || A >= B)
     stop("A must be < B, and both must be of length one")
 
 
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
-      stop("bad input for argument 'zero'")
+    stop("bad input for argument 'zero'")
 
   if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
      any(imu <= A) || any(imu >= B)))
-      stop("bad input for argument 'imu'")
+    stop("bad input for argument 'imu'")
   if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
-      stop("bad input for argument 'iphi'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+    stop("bad input for argument 'iphi'")
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
-      stop("argument 'imethod' must be 1 or 2")
+    stop("argument 'imethod' must be 1 or 2")
 
 
-    new("vglmff",
-    blurb = c("Beta distribution parameterized by mu and a ",
-              "precision parameter\n",
-              if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
-              "/ beta(mu*phi,(1-mu)*phi),\n",
-              "      0<y<1, 0<mu<1, phi>0\n\n") else
-              paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
-              "-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
-              B, "-", A, ")^(phi-1)),\n",
-              A," < y < ",B, ", ", A," < mu < ",B,
-              ", mu = ", A, " + ", (B-A), " * mu1",
-              ", phi > 0\n\n", sep = ""),
-              "Links:    ",
-              namesof("mu",  lmu,  earg = emu),  ", ",
-              namesof("phi", lphi, earg = ephi)),
+  new("vglmff",
+  blurb = c("Beta distribution parameterized by mu and a ",
+            "precision parameter\n",
+            if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
+            "/ beta(mu*phi,(1-mu)*phi),\n",
+            "      0<y<1, 0<mu<1, phi>0\n\n") else
+            paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
+            "-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
+            B, "-", A, ")^(phi-1)),\n",
+            A," < y < ",B, ", ", A," < mu < ",B,
+            ", mu = ", A, " + ", (B-A), " * mu1",
+            ", phi > 0\n\n", sep = ""),
+            "Links:    ",
+            namesof("mu",  lmu,  earg = emu),  ", ",
+            namesof("phi", lphi, earg = ephi)),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .zero = zero ))),
@@ -2171,90 +2178,91 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
     w.y.check(w = w, y = y)
 
 
-      predictors.names <- c(namesof("mu",  .lmu ,  .emu , short = TRUE),
-                            namesof("phi", .lphi , .ephi, short = TRUE))
-      if (!length(etastart)) {
-        mu.init <- if (is.Numeric( .imu )) .imu else
-                  {if ( .imethod == 1) weighted.mean(y, w) else
-                   median(rep(y, w))}
-        mu1.init <- (mu.init - .A) / ( .B - .A)  # In (0,1)
-        phi.init <- if (is.Numeric( .iphi)) .iphi else
-           max(0.01, -1 + ( .B - .A)^2 * mu1.init*(1-mu1.init)/var(y))
-        etastart <- matrix(0, n, 2)
-        etastart[, 1] <- theta2eta(mu.init, .lmu , earg = .emu )
-        etastart[, 2] <- theta2eta(phi.init, .lphi , earg = .ephi )
+    predictors.names <- c(namesof("mu",  .lmu ,  .emu , short = TRUE),
+                          namesof("phi", .lphi , .ephi, short = TRUE))
+    if (!length(etastart)) {
+      mu.init <- if (is.Numeric( .imu )) .imu else {
+                   if ( .imethod == 1) weighted.mean(y, w) else
+                                       median(rep(y, w))
+                 }
+      mu1.init <- (mu.init - .A ) / ( .B - .A )  # In (0,1)
+      phi.init <- if (is.Numeric( .iphi )) .iphi else
+         max(0.01, -1 + ( .B - .A )^2 * mu1.init*(1-mu1.init)/var(y))
+      etastart <- matrix(0, n, 2)
+      etastart[, 1] <- theta2eta(mu.init , .lmu  , earg = .emu  )
+      etastart[, 2] <- theta2eta(phi.init, .lphi , earg = .ephi )
     }
   }), list( .lmu = lmu, .lphi = lphi, .imu = imu, .iphi = iphi,
             .A = A, .B = B, .emu = emu, .ephi = ephi,
             .imethod = imethod ))),
 
-    linkinv = eval(substitute(function(eta, extra = NULL) {
-       mu <- eta2theta(eta[, 1], .lmu , .emu )
-       mu
-    }, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
-    last = eval(substitute(expression({
-        misc$link <-    c(mu = .lmu , phi = .lphi)
-        misc$earg <- list(mu = .emu , phi = .ephi)
-        misc$limits <- c( .A, .B)
-        misc$stdbeta <- .stdbeta
-    }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
-              .emu = emu, .ephi = ephi,
-              .stdbeta = stdbeta ))),
-    loglikelihood = eval(substitute(
-        function(mu, y, w, residuals = FALSE, eta, extra = NULL){
-        mu <- eta2theta(eta[, 1], .lmu , .emu )
-        m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
-        phi <- eta2theta(eta[, 2], .lphi , .ephi )
-        if (residuals) stop("loglikelihood residuals not ",
-                            "implemented yet") else {
-            shape1 <- phi * m1u
-            shape2 <- (1 - m1u) * phi
-            zedd <- (y - .A) / ( .B - .A)
-            sum(c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
-                           log = TRUE) -
-                     log( abs( .B - .A ))))
-        }
-    }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
-             .emu = emu, .ephi = ephi,
-             .stdbeta = stdbeta ))),
-    vfamily = "betaff",
-    deriv = eval(substitute(expression({
-        mu <- eta2theta(eta[, 1], .lmu , .emu )
-        phi <- eta2theta(eta[, 2], .lphi , .ephi )
-        m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
-        dmu.deta <- dtheta.deta(mu, .lmu , .emu )
-        dmu1.dmu <- 1 / ( .B - .A)
-        dphi.deta <- dtheta.deta(phi, .lphi , .ephi )
-        temp1 <- m1u*phi
-        temp2 <- (1-m1u)*phi
-        if ( .stdbeta ) {
-            dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) + log(y) - log1p(-y))
-            dl.dphi <- digamma(phi) - mu*digamma(temp1) - (1-mu)*digamma(temp2) +
-                mu*log(y) + (1-mu)*log1p(-y)
-        } else {
-            dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) +
-                           log(y-.A) - log( .B-y))
-            dl.dphi <- digamma(phi) - m1u*digamma(temp1) -
-                      (1-m1u)*digamma(temp2) +
-                      m1u*log(y-.A) + (1-m1u)*log( .B-y) - log( .B -.A)
-        }
-        c(w) * cbind(dl.dmu1 * dmu1.dmu * dmu.deta,
-                     dl.dphi * dphi.deta)
-    }), list( .lmu = lmu, .lphi = lphi,
-              .emu = emu, .ephi = ephi,
-              .A = A, .B = B,
-              .stdbeta = stdbeta ))),
-    weight = eval(substitute(expression({
-        d2l.dmu12 <- phi^2 * (trigamma(temp1) + trigamma(temp2))
-        d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
-            trigamma(temp2) * (1-m1u)^2
-        d2l.dmu1phi <- temp1*trigamma(temp1) - temp2*trigamma(temp2)
-        wz <- matrix(as.numeric(NA), n, dimm(M))
-        wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
-        wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
-        wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
-        c(w) * wz
-    }), list( .A = A, .B = B ))))
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+     mu <- eta2theta(eta[, 1], .lmu , .emu )
+     mu
+  }, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
+  last = eval(substitute(expression({
+    misc$link <-    c(mu = .lmu , phi = .lphi )
+    misc$earg <- list(mu = .emu , phi = .ephi )
+    misc$limits <- c( .A , .B )
+    misc$stdbeta <- .stdbeta
+  }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+            .emu = emu, .ephi = ephi,
+            .stdbeta = stdbeta ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+      mu <- eta2theta(eta[, 1], .lmu , .emu )
+      m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
+      phi <- eta2theta(eta[, 2], .lphi , .ephi )
+      if (residuals) stop("loglikelihood residuals not ",
+                          "implemented yet") else {
+        shape1 <- phi * m1u
+        shape2 <- (1 - m1u) * phi
+        zedd <- (y - .A) / ( .B - .A)
+      sum(c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
+                        log = TRUE) -
+                  log( abs( .B - .A ))))
+    }
+  }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+           .emu = emu, .ephi = ephi,
+           .stdbeta = stdbeta ))),
+  vfamily = "betaff",
+  deriv = eval(substitute(expression({
+    mu <- eta2theta(eta[, 1], .lmu , .emu )
+    phi <- eta2theta(eta[, 2], .lphi , .ephi )
+    m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
+    dmu.deta <- dtheta.deta(mu, .lmu , .emu )
+    dmu1.dmu <- 1 / ( .B - .A)
+    dphi.deta <- dtheta.deta(phi, .lphi , .ephi )
+    temp1 <- m1u*phi
+    temp2 <- (1-m1u)*phi
+    if ( .stdbeta ) {
+      dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) + log(y) - log1p(-y))
+      dl.dphi <- digamma(phi) - mu*digamma(temp1) - (1-mu)*digamma(temp2) +
+          mu*log(y) + (1-mu)*log1p(-y)
+    } else {
+      dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) +
+                     log(y-.A) - log( .B-y))
+      dl.dphi <- digamma(phi) - m1u*digamma(temp1) -
+                (1-m1u)*digamma(temp2) +
+                m1u*log(y-.A) + (1-m1u)*log( .B-y) - log( .B -.A)
+    }
+      c(w) * cbind(dl.dmu1 * dmu1.dmu * dmu.deta,
+                   dl.dphi * dphi.deta)
+  }), list( .lmu = lmu, .lphi = lphi,
+            .emu = emu, .ephi = ephi,
+            .A = A, .B = B,
+            .stdbeta = stdbeta ))),
+  weight = eval(substitute(expression({
+    d2l.dmu12 <- (trigamma(temp1) + trigamma(temp2)) * phi^2
+    d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
+                  trigamma(temp2) * (1-m1u)^2
+    d2l.dmu1phi <- temp1 * trigamma(temp1) - temp2 * trigamma(temp2)
+    wz <- matrix(as.numeric(NA), n, dimm(M))
+    wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
+    wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
+    wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
+    c(w) * wz
+  }), list( .A = A, .B = B ))))
 }
 
 
@@ -2282,12 +2290,12 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   if (length( i2 ) && !is.Numeric( i2, positive = TRUE))
     stop("bad input for argument 'i2'")
 
-  if (!is.Numeric(A, allowable.length = 1) ||
-     !is.Numeric(B, allowable.length = 1) ||
+  if (!is.Numeric(A, length.arg = 1) ||
+     !is.Numeric(B, length.arg = 1) ||
      A >= B)
     stop("A must be < B, and both must be of length one")
 
-  stdbeta <- (A == 0 && B == 1) # stdbeta == T iff standard beta distn
+  stdbeta <- (A == 0 && B == 1)  # stdbeta == T iff standard beta distn
 
 
 
@@ -2305,7 +2313,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             namesof("shape1", lshape1, earg = eshape1),  ", ",
             namesof("shape2", lshape2, earg = eshape2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints, apply.int  = TRUE)
     constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
@@ -2344,25 +2353,26 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
-                   eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+                    eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
     .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
   }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
-    misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2)
-    misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2)
-    misc$limits <- c( .A, .B)
-  }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+    misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2 )
+    misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 )
+    misc$limits <- c( .A , .B )
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+            .A = A, .B = B,
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL){
     shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
-                   eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+                    eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-      zedd <- (y - .A) / ( .B - .A)
+      zedd <- (y - .A ) / ( .B - .A )
       sum(c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
-                        shape2 = shapes[, 2],
+                                  shape2 = shapes[, 2],
                         log = TRUE) - log( abs( .B - .A ))))
     }
   }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
@@ -2370,31 +2380,32 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   vfamily = "beta.ab",
   deriv = eval(substitute(expression({
     shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
-                   eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+                    eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
 
     dshapes.deta <-
       cbind(dtheta.deta(shapes[, 1], .lshape1 , earg = .eshape1),
             dtheta.deta(shapes[, 2], .lshape2 , earg = .eshape2))
 
-    dl.dshapes <- cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
-                 digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
+    dl.dshapes <- cbind(log(y - .A ), log( .B - y)) -
+                  digamma(shapes) +
+                  digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A )
 
     c(w) * dl.dshapes * dshapes.deta
   }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   weight = expression({
-    temp2 <- trigamma(shapes[, 1]+shapes[, 2])
-    d2l.dshape12 <- temp2 - trigamma(shapes[, 1])
-    d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
-    d2l.dshape1shape2 <- temp2
+    temp2 <- trigamma(shapes[, 1] + shapes[, 2])
+    ned2l.dshape12 <- trigamma(shapes[, 1]) - temp2 
+    ned2l.dshape22 <- trigamma(shapes[, 2]) - temp2 
+    ned2l.dshape1shape2 <- -temp2
 
-    wz <- matrix(as.numeric(NA), n, dimm(M))   #3=dimm(M)
-    wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
-    wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
-    wz[, iam(1, 2, M)] <- d2l.dshape1shape2 * dshapes.deta[, 1] *
-                                             dshapes.deta[, 2]
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
+    wz[, iam(1, 1, M)] <- ned2l.dshape12      * dshapes.deta[, 1]^2
+    wz[, iam(2, 2, M)] <- ned2l.dshape22      * dshapes.deta[, 2]^2
+    wz[, iam(1, 2, M)] <- ned2l.dshape1shape2 * dshapes.deta[, 1] *
+                                                dshapes.deta[, 2]
 
-    -c(w) * wz
+    c(w) * wz
   }))
 }
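[Illustrative usage sketch, not part of the upstream patch: fitting the shape1/shape2 parameterization via beta.ab() as defined above (note the weight slot now uses negated expected second derivatives, ned2l); simulated data, VGAM 0.9-3 assumed.]

  library(VGAM)
  set.seed(2)
  bdata2 <- data.frame(y = rbeta(300, shape1 = 3, shape2 = 1.5))  # made-up shapes
  bfit2  <- vglm(y ~ 1, beta.ab(lshape1 = "loge", lshape2 = "loge"),
                 data = bdata2)
  Coef(bfit2)                   # estimated shape1 and shape2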
 
@@ -2440,7 +2451,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
                          location = 0, expected = TRUE,
                          shrinkage.init = 0.95,
                          zero = NULL) {
-  if (!is.Numeric(location, allowable.length = 1))
+  if (!is.Numeric(location, length.arg = 1))
     stop("bad input for argument 'location'")
 
   if (!is.logical(expected) || length(expected) != 1)
@@ -2455,7 +2466,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
       shrinkage.init < 0 ||
       shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -2542,7 +2553,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:M) {
+    for (ii in 1:M) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -2640,7 +2651,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:M)
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
@@ -2676,8 +2687,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
  gamma2.ab <-
   function(lrate = "loge", lshape = "loge",
-           irate = NULL, ishape = NULL, expected = TRUE, zero = 2)
-{
+           irate = NULL, ishape = NULL, expected = TRUE, zero = 2) {
 
   lrate <- as.list(substitute(lrate))
   erate <- link2list(lrate)
@@ -2796,12 +2806,12 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
  gamma2 <-
   function(lmu = "loge", lshape = "loge",
            imethod = 1,  ishape = NULL,
-           parallel = FALSE, apply.parint = FALSE,
-           deviance.arg = FALSE, zero = -2)
-{
+           parallel = FALSE,
+           deviance.arg = FALSE, zero = -2) {
 
 
 
+  apply.parint <- FALSE
 
   lmu <- as.list(substitute(lmu))
   emu <- link2list(lmu)
@@ -2817,7 +2827,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
   if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
     stop("bad input for argument 'ishape'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -2843,13 +2853,15 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
               "Variance: (mu^2)/shape"),
     constraints = eval(substitute(expression({
 
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
                            apply.int = .apply.parint )
 
         dotzero <- .zero
         Musual <- 2
         eval(negzero.expression)
-        constraints <- cm.zero.vgam(constraints, x, z_Index, M)
+        constraints <- cm.zero.vgam(constraints, x, z.Index, M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
 
@@ -2875,9 +2887,9 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
       assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
-             envir = VGAM:::VGAMenv)
+             envir = VGAMenv)
       if (any(function.name == c("cqo","cao")) &&
-         is.Numeric( .zero , allowable.length = 1) && .zero != -2)
+         is.Numeric( .zero , length.arg = 1) && .zero != -2)
         stop("argument zero = -2 is required")
 
       M <- Musual * ncol(y)
@@ -2906,11 +2918,11 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
         init.shape <- matrix(1.0, n, NOS)
         mymu <- y # + 0.167 * (y == 0)  # imethod == 1 (the default)
         if ( .imethod == 2) {
-                for(ii in 1:ncol(y)) {
+                for (ii in 1:ncol(y)) {
                     mymu[, ii] <- weighted.mean(y[, ii], w = w[, ii])
                 }
         }
-        for(spp in 1:NOS) {
+        for (spp in 1:NOS) {
           junk <- lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
           var.y.est <- sum(w[, spp] * junk$resid^2) / (n - length(junk$coef))
           init.shape[, spp] <- if (length( .ishape )) .ishape else
@@ -2935,8 +2947,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
               .lmu , earg = .emu )
   }, list( .lmu = lmu, .emu = emu ))),
   last = eval(substitute(expression({
-    if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
-        rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
+        rm("CQO.FastAlgorithm", envir = VGAMenv)
 
     tmp34 <- c(rep( .lmu ,    length = NOS),
               rep( .lshape , length = NOS))
@@ -2948,7 +2960,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- .emu
       misc$earg[[Musual*ii  ]] <- .eshape
     }
@@ -3007,7 +3019,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   weight = eval(substitute(expression({
     ned2l.dmu2 <- shape / (mymu^2)
     ned2l.dshape2 <- trigamma(shape) - 1 / shape
-    wz <- matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
+    wz <- matrix(as.numeric(NA), n, M)  # 2 = M; diagonal!
 
     wz[, Musual*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
     wz[, Musual*(1:NOS)  ] <- ned2l.dshape2 * dshape.deta^2
@@ -3055,7 +3067,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3145,7 +3157,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -3204,11 +3216,11 @@ dbetageom <- function(x, shape1, shape2, log = FALSE) {
   if (!is.Numeric(shape2, positive = TRUE))
     stop("bad input for argument 'shape2'")
   N <- max(length(x), length(shape1), length(shape2))
-  x <- rep(x, length.out = N);
-  shape1 <- rep(shape1, length.out = N);
+  x      <- rep(x,      length.out = N)
+  shape1 <- rep(shape1, length.out = N)
   shape2 <- rep(shape2, length.out = N)
 
-  loglik <- lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
+  loglik <- lbeta(1+shape1, shape2 + abs(x)) - lbeta(shape1, shape2)
   xok <- (x == round(x) & x >= 0)
   loglik[!xok] <- log(0)
   if (log.arg) {
@@ -3237,12 +3249,12 @@ pbetageom <- function(q, shape1, shape2, log.p = FALSE) {
         temp <- if (max(qstar) >= 0) dbetageom(x = 0:max(qstar), 
                shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
         unq <- unique(qstar)
-        for(ii in unq) {
+        for (ii in unq) {
             index <- (qstar == ii)
             ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
         }
     } else
-    for(ii in 1:N) {
+    for (ii in 1:N) {
         qstar <- floor(q[ii])
         ans[ii] <- if (qstar >= 0) sum(dbetageom(x = 0:qstar,
                  shape1 = shape1[ii], shape2 = shape2[ii])) else 0
@@ -3268,13 +3280,15 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
- negbinomial <- function(lmu = "loge", lsize = "loge",
-                         imu = NULL,   isize = NULL,
-                         probs.y = 0.75,
-                         nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
-                         deviance.arg = FALSE, imethod = 1,
-                         parallel = FALSE,
-                         shrinkage.init = 0.95, zero = -2) {
+ negbinomial <-
+  function(lmu = "loge", lsize = "loge",
+           imu = NULL,   isize = NULL,
+           probs.y = 0.75,
+           nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
+           deviance.arg = FALSE, imethod = 1,
+           parallel = FALSE,
+           shrinkage.init = 0.95, zero = -2) {
+
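[Illustrative usage sketch, not part of the upstream patch, matching the negbinomial() signature just above; the covariate, regression coefficients, and size value are invented, and VGAM 0.9-3 is assumed.]

  library(VGAM)
  set.seed(3)
  ndata <- data.frame(x2 = runif(500))
  ndata <- transform(ndata, y = rnbinom(500, mu = exp(1 + x2), size = 2))
  nbfit <- vglm(y ~ x2, negbinomial(lmu = "loge", lsize = "loge"), data = ndata)
  coef(nbfit, matrix = TRUE)    # columns for log(mu) and log(size)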
 
 
 
@@ -3287,6 +3301,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
+  alternate.derivs = FALSE  # 20130823; added for 'nbcanlink'
+
 
   lmuuu <- as.list(substitute(lmu))
   emuuu <- link2list(lmuuu)
@@ -3303,24 +3319,24 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
     stop("bad input for argument 'isize'")
 
-  if (!is.Numeric(cutoff, allowable.length = 1) ||
+  if (!is.Numeric(cutoff, length.arg = 1) ||
     cutoff < 0.8 ||
     cutoff >= 1)
     stop("range error in the argument 'cutoff'")
-  if (!is.Numeric(Maxiter, integer.valued = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(Maxiter, integer.valued = TRUE, length.arg = 1) ||
     Maxiter < 100)
     stop("bad input for argument 'Maxiter'")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
     integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
     shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
 
   if (!is.null(nsimEIM)) {
-    if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
+    if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
     if (nsimEIM <= 10)
       warning("argument 'nsimEIM' should be an integer ",
@@ -3356,7 +3372,9 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
     if ( .parallel && ncol(cbind(y)) > 1)
       stop("univariate responses needed if 'parallel = TRUE'")
-    constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints)
   }), list( .parallel = parallel, .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -3379,10 +3397,10 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
     assign("CQO.FastAlgorithm",
           ( .lmuuu == "loge") && ( .lsize == "loge"),
-           envir = VGAM:::VGAMenv)
+           envir = VGAMenv)
 
     if (any(function.name == c("cqo", "cao")) &&
-        is.Numeric( .zero , allowable.length = 1) &&
+        is.Numeric( .zero , length.arg = 1) &&
         .zero != -2)
         stop("argument zero = -2 is required")
 
@@ -3396,7 +3414,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
            "the number of responses")
 
     M <- Musual * ncol(y) 
-    NOS <- ncoly <- ncol(y) # Number of species
+    NOS <- ncoly <- ncol(y)  # Number of species
     predictors.names <-
      c(namesof(if (NOS == 1) "mu"   else paste("mu",   1:NOS, sep = ""),
                 .lmuuu, earg = .emuuu, tag = FALSE),
@@ -3414,7 +3432,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
     if (!length(etastart)) {
       mu.init <- y
-      for(iii in 1:ncol(y)) {
+      for (iii in 1:ncol(y)) {
         use.this <- if ( .imethod == 1) {
           weighted.mean(y[, iii], w[, iii]) + 1/16
         } else if ( .imethod == 3) {
@@ -3433,7 +3451,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
           mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
         }
-      } # of for(iii)
+      } # of for (iii)
 
       if ( is.Numeric( .k.init )) {
         kay.init <- matrix( .k.init, nrow = n, ncol = NOS, byrow = TRUE)
@@ -3445,7 +3463,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
         k.grid <- 2^((-7):7)
         k.grid <- 2^(seq(-8, 8, length = 40))
         kay.init <- matrix(0, nrow = n, ncol = NOS)
-        for(spp. in 1:NOS) {
+        for (spp. in 1:NOS) {
           kay.init[, spp.] <- getMaxMin(k.grid,
                                        objfun = negbinomial.Loglikfun,
                                        y = y[, spp.], x = x, w = w[, spp.],
@@ -3498,8 +3516,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
            .emuuu = emuuu, .esize = esize))),
 
   last = eval(substitute(expression({
-    if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
-        rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
+        rm("CQO.FastAlgorithm", envir = VGAMenv)
 
     temp0303 <- c(rep( .lmuuu, length = NOS),
                  rep( .lsize , length = NOS))
@@ -3511,7 +3529,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- newemu
       misc$earg[[Musual*ii  ]] <- .esize
     }
@@ -3612,33 +3630,52 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
     dl.dmu <- y / mu - (y + kmat) / (mu + kmat)
-    dl.dk <- digamma(y + kmat) - digamma(kmat) -
-            (y + kmat) / (mu + kmat) + 1 + log(kmat / (kmat + mu))
+    dl.dk  <- digamma(y + kmat) - digamma(kmat) -
+              (y + kmat) / (mu + kmat) + 1 + log(kmat / (kmat + mu))
 
     if ( .lmuuu == "nbcanlink")
       newemu$wrt.eta <- 1
-    dmu.deta <- dtheta.deta(mu, .lmuuu , earg = newemu) # eta1
+    dmu.deta <- dtheta.deta(mu, .lmuuu , earg = newemu)  # eta1
 
     if ( .lmuuu == "nbcanlink")
       newemu$wrt.eta <- 2
-    dk.deta1 <- dtheta.deta(mu, .lmuuu , earg = newemu) # eta2
+    dk.deta1 <- dtheta.deta(mu, .lmuuu , earg = newemu)  # eta2
 
     dk.deta2 <- dtheta.deta(kmat, .lsize , earg = .esize)
 
+
+
     myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
-                           dl.dk  * dk.deta2)
+                            dl.dk  * dk.deta2)
 
 
     if ( .lmuuu == "nbcanlink") {
-      myderiv[, 1:NOS] =
-      myderiv[, 1:NOS] + c(w) * dl.dk * dk.deta1
+      if ( iter%% 2 == 0) {
+        myderiv[, 1:NOS] <- dl.dk  * dk.deta1
+      } else {
+      }
     }
 
 
 
 
-    myderiv[, interleave.VGAM(M, M = Musual)]
+    if ( FALSE && .lmuuu == "nbcanlink") {  # 20130823 FALSE added
+      if ( iter%% 2 == 1)
+      myderiv[, 1:NOS] <-
+      myderiv[, 1:NOS] + c(w) * dl.dk * dk.deta1 * 1  # 20130823 Annul this
+    }
+
+
+    myderiv <- myderiv[, interleave.VGAM(M, M = Musual)]
+
+
+    if ( .alternate.derivs || ( .lmuuu == "nbcanlink")) {  # 20130823 added
+    }
+
+
+    myderiv
   }), list( .lmuuu = lmuuu, .lsize = lsize,
+            .alternate.derivs = alternate.derivs,
             .emuuu = emuuu, .esize = esize))),
 
   weight = eval(substitute(expression({
@@ -3646,11 +3683,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
     if (is.null( .nsimEIM )) {
-      fred2 <- dotFortran(name = "enbin9", ans = double(n*NOS),
+      fred2 <- .Fortran("enbin9", ans = double(n*NOS),
                   as.double(kmat), as.double(mu), as.double( .cutoff ),
                   as.integer(n), ok = as.integer(1), as.integer(NOS),
                   sumpdf = double(1), as.double( .Machine$double.eps ),
-                  as.integer( .Maxiter ))
+                  as.integer( .Maxiter ), PACKAGE = "VGAM")
       if (fred2$ok != 1)
         stop("error in Fortran subroutine exnbin9")
       dim(fred2$ans) <- c(n, NOS)
@@ -3663,7 +3700,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
       run.varcov <- matrix(0, n, NOS)
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rnbinom(n = n*NOS, mu = c(mu), size = c(kmat))
         if (NOS > 1) dim(ysim) = c(n, NOS)
         dl.dk <- digamma(ysim + kmat) - digamma(kmat) -
@@ -3686,18 +3723,37 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
+
+
     if ( .lmuuu == "nbcanlink") {
+      if ( iter%% 2 == 0) {
+        wz[, Musual*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
+      } else {
+      }
+    }
+
+
+
+
+
+    if ( FALSE && .lmuuu == "nbcanlink") {  # 20130823 FALSE added
+      if ( iter%% 2 == 1)
       wz[, Musual*(1:NOS)-1] <-
-      wz[, Musual*(1:NOS)-1] + ned2l.dk2 * dk.deta1^2
+      wz[, Musual*(1:NOS)-1] + ned2l.dk2 * dk.deta1^2 * 1  # 20130823
 
+      if (FALSE)
       wz <- cbind(wz,
-                 kronecker(ned2l.dk2 * dk.deta1 * dk.deta2,
-                           if (NOS > 1) cbind(1, 0) else 1))
+                  kronecker(ned2l.dk2 * dk.deta1 * dk.deta2,
+                            if (NOS > 1) cbind(1, 0) else 1))
     }
 
 
 
 
+
+
+
+
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
   }), list( .cutoff = cutoff,
             .Maxiter = Maxiter,
@@ -3764,16 +3820,16 @@ polya.control <- function(save.weight = TRUE, ...) {
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
     stop("bad input for argument 'isize'")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
      stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
      stop("bad input for argument 'shrinkage.init'")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
     stop("bad input for argument 'nsimEIM'")
   if (nsimEIM <= 10)
@@ -3791,7 +3847,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
 
-  ans =
+  ans <-
   new("vglmff",
   blurb = c("Polya (negative-binomial) distribution\n\n",
             "Links:    ",
@@ -3856,7 +3912,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
     if (!length(etastart)) {
       mu.init <- y
-      for(iii in 1:ncol(y)) {
+      for (iii in 1:ncol(y)) {
         use.this <- if ( .imethod == 1) {
           weighted.mean(y[, iii], w[, iii]) + 1/16
         } else if ( .imethod == 3) {
@@ -3889,7 +3945,7 @@ polya.control <- function(save.weight = TRUE, ...) {
         k.grid <- 2^((-7):7)
         k.grid <- 2^(seq(-8, 8, length = 40))
         kayy.init <- matrix(0, nrow = n, ncol = NOS)
-        for(spp. in 1:NOS) {
+        for (spp. in 1:NOS) {
           kayy.init[, spp.] <- getMaxMin(k.grid,
                              objfun = negbinomial.Loglikfun,
                              y = y[, spp.], x = x, w = w,
@@ -3934,7 +3990,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- .eprob
       misc$earg[[Musual*ii  ]] <- .esize
     }
@@ -3999,18 +4055,18 @@ polya.control <- function(save.weight = TRUE, ...) {
   }), list( .lprob = lprob, .lsize = lsize,
             .eprob = eprob, .esize = esize))),
   weight = eval(substitute(expression({
-    wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
 
     ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
     mumat <- as.matrix(mu)
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       run.varcov <- 0
       kvec <- kmat[, spp.]
       pvec <- pmat[, spp.]
 
-      for(ii in 1:( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
         ysim <- rnbinom(n = n, prob = pvec, size = kvec)
 
         dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
@@ -4031,14 +4087,14 @@ polya.control <- function(save.weight = TRUE, ...) {
                   dThetas.detas[, Musual * (spp. - 1) + ind1$col]
 
 
-      for(jay in 1:Musual)
-          for(kay in jay:Musual) {
+      for (jay in 1:Musual)
+          for (kay in jay:Musual) {
               cptr <- iam((spp. - 1) * Musual + jay,
                          (spp. - 1) * Musual + kay,
                          M = M)
               wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
           }
-    } # End of for(spp.) loop
+    } # End of for (spp.) loop
 
 
 
@@ -4048,7 +4104,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (deviance.arg) ans@deviance = eval(substitute(
+  if (deviance.arg) ans@deviance <- eval(substitute(
       function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     Musual <- 2
     NOS <- ncol(eta) / Musual
@@ -4145,8 +4201,10 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
  studentt <-  function(ldf = "loglog", idf = NULL,
-                       tol1 = 0.1, imethod = 1)
-{
+                       tol1 = 0.1, imethod = 1) {
+
+
+
 
 
   ldof <- as.list(substitute(ldf))
@@ -4162,7 +4220,7 @@ polya.control <- function(save.weight = TRUE, ...) {
   if (!is.Numeric(tol1, positive  = TRUE))
     stop("argument 'tol1' should be positive")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
       stop("argument 'imethod' must be 1 or 2 or 3")
@@ -4307,7 +4365,7 @@ polya.control <- function(save.weight = TRUE, ...) {
   idof <- idf
  
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
       stop("argument 'imethod' must be 1 or 2 or 3")
@@ -4430,7 +4488,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", Musual * NOS)
     names(misc$earg) <- temp.names
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-2]] <- .eloc
       misc$earg[[Musual*ii-1]] <- .esca
       misc$earg[[Musual*ii  ]] <- .edof
@@ -4580,7 +4638,7 @@ polya.control <- function(save.weight = TRUE, ...) {
     if (!is.Numeric(doff, positive = TRUE))
     stop("argument 'df' must be positive")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
       stop("argument 'imethod' must be 1 or 2 or 3")
@@ -4594,7 +4652,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
   new("vglmff",
-  blurb = c("Student t-distribution\n\n",
+  blurb = c("Student t-distribution (2-parameter)\n\n",
             "Link:     ",
             namesof("location", lloc, earg = eloc), ", ",
             namesof("scale",    lsca, earg = esca), "\n",
@@ -4623,7 +4681,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
 
-    extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$Musual <- Musual
     M <- Musual * ncoly #
 
@@ -4678,7 +4736,7 @@ polya.control <- function(save.weight = TRUE, ...) {
     names(misc$link) <- temp.names
     misc$earg <- vector("list", Musual * NOS)
     names(misc$earg) <- temp.names
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- .eloc
       misc$earg[[Musual*ii-0]] <- .esca
     }
@@ -4830,7 +4888,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -4879,8 +4937,8 @@ dsimplex <- function(x, mu = 0.5, dispersion = 1, log = FALSE) {
       (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
   logpdf <- (-0.5 * log(2 * pi) - log(sigma) - 1.5 * log(x) -
             1.5 * log1p(-x) - 0.5 * deeFun(x, mu) / sigma^2)
-  logpdf[x     <= 0.0] <- -Inf # log(0.0)
-  logpdf[x     >= 1.0] <- -Inf # log(0.0)
+  logpdf[x     <= 0.0] <- -Inf  # log(0.0)
+  logpdf[x     >= 1.0] <- -Inf  # log(0.0)
   logpdf[mu    <= 0.0] <- NaN
   logpdf[mu    >= 1.0] <- NaN
   logpdf[sigma <= 0.0] <- NaN
@@ -4891,7 +4949,7 @@ dsimplex <- function(x, mu = 0.5, dispersion = 1, log = FALSE) {
 rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
                stop("bad input for argument 'n'") else n
 
   oneval <- (length(mu) == 1 && length(dispersion) == 1)
@@ -4901,7 +4959,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   Kay1 <- 3 * (dispersion * mu * (1-mu))^2
 
   if (oneval) {
-    Kay1 <- Kay1[1] # Since oneval means there is only one unique value
+    Kay1 <- Kay1[1]  # Since oneval means there is only one unique value
     mymu <-   mu[1]
     myroots <- polyroot(c(-mymu^2, Kay1+2*mymu^2, -3*Kay1+1-2*mymu, 2*Kay1))
     myroots <- myroots[abs(Im(myroots)) < 0.00001]
@@ -4909,7 +4967,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     myroots <- myroots[myroots >= 0.0]
     myroots <- myroots[myroots <= 1.0]
     pdfmax <- dsimplex(myroots, mymu, dispersion[1])
-    pdfmax <- rep(max(pdfmax), length.out = use.n) # For multiple peaks
+    pdfmax <- rep(max(pdfmax), length.out = use.n)  # For multiple peaks
   } else {
     pdfmax <- numeric(use.n)
     for (ii in 1:use.n) {
@@ -4926,7 +4984,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   index <- 1:use.n
   nleft <- length(index)
   while (nleft > 0) {
-    xx <- runif(nleft) # , 0, 1
+    xx <- runif(nleft)  # , 0, 1
     yy <- runif(nleft, max = pdfmax[index])
     newindex <- (1:nleft)[yy < dsimplex(xx, mu[index], dispersion[index])]
     if (length(newindex)) {
@@ -4963,11 +5021,11 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   lsigma <- attr(esigma, "function.name")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
        imethod > 3)
       stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
       shrinkage.init < 0 ||
       shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -5090,12 +5148,12 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- rig <- function(lmu = "identity", llambda = "loge",
-                 imu = NULL, ilambda = 1) {
+ rigff <- function(lmu = "identity", llambda = "loge",
+                   imu = NULL, ilambda = 1) {
 
 
   if (!is.Numeric(ilambda, positive = TRUE))
-      stop("bad input for 'ilambda'")
+    stop("bad input for 'ilambda'")
 
 
   lmu <- as.list(substitute(lmu))
@@ -5146,7 +5204,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   }, list( .lmu = lmu,
            .emu = emu, .elambda = elambda ))),
   last = eval(substitute(expression({
-    misc$d3 <- d3    # because save.weights = FALSE
+    misc$d3 <- d3  # because save.weights = FALSE
     misc$link <-    c(mu = .lmu , lambda = .llambda )
     misc$earg <- list(mu = .emu , lambda = .elambda )
     misc$pooled.weight <- pooled.weight
@@ -5157,54 +5215,57 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
       lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
       if (residuals) stop("loglikelihood residuals not ",
                           "implemented yet") else
-      sum(c(w) * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
+      sum(c(w) * (-0.5 * log(y) + 0.5 * log(lambda) -
+                  (0.5 * lambda/y) * (y - mu)^2))
   }, list( .llambda = llambda,
-           .emu = emu, .elambda = elambda ))),
-  vfamily = c("rig"),
+           .elambda = elambda,
+           .emu = emu ))),
+  vfamily = c("rigff"),
   deriv = eval(substitute(expression({
-      if (iter == 1) {
-          d3 <- deriv3(~ w * 
-               (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
-                      c("mu", "lambda"), hessian= TRUE)
-      }
+    if (iter == 1) {
+      d3 <- deriv3( ~ w *
+           (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
+                  c("mu", "lambda"), hessian = TRUE)
+    }
 
-      lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
+    lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
 
-      eval.d3 <- eval(d3)
-      dl.dthetas <-  attr(eval.d3, "gradient")
+    eval.d3 <- eval(d3)
+    dl.dthetas <-  attr(eval.d3, "gradient")
 
-      dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
-      dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
-      dtheta.detas <- cbind(dmu.deta, dlambda.deta)
+    dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
+    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+    dtheta.detas <- cbind(dmu.deta, dlambda.deta)
 
-      dl.dthetas * dtheta.detas
+    dl.dthetas * dtheta.detas
   }), list( .lmu = lmu, .llambda = llambda,
             .emu = emu, .elambda = elambda ))),
   weight = eval(substitute(expression({
-      d2l.dthetas2 =  attr(eval.d3, "hessian")
-
-      wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
-      wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
-      wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
-      wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
-                                               dtheta.detas[, 2]
-      if (!.expected) {
-          d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu)
-          d2lambda <- d2theta.deta2(lambda, .llambda , earg = .elambda )
-          wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
-          wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
-      }
+    d2l.dthetas2 <- attr(eval.d3, "hessian")
 
-      if (intercept.only) {
-          sumw <- sum(w)
-          for(ii in 1:ncol(wz))
-              wz[, ii] <- sum(wz[, ii]) / sumw
-          pooled.weight <- TRUE
-          wz <- c(w) * wz   # Put back the weights
-      } else
-          pooled.weight <- FALSE
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
+    wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
+    wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
+    wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
+                                             dtheta.detas[, 2]
+    if (! .expected ) {
+      d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu)
+      d2lambda <- d2theta.deta2(lambda, .llambda , earg = .elambda )
+      wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
+      wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
+    }
 
-      wz
+    if (intercept.only) {
+      sumw <- sum(w)
+      for (ii in 1:ncol(wz))
+        wz[, ii] <- sum(wz[, ii]) / sumw
+      pooled.weight <- TRUE
+      wz <- c(w) * wz   # Put back the weights
+    } else {
+      pooled.weight <- FALSE
+    }
+
+    wz
   }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
             .emu = emu, .elambda = elambda ))))
 }
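[Illustrative usage sketch, not part of the upstream patch: the rig() family is renamed rigff() in this version; a hedged intercept-only fit, assuming VGAM 0.9-3 and using made-up positive data.]

  library(VGAM)
  set.seed(4)
  rdata <- data.frame(y = rgamma(100, shape = 4, rate = 2))  # any positive response, illustrative only
  rfit  <- vglm(y ~ 1, rigff(lmu = "identity", llambda = "loge"), data = rdata)
  coef(rfit, matrix = TRUE)     # identity(mu) and log(lambda)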
@@ -5460,7 +5521,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     d2l.dthetas2[, 2, 2] <- c(w) * (-0.25*trigamma((lambda+1)/2) +
                                  0.25*trigamma(1+lambda/2))
 
-    wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
     wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
     wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
     wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
@@ -5474,7 +5535,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
     if (intercept.only) {
         sumw <- sum(w)
-        for(ii in 1:ncol(wz))
+        for (ii in 1:ncol(wz))
           wz[, ii] <- sum(wz[, ii]) / sumw
         pooled.weight <- TRUE
         wz <- c(w) * wz # Put back the weights
@@ -5643,7 +5704,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     stop("bad input for argument 'zero'")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -5771,7 +5832,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
         if (intercept.only) {
           sumw <- sum(w)
-          for(ii in 1:ncol(wz))
+          for (ii in 1:ncol(wz))
             wz[, ii] <- sum(wz[, ii]) / sumw
           pooled.weight <- TRUE
           wz <- c(w) * wz   # Put back the weights
@@ -6048,9 +6109,9 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
 
 
 
- prentice74 <- function(
-        llocation = "identity", lscale = "loge", lshape = "identity",
-        ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3) {
+ prentice74 <-
+  function(llocation = "identity", lscale = "loge", lshape = "identity",
+           ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3) {
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -6459,15 +6520,15 @@ plog  <- function(q, prob, log.p = FALSE) {
         bigans <- 1 + bigans / log1p(-ppp)
     }
 
-    floorq <- pmax(1, floor(q)) # Ensures at least one element per q value
+    floorq <- pmax(1, floor(q))  # Ensures at least one element per q value
     floorq[owen1965] <- 1
     seqq <- sequence(floorq)
     seqp <- rep(prob, floorq)
     onevector <- (seqp^seqq / seqq) / (-log1p(-seqp))
-    rlist <-  dotC(name = "tyee_C_cum8sum",
+    rlist <-  .C("tyee_C_cum8sum",
                   as.double(onevector), answer = double(N),
                   as.integer(N), as.double(seqq),
-                  as.integer(length(onevector)), notok=integer(1))
+                  as.integer(length(onevector)), notok=integer(1), PACKAGE = "VGAM")
     if (rlist$notok != 0) stop("error in 'cum8sum'")
     ans <- if (log.p) log(rlist$answer) else rlist$answer
     if (specialCase)
@@ -6486,13 +6547,13 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
                stop("bad input for argument 'n'") else n
 
-  if (!is.Numeric(prob, allowable.length = 1, positive = TRUE) ||
+  if (!is.Numeric(prob, length.arg = 1, positive = TRUE) ||
       max(prob) >= 1)
     stop("bad input for argument 'prob'")
-  if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(Smallno, positive = TRUE, length.arg = 1) ||
       Smallno > 0.01 ||
      Smallno < 2 * .Machine$double.eps)
     stop("bad input for argument 'Smallno'")
@@ -6501,7 +6562,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
   ptr1 <- 1; ptr2 <- 0
   a <- -1 / log1p(-prob)
   mean <- a*prob/(1-prob)    # E(Y)
-  sigma <- sqrt(a * prob * (1 - a * prob)) / (1 - prob) # sd(Y)
+  sigma <- sqrt(a * prob * (1 - a * prob)) / (1 - prob)  # sd(Y)
   ymax <- dlog(x = 1, prob)
   while(ptr2 < use.n) {
     Lower <- 0.5 # A continuity correction is used = 1 - 0.5.
@@ -6531,7 +6592,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
  logff <- function(link = "logit", init.c = NULL, zero = NULL) {
   if (length(init.c) &&
      (!is.Numeric(init.c, positive = TRUE) || max(init.c) >= 1))
-      stop("init.c must be in (0,1)")
+    stop("init.c must be in (0,1)")
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
@@ -6596,7 +6657,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
                        n, M, byrow = TRUE)
 
       if (!length( .init.c ))
-        for(ilocal in 1:ncoly) {
+        for (ilocal in 1:ncoly) {
           prob.grid <- seq(0.05, 0.95, by = 0.05)
           Init.c[, ilocal] <- getMaxMin(prob.grid,
                                         objfun = logff.Loglikfun,
@@ -6620,7 +6681,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .earg
     }
 
@@ -6662,7 +6723,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
 
 
 
-  delta.known = is.Numeric(delta, allowable.length = 1)
+  delta.known = is.Numeric(delta, length.arg = 1)
 
   link.gamma <- as.list(substitute(link.gamma))
   earg <- link2list(link.gamma)
@@ -6710,7 +6771,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
                          diff(range(y,na.rm = TRUE))
                    }
       gamma.init <- if (length( .igamma)) .igamma else
-                   median(y - delta.init) # = 1/median(1/(y-delta.init))
+                   median(y - delta.init)  # = 1/median(1/(y-delta.init))
       gamma.init <- rep(gamma.init, length = length(y))
       etastart <-
         cbind(theta2eta(gamma.init, .link.gamma , earg = .earg ),
@@ -6769,7 +6830,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
             .delta.known = delta.known,
             .delta = delta ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, dimm(M)) # M = if (delta is known) 1 else 2
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # M = if (delta is known) 1 else 2
     wz[, iam(1, 1, M)] <- 1 * dgamma.deta^2 
     if (! .delta.known) {
       wz[, iam(1, 2, M)] <-  3 * dgamma.deta
@@ -6793,15 +6854,15 @@ dlino <- function(x, shape1, shape2, lambda = 1, log = FALSE) {
   rm(log)
 
   loglik <-  dbeta(x = x, shape1 = shape1, shape2 = shape2, log = TRUE) +
-            shape1 * log(lambda) -
-            (shape1+shape2) * log1p(-(1-lambda)*x)
+             shape1 * log(lambda) -
+            (shape1+shape2) * log1p(-(1-lambda) * x)
   if (log.arg) loglik else exp(loglik)
 }
 
 
 plino <- function(q, shape1, shape2, lambda = 1) {
-  ans <- pbeta(q = lambda * q / (1 - (1-lambda)*q),
-              shape1 = shape1, shape2 = shape2)
+  ans <- pbeta(q = lambda * q / (1 - (1-lambda) * q),
+               shape1 = shape1, shape2 = shape2)
   ans[lambda <= 0] <- NaN
   ans
 }
@@ -6832,9 +6893,9 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
-      stop("bad input for argument 'zero'")
+    stop("bad input for argument 'zero'")
   if (!is.Numeric(ilambda, positive = TRUE))
-      stop("bad input for argument 'ilambda'")
+    stop("bad input for argument 'ilambda'")
 
 
 
@@ -6859,7 +6920,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
             namesof("lambda", llambda, earg = elambda), "\n", 
             "Mean:     something complicated"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (min(y) <= 0 || max(y) >= 1)
@@ -6881,40 +6942,40 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
 
 
 
-      if (!length(etastart)) {
-        lambda.init <- rep(if (length( .ilambda )) .ilambda else 1,
-                          length = n)
-        sh1.init <- if (length( .ishape1 ))
-                     rep( .ishape1, length = n) else NULL
-        sh2.init <- if (length( .ishape2 ))
-                     rep( .ishape2, length = n) else NULL
-            txY.init <- lambda.init * y / (1+lambda.init*y - y)
-            mean1 <- mean(txY.init)
-            mean2 <- mean(1/txY.init)
-            if (!is.Numeric(sh1.init))
-                sh1.init <- rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
-            if (!is.Numeric(sh2.init))
-                sh2.init <- rep(sh1.init * (1-mean1) / mean1, length = n)
-            etastart <-
-              cbind(theta2eta(sh1.init, .lshape1 , earg = .eshape1),
-                    theta2eta(sh2.init, .lshape2 , earg = .eshape2),
-                    theta2eta(lambda.init, .llambda , earg = .elambda ))
-        }
+    if (!length(etastart)) {
+      lambda.init <- rep(if (length( .ilambda )) .ilambda else 1,
+                        length = n)
+      sh1.init <- if (length( .ishape1 ))
+                  rep( .ishape1, length = n) else NULL
+      sh2.init <- if (length( .ishape2 ))
+                  rep( .ishape2, length = n) else NULL
+      txY.init <- lambda.init * y / (1+lambda.init*y - y)
+      mean1 <- mean(txY.init)
+      mean2 <- mean(1/txY.init)
+      if (!is.Numeric(sh1.init))
+        sh1.init <- rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
+      if (!is.Numeric(sh2.init))
+        sh2.init <- rep(sh1.init * (1-mean1) / mean1, length = n)
+      etastart <-
+        cbind(theta2eta(sh1.init, .lshape1 , earg = .eshape1),
+              theta2eta(sh2.init, .lshape2 , earg = .eshape2),
+              theta2eta(lambda.init, .llambda , earg = .elambda ))
+    }
   }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
             .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda,
-            .ishape1=ishape1, .ishape2=ishape2, .ilambda = ilambda ))),
+            .ishape1 = ishape1, .ishape2 = ishape2, .ilambda = ilambda ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
-    sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
     lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
     rep(as.numeric(NA), length = nrow(eta))
   }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
            .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
   last = eval(substitute(expression({
     misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2 ,
-                     lambda = .llambda )
+                      lambda = .llambda )
     misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 ,
-                     lambda = .elambda )
+                      lambda = .elambda )
   }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
             .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
   loglikelihood = eval(substitute(
@@ -7027,8 +7088,8 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
   }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))), 
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-      shapes <- eta2theta(eta, .link , earg = .earg )
-    ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2]-1), NA)
+    shapes <- eta2theta(eta, .link , earg = .earg )
+    ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2] - 1), NA)
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <- c(shape1 = .link , shape2 = .link)
@@ -7039,9 +7100,9 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
     shapes <- eta2theta(eta, .link , earg = .earg )
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
-        sum(c(w) *((shapes[, 1]-1) * log(y) -
-                 lbeta(shapes[, 1], shapes[, 2]) -
-                (shapes[, 2]+shapes[, 1]) * log1p(y)))
+      sum(c(w) * ((shapes[, 1]-1) * log(y) -
+                  lbeta(shapes[, 1], shapes[, 2]) -
+                 (shapes[, 2]+shapes[, 1]) * log1p(y)))
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = "betaprime",
@@ -7060,11 +7121,11 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
     d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
     d2l.dshape1shape2 <- temp2
 
-    wz <- matrix(as.numeric(NA), n, dimm(M))   #3=dimm(M)
+    wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
     wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
     wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
     wz[, iam(1, 2, M)] <- d2l.dshape1shape2 *
-                      dshapes.deta[, 1] * dshapes.deta[, 2]
+                          dshapes.deta[, 1] * dshapes.deta[, 2]
 
     -c(w) * wz
   }))
@@ -7081,20 +7142,23 @@ dmaxwell <- function(x, a, log = FALSE) {
   rm(log)
 
   L <- max(length(x), length(a))
-  x <- rep(x, length.out = L); a = rep(a, length.out = L);
+  x <- rep(x, length.out = L)
+  a <- rep(a, length.out = L)
   logdensity <- rep(log(0), length.out = L)
-  xok <- (x > 0)
+  xok <- (x >= 0)
   logdensity[xok] <- 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
                      2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
   logdensity[a <= 0] <- NaN
+  logdensity[x == Inf] <- log(0)
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
+
 pmaxwell <- function(q, a) {
   L <- max(length(q), length(a))
-  q <- rep(q, length.out = L);
-  a <- rep(a, length.out = L); 
+  q <- rep(q, length.out = L)
+  a <- rep(a, length.out = L) 
   ans <- ifelse(q > 0,
                 erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi),
                 0)
@@ -7103,31 +7167,25 @@ pmaxwell <- function(q, a) {
 }
 
 
-rmaxwell <- function(n, a) {
+qmaxwell <- function(p, a) {
 
-  sqrt(2 * rgamma(n = n, 1.5) / a)
+  sqrt(2 * qgamma(p = p, 1.5) / a)
 }
 
 
-qmaxwell <- function(p, a) {
-  if (!is.Numeric(p, positive = TRUE) || any(p >= 1)) 
-    stop("bad input for argument 'p'")
-  if (any(a <= 0))
-    stop("argument 'a' must be positive")
+rmaxwell <- function(n, a) {
 
-  N <- max(length(p), length(a));
-  p <- rep(p, length.out = N);
-  a <- rep(a, length.out = N)
-  sqrt(2 * qgamma(p = p, 1.5) / a)
+  sqrt(2 * rgamma(n = n, 1.5) / a)
 }
 
 
 
 
+
  maxwell <- function(link = "loge", zero = NULL) {
 
 
-  link <- as.list(substitute(link)) # orig
+  link <- as.list(substitute(link))  # orig
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
@@ -7197,7 +7255,7 @@ qmaxwell <- function(p, a) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ilocal in 1:ncoly) {
+    for (ilocal in 1:ncoly) {
       misc$earg[[ilocal]] <- .earg
     }
 
@@ -7249,12 +7307,13 @@ dnaka <- function(x, shape, scale = 1, log = FALSE) {
     L <- max(length(x), length(shape), length(scale))
     x     <- rep(x,     length.out = L)
     shape <- rep(shape, length.out = L)
-    scale <- rep(scale, length.out = L);
+    scale <- rep(scale, length.out = L)
 
     logdensity <- rep(log(0), length.out = L)
     xok <- (x > 0)
     logdensity[xok] <- dgamma(x = x[xok]^2, shape = shape[xok],
-                             scale = scale[xok]/shape[xok], log = TRUE) +
+                              scale = scale[xok] / shape[xok],
+                              log = TRUE) +
                       log(2) + log(x[xok])
     if (log.arg) logdensity else exp(logdensity)
 }
@@ -7262,15 +7321,17 @@ dnaka <- function(x, shape, scale = 1, log = FALSE) {
 
 pnaka <- function(q, shape, scale = 1) {
     if (!is.Numeric(q))
-        stop("bad input for argument 'q'")
+      stop("bad input for argument 'q'")
     if (!is.Numeric(shape, positive = TRUE))
-        stop("bad input for argument 'shape'")
+      stop("bad input for argument 'shape'")
     if (!is.Numeric(scale, positive = TRUE))
-        stop("bad input for argument 'scale'")
+      stop("bad input for argument 'scale'")
+
     L <- max(length(q), length(shape), length(scale))
     q     <- rep(q,     length.out = L)
     shape <- rep(shape, length.out = L)
-    scale <- rep(scale, length.out = L);
+    scale <- rep(scale, length.out = L)
+
     ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
 }
 
@@ -7284,19 +7345,20 @@ qnaka <- function(p, shape, scale = 1, ...) {
     stop("bad input for argument 'scale'")
 
   L <- max(length(p), length(shape), length(scale))
-  p <- rep(p, length.out = L); shape = rep(shape, length.out = L);
-  scale <- rep(scale, length.out = L);
+  p     <- rep(p,     length.out = L)
+  shape <- rep(shape, length.out = L)
+  scale <- rep(scale, length.out = L)
   ans <- rep(0.0, length.out = L)
 
   myfun <- function(x, shape, scale = 1, p)
     pnaka(q = x, shape = shape, scale = scale) - p
-  for(ii in 1:L) {
+  for (ii in 1:L) {
     EY <- sqrt(scale[ii]/shape[ii]) *
           gamma(shape[ii] + 0.5) / gamma(shape[ii])
     Upper <- 5 * EY
     while(pnaka(q = Upper, shape = shape[ii],
                            scale = scale[ii]) < p[ii])
-        Upper <- Upper + scale[ii]
+      Upper <- Upper + scale[ii]
     ans[ii] <- uniroot(f = myfun, lower = 0, upper = Upper,
                        shape = shape[ii], scale = scale[ii],
                        p = p[ii], ...)$root
@@ -7309,20 +7371,21 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
-  if (!is.Numeric(scale, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(scale, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'scale'")
-  if (!is.Numeric(shape, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(shape, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'shape'")
-  if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(Smallno, positive = TRUE, length.arg = 1) ||
       Smallno > 0.01 ||
       Smallno < 2 * .Machine$double.eps)
     stop("bad input for argument 'Smallno'")
   ans <- rep(0.0, length.out = use.n)
 
-  ptr1 <- 1; ptr2 <- 0
+  ptr1 <- 1
+  ptr2 <- 0
   ymax <- dnaka(x = sqrt(scale * (1 - 0.5 / shape)),
                shape = shape, scale = scale)
   while(ptr2 < use.n) {
@@ -7332,7 +7395,7 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
       Upper <- Upper + scale
     x <- runif(2*use.n, min = 0, max = Upper)
     index <- runif(2*use.n, max = ymax) < dnaka(x, shape = shape,
-                                               scale = scale)
+                                                scale = scale)
     sindex <- sum(index)
     if (sindex) {
       ptr2 <- min(use.n, ptr1 + sindex - 1)
@@ -7390,15 +7453,15 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
 
 
     if (!length(etastart)) {
-        init2 <- if (is.Numeric( .iscale, positive = TRUE))
-                    rep( .iscale, length.out = n) else
-                    rep(1, length.out = n)
-        init1 <- if (is.Numeric( .ishape, positive = TRUE))
-                    rep( .ishape, length.out = n) else
-                rep(init2 / (y+1/8)^2, length.out = n)
-        etastart <-
-          cbind(theta2eta(init1, .lshape , earg = .eshape ),
-                theta2eta(init2, .lscale , earg = .escale ))
+      init2 <- if (is.Numeric( .iscale, positive = TRUE))
+                  rep( .iscale, length.out = n) else
+                  rep(1, length.out = n)
+      init1 <- if (is.Numeric( .ishape, positive = TRUE))
+                  rep( .ishape, length.out = n) else
+              rep(init2 / (y+1/8)^2, length.out = n)
+      etastart <-
+        cbind(theta2eta(init1, .lshape , earg = .eshape ),
+              theta2eta(init2, .lscale , earg = .escale ))
     }
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape,
@@ -7455,12 +7518,14 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
     stop("bad input for argument 'log'")
   rm(log)
 
-  L <- max(length(x), length(scale))
-  x <- rep(x, length.out = L); scale = rep(scale, length.out = L);
+  L     <- max(length(x), length(scale))
+  x     <- rep(x,     length.out = L)
+  scale <- rep(scale, length.out = L)
+
   logdensity <- rep(log(0), length.out = L)
   xok <- (x > 0)
   logdensity[xok] <- log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
-                    2 * log(scale[xok])
+                     2 * log(scale[xok])
   if (log.arg) logdensity else exp(logdensity)
 }
 
@@ -7468,9 +7533,12 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
 prayleigh <- function(q, scale = 1) {
   if (any(scale <= 0))
     stop("argument 'scale' must be positive")
-  L <- max(length(q), length(scale)) 
-  q <- rep(q, length.out = L); scale = rep(scale, length.out = L);
-  ifelse(q > 0,  -expm1(-0.5*(q/scale)^2), 0)
+
+  L     <- max(length(q), length(scale)) 
+  q     <- rep(q,     length.out = L)
+  scale <- rep(scale, length.out = L)
+
+  ifelse(q > 0, -expm1(-0.5 * (q / scale)^2), 0)
 }
 
 
@@ -7499,7 +7567,7 @@ rrayleigh <- function(n, scale = 1) {
   lscale <- attr(escale, "function.name")
 
 
-  if (!is.Numeric(nrfs, allowable.length = 1) ||
+  if (!is.Numeric(nrfs, length.arg = 1) ||
       nrfs < 0 ||
       nrfs > 1)
     stop("bad input for 'nrfs'")
@@ -7576,7 +7644,7 @@ rrayleigh <- function(n, scale = 1) {
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[ii]] <- .escale
     }
 
@@ -7632,27 +7700,28 @@ rrayleigh <- function(n, scale = 1) {
 
 dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
                       shape = 1, log = FALSE) {
-    if (!is.logical(log.arg <- log) || length(log) != 1)
-      stop("bad input for argument 'log'")
-    rm(log)
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
 
-    N <- max(length(x), length(location), length(scale),
-            length(inequality), length(shape))
-    x <- rep(x, length.out = N);
-    location <- rep(location, length.out = N)
-    scale <- rep(scale, length.out = N);
-    inequality <- rep(inequality, length.out = N)
-    shape <- rep(shape, length.out = N)
-
-    logdensity <- rep(log(0), length.out = N)
-    xok <- (x > location)
-    zedd <- (x - location) / scale
-    logdensity[xok] <- log(shape[xok]) -
-                      log(scale[xok]) -  log(inequality[xok]) +
-                      (1/inequality[xok]-1) * log(zedd[xok]) - 
-                      (shape[xok]+1) *
+  N <- max(length(x), length(location), length(scale),
+          length(inequality), length(shape))
+  if (length(x)          != N) x          <- rep(x,          length.out = N)
+  if (length(location)   != N) location   <- rep(location,   length.out = N)
+  if (length(inequality) != N) inequality <- rep(inequality, length.out = N)
+  if (length(shape)      != N) shape      <- rep(shape,      length.out = N)
+  if (length(scale)      != N) scale      <- rep(scale,      length.out = N)
+
+
+  logdensity <- rep(log(0), length.out = N)
+  xok <- (x > location)
+  zedd <- (x - location) / scale
+  logdensity[xok] <- log(shape[xok]) -
+                    log(scale[xok]) -  log(inequality[xok]) +
+                    (1/inequality[xok]-1) * log(zedd[xok]) - 
+                    (shape[xok]+1) *
                       log1p(zedd[xok]^(1/inequality[xok]))
-    if (log.arg) logdensity else exp(logdensity)
+  if (log.arg) logdensity else exp(logdensity)
 }
 
 
@@ -7669,11 +7738,12 @@ pparetoIV <-
 
   N <- max(length(q), length(location), length(scale),
           length(inequality), length(shape))
-  q <- rep(q, length.out = N);
-  location <- rep(location, length.out = N)
-  scale <- rep(scale, length.out = N);
-  inequality <- rep(inequality, length.out = N)
-  shape <- rep(shape, length.out = N)
+  if (length(q)          != N) q          <- rep(q,          length.out = N)
+  if (length(location)   != N) location   <- rep(location,   length.out = N)
+  if (length(inequality) != N) inequality <- rep(inequality, length.out = N)
+  if (length(shape)      != N) shape      <- rep(shape,      length.out = N)
+  if (length(scale)      != N) scale      <- rep(scale,      length.out = N)
+
   answer <- q * 0
   ii <- q > location
   zedd <- (q[ii] - location[ii]) / scale[ii]
@@ -7697,7 +7767,7 @@ qparetoIV <-
 }
 
 
-rparetoIV =
+rparetoIV <-
   function(n, location = 0, scale = 1, inequality = 1, shape = 1) {
   if (!is.Numeric(inequality, positive = TRUE)) 
     stop("bad input for argument 'inequality'")
@@ -7745,9 +7815,9 @@ rparetoII <- function(n, location = 0, scale = 1, shape = 1)
             inequality = 1, shape = shape)
 
 
-dparetoI <- function(x, scale = 1, shape = 1)
+dparetoI <- function(x, scale = 1, shape = 1, log = FALSE)
   dparetoIV(x = x, location = scale, scale = scale, inequality = 1,
-            shape = shape)
+            shape = shape, log = log)
 
 pparetoI <- function(q, scale = 1, shape = 1)
   pparetoIV(q = q, location = scale, scale = scale, inequality = 1,
@@ -7778,14 +7848,14 @@ rparetoI <- function(n, scale = 1, shape = 1)
     stop("argument 'iinequality' must be positive")
   if (is.Numeric(ishape) && any(ishape <= 0))
     stop("argument 'ishape' must be positive")
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE) ||
       imethod > 2)
     stop("bad input for argument 'imethod'")
 
-  if (linequality == "nloge" && location != 0)
+  if (linequality == "negloge" && location != 0)
       warning("The Burr distribution has 'location = 0' and ",
-              "'linequality = nloge'")
+              "'linequality = negloge'")
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
@@ -8068,7 +8138,7 @@ rparetoI <- function(n, scale = 1, shape = 1)
     weight = eval(substitute(expression({
         d2scale.deta2 <- 1 / ((inequ*Scale)^2 * 3)
         d2inequ.deta2 <- (1 + 2* trigamma(1)) / (inequ^2 * 3)
-        wz <- matrix(0, n, M) # It is diagonal
+        wz <- matrix(0, n, M)  # It is diagonal
         wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
         wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
         c(w) * wz
@@ -8143,12 +8213,11 @@ rparetoI <- function(n, scale = 1, shape = 1)
                     shape.init <- max(-1/fittemp$coef["X"], 0.01)
                 if (!length(scale.init))
                     scale.init <- exp(fittemp$coef["Intercept"])
-            }
-            etastart=cbind(
-            theta2eta(rep(scale.init, length.out = n),
-                      .lscale , earg = .escale ),
-            theta2eta(rep(shape.init, length.out = n),
-                      .lshape , earg = .eshape ))
+          }
+          etastart <- cbind(theta2eta(rep(scale.init, length.out = n),
+                                      .lscale , earg = .escale ),
+                            theta2eta(rep(shape.init, length.out = n),
+                                      .lshape , earg = .eshape ))
         }
     }), list( .location = location, .lscale = lscale,
               .escale = escale, .eshape = eshape, 
@@ -8265,9 +8334,9 @@ rpareto <- function(n, location, shape) {
 
 
 
- pareto1 <- function(lshape = "loge", location = NULL) {
+ paretoff <- function(lshape = "loge", location = NULL) {
   if (is.Numeric(location) && location <= 0)
-      stop("argument 'location' must be positive")
+    stop("argument 'location' must be positive")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -8281,7 +8350,8 @@ rpareto <- function(n, location, shape) {
   blurb = c("Pareto distribution ",
             "f(y) = shape * location^shape / y^(shape+1),",
             " 0<location<y, shape>0\n",
-            "Link:    ", namesof("shape", lshape, earg = earg), "\n", "\n",
+            "Link:    ", namesof("shape", lshape, earg = earg),
+            "\n", "\n",
             "Mean:    location*shape/(shape-1) for shape>1"),
   initialize = eval(substitute(expression({
 
@@ -8294,9 +8364,9 @@ rpareto <- function(n, location, shape) {
       namesof("shape", .lshape , earg = .earg , tag = FALSE)
 
 
-    locationhat <- if (!length( .location)) {
+    locationhat <- if (!length( .location )) {
       locationEstimated <- TRUE
-      min(y) # - .smallno
+      min(y)  # - .smallno
     } else {
       locationEstimated <- FALSE
       .location
@@ -8308,8 +8378,8 @@ rpareto <- function(n, location, shape) {
     extra$locationEstimated <- locationEstimated
 
     if (!length(etastart)) {
-        k.init <- (y + 1/8) / (y - locationhat + 1/8)
-        etastart <- theta2eta(k.init, .lshape , earg = .earg )
+      k.init <- (y + 1/8) / (y - locationhat + 1/8)
+      etastart <- theta2eta(k.init, .lshape , earg = .earg )
     }
   }), list( .lshape = lshape, .earg = earg,
             .location = location ))),
@@ -8333,10 +8403,10 @@ rpareto <- function(n, location, shape) {
                         "not implemented yet") else {
 
 
-      sum(c(w) * (log(k) + k * log(location) - (k+1) * log(y )))
+      sum(c(w) * (log(k) + k * log(location) - (k+1) * log(y)))
     }
   }, list( .lshape = lshape, .earg = earg ))),
-  vfamily = c("pareto1"),
+  vfamily = c("paretoff"),
   deriv = eval(substitute(expression({
     location <- extra$location
     k <- eta2theta(eta, .lshape , earg = .earg )
@@ -8355,7 +8425,7 @@ rpareto <- function(n, location, shape) {
 
 
 
-dtpareto <- function(x, lower, upper, shape, log = FALSE) {
+dtruncpareto <- function(x, lower, upper, shape, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -8371,10 +8441,10 @@ dtpareto <- function(x, lower, upper, shape, log = FALSE) {
     stop("argument 'shape' must be positive")
 
   L <- max(length(x), length(lower), length(upper), length(shape))
-  x <- rep(x, length.out = L);
-  shape <- rep(shape, length.out = L)
-  lower <- rep(lower, length.out = L);
-  upper <- rep(upper, length.out = L);
+  if (length(x)     != L) x     <- rep(x,     length.out = L)
+  if (length(shape) != L) shape <- rep(shape, length.out = L)
+  if (length(lower) != L) lower <- rep(lower, length.out = L)
+  if (length(upper) != L) upper <- rep(upper, length.out = L)
 
 
   logdensity <- rep(log(0), length.out = L)
@@ -8391,15 +8461,15 @@ dtpareto <- function(x, lower, upper, shape, log = FALSE) {
 }
 
 
-ptpareto <- function(q, lower, upper, shape) {
+ptruncpareto <- function(q, lower, upper, shape) {
   if (!is.Numeric(q))
     stop("bad input for argument 'q'")
 
   L <- max(length(q), length(lower), length(upper), length(shape)) 
-  q <- rep(q, length.out = L);
-  lower <- rep(lower, length.out = L);
-  upper <- rep(upper, length.out = L);
-  shape <- rep(shape, length.out = L)
+  if (length(q)     != L) q     <- rep(q,     length.out = L)
+  if (length(shape) != L) shape <- rep(shape, length.out = L)
+  if (length(lower) != L) lower <- rep(lower, length.out = L)
+  if (length(upper) != L) upper <- rep(upper, length.out = L)
 
   ans <- q * 0
   xok <- (0 < lower) & (lower < q) & (q < upper) & (shape > 0)
@@ -8416,13 +8486,13 @@ ptpareto <- function(q, lower, upper, shape) {
 }
 
 
-qtpareto <- function(p, lower, upper, shape) {
+qtruncpareto <- function(p, lower, upper, shape) {
   if (!is.Numeric(p, positive = TRUE))
     stop("bad input for argument 'p'")
   if (max(p) >= 1)
     stop("argument 'p' must be in (0, 1)")
 
-  ans <- lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
+  ans <- lower / (1 - p * (1 - (lower/upper)^shape))^(1/shape)
   ans[lower <= 0] <- NaN
   ans[upper <= 0] <- NaN
   ans[shape <= 0] <- NaN
@@ -8431,9 +8501,10 @@ qtpareto <- function(p, lower, upper, shape) {
 }
 
 
-rtpareto <- function(n, lower, upper, shape) {
+rtruncpareto <- function(n, lower, upper, shape) {
 
-  ans <- qtpareto(p = runif(n), lower = lower, upper = upper, shape = shape)
+  ans <- qtruncpareto(p = runif(n), lower = lower,
+                      upper = upper, shape = shape)
   ans[lower <= 0] <- NaN
   ans[upper <= 0] <- NaN
   ans[shape <= 0] <- NaN
@@ -8443,12 +8514,12 @@ rtpareto <- function(n, lower, upper, shape) {
 
 
 
- tpareto1 <- function(lower, upper, lshape = "loge",
-                      ishape = NULL, imethod = 1) {
+ truncpareto <- function(lower, upper, lshape = "loge",
+                         ishape = NULL, imethod = 1) {
 
-  if (!is.Numeric(lower, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(lower, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'lower'")
-  if (!is.Numeric(upper, positive = TRUE, allowable.length = 1))
+  if (!is.Numeric(upper, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'upper'")
   if (lower >= upper)
     stop("lower < upper is required")
@@ -8464,7 +8535,7 @@ rtpareto <- function(n, lower, upper, shape) {
   earg <- eshape
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -8503,15 +8574,15 @@ rtpareto <- function(n, lower, upper, shape) {
       if ( .imethod == 2) {
         0 * y + median(rep((y + 1/8) / (y - .lower + 1/8), times = w))
       } else {
-        tpareto1.Loglikfun <- function(shape, y, x, w, extraargs) {
-           myratio <- .lower / .upper
-           sum(c(w) * (log(shape) + shape * log( .lower) -
-                    (shape+1) * log(y) - log1p(-myratio^shape)))
+        truncpareto.Loglikfun <- function(shape, y, x, w, extraargs) {
+          myratio <- .lower / .upper
+          sum(c(w) * (log(shape) + shape * log( .lower ) -
+                     (shape + 1) * log(y) - log1p(-myratio^shape)))
         }
         shape.grid <- 2^((-4):4)
-        try.this <- getMaxMin(shape.grid, objfun = tpareto1.Loglikfun,
-                             y = y,  x = x, w = w)
-        try.this = rep(try.this, length.out = n)
+        try.this <- getMaxMin(shape.grid, objfun = truncpareto.Loglikfun,
+                              y = y,  x = x, w = w)
+        try.this <- rep(try.this, length.out = n)
         try.this
       }
       etastart <- theta2eta(shape.init, .lshape , earg = .earg )
@@ -8542,14 +8613,14 @@ rtpareto <- function(n, lower, upper, shape) {
     shape <- eta2theta(eta, .lshape , earg = .earg )
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
-      ans <- sum(c(w) * dtpareto(x = y, lower = .lower ,
-                                upper = .upper ,
-                                shape = shape, log = TRUE))
+      ans <- sum(c(w) * dtruncpareto(x = y, lower = .lower ,
+                                     upper = .upper ,
+                                     shape = shape, log = TRUE))
       ans
     }
   }, list( .lshape = lshape, .earg = earg,
            .lower = lower, .upper = upper ))),
-  vfamily = c("tpareto1"),
+  vfamily = c("truncpareto"),
   deriv = eval(substitute(expression({
     shape <- eta2theta(eta, .lshape , earg = .earg )
     myratio <- .lower / .upper
@@ -8574,15 +8645,8 @@ rtpareto <- function(n, lower, upper, shape) {
 
 
 
-erf <- function(x)
-    2 * pnorm(x * sqrt(2)) - 1
-
-erfc <- function(x)
-    2 * pnorm(x * sqrt(2), lower.tail = FALSE)
 
-
-
- wald <- function(link.lambda = "loge", init.lambda = NULL) {
+ waldff <- function(link.lambda = "loge", init.lambda = NULL) {
 
   link.lambda <- as.list(substitute(link.lambda))
   earg <- link2list(link.lambda)
@@ -8608,51 +8672,53 @@ erfc <- function(x)
 
 
     predictors.names <-
-      namesof("lambda", .link.lambda, earg = .earg , short = TRUE)
+      namesof("lambda", .link.lambda , earg = .earg , short = TRUE)
 
 
     if (!length(etastart)) {
-      initlambda <- if (length( .init.lambda)) .init.lambda else
-                   1 / (0.01 + (y-1)^2)
+      initlambda <- if (length( .init.lambda )) .init.lambda else
+                    1 / (0.01 + (y-1)^2)
       initlambda <- rep(initlambda, length.out = n)
       etastart <-
         cbind(theta2eta(initlambda,
                         link = .link.lambda , earg = .earg ))
       }
   }), list( .link.lambda = link.lambda, .earg = earg,
-           .init.lambda=init.lambda ))),
+           .init.lambda = init.lambda ))),
   linkinv = function(eta, extra = NULL) {
-      0*eta + 1
+      0 * eta + 1
   },
-    last = eval(substitute(expression({
-        misc$link <-    c(lambda = .link.lambda )
+  last = eval(substitute(expression({
+    misc$link <-    c(lambda = .link.lambda )
 
-        misc$earg <- list(lambda = .earg )
+    misc$earg <- list(lambda = .earg )
 
-    }), list( .link.lambda = link.lambda, .earg = earg ))),
-    loglikelihood = eval(substitute(
-             function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-        lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
-        if (residuals) stop("loglikelihood residuals ",
-                            "not implemented yet") else
-        sum(c(w) * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
-    }, list( .link.lambda = link.lambda, .earg = earg ))),
-    vfamily = "wald",
-    deriv = eval(substitute(expression({
-        lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
-        dl.dlambda <- 0.5 / lambda + 1 - 0.5 * (y + 1/y)
-        dlambda.deta <- dtheta.deta(theta=lambda, link=.link.lambda, earg = .earg )
-        c(w) * cbind(dl.dlambda * dlambda.deta)
-    }), list( .link.lambda = link.lambda, .earg = earg ))),
-    weight = eval(substitute(expression({
-        d2l.dlambda2 <- 0.5 / (lambda^2)
-        c(w) * cbind(dlambda.deta^2 * d2l.dlambda2)
-    }), list( .link.lambda = link.lambda, .earg = earg ))))
+  }), list( .link.lambda = link.lambda, .earg = earg ))),
+  loglikelihood = eval(substitute(
+         function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
+    if (residuals) stop("loglikelihood residuals ",
+                        "not implemented yet") else
+    sum(c(w) * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
+  }, list( .link.lambda = link.lambda, .earg = earg ))),
+  vfamily = "waldff",
+  deriv = eval(substitute(expression({
+    lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
+    dl.dlambda <- 0.5 / lambda + 1 - 0.5 * (y + 1/y)
+    dlambda.deta <- dtheta.deta(lambda, .link.lambda , earg = .earg )
+    c(w) * cbind(dl.dlambda * dlambda.deta)
+  }), list( .link.lambda = link.lambda, .earg = earg ))),
+  weight = eval(substitute(expression({
+    d2l.dlambda2 <- 0.5 / (lambda^2)
+    c(w) * cbind(dlambda.deta^2 * d2l.dlambda2)
+  }), list( .link.lambda = link.lambda, .earg = earg ))))
 }
 
 
+
+
  expexp <- function(lshape = "loge", lscale = "loge",
-                    ishape = 1.1, iscale = NULL, # ishape cannot be 1
+                    ishape = 1.1, iscale = NULL,  # ishape cannot be 1
                     tolerance = 1.0e-6,
                     zero = NULL) {
 
@@ -8661,7 +8727,7 @@ erfc <- function(x)
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(tolerance, positive = TRUE, allowable.length = 1) ||
+  if (!is.Numeric(tolerance, positive = TRUE, length.arg = 1) ||
       tolerance > 1.0e-2)
     stop("bad input for argument 'tolerance'")
   if (!is.Numeric(ishape, positive = TRUE))
@@ -8706,27 +8772,27 @@ erfc <- function(x)
 
 
       if (!length(etastart)) {
-            shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
-                   stop("argument 'ishape' must be positive") else
-                   rep( .ishape, length.out = n)
-            scale.init <- if (length( .iscale ))
-                        rep( .iscale, length.out = n) else
-                        (digamma(shape.init+1) - digamma(1)) / (y+1/8)
-            scale.init <- rep(weighted.mean(scale.init, w = w),
-                             length.out = n)
-            etastart <-
-              cbind(theta2eta(shape.init, .lshape , earg = .eshape ),
-                    theta2eta(scale.init, .lscale , earg = .escale ))
-        }
-    }), list( .lshape = lshape, .lscale = lscale,
-              .iscale = iscale, .ishape = ishape,
-              .eshape = eshape, .escale = escale))),
-    linkinv = eval(substitute(function(eta, extra = NULL) {
-        shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-        scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
-        (digamma(shape+1)-digamma(1)) / scale
-    }, list( .lshape = lshape, .lscale = lscale,
-             .eshape = eshape, .escale = escale))),
+        shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
+               stop("argument 'ishape' must be positive") else
+               rep( .ishape, length.out = n)
+        scale.init <- if (length( .iscale ))
+                    rep( .iscale, length.out = n) else
+                    (digamma(shape.init+1) - digamma(1)) / (y+1/8)
+        scale.init <- rep(weighted.mean(scale.init, w = w),
+                          length.out = n)
+        etastart <-
+          cbind(theta2eta(shape.init, .lshape , earg = .eshape ),
+                theta2eta(scale.init, .lscale , earg = .escale ))
+    }
+  }), list( .lshape = lshape, .lscale = lscale,
+            .iscale = iscale, .ishape = ishape,
+            .eshape = eshape, .escale = escale))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+      shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+      scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+      (digamma(shape+1) - digamma(1)) / scale
+  }, list( .lshape = lshape, .lscale = lscale,
+           .eshape = eshape, .escale = escale))),
   last = eval(substitute(expression({
     misc$link <-    c("shape" = .lshape , "scale" = .lscale )
     misc$earg <- list("shape" = .eshape , "scale" = .escale )
@@ -8860,7 +8926,7 @@ erfc <- function(x)
   linkinv = eval(substitute(function(eta, extra = NULL) {
     scale <- eta2theta(eta, .lscale , earg = .escale )
     temp7 <-  -expm1(-scale*extra$yvector)
-    shape <- -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
+    shape <- -extra$sumw / sum(extra$w*log(temp7))  # \gamma(\theta)
     (digamma(shape+1)-digamma(1)) / scale
   }, list( .lscale = lscale,
            .escale = escale))),
@@ -8869,7 +8935,7 @@ erfc <- function(x)
     misc$earg <- list("scale" = .escale )
 
     temp7 <-  -expm1(-scale*y)
-    shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+    shape <- -extra$sumw / sum(w*log(temp7))  # \gamma(\theta)
     misc$shape <- shape   # Store the ML estimate here
     misc$pooled.weight <- pooled.weight
   }), list( .lscale = lscale, .escale = escale))),
@@ -8877,7 +8943,7 @@ erfc <- function(x)
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     scale <- eta2theta(eta, .lscale , earg = .escale )
     temp7 <-  -expm1(-scale*y)
-    shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+    shape <- -extra$sumw / sum(w*log(temp7))  # \gamma(\theta)
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else
     sum(c(w) * (log(shape) + log(scale) + 
@@ -8889,7 +8955,7 @@ erfc <- function(x)
 
     temp6 <- exp(-scale*y)
     temp7 <- 1-temp6
-    shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+    shape <- -extra$sumw / sum(w*log(temp7))  # \gamma(\theta)
     d1 <- 1/scale + (shape-1)*y*temp6/temp7 - y
 
     c(w) * cbind(d1 * dtheta.deta(scale, .lscale , earg = .escale ))
@@ -8905,7 +8971,7 @@ erfc <- function(x)
 
     if (FALSE && intercept.only) {
       sumw <- sum(w)
-      for(ii in 1:ncol(wz))
+      for (ii in 1:ncol(wz))
           wz[, ii] <- sum(wz[, ii]) / sumw
       pooled.weight <- TRUE
       wz <- c(w) * wz   # Put back the weights
@@ -8932,7 +8998,7 @@ erfc <- function(x)
   ilocat <- ilocation
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
@@ -9010,7 +9076,7 @@ erfc <- function(x)
         scale.init <- sqrt(3) * apply(y, 2, sd) / pi
       } else {
         locat.init <- scale.init <- NULL
-        for(ii in 1:ncoly) {
+        for (ii in 1:ncoly) {
           locat.init <- c(locat.init, median(rep(y[, ii], w[, ii])))
           scale.init <- c(scale.init, sqrt(3) * sum(w[, ii] *
                         (y[, ii] - locat.init[ii])^2) / (sum(w[, ii]) * pi))
@@ -9053,7 +9119,7 @@ erfc <- function(x)
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .elocat
       misc$earg[[Musual*ii  ]] <- .escale
     }
@@ -9075,8 +9141,8 @@ erfc <- function(x)
     Scale <- eta2theta(eta[, (1:ncoly)*Musual  ], .lscale , earg = .escale )
     if (residuals) stop("loglikelihood residuals ",
                         "not implemented yet") else {
-        sum(c(w) * dlogis(x = y, location = locat,
-                          scale = Scale, log = TRUE))
+      sum(c(w) * dlogis(x = y, location = locat,
+                        scale = Scale, log = TRUE))
     }
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale))),
@@ -9105,7 +9171,7 @@ erfc <- function(x)
     ned2l.dlocat2 <- 1 / (3 * Scale^2)
     ned2l.dscale2 <- (3 + pi^2) / (9 * Scale^2)
 
-    wz <- matrix(as.numeric(NA), nrow = n, ncol = M) # diagonal
+    wz <- matrix(as.numeric(NA), nrow = n, ncol = M)  # diagonal
     wz[, (1:ncoly) * Musual - 1] <- ned2l.dlocat2 * dlocat.deta^2
     wz[, (1:ncoly) * Musual    ] <- ned2l.dscale2 * dscale.deta^2
 
@@ -9149,11 +9215,11 @@ erfc <- function(x)
   if (length(imu) && !is.Numeric(imu, positive = TRUE))
     stop("bad input for argument 'imu'")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -9204,7 +9270,7 @@ erfc <- function(x)
 
 
     M <- Musual * ncol(y) 
-    NOS <- ncoly <- ncol(y) # Number of species
+    NOS <- ncoly <- ncol(y)  # Number of species
     mynames1 <- paste("mu", if (NOS > 1) 1:NOS else "", sep = "")
     predictors.names <-
       namesof(mynames1, .lmu , earg = .emu , tag = FALSE)
@@ -9216,7 +9282,7 @@ erfc <- function(x)
 
     if (!length(etastart)) {
       mu.init <- y
-      for(iii in 1:ncol(y)) {
+      for (iii in 1:ncol(y)) {
         use.this <- if ( .imethod == 1) {
           weighted.mean(y[, iii], w[, iii]) + 1/16
         } else if ( .imethod == 3) {
@@ -9237,7 +9303,7 @@ erfc <- function(x)
 
           mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
         }
-      } # of for(iii)
+      } # of for (iii)
 
 
     kmat <- matrix( .size , n, NOS, byrow = TRUE)
@@ -9291,7 +9357,7 @@ erfc <- function(x)
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[ii]] <- newemu
     }
 
@@ -9355,7 +9421,7 @@ erfc <- function(x)
 
     if ( .lmu == "nbcanlink")
       newemu$wrt.eta <- 1
-    dmu.deta <- dtheta.deta(mu, .lmu , earg = newemu) # eta1
+    dmu.deta <- dtheta.deta(mu, .lmu , earg = newemu)  # eta1
 
     myderiv <- c(w) * dl.dmu * dmu.deta
     myderiv
diff --git a/R/family.vglm.R b/R/family.vglm.R
index f2d25e1..c7ffc2d 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -11,24 +11,24 @@ family.vglm <- function(object, ...)
 
 
 if (FALSE)
-print.vfamily <- function(x, ...)
-{
-    f <- x$vfamily
-    if (is.null(f))
-        stop("not a VGAM family function")
-
-    nn <- x$blurb
-    if (is.null(nn))
-        invisible(return(x))
-
-    cat("Family: ", f[1], "\n") 
-    if (length(f)>1) cat("Classes:", paste(f, collapse=", "), "\n")
-    cat("\n")
-
-    for(i in 1:length(nn))
-        cat(nn[i])
-    cat("\n")
+print.vfamily <- function(x, ...) {
+  f <- x$vfamily
+  if (is.null(f))
+    stop("not a VGAM family function")
+
+  nn <- x$blurb
+  if (is.null(nn))
     invisible(return(x))
+
+  cat("Family: ", f[1], "\n") 
+  if (length(f)>1)
+    cat("Classes:", paste(f, collapse=", "), "\n")
+  cat("\n")
+
+  for (ii in 1:length(nn))
+    cat(nn[ii])
+  cat("\n")
+  invisible(return(x))
 }
 
 
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index cb92cd8..baae15c 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -26,17 +26,17 @@ dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
 
   LLL <- max(length(x), length(pobs0), length(prob), length(size))
   if (length(x)     != LLL) x     <- rep(x,     len = LLL)
-  if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+  if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
   if (length(prob)  != LLL) prob  <- rep(prob,  len = LLL)
-  if (length(size)  != LLL) size  <- rep(size,  len = LLL);
+  if (length(size)  != LLL) size  <- rep(size,  len = LLL)
 
   ans <- rep(0.0, len = LLL)
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
     stop("argument 'pobs0' must be in [0,1]")
   if (!is.Numeric(prob, positive = TRUE))
-    stop("argument 'prob' must be in [0,Inf)")
+    stop("argument 'prob' must be in (0,Inf)")
   if (!is.Numeric(size, positive = TRUE))
-    stop("argument 'size' must be in [0,Inf)")
+    stop("argument 'size' must be in (0,Inf)")
   index0 <- x == 0
 
   if (log.arg) {
@@ -62,10 +62,10 @@ pzanegbin <- function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
   }
 
   LLL <- max(length(q), length(pobs0), length(prob), length(size))
-  if (length(q)     != LLL) q     <- rep(q,     len = LLL);
-  if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
-  if (length(prob)  != LLL) prob  <- rep(prob,  len = LLL);
-  if (length(size)  != LLL) size  <- rep(size,  len = LLL);
+  if (length(q)     != LLL) q     <- rep(q,     len = LLL)
+  if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
+  if (length(prob)  != LLL) prob  <- rep(prob,  len = LLL)
+  if (length(size)  != LLL) size  <- rep(size,  len = LLL)
   ans <- rep(0.0, len = LLL)
 
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
@@ -88,10 +88,10 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
   }
 
   LLL <- max(length(p), length(pobs0), length(prob), length(size))
-  if (length(p)     != LLL) p      <- rep(p,     len = LLL);
-  if (length(pobs0) != LLL) pobs0  <- rep(pobs0, len = LLL);
-  if (length(prob)  != LLL) prob   <- rep(prob,  len = LLL);
-  if (length(size)  != LLL) size   <- rep(size,  len = LLL);
+  if (length(p)     != LLL) p      <- rep(p,     len = LLL)
+  if (length(pobs0) != LLL) pobs0  <- rep(pobs0, len = LLL)
+  if (length(prob)  != LLL) prob   <- rep(prob,  len = LLL)
+  if (length(size)  != LLL) size   <- rep(size,  len = LLL)
   ans <- rep(0.0, len = LLL)
 
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
@@ -99,10 +99,10 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
   ans <- p
   ans[p <= pobs0] <- 0
   pindex <- (p > pobs0)
-  ans[pindex] <- qposnegbin((p[pindex] -
-                            pobs0[pindex]) / (1 - pobs0[pindex]),
-                            prob = prob[pindex],
-                            size = size[pindex])
+  ans[pindex] <-
+    qposnegbin((p[pindex] - pobs0[pindex]) / (1 - pobs0[pindex]),
+               prob = prob[pindex],
+               size = size[pindex])
   ans
 }
 
@@ -110,7 +110,7 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
 rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   if (length(munb)) {
@@ -138,9 +138,9 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
   rm(log)
 
   LLL <- max(length(x), length(lambda), length(pobs0))
-  if (length(x)      != LLL) x      <- rep(x,      len = LLL);
-  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
-  if (length(pobs0)  != LLL) pobs0  <- rep(pobs0,  len = LLL);
+  if (length(x)      != LLL) x      <- rep(x,      len = LLL)
+  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
+  if (length(pobs0)  != LLL) pobs0  <- rep(pobs0,  len = LLL)
   ans <- rep(0.0, len = LLL)
 
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
@@ -151,11 +151,11 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
   if (log.arg) {
     ans[ index0] <- log(pobs0[index0])
     ans[!index0] <- log1p(-pobs0[!index0]) +
-                   dpospois(x[!index0], lambda[!index0], log = TRUE)
+                    dpospois(x[!index0], lambda[!index0], log = TRUE)
   } else {
     ans[ index0] <- pobs0[index0]
     ans[!index0] <- (1 - pobs0[!index0]) *
-                   dpospois(x[!index0], lambda[!index0])
+                    dpospois(x[!index0], lambda[!index0])
   }
   ans
 }
@@ -164,15 +164,15 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
 
 pzapois <- function(q, lambda, pobs0 = 0) {
   LLL <- max(length(q), length(lambda), length(pobs0))
-  if (length(q)      != LLL) q      <- rep(q,      len = LLL);
-  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
-  if (length(pobs0)  != LLL) pobs0  <- rep(pobs0,  len = LLL);
+  if (length(q)      != LLL) q      <- rep(q,      len = LLL)
+  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
+  if (length(pobs0)  != LLL) pobs0  <- rep(pobs0,  len = LLL)
   ans <- rep(0.0, len = LLL)
 
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
     stop("argument 'pobs0' must be in [0,1]")
   ans[q >  0] <-    pobs0[q > 0] +
-                (1-pobs0[q > 0]) * ppospois(q[q > 0], lambda[q > 0])
+                 (1-pobs0[q > 0]) * ppospois(q[q > 0], lambda[q > 0])
   ans[q <  0] <- 0
   ans[q == 0] <- pobs0[q == 0]
   ans
@@ -181,9 +181,9 @@ pzapois <- function(q, lambda, pobs0 = 0) {
 
 qzapois <- function(p, lambda, pobs0 = 0) {
   LLL <- max(length(p), length(lambda), length(pobs0))
-  if (length(p)      != LLL) p      <- rep(p,      len = LLL);
-  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
-  if (length(pobs0)  != LLL) pobs0  <- rep(pobs0,  len = LLL);
+  if (length(p)      != LLL) p      <- rep(p,      len = LLL)
+  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
+  if (length(pobs0)  != LLL) pobs0  <- rep(pobs0,  len = LLL)
 
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
     stop("argument 'pobs0' must be between 0 and 1 inclusive")
@@ -191,7 +191,7 @@ qzapois <- function(p, lambda, pobs0 = 0) {
   ind4 <- (p > pobs0)
   ans[!ind4] <- 0
   ans[ ind4] <- qpospois((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
-                        lambda = lambda[ind4])
+                         lambda = lambda[ind4])
   ans
 }
 
@@ -199,14 +199,14 @@ qzapois <- function(p, lambda, pobs0 = 0) {
 rzapois <- function(n, lambda, pobs0 = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
   ans <- rpospois(use.n, lambda)
   if (length(pobs0) != use.n)
     pobs0 <- rep(pobs0, length = use.n)
   if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
-    stop("argument 'pobs0' must be between 0 and 1 inclusive")
+    stop("argument 'pobs0' must in [0,1]")
 
   ifelse(runif(use.n) < pobs0, 0, ans)
 }
@@ -224,9 +224,9 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
   rm(log)
 
   LLL <- max(length(x), length(lambda), length(pstr0))
-  if (length(x)      != LLL) x      <- rep(x,      len = LLL);
-  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
-  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
+  if (length(x)      != LLL) x      <- rep(x,      len = LLL)
+  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
+  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL)
 
   ans <- x + lambda + pstr0
 
@@ -241,12 +241,13 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
   } else {
     ans[ index0] <-      pstr0[ index0] + (1 - pstr0[ index0]) *
                        dpois(x[ index0], lambda[ index0])
-    ans[!index0] <- (1 - pstr0[!index0]) * dpois(x[!index0], lambda[!index0])
+    ans[!index0] <- (1 - pstr0[!index0]) *
+                    dpois(x[!index0], lambda[!index0])
   }
 
 
-  deflat_limit <- -1 / expm1(lambda)
-  ans[pstr0 < deflat_limit] <- NaN
+  deflat.limit <- -1 / expm1(lambda)
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
   ans
@@ -256,16 +257,16 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
 pzipois <- function(q, lambda, pstr0 = 0) {
 
   LLL <- max(length(pstr0), length(lambda), length(q))
-  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
-  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
-  if (length(q)      != LLL) q      <- rep(q,      len = LLL);
+  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL)
+  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
+  if (length(q)      != LLL) q      <- rep(q,      len = LLL)
 
   ans <- ppois(q, lambda)
   ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
 
 
-  deflat_limit <- -1 / expm1(lambda)
-  ans[pstr0 < deflat_limit] <- NaN
+  deflat.limit <- -1 / expm1(lambda)
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
 
@@ -276,30 +277,31 @@ pzipois <- function(q, lambda, pstr0 = 0) {
 qzipois <- function(p, lambda, pstr0 = 0) {
 
   LLL <- max(length(p), length(lambda), length(pstr0))
-  ans =
-  p      <- rep(p,      len = LLL)
-  lambda <- rep(lambda, len = LLL)
-  pstr0  <- rep(pstr0,  len = LLL)
+  if (length(p)      != LLL) p      <- rep(p,      len = LLL)
+  if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
+  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL)
+  ans    <- p
 
   ans[p <= pstr0] <- 0 
   pindex <- (p > pstr0)
-  ans[pindex] <- qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
-                      lambda = lambda[pindex])
+  ans[pindex] <-
+    qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
+          lambda = lambda[pindex])
 
 
-  deflat_limit <- -1 / expm1(lambda)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
+  deflat.limit <- -1 / expm1(lambda)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
   if (any(ind0)) {
     pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * exp(-lambda[ind0])
     ans[p[ind0] <= pobs0] <- 0 
     pindex <- (1:LLL)[ind0 & (p > pobs0)]
     Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * exp(-lambda[pindex])
     ans[pindex] <- qpospois((p[pindex] - Pobs0) / (1 - Pobs0),
-                           lambda = lambda[pindex])
+                            lambda = lambda[pindex])
   }
 
 
-  ans[pstr0 < deflat_limit] <- NaN
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
 
@@ -313,11 +315,11 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
-  if (length(pstr0)  != use.n) pstr0  <- rep(pstr0,  len = use.n);
-  if (length(lambda) != use.n) lambda <- rep(lambda, len = use.n);
+  if (length(pstr0)  != use.n) pstr0  <- rep(pstr0,  len = use.n)
+  if (length(lambda) != use.n) lambda <- rep(lambda, len = use.n)
  
   ans <- rpois(use.n, lambda)
   ans <- ifelse(runif(use.n) < pstr0, 0, ans)
@@ -325,15 +327,15 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
   prob0 <- exp(-lambda)
-  deflat_limit <- -1 / expm1(lambda)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
+  deflat.limit <- -1 / expm1(lambda)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
   if (any(ind0)) {
     pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
     ans[ind0] <- rpospois(sum(ind0), lambda[ind0]) 
     ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
   }
 
-  ans[pstr0 < deflat_limit] <- NaN
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
   ans
@@ -351,6 +353,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
 
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
@@ -460,27 +464,33 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
 
- zapoisson <- function(lpobs0 = "logit", llambda = "loge",
-                       zero = NULL) {
 
+ zapoisson <-
+  function(lpobs0 = "logit", llambda = "loge",
+           type.fitted = c("mean", "pobs0", "onempobs0"),
+           zero = NULL) {
 
 
 
-  lpobs_0 <- as.list(substitute(lpobs0))
-  epobs_0 <- link2list(lpobs_0)
-  lpobs_0 <- attr(epobs_0, "function.name")
+
+  lpobs.0 <- as.list(substitute(lpobs0))
+  epobs.0 <- link2list(lpobs.0)
+  lpobs.0 <- attr(epobs.0, "function.name")
 
   llambda <- as.list(substitute(llambda))
   elambda <- link2list(llambda)
   llambda <- attr(elambda, "function.name")
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "onempobs0"))[1]
+
 
 
   new("vglmff",
   blurb = c("Zero-altered Poisson ",
             "(Bernoulli and positive-Poisson conditional model)\n\n",
             "Links:    ",
-            namesof("pobs0",  lpobs_0, earg = epobs_0, tag = FALSE), ", ",
+            namesof("pobs0",  lpobs.0, earg = epobs.0, tag = FALSE), ", ",
             namesof("lambda", llambda, earg = elambda, tag = FALSE), "\n",
             "Mean:     (1 - pobs0) * lambda / (1 - exp(-lambda))"),
 
@@ -490,10 +500,15 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     Musual <- 2
     eval(negzero.expression)
   }), list( .zero = zero ))),
+
   infos = eval(substitute(function(...) {
     list(Musual = 2,
+         type.fitted  = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
   initialize = eval(substitute(expression({
     Musual <- 2
     if (any(y < 0))
@@ -514,22 +529,24 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted      <- .type.fitted
 
     mynames1 <- if (ncoly == 1) "pobs0"    else
                 paste("pobs0",    1:ncoly, sep = "")
     mynames2 <- if (ncoly == 1) "lambda" else
                 paste("lambda", 1:ncoly, sep = "")
     predictors.names <-
-        c(namesof(mynames1, .lpobs_0, earg = .epobs_0, tag = FALSE),
+        c(namesof(mynames1, .lpobs.0, earg = .epobs.0, tag = FALSE),
           namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
           interleave.VGAM(Musual*NOS, M = Musual)]
 
     if (!length(etastart)) {
       etastart <-
         cbind(theta2eta((0.5 + w*y0) / (1+w),
-                        .lpobs_0, earg = .epobs_0 ),
+                        .lpobs.0, earg = .epobs.0 ),
               matrix(1, n, NOS))  # 1 here is any old value
-      for(spp. in 1:NOS) {
+      for (spp. in 1:NOS) {
         sthese <- skip.these[, spp.]
         etastart[!sthese, NOS+spp.] =
           theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
@@ -537,26 +554,51 @@ rzipois <- function(n, lambda, pstr0 = 0) {
       }
       etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
     }
-  }), list( .lpobs_0 = lpobs_0, .llambda = llambda,
-            .epobs_0 = epobs_0, .elambda = elambda ))), 
+  }), list( .lpobs.0 = lpobs.0, .llambda = llambda,
+            .epobs.0 = epobs.0, .elambda = elambda,
+            .type.fitted = type.fitted ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "onempobs0"))[1]
+
     NOS <- extra$NOS
     Musual <- 2
 
 
-    pobs_0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
-                              .lpobs_0, earg = .epobs_0 ))
+    pobs.0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                              .lpobs.0, earg = .epobs.0 ))
     lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
                               .llambda, earg = .elambda ))
 
-    (1 - pobs_0) * lambda / (-expm1(-lambda))
-  }, list( .lpobs_0 = lpobs_0, .llambda = llambda,
-           .epobs_0 = epobs_0, .elambda = elambda ))),
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - pobs.0) * lambda / (-expm1(-lambda)),
+                  "pobs0"     =      pobs.0,  # P(Y=0)
+                  "onempobs0" =  1 - pobs.0)  # P(Y>0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lpobs.0 = lpobs.0, .llambda = llambda,
+           .epobs.0 = epobs.0, .elambda = elambda ))),
   last = eval(substitute(expression({
     misc$expected <- TRUE
     misc$multipleResponses <- TRUE
 
-    temp.names <- c(rep( .lpobs_0 , len = NOS),
+    temp.names <- c(rep( .lpobs.0 , len = NOS),
                     rep( .llambda , len = NOS))
     temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
     misc$link  <- temp.names
@@ -565,28 +607,30 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
     misc$earg <- vector("list", Musual * NOS)
     names(misc$earg) <- names(misc$link)
-    for(ii in 1:NOS) {
-      misc$earg[[Musual*ii-1]] <- .epobs_0
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-1]] <- .epobs.0
       misc$earg[[Musual*ii  ]] <- .elambda
     }
-  }), list( .lpobs_0 = lpobs_0, .llambda = llambda,
-            .epobs_0 = epobs_0, .elambda = elambda ))),
+  }), list( .lpobs.0 = lpobs.0, .llambda = llambda,
+            .epobs.0 = epobs.0, .elambda = elambda ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     NOS <- extra$NOS
     Musual <- 2
 
-    pobs0    <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
-                             .lpobs_0, earg = .epobs_0))
+    pobs0  <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                              .lpobs.0, earg = .epobs.0))
     lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
-                             .llambda, earg = .elambda ))
+                              .llambda, earg = .elambda ))
 
-    if (residuals)
-      stop("loglikelihood residuals not implemented yet") else {
-      sum(c(w) * dzapois(x = y, pobs0 = pobs0, lambda = lambda, log = TRUE))
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      sum(c(w) * dzapois(x = y, pobs0 = pobs0, lambda = lambda,
+                         log = TRUE))
     }
-  }, list( .lpobs_0 = lpobs_0, .llambda = llambda,
-           .epobs_0 = epobs_0, .elambda = elambda ))),
+  }, list( .lpobs.0 = lpobs.0, .llambda = llambda,
+           .epobs.0 = epobs.0, .elambda = elambda ))),
   vfamily = c("zapoisson"),
   deriv = eval(substitute(expression({
     Musual <- 2
@@ -595,24 +639,24 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     skip <- extra$skip.these
 
     phimat <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
-                              .lpobs_0, earg = .epobs_0 ))
+                              .lpobs.0, earg = .epobs.0 ))
     lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
                               .llambda, earg = .elambda ))
 
     dl.dlambda <- y / lambda + 1 / expm1(-lambda)
-    dl.dphimat <- -1 / (1 - phimat) # For y > 0 obsns
+    dl.dphimat <- -1 / (1 - phimat)  # For y > 0 obsns
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       dl.dphimat[skip[, spp.], spp.] <- 1 / phimat[skip[, spp.], spp.]
       dl.dlambda[skip[, spp.], spp.] <- 0
     }
     dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda)
     mu.phi0 <- phimat
 
-    temp3 <- if (.lpobs_0 == "logit") {
+    temp3 <- if (.lpobs.0 == "logit") {
       c(w) * (y0 - mu.phi0)
     } else {
-      c(w) * dtheta.deta(mu.phi0, link = .lpobs_0 , earg = .epobs_0 ) *
+      c(w) * dtheta.deta(mu.phi0, link = .lpobs.0 , earg = .epobs.0 ) *
             dl.dphimat
     }
 
@@ -620,8 +664,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
                  c(w) * dl.dlambda * dlambda.deta)
     ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
     ans
-  }), list( .lpobs_0 = lpobs_0, .llambda = llambda,
-            .epobs_0 = epobs_0, .elambda = elambda ))),
+  }), list( .lpobs.0 = lpobs.0, .llambda = llambda,
+            .epobs.0 = epobs.0, .elambda = elambda ))),
   weight = eval(substitute(expression({
 
     wz <- matrix(0.0, n, Musual * NOS)
@@ -635,16 +679,16 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
     tmp100 <- mu.phi0 * (1.0 - mu.phi0)
-    tmp200 <- if ( .lpobs_0 == "logit" && is.empty.list( .epobs_0 )) {
+    tmp200 <- if ( .lpobs.0 == "logit" && is.empty.list( .epobs.0 )) {
         cbind(c(w) * tmp100)
     } else {
       cbind(c(w) * (1 / tmp100) *
-            dtheta.deta(mu.phi0, link = .lpobs_0, earg = .epobs_0)^2)
+            dtheta.deta(mu.phi0, link = .lpobs.0, earg = .epobs.0)^2)
     }
 
 
   if (FALSE)
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       index200 <- abs(tmp200[, ii]) < .Machine$double.eps
       if (any(index200)) {
         tmp200[index200, ii] <- 10.0 * .Machine$double.eps^(3/4)
@@ -659,9 +703,249 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
     wz
-  }), list( .lpobs_0 = lpobs_0,
-            .epobs_0 = epobs_0 ))))
-} #   End of zapoisson
+  }), list( .lpobs.0 = lpobs.0,
+            .epobs.0 = epobs.0 ))))
+}  # End of zapoisson
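
The main user-visible change to zapoisson() is the new 'type.fitted' argument, which switches the fitted values between the mean, P(Y = 0) and P(Y > 0). A minimal sketch, assuming VGAM 0.9-3 is attached; the data frame 'zdata' and covariate 'x2' are illustrative only, simulated from the Bernoulli / positive-Poisson conditional model via rpospois() (the same helper the random-variate code above relies on).

  set.seed(123)
  zdata <- data.frame(x2 = runif(500))
  zdata <- transform(zdata,
                     pobs0  = logit(-1 + 2 * x2, inverse = TRUE),
                     lambda = loge(0.5 + x2, inverse = TRUE))
  zdata <- transform(zdata,
                     y = ifelse(runif(500) < pobs0, 0, rpospois(500, lambda)))
  fit <- vglm(y ~ x2, zapoisson(type.fitted = "pobs0"), data = zdata)
  head(fitted(fit))   # now P(Y = 0) for each row rather than the mean
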
+
+
+
+
+
+ zapoissonff <-
+  function(llambda = "loge", lonempobs0 = "logit",
+           type.fitted = c("mean", "pobs0", "onempobs0"),
+           zero = -2) {
+
+
+
+  llambda <- as.list(substitute(llambda))
+  elambda <- link2list(llambda)
+  llambda <- attr(elambda, "function.name")
+
+  lonempobs0 <- as.list(substitute(lonempobs0))
+  eonempobs0 <- link2list(lonempobs0)
+  lonempobs0 <- attr(eonempobs0, "function.name")
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "onempobs0"))[1]
+
+
+  new("vglmff",
+  blurb = c("Zero-altered Poisson ",
+            "(Bernoulli and positive-Poisson conditional model)\n\n",
+            "Links:    ",
+            namesof("lambda",     llambda,    earg = elambda,    tag = FALSE), ", ",
+            namesof("onempobs0",  lonempobs0, earg = eonempobs0, tag = FALSE), "\n",
+            "Mean:     onempobs0 * lambda / (1 - exp(-lambda))"),
+
+  constraints = eval(substitute(expression({
+
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
+  initialize = eval(substitute(expression({
+    Musual <- 2
+    if (any(y < 0))
+      stop("the response must not have negative values")
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+    extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+
+    extra$dimnamesy   <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+
+    mynames1 <- if (ncoly == 1) "lambda"    else
+                paste("lambda",    1:ncoly, sep = "")
+    mynames2 <- if (ncoly == 1) "onempobs0" else
+                paste("onempobs0", 1:ncoly, sep = "")
+
+    predictors.names <-
+        c(namesof(mynames1, .llambda,     earg = .elambda    , tag = FALSE),
+          namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
+          interleave.VGAM(Musual*NOS, M = Musual)]
+
+    if (!length(etastart)) {
+      etastart <-
+        cbind(matrix(1, n, NOS),  # 1 here is any old value
+              theta2eta(1 - (0.5 + w * y0) / (1 + w),
+                        .lonempobs0 , earg = .eonempobs0 ))
+      for (spp. in 1:NOS) {
+        sthese <- skip.these[, spp.]
+        etastart[!sthese, 0 * NOS + spp.] <-
+          theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
+                    .llambda, earg = .elambda )
+      }
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+    }
+  }), list( .lonempobs0 = lonempobs0, .llambda = llambda,
+            .eonempobs0 = eonempobs0, .elambda = elambda,
+            .type.fitted = type.fitted ))), 
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "onempobs0"))[1]
+
+    NOS <- extra$NOS
+    Musual <- 2
+
+    lambda    <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                                 .llambda    , earg = .elambda    ))
+    onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+                                 .lonempobs0 , earg = .eonempobs0 ))
+
+
+    ans <- switch(type.fitted,
+                  "mean"      =    (onempobs0) * lambda / (-expm1(-lambda)),
+                  "pobs0"     = 1 - onempobs0,  # P(Y=0)
+                  "onempobs0" =     onempobs0)  # P(Y>0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lonempobs0 = lonempobs0, .llambda = llambda,
+           .eonempobs0 = eonempobs0, .elambda = elambda ))),
+  last = eval(substitute(expression({
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+
+    temp.names <- c(rep( .llambda    , len = NOS),
+                    rep( .lonempobs0 , len = NOS))
+    temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+    misc$link  <- temp.names
+    names(misc$link) <-
+      c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+
+    misc$earg <- vector("list", Musual * NOS)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-1]] <- .elambda
+      misc$earg[[Musual*ii  ]] <- .eonempobs0
+    }
+  }), list( .lonempobs0 = lonempobs0, .llambda = llambda,
+            .eonempobs0 = eonempobs0, .elambda = elambda ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    NOS <- extra$NOS
+    Musual <- 2
+
+    lambda     <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                                  .llambda    , earg = .elambda    ))
+    onempobs0  <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+                                  .lonempobs0 , earg = .eonempobs0 ))
+
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      sum(c(w) * dzapois(x = y, lambda = lambda, pobs0 = 1 - onempobs0,
+                         log = TRUE))
+    }
+  }, list( .lonempobs0 = lonempobs0, .llambda = llambda,
+           .eonempobs0 = eonempobs0, .elambda = elambda ))),
+  vfamily = c("zapoissonff"),
+  deriv = eval(substitute(expression({
+    Musual <- 2
+    NOS <- extra$NOS
+    y0 <- extra$y0
+    skip <- extra$skip.these
+
+    lambda   <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                                .llambda, earg = .elambda ))
+    omphimat <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+                                .lonempobs0, earg = .eonempobs0 ))
+    phimat <- 1 - omphimat
+
+
+    dl.dlambda <- y / lambda + 1 / expm1(-lambda)
+    dl.dPHImat <- +1 / (omphimat)  # For y > 0 obsns
+
+    for (spp. in 1:NOS) {
+      dl.dPHImat[skip[, spp.], spp.] <- -1 / phimat[skip[, spp.], spp.]
+      dl.dlambda[skip[, spp.], spp.] <-  0
+    }
+    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+    mu.phi0 <- omphimat
+
+    temp3 <- if ( FALSE && .lonempobs0 == "logit") {
+    } else {
+      c(w) * dtheta.deta(mu.phi0, link = .lonempobs0 , earg = .eonempobs0 ) *
+            dl.dPHImat
+    }
+
+    ans <- cbind(c(w) * dl.dlambda * dlambda.deta,
+                 temp3)
+    ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+    ans
+  }), list( .lonempobs0 = lonempobs0, .llambda = llambda,
+            .eonempobs0 = eonempobs0, .elambda = elambda ))),
+  weight = eval(substitute(expression({
+
+    wz <- matrix(0.0, n, Musual * NOS)
+
+    temp5 <- expm1(lambda)
+
+    ned2l.dlambda2 <- (1 - phimat) * (temp5 + 1) *
+                      (1 / lambda - 1 / temp5) / temp5
+
+
+    wz[, 0 * NOS + (1:NOS)] <- c(w) * ned2l.dlambda2 * dlambda.deta^2
+
+
+    tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+    tmp200 <- if ( .lonempobs0 == "logit" && is.empty.list( .eonempobs0 )) {
+        cbind(c(w) * tmp100)
+    } else {
+      cbind(c(w) * (1 / tmp100) *
+            dtheta.deta(mu.phi0, link = .lonempobs0, earg = .eonempobs0)^2)
+    }
+
+
+    wz[, 1 * NOS + (1:NOS)] <-  tmp200
+
+    wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
+
+
+
+    wz
+  }), list( .lonempobs0 = lonempobs0,
+            .eonempobs0 = eonempobs0 ))))
+}  # End of zapoissonff
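
zapoissonff() is the complementary parameterisation added alongside it: the linear predictors are ordered (lambda, onempobs0) and the default zero = -2 keeps onempobs0 = P(Y > 0) intercept-only. A sketch reusing 'zdata' from the previous example; with zero = NULL the two families are reparameterisations of the same model, so their maximised log-likelihoods should agree (that equality is my reading of the code, not a claim made in the patch).

  fit1 <- vglm(y ~ x2, zapoisson(zero = NULL),   data = zdata)
  fit2 <- vglm(y ~ x2, zapoissonff(zero = NULL), data = zdata)
  c(logLik(fit1), logLik(fit2))     # expected to be (near-)identical
  coef(fit2, matrix = TRUE)         # columns: loge(lambda), logit(onempobs0)
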
+
+
 
 
 
@@ -675,8 +959,9 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
 
  zanegbinomial <-
   function(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
+           type.fitted = c("mean", "pobs0"),
            ipobs0 = NULL,                    isize = NULL,
-           zero = c(-1, -3),
+           zero = -3,  # Prior to 20130917 the default was: c(-1, -3),
            imethod = 1,
            nsimEIM = 250,
            shrinkage.init = 0.95) {
@@ -686,7 +971,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1,
+  if (!is.Numeric(nsimEIM, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE))
     stop("argument 'nsimEIM' must be a positive integer")
   if (nsimEIM <= 30)
@@ -696,18 +981,25 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
   if (length(ipobs0) && (!is.Numeric(ipobs0, positive = TRUE) ||
      max(ipobs0) >= 1))
     stop("If given, argument 'ipobs0' must contain values in (0,1) only")
+
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
     stop("If given, argument 'isize' must contain positive values only")
-  if (!is.Numeric(imethod, allowable.length = 1,
+
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
      shrinkage.init < 0 ||
      shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
 
 
+  lpobs0 <- as.list(substitute(lpobs0))
+  epobs0 <- link2list(lpobs0)
+  lpobs0 <- attr(epobs0, "function.name")
+
   lmunb <- as.list(substitute(lmunb))
   emunb <- link2list(lmunb)
   lmunb <- attr(emunb, "function.name")
@@ -716,11 +1008,9 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
   esize <- link2list(lsize)
   lsize <- attr(esize, "function.name")
 
-  lpobs0 <- as.list(substitute(lpobs0))
-  epobs0 <- link2list(lpobs0)
-  lpobs0 <- attr(epobs0, "function.name")
-
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0"))[1]
 
 
   new("vglmff",
@@ -738,6 +1028,16 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
     Musual <- 3
     eval(negzero.expression)
   }), list( .zero = zero ))),
+
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 3,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
   initialize = eval(substitute(expression({
     Musual <- 3
 
@@ -756,8 +1056,11 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
     y <- temp5$y
 
 
-    extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
-    M <- Musual * ncoly # 
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    M <- Musual * ncoly
+
+    extra$dimnamesy   <- dimnames(y)
+    extra$type.fitted <- .type.fitted
 
     mynames1 <- if (NOS == 1) "pobs0" else paste("pobs0", 1:NOS, sep = "")
     mynames2 <- if (NOS == 1) "munb"  else paste("munb",  1:NOS, sep = "")
@@ -775,7 +1078,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
 
     if (!length(etastart)) {
       mu.init <- y
-      for(iii in 1:ncol(y)) {
+      for (iii in 1:ncol(y)) {
         index.posy <- (y[, iii] > 0)
         if ( .imethod == 1) {
           use.this <- weighted.mean(y[index.posy, iii],
@@ -789,20 +1092,18 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
             weighted.mean(y[index.posy, iii],
                           w[index.posy, iii])) / 2
         }
-if (TRUE) {
         max.use.this <-  7 * use.this + 10
         vecTF <- (mu.init[, iii] > max.use.this)
         if (any(vecTF))
           mu.init[vecTF, iii] <- max.use.this
-}
       }
 
       pnb0 <- matrix(if (length( .ipobs0 )) .ipobs0 else -1,
-                      nrow = n, ncol = NOS, byrow = TRUE)
-      for(spp. in 1:NOS) {
+                     nrow = n, ncol = NOS, byrow = TRUE)
+      for (spp. in 1:NOS) {
         if (any(pnb0[, spp.] < 0)) {
           index.y0 <- y[, spp.] < 0.5
-          pnb0[, spp.] <- max(min(sum(index.y0)/n, 0.97), 0.03)
+          pnb0[, spp.] <- max(min(sum(index.y0) / n, 0.97), 0.03)
         }
       }
 
@@ -817,36 +1118,62 @@ if (TRUE) {
         }
         k.grid <- 2^((-6):6)
         kmat0 <- matrix(0, nrow = n, ncol = NOS) 
-        for(spp. in 1:NOS) {
-          index.posy <- y[, spp.] > 0
+        for (spp. in 1:NOS) {
+          index.posy <- (y[, spp.] > 0)
           posy <- y[index.posy, spp.]
-          kmat0[, spp.] <- getMaxMin(k.grid,
-                                   objfun = posnegbinomial.Loglikfun,
-                                   y = posy, x = x[index.posy, ],
-                                   w = w[index.posy, spp.],
-                                   extraargs = mu.init[index.posy, spp.])
+          kmat0[, spp.] <-
+            getMaxMin(k.grid,
+                      objfun = posnegbinomial.Loglikfun,
+                      y = posy, x = x[index.posy, ],
+                      w = w[index.posy, spp.],
+                      extraargs = mu.init[index.posy, spp.])
         }
       }
 
       etastart <- cbind(theta2eta(pnb0,    .lpobs0 , earg = .epobs0 ),
-                       theta2eta(mu.init, .lmunb  , earg = .emunb  ),
-                       theta2eta(kmat0,   .lsize  , earg = .esize  ))
+                        theta2eta(mu.init, .lmunb  , earg = .emunb  ),
+                        theta2eta(kmat0,   .lsize  , earg = .esize  ))
       etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
-    } # End of if (!length(etastart))
+    }  # End of if (!length(etastart))
 
 
   }), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
             .epobs0 = epobs0, .emunb = emunb, .esize = esize,
             .ipobs0 = ipobs0,                 .isize = isize,
-            .imethod = imethod, .sinit = shrinkage.init ))), 
+            .imethod = imethod, .sinit = shrinkage.init,
+            .type.fitted = type.fitted ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0"))[1]
+
     Musual <- 3
     NOS <- extra$NOS
     phi0 <- eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
     munb <- eta2theta(eta[, Musual*(1:NOS)-1], .lmunb  , earg = .emunb  )
     kmat <- eta2theta(eta[, Musual*(1:NOS)  ], .lsize  , earg = .esize  )
     pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
-    (1 - phi0) * munb / (1 - pnb0)
+
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - phi0) * munb / (1 - pnb0),
+                  "pobs0"     = phi0)  # P(Y=0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
   }, list( .lpobs0 = lpobs0, .lsize = lsize, .lmunb = lmunb,
            .epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
   last = eval(substitute(expression({
@@ -862,7 +1189,7 @@ if (TRUE) {
 
     misc$earg <- vector("list", Musual*NOS)
     names(misc$earg) <- temp.names
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-2]] <- .epobs0
       misc$earg[[Musual*ii-1]] <- .emunb
       misc$earg[[Musual*ii  ]] <- .esize
@@ -906,7 +1233,6 @@ if (TRUE) {
                      .lsize , earg = .esize )
     skip <- extra$skip.these
 
-
     dphi0.deta <- dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
     dmunb.deta <- dtheta.deta(munb, .lmunb  , earg = .emunb  )
     dsize.deta <- dtheta.deta(kmat, .lsize  , earg = .esize  )
@@ -922,22 +1248,22 @@ if (TRUE) {
 
     dl.dphi0 <- -1 / (1 - phi0)
     dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
-               df0.dmunb / oneminusf0
+                df0.dmunb / oneminusf0
     dl.dsize <- digamma(y + kmat) - digamma(kmat) -
-               (y + kmat)/(munb + kmat) + 1 + log(tempk) +
-               df0.dkmat / oneminusf0
+                (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+                df0.dkmat / oneminusf0
 
 
 
     dl.dphi0[y == 0] <- 1 / phi0[y == 0]  # Do it in one line
     skip <- extra$skip.these
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       dl.dsize[skip[, spp.], spp.] <-
       dl.dmunb[skip[, spp.], spp.] <- 0
     }
 
     dl.deta23 <- c(w) * cbind(dl.dmunb * dmunb.deta,
-                             dl.dsize * dsize.deta)
+                              dl.dsize * dsize.deta)
 
 
     muphi0 <- phi0
@@ -955,8 +1281,7 @@ if (TRUE) {
   weight = eval(substitute(expression({
 
     six <- dimm(Musual)
-    wz =
-    run.varcov <- matrix(0.0, n, six*NOS-1)
+    wz <- run.varcov <- matrix(0.0, n, six*NOS-1)
     Musualm1 <- Musual - 1
 
 
@@ -964,13 +1289,12 @@ if (TRUE) {
 
 
 
-
     ind2 <- iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
 
 
-    for(ii in 1:( .nsimEIM )) {
+    for (ii in 1:( .nsimEIM )) {
       ysim <- rzanegbin(n = n*NOS, pobs0 = phi0,
-                       size = kmat, mu = munb)
+                        size = kmat, mu = munb)
       dim(ysim) <- c(n, NOS)
 
 
@@ -978,10 +1302,10 @@ if (TRUE) {
 
       dl.dphi0 <- -1 / (1 - phi0)
       dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
-                 df0.dmunb / oneminusf0
+                  df0.dmunb / oneminusf0
       dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
-                 (ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
-                 df0.dkmat / oneminusf0
+                  (ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
+                  df0.dkmat / oneminusf0
 
 
 
@@ -989,29 +1313,29 @@ if (TRUE) {
       dl.dphi0[ysim == 0] <- 1 / phi0[ysim == 0]  # Do it in one line
       ysim0 <- ifelse(ysim == 0, 1, 0)
       skip.sim <- matrix(as.logical(ysim0), n, NOS)
-      for(spp. in 1:NOS) {
+      for (spp. in 1:NOS) {
         dl.dsize[skip.sim[, spp.], spp.] <-
         dl.dmunb[skip.sim[, spp.], spp.] <- 0
       }
 
 
-      for(kk in 1:NOS) {
+      for (kk in 1:NOS) {
         temp2 <- cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
-                      dl.dsize[, kk] * dsize.deta[, kk])
+                       dl.dsize[, kk] * dsize.deta[, kk])
         small.varcov <- temp2[, ind2$row.index] *
                        temp2[, ind2$col.index]
 
 
 
 
-        run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] =
+        run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] <-
         run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] +
           c(small.varcov[, 1:Musualm1])
-        run.varcov[, M + (kk-1)*Musual + 2] =
+        run.varcov[, M + (kk-1)*Musual + 2] <-
         run.varcov[, M + (kk-1)*Musual + 2] +
           c(small.varcov[, Musualm1 + 1])
-      } # kk; end of NOS
-    } # ii; end of nsimEIM
+      }  # kk; end of NOS
+    }  # ii; end of nsimEIM
 
 
     run.varcov <- cbind(run.varcov / .nsimEIM )
@@ -1022,9 +1346,9 @@ if (TRUE) {
 
 
 
-    wzind1 <- sort(c( Musual*(1:NOS) - 1,
-                     Musual*(1:NOS) - 0,
-                 M + Musual*(1:NOS) - 1))
+    wzind1 <- sort(c(    Musual*(1:NOS) - 1,
+                         Musual*(1:NOS) - 0,
+                     M + Musual*(1:NOS) - 1))
     wz[, wzind1] <- c(w) * run.varcov[, wzind1]
 
 
@@ -1036,13 +1360,13 @@ if (TRUE) {
     } else {
       c(w) * cbind(dphi0.deta^2 / tmp100)
     }
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       index200 <- abs(tmp200[, ii]) < .Machine$double.eps
       if (any(index200)) {
-        tmp200[index200, ii] <- .Machine$double.eps # Diagonal 0's are bad 
+        tmp200[index200, ii] <- .Machine$double.eps  # Diagonal 0's are bad 
       }
     }
-    wz[, Musual*(1:NOS)-2] <-  tmp200
+    wz[, Musual*(1:NOS)-2] <- tmp200
 
 
 
@@ -1050,117 +1374,100 @@ if (TRUE) {
   }), list( .lpobs0 = lpobs0,
             .epobs0 = epobs0,
             .nsimEIM = nsimEIM ))))
-} # End of zanegbinomial()
-
-
+}  # End of zanegbinomial()
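
zanegbinomial() gains the same 'type.fitted' machinery, and its default 'zero' is now -3 (the inline comment above records the pre-20130917 default of c(-1, -3)), so pobs0 is modelled with covariates by default. The loglikelihood slot uses dzanegbin(), whose zero-altered decomposition can be checked directly: a point mass pobs0 at zero, and (1 - pobs0) times the positive-NB density dposnegbin() elsewhere. Both functions are already called above; the parameter values below are arbitrary.

  dzanegbin(0, pobs0 = 0.3, munb = 4, size = 2)    # equals pobs0, i.e. 0.3
  dzanegbin(3, pobs0 = 0.3, munb = 4, size = 2)
  (1 - 0.3) * dposnegbin(3, munb = 4, size = 2)    # same value as the line above
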
 
 
 
 
+zanegbinomialff.control <- function(save.weight = TRUE, ...) {
+  list(save.weight = save.weight)
+}
 
 
 
+ zanegbinomialff <-
+  function(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
+           type.fitted = c("mean", "pobs0", "onempobs0"),
+           isize = NULL, ionempobs0 = NULL,
+           zero = c(-2, -3),
+           imethod = 1,
+           nsimEIM = 250,
+           shrinkage.init = 0.95) {
 
- if (FALSE)
-rposnegbin <- function(n, munb, size) {
-  if (!is.Numeric(size, positive = TRUE))
-    stop("argument 'size' must be positive")
-  if (!is.Numeric(munb, positive = TRUE))
-    stop("argument 'munb' must be positive")
-  if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE,
-                  allowable.length = 1))
-    stop("argument 'n' must be a positive integer")
-  ans <- rnbinom(n = n, mu = munb, size = size)
-  munb <- rep(munb, length = n)
-  size <- rep(size, length = n)
-  index <- ans == 0
-  while(any(index)) {
-    more <- rnbinom(n = sum(index), mu = munb[index], size = size[index])
-    ans[index] <- more
-    index <- ans == 0
-  }
-  ans
-}
 
- if (FALSE)
-dposnegbin <- function(x, munb, size, log = FALSE) {
-    if (!is.Numeric(size, positive = TRUE))
-        stop("argument 'size' must be positive")
-    if (!is.Numeric(munb, positive = TRUE))
-        stop("argument 'munb' must be positive")
-    ans <- dnbinom(x = x, mu = munb, size = size, log=log)
-    ans0 <- dnbinom(x=0, mu = munb, size = size, log = FALSE)
-    ans <- if (log) ans - log1p(-ans0) else ans/(1-ans0)
-    ans[x == 0] <- if (log) -Inf else 0
-    ans
-}
 
+  if (!is.Numeric(nsimEIM, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE))
+    stop("argument 'nsimEIM' must be a positive integer")
+  if (nsimEIM <= 30)
+    warning("argument 'nsimEIM' should be greater than 30, say")
 
 
+  if (length(ionempobs0) && (!is.Numeric(ionempobs0, positive = TRUE) ||
+     max(ionempobs0) >= 1))
+    stop("If given, argument 'ionempobs0' must contain values in (0,1) only")
 
+  if (length(isize) && !is.Numeric(isize, positive = TRUE))
+    stop("If given, argument 'isize' must contain positive values only")
 
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
 
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
+     shrinkage.init < 0 ||
+     shrinkage.init > 1)
+    stop("bad input for argument 'shrinkage.init'")
 
+  lmunb <- as.list(substitute(lmunb))
+  emunb <- link2list(lmunb)
+  lmunb <- attr(emunb, "function.name")
 
+  lsize <- as.list(substitute(lsize))
+  esize <- link2list(lsize)
+  lsize <- attr(esize, "function.name")
 
+  lonempobs0 <- as.list(substitute(lonempobs0))
+  eonempobs0 <- link2list(lonempobs0)
+  lonempobs0 <- attr(eonempobs0, "function.name")
 
 
- zipoisson <- function(lpstr0 = "logit", llambda = "loge",
-                       ipstr0 = NULL,    ilambda = NULL,
-                       imethod = 1,
-                       shrinkage.init = 0.8, zero = NULL) {
-  ipstr00 <- ipstr0
-
-
-  lpstr0 <- as.list(substitute(lpstr0))
-  epstr00 <- link2list(lpstr0)
-  lpstr00 <- attr(epstr00, "function.name")
-
-  llambda <- as.list(substitute(llambda))
-  elambda <- link2list(llambda)
-  llambda <- attr(elambda, "function.name")
-
-
-
-
-  if (length(ipstr00))
-    if (!is.Numeric(ipstr00, positive = TRUE) ||
-        any(ipstr00 >= 1))
-      stop("argument 'ipstr0' values must be inside the interval (0,1)")
-  if (length(ilambda))
-    if (!is.Numeric(ilambda, positive = TRUE))
-      stop("argument 'ilambda' values must be positive")
-
-
-  if (!is.Numeric(imethod, allowable.length = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
-
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "onempobs0"))[1]
 
 
   new("vglmff",
-  blurb = c("Zero-inflated Poisson\n\n",
+  blurb = c("Zero-altered negative binomial (Bernoulli and\n",
+            "positive-negative binomial conditional model)\n\n",
             "Links:    ",
-            namesof("pstr0",  lpstr00, earg = epstr00 ), ", ",
-            namesof("lambda", llambda, earg = elambda ), "\n",
-            "Mean:     (1 - pstr0) * lambda"),
-
+            namesof("munb",  lmunb,  earg = emunb,  tag = FALSE), ", ",
+            namesof("size",  lsize,  earg = esize,  tag = FALSE), ", ",
+            namesof("onempobs0", lonempobs0, earg = eonempobs0,
+                    tag = FALSE), "\n",
+            "Mean:     onempobs0 * munb / (1 - (size / (size + ",
+                                                 "munb))^size)"),
   constraints = eval(substitute(expression({
+
     dotzero <- .zero
-    Musual <- 2
+    Musual <- 3
     eval(negzero.expression)
   }), list( .zero = zero ))),
 
+
   infos = eval(substitute(function(...) {
-    list(Musual = 2,
+    list(Musual = 3,
+         type.fitted  = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
   initialize = eval(substitute(expression({
+    Musual <- 3
+
+    if (any(y < 0))
+      stop("the response must not have negative values")
 
     temp5 <-
     w.y.check(w = w, y = y,
@@ -1174,706 +1481,1964 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
     y <- temp5$y
 
 
-
-    ncoly <- ncol(y)
-    Musual <- 2
-    extra$ncoly <- ncoly
-    extra$Musual <- Musual
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     M <- Musual * ncoly
 
-    if (any(round(y) != y))
-      stop("integer-valued responses only allowed for ",
-           "the 'zipoisson' family")
+    extra$dimnamesy   <- dimnames(y)
+    extra$type.fitted <- .type.fitted
 
-    mynames1 <- paste("pstr0",   if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("lambda",  if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1 <- if (NOS == 1) "munb"  else paste("munb",  1:NOS, sep = "")
+    mynames2 <- if (NOS == 1) "size"  else paste("size",  1:NOS, sep = "")
+    mynames3 <- if (NOS == 1) "onempobs0" else paste("onempobs0", 1:NOS,
+                                                     sep = "")
     predictors.names <-
-        c(namesof(mynames1, .lpstr00 , earg = .epstr00 , tag = FALSE),
-          namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
-          interleave.VGAM(M, M = Musual)]
-
-
-
-    if (!length(etastart)) {
-
-      matL <- matrix(if (length( .ilambda )) .ilambda else 0,
-                     n, ncoly, byrow = TRUE)
-      matP <- matrix(if (length( .ipstr00 )) .ipstr00 else 0,
-                     n, ncoly, byrow = TRUE)
+        c(namesof(mynames1, .lmunb  , earg = .emunb  , tag = FALSE),
+          namesof(mynames2, .lsize  , earg = .esize  , tag = FALSE),
+          namesof(mynames3, .lonempobs0 , earg = .eonempobs0 ,
+                  tag = FALSE))[
+          interleave.VGAM(Musual*NOS, M = Musual)]
 
 
-      for (spp. in 1:ncoly) {
-        yvec <- y[, spp.]
+    extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+    extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
-        Phi.init <- 1 - 0.85 * sum(w[yvec > 0]) / sum(w)
-        Phi.init[Phi.init <= 0.02] <- 0.02 # Last resort
-        Phi.init[Phi.init >= 0.98] <- 0.98 # Last resort
 
-        if ( length(mustart)) {
-          mustart <- matrix(mustart, n, ncoly) # Make sure right size
-          Lambda.init <- mustart / (1 - Phi.init)
-        } else if ( .imethod == 2) {
-          mymean <- weighted.mean(yvec[yvec > 0],
-                                     w[yvec > 0]) + 1/16
-          Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * mymean
+    if (!length(etastart)) {
+      mu.init <- y
+      for (iii in 1:ncol(y)) {
+        index.posy <- (y[, iii] > 0)
+        if ( .imethod == 1) {
+          use.this <- weighted.mean(y[index.posy, iii],
+                                    w[index.posy, iii])
+          mu.init[ index.posy, iii] <- (1 - .sinit ) * y[index.posy, iii] +
+                                            .sinit   * use.this
+          mu.init[!index.posy, iii] <- use.this
         } else {
-          use.this <- median(yvec[yvec > 0]) + 1 / 16
-          Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * use.this
+          use.this <-
+          mu.init[, iii] <- (y[, iii] +
+            weighted.mean(y[index.posy, iii],
+                          w[index.posy, iii])) / 2
         }
+        max.use.this <-  7 * use.this + 10
+        vecTF <- (mu.init[, iii] > max.use.this)
+        if (any(vecTF))
+          mu.init[vecTF, iii] <- max.use.this
+      }
 
-        zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
-          sum(c(w) * dzipois(x = y, pstr0 = phival,
-                          lambda = extraargs$lambda,
-                          log = TRUE))
+      pnb0 <- matrix(if (length( .ionempobs0 )) 1 - .ionempobs0 else -1,
+                     nrow = n, ncol = NOS, byrow = TRUE)
+      for (spp. in 1:NOS) {
+        if (any(pnb0[, spp.] < 0)) {
+          index.y0 <- y[, spp.] < 0.5
+          pnb0[, spp.] <- max(min(sum(index.y0) / n, 0.97), 0.03)
         }
-        phi.grid <- seq(0.02, 0.98, len = 21)
-        Phimat.init <- getMaxMin(phi.grid,
-                                 objfun = zipois.Loglikfun,
-                                 y = y, x = x, w = w,
-                                 extraargs = list(lambda = Lambda.init))
+      }
 
-        if (length(mustart)) {
-          Lambda.init <- Lambda.init / (1 - Phimat.init)
+
+      if ( is.Numeric( .isize )) {
+        kmat0 <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
+      } else {
+        posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
+         munb <- extraargs
+         sum(c(w) * dposnegbin(x = y, munb = munb, size = kmat,
+                               log = TRUE))
+        }
+        k.grid <- 2^((-6):6)
+        kmat0 <- matrix(0, nrow = n, ncol = NOS) 
+        for (spp. in 1:NOS) {
+          index.posy <- (y[, spp.] > 0)
+          posy <- y[index.posy, spp.]
+          kmat0[, spp.] <-
+            getMaxMin(k.grid,
+                      objfun = posnegbinomial.Loglikfun,
+                      y = posy, x = x[index.posy, ],
+                      w = w[index.posy, spp.],
+                      extraargs = mu.init[index.posy, spp.])
         }
+      }
 
-        if (!length( .ipstr00 ))
-          matP[, spp.] <- Phimat.init
-        if (!length( .ilambda ))
-          matL[, spp.] <- Lambda.init
-      } # spp.
+      etastart <-
+        cbind(theta2eta(mu.init , .lmunb      , earg = .emunb      ),
+              theta2eta(kmat0   , .lsize      , earg = .esize      ),
+              theta2eta(1 - pnb0, .lonempobs0 , earg = .eonempobs0 ))
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+    }  # End of if (!length(etastart))
 
-      etastart <- cbind(theta2eta(matP, .lpstr00, earg = .epstr00 ),
-                        theta2eta(matL, .llambda, earg = .elambda ))[,
-                        interleave.VGAM(M, M = Musual)]
-      mustart <- NULL  # Since etastart has been computed.
-    } # End of !length(etastart)
-  }), list( .lpstr00 = lpstr00, .llambda = llambda,
-            .epstr00 = epstr00, .elambda = elambda,
-            .ipstr00 = ipstr00, .ilambda = ilambda,
-            .imethod = imethod, .sinit = shrinkage.init ))),
+
+  }), list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
+            .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
+            .ionempobs0 = ionempobs0,                 .isize = isize,
+            .imethod = imethod, .sinit = shrinkage.init,
+            .type.fitted = type.fitted ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
-    lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
-    (1 - phimat) * lambda
-  }, list( .lpstr00 = lpstr00, .llambda = llambda,
-           .epstr00 = epstr00, .elambda = elambda ))),
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "onempobs0"))[1]
+
+    Musual <- 3
+    NOS <- extra$NOS
+    munb <- eta2theta(eta[, Musual*(1:NOS)-2], .lmunb  , earg = .emunb  )
+    kmat <- eta2theta(eta[, Musual*(1:NOS)-1], .lsize  , earg = .esize  )
+    onempobs0 <- eta2theta(eta[, Musual*(1:NOS)  ], .lonempobs0 ,
+                           earg = .eonempobs0 )
+    pnb0 <- (kmat / (kmat + munb))^kmat  # p(0) from negative binomial
+
+
+    ans <- switch(type.fitted,
+                  "mean"      =    (onempobs0) * munb / (1 - pnb0),
+                  "pobs0"     = 1 - onempobs0,  # P(Y=0)
+                  "onempobs0" =     onempobs0)  # P(Y>0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lonempobs0 = lonempobs0, .lsize = lsize, .lmunb = lmunb,
+           .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize ))),
   last = eval(substitute(expression({
-    Musual <- extra$Musual
     misc$link <-
-      c(rep( .lpstr00 , length = ncoly),
-        rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
-    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+      c(rep( .lmunb      , length = NOS),
+        rep( .lsize      , length = NOS),
+        rep( .lonempobs0 , length = NOS))[
+        interleave.VGAM(Musual*NOS, M = Musual)]
+    temp.names <- c(mynames1,
+                    mynames2,
+                    mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
     names(misc$link) <- temp.names
 
-    misc$earg <- vector("list", M)
+    misc$earg <- vector("list", Musual*NOS)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
-      misc$earg[[Musual*ii-1]] <- .epstr00
-      misc$earg[[Musual*ii  ]] <- .elambda
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-2]] <- .emunb
+      misc$earg[[Musual*ii-1]] <- .esize
+      misc$earg[[Musual*ii  ]] <- .eonempobs0
     }
 
-    misc$Musual <- Musual
+    misc$nsimEIM <- .nsimEIM
     misc$imethod <- .imethod
-    misc$expected <- TRUE
+    misc$ionempobs0  <- .ionempobs0
+    misc$isize <- .isize
     misc$multipleResponses <- TRUE
-
-      misc$pobs0 <- phimat + (1 - phimat) * exp(-lambda)  # P(Y=0)
-      if (length(dimnames(y)[[2]]) > 0)
-        dimnames(misc$pobs0) <- dimnames(y)
-
-      misc$pstr0 <- phimat
-      if (length(dimnames(y)[[2]]) > 0)
-        dimnames(misc$pstr0) <- dimnames(y)
-  }), list( .lpstr00 = lpstr00, .llambda = llambda,
-            .epstr00 = epstr00, .elambda = elambda,
+  }), list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
+            .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
+            .ionempobs0 = ionempobs0, .isize = isize,
+            .nsimEIM = nsimEIM,
             .imethod = imethod ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
-    lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+    NOS <- extra$NOS
+    Musual <- 3
+    munb <- eta2theta(eta[, Musual*(1:NOS)-2], .lmunb  , earg = .emunb  )
+    kmat <- eta2theta(eta[, Musual*(1:NOS)-1], .lsize  , earg = .esize  )
+    onempobs0 <- eta2theta(eta[, Musual*(1:NOS)  ], .lonempobs0 ,
+                           earg = .eonempobs0 )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-      sum(c(w) * dzipois(x = y, pstr0 = phimat, lambda = lambda,
-                         log = TRUE))
+      sum(c(w) * dzanegbin(x = y, pobs0 = 1 - onempobs0,
+                           munb = munb, size = kmat,
+                           log = TRUE))
     }
-  }, list( .lpstr00 = lpstr00, .llambda = llambda,
-           .epstr00 = epstr00, .elambda = elambda ))),
-  vfamily = c("zipoisson"),
+  }, list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
+           .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize ))),
+  vfamily = c("zanegbinomialff"),
   deriv = eval(substitute(expression({
-    Musual <- 2
-    phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
-                        earg = .epstr00 )
-    lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
-                        earg = .elambda )
+    Musual <- 3
+    NOS <- extra$NOS
+    y0 <- extra$y0
 
-    prob0 <- exp(-lambda)
-    pobs0 <- phimat + (1 - phimat) * prob0
-    index0 <- as.matrix(y == 0)
+    munb      <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                           .lmunb      , earg = .emunb )
+    kmat      <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                           .lsize      , earg = .esize )
+    onempobs0 <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                           .lonempobs0 , earg = .eonempobs0 )
+    skip <- extra$skip.these
+    phi0 <- 1 - onempobs0
 
-    dl.dphimat <- -expm1(-lambda) / pobs0
-    dl.dphimat[!index0] <- -1 / (1 - phimat[!index0])
+    dmunb.deta      <- dtheta.deta(munb, .lmunb  , earg = .emunb  )
+    dsize.deta      <- dtheta.deta(kmat, .lsize  , earg = .esize  )
+    donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
+                                   earg = .eonempobs0 )
 
-    dl.dlambda <- -(1 - phimat) * exp(-lambda) / pobs0
-    dl.dlambda[!index0] <- (y[!index0] - lambda[!index0]) / lambda[!index0]
 
-    dphimat.deta <- dtheta.deta(phimat, .lpstr00 , earg = .epstr00 )
-    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+    tempk <- kmat / (kmat + munb)
+    tempm <- munb / (kmat + munb)
+    prob0  <- tempk^kmat
+    oneminusf0  <- 1 - prob0
+    df0.dmunb   <- -tempk * prob0
+    df0.dkmat   <- prob0 * (tempm + log(tempk))
 
-    ans <- c(w) * cbind(dl.dphimat * dphimat.deta,
-                        dl.dlambda * dlambda.deta)
-    ans <- ans[, interleave.VGAM(M, M = Musual)]
 
+    dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
+                df0.dmunb / oneminusf0
+    dl.dsize <- digamma(y + kmat) - digamma(kmat) -
+                (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+                df0.dkmat / oneminusf0
+    dl.donempobs0 <- +1 / (onempobs0)
 
-    if ( .llambda == "loge" && is.empty.list( .elambda ) &&
-       any(lambda[!index0] < .Machine$double.eps)) {
-      for(spp. in 1:(M / Musual)) {
-        ans[!index0[, spp.], Musual * spp.] <-
-          w[!index0[, spp.]] *
-         (y[!index0[, spp.], spp.] - lambda[!index0[, spp.], spp.])
-      }
+
+
+    dl.donempobs0[y == 0] <-
+      -1 / (1 - onempobs0[y == 0])  # Do it in 1 line
+    skip <- extra$skip.these
+    for (spp. in 1:NOS) {
+      dl.dsize[skip[, spp.], spp.] <-
+      dl.dmunb[skip[, spp.], spp.] <- 0
     }
 
-    ans
-  }), list( .lpstr00 = lpstr00, .llambda = llambda,
-            .epstr00 = epstr00, .elambda = elambda ))),
-  weight = eval(substitute(expression({
-    wz <- matrix(0.0, nrow = n, ncol = M + M-1)
+    dl.deta12 <- c(w) * cbind(dl.dmunb * dmunb.deta,
+                              dl.dsize * dsize.deta)
 
-    ned2l.dphimat2 <- -expm1(-lambda) / ((1 - phimat) * pobs0)
-    ned2l.dphimatlambda <- -exp(-lambda) / pobs0
-    ned2l.dlambda2 <- (1 - phimat) / lambda -
-                      phimat * (1 - phimat) * exp(-lambda) / pobs0
 
+    muphi0 <- onempobs0  # Originally: phi0
+    dl.deta3 <- if (FALSE &&
+                    .lonempobs0 == "logit") {
+    } else {
 
+      c(w) * donempobs0.deta * dl.donempobs0
+    }
+    ans <- cbind(dl.deta12, dl.deta3)
+    ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+    ans
+  }), list( .lonempobs0 = lonempobs0 , .lmunb = lmunb , .lsize = lsize ,
+            .eonempobs0 = eonempobs0 , .emunb = emunb , .esize = esize  ))),
 
+  weight = eval(substitute(expression({
 
-    wz <- array(c(c(w) * ned2l.dphimat2 * dphimat.deta^2,
-                  c(w) * ned2l.dlambda2 * dlambda.deta^2,
-                  c(w) * ned2l.dphimatlambda * dphimat.deta * dlambda.deta),
-                dim = c(n, M / Musual, 3))
-    wz <- arwz2wz(wz, M = M, Musual = Musual)
+    six <- dimm(Musual)
+    wz <- run.varcov <- matrix(0.0, n, six*NOS-1)
+    Musualm1 <- Musual - 1
 
 
 
 
 
-    wz
-  }), list( .llambda = llambda, .elambda = elambda ))))
-}  # zipoisson
-
+    ind2 <- iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
 
 
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rzanegbin(n = n*NOS, pobs0 = phi0,
+                        size = kmat, mu = munb)
+      dim(ysim) <- c(n, NOS)
 
 
+      dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
+                  df0.dmunb / oneminusf0
+      dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
+                  (ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
+                  df0.dkmat / oneminusf0
+      dl.donempobs0 <- +1 / (onempobs0)
 
 
 
+      dl.donempobs0[ysim == 0] <-
+        -1 / (1 - onempobs0[ysim == 0])  # Do it in 1 line
+      ysim0 <- ifelse(ysim == 0, 1, 0)
+      skip.sim <- matrix(as.logical(ysim0), n, NOS)
+      for (spp. in 1:NOS) {
+        dl.dsize[skip.sim[, spp.], spp.] <-
+        dl.dmunb[skip.sim[, spp.], spp.] <- 0
+      }
 
- zibinomial <- function(lpstr0 = "logit", lprob = "logit",
-                        ipstr0 = NULL,
-                        zero = 1, mv = FALSE, imethod = 1) {
-  if (as.logical(mv))
-    stop("argument 'mv' must be FALSE")
 
-  lpstr0 <- as.list(substitute(lpstr0))
-  epstr0 <- link2list(lpstr0)
-  lpstr0 <- attr(epstr0, "function.name")
+      for (kk in 1:NOS) {
+        temp2 <- cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
+                       dl.dsize[, kk] * dsize.deta[, kk])
+        small.varcov <- temp2[, ind2$row.index] *
+                        temp2[, ind2$col.index]
 
-  lprob <- as.list(substitute(lprob))
-  eprob <- link2list(lprob)
-  lprob <- attr(eprob, "function.name")
 
+        run.varcov[, ((kk-1)*Musual+2-1):(kk*Musual-1)] <-
+        run.varcov[, ((kk-1)*Musual+2-1):(kk*Musual-1)] +
+          c(small.varcov[, 1:Musualm1])
+        run.varcov[, M + (kk-1)*Musual + 2-1] <-
+        run.varcov[, M + (kk-1)*Musual + 2-1] +
+          c(small.varcov[, Musualm1 + 1])
+      }  # kk; end of NOS
+    }  # ii; end of nsimEIM
 
-  if (is.Numeric(ipstr0))
-    if (!is.Numeric(ipstr0, positive = TRUE) || any(ipstr0 >= 1))
-      stop("'ipstr0' values must be inside the interval (0,1)")
-  if (!is.Numeric(imethod, allowable.length = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
 
+    run.varcov <- cbind(run.varcov / .nsimEIM )
+    run.varcov <- if (intercept.only)
+      matrix(colMeans(run.varcov),
+             n, ncol(run.varcov), byrow = TRUE) else run.varcov
 
 
-  new("vglmff",
-  blurb = c("Zero-inflated binomial\n\n",
-            "Links:    ",
-            namesof("pstr0", lpstr0, earg = epstr0), ", ",
-            namesof("prob" , lprob , earg = eprob ), "\n",
-            "Mean:     (1 - pstr0) * prob"),
-  constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
-  }), list( .zero = zero ))),
-  initialize = eval(substitute(expression({
-    if (!all(w == 1))
-      extra$orig.w <- w
 
+    wzind1 <- sort(c(    Musual*(1:NOS) - 1 - 1,
+                         Musual*(1:NOS) - 0 - 1,
+                     M + Musual*(1:NOS) - 1 - 1))
+    wz[, wzind1] <- c(w) * run.varcov[, wzind1]
 
-    {
-        NCOL <- function (x)
-          if (is.array(x) && length(dim(x)) > 1 ||
-          is.data.frame(x)) ncol(x) else as.integer(1)
-
-        if (NCOL(y) == 1) {
-            if (is.factor(y)) y <- y != levels(y)[1]
-            nn <- rep(1, n)
-            if (!all(y >= 0 & y <= 1))
-                stop("response values must be in [0, 1]")
-            if (!length(mustart) && !length(etastart))
-                mustart <- (0.5 + w * y) / (1.0 + w)
-
-
-            no.successes <- y
-            if (min(y) < 0)
-              stop("Negative data not allowed!")
-            if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
-              stop("Number of successes must be integer-valued")
-
-        } else if (NCOL(y) == 2) {
-            if (min(y) < 0)
-              stop("Negative data not allowed!")
-            if (any(abs(y - round(y)) > 1.0e-8))
-              stop("Count data must be integer-valued")
-            y <- round(y)
-            nvec <- y[, 1] + y[, 2]
-            y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
-            w <- w * nvec
-            if (!length(mustart) && !length(etastart))
-              mustart <- (0.5 + nvec * y) / (1 + nvec)
-        } else {
-            stop("for the binomialff family, response 'y' must be a ",
-                 "vector of 0 and 1's\n",
-                 "or a factor ",
-                 "(first level = fail, other levels = success),\n",
-                 "or a 2-column matrix where col 1 is the no. of ",
-                 "successes and col 2 is the no. of failures")
-        }
 
+    tmp100 <- muphi0 * (1 - muphi0)
+    tmp200 <- if (FALSE &&
+                  .lpobs0 == "logit") {
+    } else {
+      c(w) * cbind(donempobs0.deta^2 / tmp100)
     }
-
-    if ( .imethod == 1)
-      mustart <- (mustart + y) / 2
+    for (ii in 1:NOS) {
+      index200 <- abs(tmp200[, ii]) < .Machine$double.eps
+      if (any(index200)) {
+        tmp200[index200, ii] <- .Machine$double.eps  # Diagonal 0's are bad 
+      }
+    }
+    wz[, Musual*(1:NOS)  ] <- tmp200
 
 
 
+    wz
+  }), list( .lonempobs0 = lonempobs0,
+            .eonempobs0 = eonempobs0,
+            .nsimEIM = nsimEIM ))))
+}  # End of zanegbinomialff()
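
zanegbinomialff() mirrors zapoissonff(): the predictors are ordered (munb, size, onempobs0) with zero = c(-2, -3), so only munb gets covariates unless 'zero' is changed. A hedged sketch, assuming VGAM 0.9-3 is attached; 'nbdata' is illustrative, and rzanegbin() is called with the same argument names the simulated-EIM code above uses.

  set.seed(456)
  nbdata <- data.frame(x2 = runif(300))
  nbdata <- transform(nbdata,
                      y = rzanegbin(300, pobs0 = 0.25, size = 2,
                                    mu = loge(0.5 + x2, inverse = TRUE)))
  ffit <- vglm(y ~ x2, zanegbinomialff(type.fitted = "onempobs0"), data = nbdata)
  head(fitted(ffit))  # P(Y > 0); constant here because onempobs0 is intercept-only
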
 
 
 
-    predictors.names <-
-        c(namesof("pstr0", .lpstr0 , earg = .epstr0 , tag = FALSE),
-          namesof("prob" , .lprob  , earg = .eprob  , tag = FALSE))
 
 
-    phi.init <- if (length( .ipstr0 )) .ipstr0 else {
-        prob0.est <- sum(w[y == 0]) / sum(w)
-        if ( .imethod == 1) {
-          (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
-        } else {
-          prob0.est
-        }
-    }
+ if (FALSE)
+rposnegbin <- function(n, munb, size) {
+  if (!is.Numeric(size, positive = TRUE))
+    stop("argument 'size' must be positive")
+  if (!is.Numeric(munb, positive = TRUE))
+    stop("argument 'munb' must be positive")
+  if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE,
+                  length.arg = 1))
+    stop("argument 'n' must be a positive integer")
+  ans <- rnbinom(n = n, mu = munb, size = size)
+  munb <- rep(munb, length = n)
+  size <- rep(size, length = n)
+  index <- ans == 0
+  while(any(index)) {
+    more <- rnbinom(n = sum(index), mu = munb[index], size = size[index])
+    ans[index] <- more
+    index <- ans == 0
+  }
+  ans
+}
 
-    phi.init[phi.init <= -0.10] <- 0.10 # Lots of sample variation
-    phi.init[phi.init <=  0.05] <- 0.15 # Last resort
-    phi.init[phi.init >=  0.80] <- 0.80 # Last resort
+ if (FALSE)
+dposnegbin <- function(x, munb, size, log = FALSE) {
+  if (!is.Numeric(size, positive = TRUE))
+    stop("argument 'size' must be positive")
+  if (!is.Numeric(munb, positive = TRUE))
+    stop("argument 'munb' must be positive")
+  ans <- dnbinom(x = x, mu = munb, size = size, log=log)
+  ans0 <- dnbinom(x=0, mu = munb, size = size, log = FALSE)
+  ans <- if (log) ans - log1p(-ans0) else ans/(1-ans0)
+  ans[x == 0] <- if (log) -Inf else 0
+  ans
+}
 
-    if ( length(mustart) && !length(etastart))
-      mustart <- cbind(rep(phi.init, len = n),
-                       mustart) # 1st coln not a real mu
-  }), list( .lpstr0 = lpstr0, .lprob = lprob,
-            .epstr0 = epstr0, .eprob = eprob,
-            .ipstr0 = ipstr0,
-            .imethod = imethod ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
-    mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
-    (1 - pstr0) * mubin
-  }, list( .lpstr0 = lpstr0, .lprob = lprob,
-           .epstr0 = epstr0, .eprob = eprob ))),
-  last = eval(substitute(expression({
-    misc$link <-    c("pstr0" = .lpstr0 , "prob" = .lprob )
 
-    misc$earg <- list("pstr0" = .epstr0 , "prob" = .eprob )
 
-    misc$imethod <- .imethod
 
 
-      misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w  # [1]  # P(Y=0)
-      misc$pstr0 <- phi
-  }), list( .lpstr0 = lpstr0, .lprob = lprob,
-            .epstr0 = epstr0, .eprob = eprob,
-            .imethod = imethod ))),
-  linkfun = eval(substitute(function(mu, extra = NULL) {
-    cbind(theta2eta(mu[, 1], .lpstr0 , earg = .epstr0 ),
-          theta2eta(mu[, 2], .lprob  , earg = .eprob  ))
-  }, list( .lpstr0 = lpstr0, .lprob = lprob,
-           .epstr0 = epstr0, .eprob = eprob ))),
-  loglikelihood = eval(substitute( 
-    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
-    mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
-    if (residuals) stop("loglikelihood residuals not ",
-                        "implemented yet") else {
-      sum(dzibinom(x = round(w * y), size = w, prob = mubin,
-                   log = TRUE, pstr0 = pstr0))
-    }
-  }, list( .lpstr0 = lpstr0, .lprob = lprob,
-           .epstr0 = epstr0, .eprob = eprob ))),
-  vfamily = c("zibinomial"),
-  deriv = eval(substitute(expression({
-    phi   <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
-    mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
 
-    prob0 <- (1 - mubin)^w # Actually q^w
-    pobs0 <- phi + (1 - phi) * prob0
-    index <- (y == 0)
-    dl.dphi <- (1 - prob0) / pobs0
-    dl.dphi[!index] <- -1 / (1 - phi[!index])
 
-    dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / pobs0
-    dl.dmubin[!index] <- w[!index] *
-        (    y[!index]  /      mubin[!index]   -
-        (1 - y[!index]) / (1 - mubin[!index]))
 
-    dphi.deta   <- dtheta.deta(phi,   .lpstr0 , earg = .epstr0 )
-    dmubin.deta <- dtheta.deta(mubin, .lprob  , earg = .eprob  )
 
-    ans <- cbind(dl.dphi   * dphi.deta,
-                 dl.dmubin * dmubin.deta)
 
-      if ( .lprob == "logit") {
-        ans[!index, 2] <- w[!index] * (y[!index] - mubin[!index])
-      }
 
-      ans
-  }), list( .lpstr0 = lpstr0, .lprob = lprob,
-            .epstr0 = epstr0, .eprob = eprob ))),
-  weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+ zipoisson <- function(lpstr0 = "logit", llambda = "loge",
+                       type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+                       ipstr0 = NULL,    ilambda = NULL,
+                       imethod = 1,
+                       shrinkage.init = 0.8, zero = NULL) {
+  ipstr00 <- ipstr0
 
 
+  lpstr0 <- as.list(substitute(lpstr0))
+  epstr00 <- link2list(lpstr0)
+  lpstr00 <- attr(epstr00, "function.name")
 
-    ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * pobs0)
+  llambda <- as.list(substitute(llambda))
+  elambda <- link2list(llambda)
+  llambda <- attr(elambda, "function.name")
 
 
-    ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / pobs0
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
 
 
+  if (length(ipstr00))
+    if (!is.Numeric(ipstr00, positive = TRUE) ||
+        any(ipstr00 >= 1))
+      stop("argument 'ipstr0' values must be inside the interval (0,1)")
+  if (length(ilambda))
+    if (!is.Numeric(ilambda, positive = TRUE))
+      stop("argument 'ilambda' values must be positive")
 
 
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
 
-    ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
-                     (1 - mubin - w * mubin * (1 - mubin)^w * phi / pobs0)
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
+     shrinkage.init < 0 ||
+     shrinkage.init > 1)
+    stop("bad input for argument 'shrinkage.init'")
 
 
+  new("vglmff",
+  blurb = c("Zero-inflated Poisson\n\n",
+            "Links:    ",
+            namesof("pstr0",  lpstr00, earg = epstr00 ), ", ",
+            namesof("lambda", llambda, earg = elambda ), "\n",
+            "Mean:     (1 - pstr0) * lambda"),
 
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
 
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+  initialize = eval(substitute(expression({
 
-    wz[,iam(1, 1, M)] <- ned2l.dphi2     * dphi.deta^2
-    wz[,iam(2, 2, M)] <- ned2l.dmubin2   * dmubin.deta^2
-    wz[,iam(1, 2, M)] <- ned2l.dphimubin * dphi.deta * dmubin.deta
-    if (TRUE) {
-      ind6 <- (wz[, iam(2, 2, M)] < .Machine$double.eps)
-      if (any(ind6))
-        wz[ind6, iam(2, 2, M)] <- .Machine$double.eps
-    }
-    wz
-  }), list( .lpstr0 = lpstr0, .lprob = lprob,
-            .epstr0 = epstr0, .eprob = eprob ))))
-}
+    temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
 
 
+    ncoly <- ncol(y)
+    Musual <- 2
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    extra$dimnamesy <- dimnames(y)
+    M <- Musual * ncoly
+    extra$type.fitted      <- .type.fitted
 
 
+    if (any(round(y) != y))
+      stop("integer-valued responses only allowed for ",
+           "the 'zipoisson' family")
 
+    mynames1 <- paste("pstr0",   if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("lambda",  if (ncoly > 1) 1:ncoly else "", sep = "")
+    predictors.names <-
+        c(namesof(mynames1, .lpstr00 , earg = .epstr00 , tag = FALSE),
+          namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
+          interleave.VGAM(M, M = Musual)]
 
 
 
+    if (!length(etastart)) {
 
-dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+      matL <- matrix(if (length( .ilambda )) .ilambda else 0,
+                     n, ncoly, byrow = TRUE)
+      matP <- matrix(if (length( .ipstr00 )) .ipstr00 else 0,
+                     n, ncoly, byrow = TRUE)
 
-  LLL <- max(length(x), length(size), length(prob), length(pstr0))
-  if (length(x)     != LLL) x     <- rep(x,     len = LLL);
-  if (length(size)  != LLL) size  <- rep(size,  len = LLL);
-  if (length(prob)  != LLL) prob  <- rep(prob,  len = LLL);
-  if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
 
-  ans <- dbinom(x = x, size = size, prob = prob, log = TRUE)
+      for (spp. in 1:ncoly) {
+        yvec <- y[, spp.]
 
+        Phi.init <- 1 - 0.85 * sum(w[yvec > 0]) / sum(w)
+        Phi.init[Phi.init <= 0.02] <- 0.02 # Last resort
+        Phi.init[Phi.init >= 0.98] <- 0.98 # Last resort
 
-  ans <- if (log.arg) {
-    ifelse(x == 0, log(pstr0 + (1-pstr0) * exp(ans)), log1p(-pstr0) + ans)
-  } else {
-    ifelse(x == 0,     pstr0 + (1-pstr0) * exp(ans) ,
-                    (1-pstr0) * exp(ans))
-  }
+        if ( length(mustart)) {
+          mustart <- matrix(mustart, n, ncoly)  # Make sure right size
+          Lambda.init <- mustart / (1 - Phi.init)
+        } else if ( .imethod == 2) {
+          mymean <- weighted.mean(yvec[yvec > 0],
+                                     w[yvec > 0]) + 1/16
+          Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * mymean
+        } else {
+          use.this <- median(yvec[yvec > 0]) + 1 / 16
+          Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * use.this
+        }
 
+        zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
+          sum(c(w) * dzipois(x = y, pstr0 = phival,
+                          lambda = extraargs$lambda,
+                          log = TRUE))
+        }
+        phi.grid <- seq(0.02, 0.98, len = 21)
+        Phimat.init <- getMaxMin(phi.grid,
+                                 objfun = zipois.Loglikfun,
+                                 y = y, x = x, w = w,
+                                 extraargs = list(lambda = Lambda.init))
 
-  prob0 <- (1 - prob)^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
+        if (length(mustart)) {
+          Lambda.init <- Lambda.init / (1 - Phimat.init)
+        }
 
+        if (!length( .ipstr00 ))
+          matP[, spp.] <- Phimat.init
+        if (!length( .ilambda ))
+          matL[, spp.] <- Lambda.init
+      } # spp.
 
-  ans
-}
+      etastart <- cbind(theta2eta(matP, .lpstr00, earg = .epstr00 ),
+                        theta2eta(matL, .llambda, earg = .elambda ))[,
+                        interleave.VGAM(M, M = Musual)]
+      mustart <- NULL  # Since etastart has been computed.
+    } # End of !length(etastart)
+  }), list( .lpstr00 = lpstr00, .llambda = llambda,
+            .epstr00 = epstr00, .elambda = elambda,
+            .ipstr00 = ipstr00, .ilambda = ilambda,
+            .imethod = imethod,
+            .type.fitted = type.fitted,
+            .sinit = shrinkage.init ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
 
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
 
-pzibinom <- function(q, size, prob, pstr0 = 0,
-                    lower.tail = TRUE, log.p = FALSE) {
+    phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+    lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
 
-  LLL <- max(length(pstr0), length(size), length(prob), length(q))
-  if (length(q)      != LLL) q      <- rep(q,      len = LLL);
-  if (length(size)   != LLL) size   <- rep(size,   len = LLL);
-  if (length(prob)   != LLL) prob   <- rep(prob,   len = LLL);
-  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
+    
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - phimat) * lambda,
+                  "pobs0"     = phimat + (1-phimat)*exp(-lambda),  # P(Y=0)
+                  "pstr0"     =     phimat,
+                  "onempstr0" = 1 - phimat)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lpstr00 = lpstr00, .llambda = llambda,
+           .epstr00 = epstr00, .elambda = elambda,
+           .type.fitted = type.fitted
+         ))),
+  last = eval(substitute(expression({
+    Musual <- extra$Musual
+    misc$link <-
+      c(rep( .lpstr00 , length = ncoly),
+        rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+    names(misc$link) <- temp.names
 
-  ans <- pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
-  ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[Musual*ii-1]] <- .epstr00
+      misc$earg[[Musual*ii  ]] <- .elambda
+    }
 
+    misc$Musual <- Musual
+    misc$imethod <- .imethod
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
 
-  prob0 <- (1 - prob)^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
+      misc$pobs0 <- phimat + (1 - phimat) * exp(-lambda)  # P(Y=0)
+      if (length(dimnames(y)[[2]]) > 0)
+        dimnames(misc$pobs0) <- dimnames(y)
 
-  ans
-}
+      misc$pstr0 <- phimat
+      if (length(dimnames(y)[[2]]) > 0)
+        dimnames(misc$pstr0) <- dimnames(y)
+  }), list( .lpstr00 = lpstr00, .llambda = llambda,
+            .epstr00 = epstr00, .elambda = elambda,
+            .imethod = imethod ))),
+  loglikelihood = eval(substitute( 
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+    lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(c(w) * dzipois(x = y, pstr0 = phimat, lambda = lambda,
+                         log = TRUE))
+    }
+  }, list( .lpstr00 = lpstr00, .llambda = llambda,
+           .epstr00 = epstr00, .elambda = elambda ))),
+  vfamily = c("zipoisson"),
+  deriv = eval(substitute(expression({
+    Musual <- 2
+    phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
+                        earg = .epstr00 )
+    lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+                        earg = .elambda )
 
+    prob0 <- exp(-lambda)
+    pobs0 <- phimat + (1 - phimat) * prob0
+    index0 <- as.matrix(y == 0)
 
-qzibinom <- function(p, size, prob, pstr0 = 0,
-                    lower.tail = TRUE, log.p = FALSE) {
-  LLL <- max(length(p), length(size), length(prob), length(pstr0))
-  p     <- rep(p,     length = LLL)
-  size  <- rep(size,  length = LLL)
-  prob  <- rep(prob,  length = LLL)
-  pstr0 <- rep(pstr0, length = LLL)
+    dl.dphimat <- -expm1(-lambda) / pobs0
+    dl.dphimat[!index0] <- -1 / (1 - phimat[!index0])
 
+    dl.dlambda <- -(1 - phimat) * exp(-lambda) / pobs0
+    dl.dlambda[!index0] <- (y[!index0] - lambda[!index0]) / lambda[!index0]
 
-  ans <- p 
-  ans[p <= pstr0] <- 0 
-  ans[p >  pstr0] <-
-    qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
-           size[p > pstr0],
-           prob[p > pstr0],
-           lower.tail = lower.tail, log.p = log.p)
+    dphimat.deta <- dtheta.deta(phimat, .lpstr00 , earg = .epstr00 )
+    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
 
+    ans <- c(w) * cbind(dl.dphimat * dphimat.deta,
+                        dl.dlambda * dlambda.deta)
+    ans <- ans[, interleave.VGAM(M, M = Musual)]
 
 
-  prob0 <- (1 - prob)^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
-  if (any(ind0)) {
-    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
-    ans[p[ind0] <= pobs0] <- 0 
-    pindex <- (1:LLL)[ind0 & (p > pobs0)]
-    Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
-    ans[pindex] <- qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
-                             size = size[pindex],
-                             prob = prob[pindex])
-  }
+    if ( .llambda == "loge" && is.empty.list( .elambda ) &&
+       any(lambda[!index0] < .Machine$double.eps)) {
+      for (spp. in 1:(M / Musual)) {
+        ans[!index0[, spp.], Musual * spp.] <-
+          w[!index0[, spp.]] *
+         (y[!index0[, spp.], spp.] - lambda[!index0[, spp.], spp.])
+      }
+    }
 
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
+    ans
+  }), list( .lpstr00 = lpstr00, .llambda = llambda,
+            .epstr00 = epstr00, .elambda = elambda ))),
+  weight = eval(substitute(expression({
+
+    ned2l.dphimat2 <- -expm1(-lambda) / ((1 - phimat) * pobs0)
+    ned2l.dphimatlambda <- -exp(-lambda) / pobs0
+    ned2l.dlambda2 <- (1 - phimat) / lambda -
+                      phimat * (1 - phimat) * exp(-lambda) / pobs0
 
 
-  ans
-}
 
 
-rzibinom <- function(n, size, prob, pstr0 = 0) {
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
-              stop("bad input for argument 'n'") else n
+    wz <- array(c(c(w) * ned2l.dphimat2 * dphimat.deta^2,
+                  c(w) * ned2l.dlambda2 * dlambda.deta^2,
+                  c(w) * ned2l.dphimatlambda * dphimat.deta * dlambda.deta),
+                dim = c(n, M / Musual, 3))
+    wz <- arwz2wz(wz, M = M, Musual = Musual)
 
-  pstr0 <- rep(pstr0, len = use.n)
-  size  <- rep(size,  len = use.n)
-  prob  <- rep(prob,  len = use.n)
 
-  ans <- rbinom(use.n, size, prob)
-  ans[runif(use.n) < pstr0] <- 0
 
 
 
-  prob0 <- (1 - prob)^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
-  if (any(ind0)) {
-    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
-    ans[ind0] <- rposbinom(sum(ind0), size = size[ind0], prob = prob[ind0])
-    ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
-  }
+    wz
+  }), list( .llambda = llambda, .elambda = elambda ))))
+}  # zipoisson
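A minimal end-to-end sketch for the zipoisson() family just defined, assuming library(VGAM) is attached; the data, names, and coefficients below are purely illustrative:

# Simulate zero-inflated Poisson counts and fit them with vglm() + zipoisson().
library(VGAM)
set.seed(123)
nn    <- 1000
zdata <- data.frame(x2 = runif(nn))
zdata <- transform(zdata,
                   pstr0  = plogis(-0.5 + 1.0 * x2),   # P(structural zero)
                   lambda = exp(    0.5 + 2.0 * x2))
zdata <- transform(zdata,
                   y = ifelse(runif(nn) < pstr0, 0, rpois(nn, lambda)))
fit <- vglm(y ~ x2, zipoisson(), data = zdata, trace = TRUE)
coef(fit, matrix = TRUE)   # one column per eta: logit(pstr0), log(lambda)
head(fitted(fit))          # default type.fitted = "mean": (1 - pstr0) * lambda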
+
+
+
 
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
 
-  ans
-}
 
 
 
 
+ zibinomial <-
+  function(lpstr0 = "logit", lprob = "logit",
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           ipstr0 = NULL,
+           zero = NULL,  # 20130917; was originally zero = 1,
+           mv = FALSE, imethod = 1) {
+  if (as.logical(mv))
+    stop("argument 'mv' must be FALSE")
 
+  lpstr0 <- as.list(substitute(lpstr0))
+  epstr0 <- link2list(lpstr0)
+  lpstr0 <- attr(epstr0, "function.name")
 
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
 
 
+  if (is.Numeric(ipstr0))
+    if (!is.Numeric(ipstr0, positive = TRUE) || any(ipstr0 >= 1))
+      stop("'ipstr0' values must be inside the interval (0,1)")
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
 
 
 
-dzinegbin <- function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
-                     log = FALSE) {
-  if (length(munb)) {
-    if (length(prob))
-      stop("arguments 'prob' and 'munb' both specified")
-    prob <- size / (size + munb)
-  }
+  new("vglmff",
+  blurb = c("Zero-inflated binomial\n\n",
+            "Links:    ",
+            namesof("pstr0", lpstr0, earg = epstr0), ", ",
+            namesof("prob" , lprob , earg = eprob ), "\n",
+            "Mean:     (1 - pstr0) * prob"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+  }), list( .zero = zero ))),
 
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
 
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+      
+  initialize = eval(substitute(expression({
+    if (!all(w == 1))
+      extra$orig.w <- w
 
-  LLL <- max(length(pstr0), length(size), length(prob), length(x))
-  if (length(x)      != LLL) x      <- rep(x,      len = LLL);
-  if (length(size)   != LLL) size   <- rep(size,   len = LLL);
-  if (length(prob)   != LLL) prob   <- rep(prob,   len = LLL);
-  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
 
 
-  ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
+    if (NCOL(y) == 1) {
+      if (is.factor(y))
+        y <- y != levels(y)[1]
+      nn <- rep(1, n)
+      if (!all(y >= 0 & y <= 1))
+        stop("response values must be in [0, 1]")
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + w * y) / (1.0 + w)
+
+
+      no.successes <- y
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+        stop("Number of successes must be integer-valued")
+
+    } else if (NCOL(y) == 2) {
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(y - round(y)) > 1.0e-8))
+        stop("Count data must be integer-valued")
+      y <- round(y)
+      nvec <- y[, 1] + y[, 2]
+      y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+      w <- w * nvec
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + nvec * y) / (1 + nvec)
+    } else {
+      stop("for the binomialff family, response 'y' must be a ",
+           "vector of 0 and 1's\n",
+           "or a factor ",
+           "(first level = fail, other levels = success),\n",
+           "or a 2-column matrix where col 1 is the no. of ",
+           "successes and col 2 is the no. of failures")
+    }
+
+
+    if ( .imethod == 1)
+      mustart <- (mustart + y) / 2
+
+
+    extra$type.fitted <- .type.fitted
+    extra$dimnamesy   <- dimnames(y)
 
-  ans <- if (log.arg)
-    ifelse(x == 0, log(pstr0+(1-pstr0)*exp(ans)), log1p(-pstr0) + ans) else
-    ifelse(x == 0,     pstr0+(1-pstr0)*    ans,       (1-pstr0) * ans)
 
 
 
-  prob0 <- prob^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
+    predictors.names <-
+        c(namesof("pstr0", .lpstr0 , earg = .epstr0 , tag = FALSE),
+          namesof("prob" , .lprob  , earg = .eprob  , tag = FALSE))
+
+
+    extra$w <- w  # Needed for @linkinv
+    phi.init <- if (length( .ipstr0 )) .ipstr0 else {
+        prob0.est <- sum(w[y == 0]) / sum(w)
+        if ( .imethod == 1) {
+          (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
+        } else {
+          prob0.est
+        }
+    }
+
+    phi.init[phi.init <= -0.10] <- 0.10  # Lots of sample variation
+    phi.init[phi.init <=  0.05] <- 0.15  # Last resort
+    phi.init[phi.init >=  0.80] <- 0.80  # Last resort
+
+    if ( length(mustart) && !length(etastart))
+      mustart <- cbind(rep(phi.init, len = n),
+                       mustart)  # 1st coln not a real mu
+  }), list( .lpstr0 = lpstr0, .lprob = lprob,
+            .epstr0 = epstr0, .eprob = eprob,
+            .ipstr0 = ipstr0,
+            .type.fitted = type.fitted,          
+            .imethod = imethod ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+    mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
+
+
+    orig.w <- if (length(tmp3 <- extra$orig.w)) tmp3 else
+              rep(1, len = nrow(eta))
+    priorw <- extra$w
+    nvec <- priorw / orig.w
+
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - pstr0) * mubin,
+                  "pobs0"     = pstr0 + (1-pstr0)*(1-mubin)^nvec,  # P(Y=0)
+                  "pstr0"     =     pstr0,
+                  "onempstr0" = 1 - pstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lpstr0 = lpstr0, .lprob = lprob,
+           .epstr0 = epstr0, .eprob = eprob,
+           .type.fitted = type.fitted ))),
+  last = eval(substitute(expression({
+    misc$link <-    c("pstr0" = .lpstr0 , "prob" = .lprob )
+
+    misc$earg <- list("pstr0" = .epstr0 , "prob" = .eprob )
+
+    misc$imethod <- .imethod
+
+
+  }), list( .lpstr0 = lpstr0, .lprob = lprob,
+            .epstr0 = epstr0, .eprob = eprob,
+            .imethod = imethod ))),
+  linkfun = eval(substitute(function(mu, extra = NULL) {
+    cbind(theta2eta(mu[, 1], .lpstr0 , earg = .epstr0 ),
+          theta2eta(mu[, 2], .lprob  , earg = .eprob  ))
+  }, list( .lpstr0 = lpstr0, .lprob = lprob,
+           .epstr0 = epstr0, .eprob = eprob ))),
+  loglikelihood = eval(substitute( 
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+    mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(dzibinom(x = round(w * y), size = w, prob = mubin,
+                   log = TRUE, pstr0 = pstr0))
+    }
+  }, list( .lpstr0 = lpstr0, .lprob = lprob,
+           .epstr0 = epstr0, .eprob = eprob ))),
+  vfamily = c("zibinomial"),
+  deriv = eval(substitute(expression({
+    phi   <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+    mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
+
+    prob0 <- (1 - mubin)^w  # Actually q^w
+    pobs0 <- phi + (1 - phi) * prob0
+    index <- (y == 0)
+    dl.dphi <- (1 - prob0) / pobs0
+    dl.dphi[!index] <- -1 / (1 - phi[!index])
+
+    dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / pobs0
+    dl.dmubin[!index] <- w[!index] *
+        (    y[!index]  /      mubin[!index]   -
+        (1 - y[!index]) / (1 - mubin[!index]))
+
+    dphi.deta   <- dtheta.deta(phi,   .lpstr0 , earg = .epstr0 )
+    dmubin.deta <- dtheta.deta(mubin, .lprob  , earg = .eprob  )
+
+    ans <- cbind(dl.dphi   * dphi.deta,
+                 dl.dmubin * dmubin.deta)
+
+      if ( .lprob == "logit") {
+        ans[!index, 2] <- w[!index] * (y[!index] - mubin[!index])
+      }
+
+      ans
+  }), list( .lpstr0 = lpstr0, .lprob = lprob,
+            .epstr0 = epstr0, .eprob = eprob ))),
+  weight = eval(substitute(expression({
+    wz <- matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+
+
+
+    ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * pobs0)
+
+
+    ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / pobs0
+
+
+
+
+
+
+    ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
+                     (1 - mubin - w * mubin *
+                     (1 - mubin)^w * phi / pobs0)
+
+
+
+
+
+    wz[,iam(1, 1, M)] <- ned2l.dphi2     * dphi.deta^2
+    wz[,iam(2, 2, M)] <- ned2l.dmubin2   * dmubin.deta^2
+    wz[,iam(1, 2, M)] <- ned2l.dphimubin * dphi.deta * dmubin.deta
+    if (TRUE) {
+      ind6 <- (wz[, iam(2, 2, M)] < .Machine$double.eps)
+      if (any(ind6))
+        wz[ind6, iam(2, 2, M)] <- .Machine$double.eps
+    }
+    wz
+  }), list( .lpstr0 = lpstr0, .lprob = lprob,
+            .epstr0 = epstr0, .eprob = eprob ))))
+}
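A hedged usage sketch for zibinomial(): the initialize slot above also accepts a two-column (successes, failures) response, the natural input when the number of trials varies. Everything below is illustrative and assumes library(VGAM) is attached; rzibinom() is defined later in this file.

# Zero-inflated binomial fit with a 2-column (successes, failures) response.
library(VGAM)
set.seed(1)
nn   <- 500
size <- 10
y    <- rzibinom(nn, size = size, prob = 0.4, pstr0 = 0.3)
zdat <- data.frame(succ = y, fail = size - y)
fit  <- vglm(cbind(succ, fail) ~ 1, zibinomial(), data = zdat, trace = TRUE)
coef(fit, matrix = TRUE)   # intercepts on the logit scale for pstr0 and prob
head(fitted(fit))          # default "mean": (1 - pstr0) * prob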
+
+
+
+
+
+
+ zibinomialff <-
+  function(lprob = "logit", lonempstr0 = "logit",
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           ionempstr0 = NULL,
+           zero = 2,
+           mv = FALSE, imethod = 1) {
+
+
+
+
+
+
+  if (as.logical(mv))
+    stop("argument 'mv' must be FALSE")
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+  lonempstr0 <- as.list(substitute(lonempstr0))
+  eonempstr0 <- link2list(lonempstr0)
+  lonempstr0 <- attr(eonempstr0, "function.name")
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+
+  if (is.Numeric(ionempstr0))
+    if (!is.Numeric(ionempstr0, positive = TRUE) || any(ionempstr0 >= 1))
+      stop("'ionempstr0' values must be inside the interval (0,1)")
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
+
+
+
+  new("vglmff",
+  blurb = c("Zero-inflated binomial\n\n",
+            "Links:    ",
+            namesof("prob" ,     lprob     , earg = eprob     ), ", ",
+            namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
+            "Mean:     onempstr0 * prob"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+  }), list( .zero = zero ))),
+
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+      
+  initialize = eval(substitute(expression({
+    if (!all(w == 1))
+      extra$orig.w <- w
+
+
+
+    if (NCOL(y) == 1) {
+      if (is.factor(y))
+        y <- y != levels(y)[1]
+      nn <- rep(1, n)
+      if (!all(y >= 0 & y <= 1))
+        stop("response values must be in [0, 1]")
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + w * y) / (1.0 + w)
+
+
+      no.successes <- y
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+        stop("Number of successes must be integer-valued")
+
+    } else if (NCOL(y) == 2) {
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(y - round(y)) > 1.0e-8))
+        stop("Count data must be integer-valued")
+      y <- round(y)
+      nvec <- y[, 1] + y[, 2]
+      y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+      w <- w * nvec
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + nvec * y) / (1 + nvec)
+    } else {
+      stop("for the binomialff family, response 'y' must be a ",
+           "vector of 0 and 1's\n",
+           "or a factor ",
+           "(first level = fail, other levels = success),\n",
+           "or a 2-column matrix where col 1 is the no. of ",
+           "successes and col 2 is the no. of failures")
+    }
+
+
+    if ( .imethod == 1)
+      mustart <- (mustart + y) / 2
+
+
+    extra$type.fitted <- .type.fitted
+    extra$dimnamesy   <- dimnames(y)
+
+
+
+
+    predictors.names <-
+        c(namesof("prob"     , .lprob      , earg = .eprob      , tag = FALSE),
+          namesof("onempstr0", .lonempstr0 , earg = .eonempstr0 , tag = FALSE))
+
+
+    extra$w <- w  # Needed for @linkinv
+    onemphi.init <- if (length( .ionempstr0 )) .ionempstr0 else {
+        prob0.est <- sum(w[y == 0]) / sum(w)
+        if ( .imethod == 1) {
+          1 - (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
+        } else {
+          1 - prob0.est
+        }
+    }
+
+    onemphi.init[onemphi.init <= -0.10] <- 0.10  # Lots of sample variation
+    onemphi.init[onemphi.init <=  0.05] <- 0.15  # Last resort
+    onemphi.init[onemphi.init >=  0.80] <- 0.80  # Last resort
+
+    if ( length(mustart) && !length(etastart))
+      mustart <- cbind(mustart,
+                       rep(onemphi.init, len = n))  # 1st coln not a real mu
+
+  }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+            .eonempstr0 = eonempstr0, .eprob = eprob,
+            .ionempstr0 = ionempstr0,
+            .type.fitted = type.fitted,          
+            .imethod = imethod ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    mubin     <- eta2theta(eta[, 1], .lprob      , earg = .eprob      )
+    onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+
+
+    orig.w <- if (length(tmp3 <- extra$orig.w)) tmp3 else
+              rep(1, len = nrow(eta))
+    priorw <- extra$w
+    nvec <- priorw / orig.w
+
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+    ans <- switch(type.fitted,
+                  "mean"      = (onempstr0) * mubin,
+                  "pobs0"     = 1 - onempstr0 + (onempstr0)*(1-mubin)^nvec,  # P(Y=0)
+                  "pstr0"     = 1 - onempstr0,
+                  "onempstr0" =     onempstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+           .eonempstr0 = eonempstr0, .eprob = eprob,
+           .type.fitted = type.fitted ))),
+  last = eval(substitute(expression({
+    misc$link <-    c("prob" = .lprob , "onempstr0" = .lonempstr0 )
+
+    misc$earg <- list("prob" = .eprob , "onempstr0" = .eonempstr0 )
+
+    misc$imethod <- .imethod
+
+
+      misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w  # [1]  # P(Y=0)
+      misc$pstr0 <- phi
+  }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+            .eonempstr0 = eonempstr0, .eprob = eprob,
+            .imethod = imethod ))),
+  linkfun = eval(substitute(function(mu, extra = NULL) {
+    cbind(theta2eta(mu[, 1], .lprob      , earg = .eprob      ),
+          theta2eta(mu[, 2], .lonempstr0 , earg = .eonempstr0 ))
+  }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+           .eonempstr0 = eonempstr0, .eprob = eprob ))),
+  loglikelihood = eval(substitute( 
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    mubin     <- eta2theta(eta[, 1], .lprob      , earg = .eprob      )
+    onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(dzibinom(x = round(w * y), size = w, prob = mubin,
+                   log = TRUE, pstr0 = 1 - onempstr0))
+    }
+  }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+           .eonempstr0 = eonempstr0, .eprob = eprob ))),
+  vfamily = c("zibinomialff"),
+  deriv = eval(substitute(expression({
+    mubin     <- eta2theta(eta[, 1], .lprob      , earg = .eprob      )
+    onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+    omphi     <-     onempstr0
+    phi       <- 1 - onempstr0
+
+
+    prob0 <- (1 - mubin)^w  # Actually q^w
+    pobs0 <- phi + (omphi) * prob0
+    index <- (y == 0)
+    dl.domphi <- -(1 - prob0) / pobs0  # Note "-"
+    dl.domphi[!index] <- +1 / (omphi[!index])  # Note "+"
+
+    dl.dmubin <- -w * (omphi) * (1 - mubin)^(w - 1) / pobs0
+    dl.dmubin[!index] <- w[!index] *
+        (    y[!index]  /      mubin[!index]   -
+        (1 - y[!index]) / (1 - mubin[!index]))
+
+    dmubin.deta <- dtheta.deta(mubin, .lprob      , earg = .eprob      )
+    domphi.deta <- dtheta.deta(omphi, .lonempstr0 , earg = .eonempstr0 )
+
+    ans <- cbind(dl.dmubin * dmubin.deta,
+                 dl.domphi * domphi.deta)
+
+      if ( .lprob == "logit") {
+        ans[!index, 1] <- w[!index] * (y[!index] - mubin[!index])
+      }
+
+      ans
+  }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+            .eonempstr0 = eonempstr0, .eprob = eprob ))),
+  weight = eval(substitute(expression({
+    wz <- matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+
+
+
+    ned2l.domphi2 <- (1 - prob0) / ((omphi) * pobs0)
+
+
+    ned2l.domphimubin <- +w * ((1 - mubin)^(w - 1)) / pobs0  # Note "+"
+
+
+
+
+
+
+    ned2l.dmubin2 <- (w * (omphi) / (mubin * (1 - mubin)^2)) *
+                     (1 - mubin - w * mubin *
+                     (1 - mubin)^w * phi / pobs0)
+
+
+
+
+
+    wz[,iam(1, 1, M)] <- ned2l.dmubin2     * dmubin.deta^2
+    wz[,iam(2, 2, M)] <- ned2l.domphi2     * domphi.deta^2
+    wz[,iam(1, 2, M)] <- ned2l.domphimubin * domphi.deta * dmubin.deta
+    if (TRUE) {
+      ind6 <- (wz[, iam(1, 1, M)] < .Machine$double.eps)
+      if (any(ind6))
+        wz[ind6, iam(1, 1, M)] <- .Machine$double.eps
+    }
+    wz
+  }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+            .eonempstr0 = eonempstr0, .eprob = eprob ))))
+}
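zibinomialff() fits the same model as zibinomial() but orders the linear predictors as (prob, onempstr0), where onempstr0 = 1 - pstr0 and is intercept-only by default (zero = 2). A small hedged sketch with made-up data, assuming library(VGAM) is attached:

# Same kind of data as in the zibinomial sketch; only the parameterisation differs.
library(VGAM)
set.seed(1)
nn   <- 500
y    <- rzibinom(nn, size = 10, prob = 0.4, pstr0 = 0.3)
zdat <- data.frame(succ = y, fail = 10 - y)
fit2 <- vglm(cbind(succ, fail) ~ 1,
             zibinomialff(type.fitted = "onempstr0"),
             data = zdat, trace = TRUE)
coef(fit2, matrix = TRUE)  # columns: logit(prob), logit(onempstr0)
head(fitted(fit2))         # fitted values are onempstr0 = 1 - pstr0 here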
+
+
+
+
+
+
+
+
+
+
+dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL <- max(length(x), length(size), length(prob), length(pstr0))
+  if (length(x)     != LLL) x     <- rep(x,     len = LLL);
+  if (length(size)  != LLL) size  <- rep(size,  len = LLL);
+  if (length(prob)  != LLL) prob  <- rep(prob,  len = LLL);
+  if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
+
+  ans <- dbinom(x = x, size = size, prob = prob, log = TRUE)
+
+
+  ans <- if (log.arg) {
+    ifelse(x == 0, log(pstr0 + (1-pstr0) * exp(ans)), log1p(-pstr0) + ans)
+  } else {
+    ifelse(x == 0,     pstr0 + (1-pstr0) * exp(ans) ,
+                    (1-pstr0) * exp(ans))
+  }
+
+
+  prob0 <- (1 - prob)^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+
+  ans
+}
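The deflat.limit guard above allows mild zero-deflation: pstr0 may go as low as -prob0/(1 - prob0) before dzibinom() returns NaN. A quick check that the density still sums to one inside that range (parameter values are arbitrary):

# dzibinom() sums to 1 over 0:size for any pstr0 above the deflation limit.
size  <- 8; prob <- 0.3
prob0 <- (1 - prob)^size
deflat.limit <- -prob0 / (1 - prob0)
sum(dzibinom(0:size, size = size, prob = prob, pstr0 = 0.25))              # 1
sum(dzibinom(0:size, size = size, prob = prob, pstr0 = deflat.limit / 2))  # 1
dzibinom(0, size = size, prob = prob, pstr0 = 2 * deflat.limit)            # NaN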
+
+
+pzibinom <- function(q, size, prob, pstr0 = 0,
+                    lower.tail = TRUE, log.p = FALSE) {
+
+  LLL <- max(length(pstr0), length(size), length(prob), length(q))
+  if (length(q)      != LLL) q      <- rep(q,      len = LLL);
+  if (length(size)   != LLL) size   <- rep(size,   len = LLL);
+  if (length(prob)   != LLL) prob   <- rep(prob,   len = LLL);
+  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
+
+  ans <- pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
+  ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+
+
+  prob0 <- (1 - prob)^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+  ans
+}
+
+
+qzibinom <- function(p, size, prob, pstr0 = 0,
+                    lower.tail = TRUE, log.p = FALSE) {
+  LLL <- max(length(p), length(size), length(prob), length(pstr0))
+  p     <- rep(p,     length = LLL)
+  size  <- rep(size,  length = LLL)
+  prob  <- rep(prob,  length = LLL)
+  pstr0 <- rep(pstr0, length = LLL)
+
+
+  ans <- p 
+  ans[p <= pstr0] <- 0 
+  ans[p >  pstr0] <-
+    qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
+           size[p > pstr0],
+           prob[p > pstr0],
+           lower.tail = lower.tail, log.p = log.p)
+
+
+
+  prob0 <- (1 - prob)^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
+  if (any(ind0)) {
+    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+    ans[p[ind0] <= pobs0] <- 0 
+    pindex <- (1:LLL)[ind0 & (p > pobs0)]
+    Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+    ans[pindex] <- qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
+                             size = size[pindex],
+                             prob = prob[pindex])
+  }
+
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+
+  ans
+}
+
+
+rzibinom <- function(n, size, prob, pstr0 = 0) {
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+              stop("bad input for argument 'n'") else n
+
+  pstr0 <- rep(pstr0, len = use.n)
+  size  <- rep(size,  len = use.n)
+  prob  <- rep(prob,  len = use.n)
+
+  ans <- rbinom(use.n, size, prob)
+  ans[runif(use.n) < pstr0] <- 0
+
+
+
+  prob0 <- (1 - prob)^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
+  if (any(ind0)) {
+    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+    ans[ind0] <- rposbinom(sum(ind0), size = size[ind0], prob = prob[ind0])
+    ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+  }
+
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+  ans
+}
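A simulation sanity check (illustrative values) that rzibinom() reproduces the zero probability pstr0 + (1 - pstr0) * (1 - prob)^size used throughout this file:

set.seed(7)
nn    <- 1e5
size  <- 6; prob <- 0.25; pstr0 <- 0.2
ysim  <- rzibinom(nn, size = size, prob = prob, pstr0 = pstr0)
mean(ysim == 0)                           # empirical  P(Y = 0)
pstr0 + (1 - pstr0) * (1 - prob)^size     # theoretical P(Y = 0)
c(mean(ysim), (1 - pstr0) * size * prob)  # empirical vs theoretical mean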
+
+
+
+
+
+
+
+
+
+
+
+
+dzinegbin <- function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
+                     log = FALSE) {
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+    prob <- size / (size + munb)
+  }
+
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+
+  LLL <- max(length(pstr0), length(size), length(prob), length(x))
+  if (length(x)      != LLL) x      <- rep(x,      len = LLL);
+  if (length(size)   != LLL) size   <- rep(size,   len = LLL);
+  if (length(prob)   != LLL) prob   <- rep(prob,   len = LLL);
+  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
+
+
+  ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
+
+  ans <- if (log.arg)
+    ifelse(x == 0, log(pstr0+(1-pstr0)*exp(ans)), log1p(-pstr0) + ans) else
+    ifelse(x == 0,     pstr0+(1-pstr0)*    ans,       (1-pstr0) * ans)
+
+
+
+  prob0 <- prob^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+
+  ans
+}
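dzinegbin() accepts either prob or munb (never both); internally prob = size/(size + munb), so the two calls below should agree. Values are illustrative only:

xx   <- 0:5
size <- 2; munb <- 3
dzinegbin(xx, size = size, munb = munb, pstr0 = 0.1)
dzinegbin(xx, size = size, prob = size / (size + munb), pstr0 = 0.1)  # identical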
+
+
+pzinegbin <- function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+    prob <- size / (size + munb)
+  }
+
+  LLL <- max(length(pstr0), length(size), length(prob), length(q))
+  if (length(q)      != LLL) q      <- rep(q,      len = LLL);
+  if (length(size)   != LLL) size   <- rep(size,   len = LLL);
+  if (length(prob)   != LLL) prob   <- rep(prob,   len = LLL);
+  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
+
+
+
+  ans <- pnbinom(q = q, size = size, prob = prob)
+  ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+
+
+
+  prob0 <- prob^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+  ans
+}
+
+
+qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+    prob <- size/(size + munb)
+  }
+  LLL <- max(length(p), length(prob), length(pstr0), length(size))
+  if (length(p)     != LLL) p      <- rep(p,     len = LLL)
+  if (length(pstr0) != LLL) pstr0  <- rep(pstr0, len = LLL);
+  if (length(prob)  != LLL) prob   <- rep(prob,  len = LLL)
+  if (length(size)  != LLL) size   <- rep(size,  len = LLL);
+
+  ans <- p 
+  ind4 <- (p > pstr0)
+  ans[!ind4] <- 0
+  ans[ ind4] <- qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
+                       size = size[ind4], prob = prob[ind4])
+
+
+
+  prob0 <- prob^size
+  deflat.limit <- -prob0 / (1 - prob0)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
+  if (any(ind0)) {
+    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+    ans[p[ind0] <= pobs0] <- 0 
+    pindex <- (1:LLL)[ind0 & (p > pobs0)]
+    Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+    ans[pindex] <- qposnegbin((p[pindex] - Pobs0) / (1 - Pobs0),
+                              size = size[pindex],
+                              prob = prob[pindex])
+  }
+
+
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+
+
+  ans
+}
+
+
+rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+    prob <- size / (size + munb)
+  }
+
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+               stop("bad input for argument 'n'") else n
+
+
+  pstr0 <- rep(pstr0, len = use.n)
+  size  <- rep(size,  len = use.n)
+  prob  <- rep(prob,  len = use.n)
+
+
+  ans <- rnbinom(n = use.n, size = size, prob = prob)
+  ans <- ifelse(runif(use.n) < pstr0, rep(0, use.n), ans)
+
+
+
+  prob0 <- rep(prob^size, len = use.n)
+  deflat.limit <- -prob0 / (1 - prob0)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
+  if (any(ind0, na.rm = TRUE)) {
+    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+    ans[ind0] <- rposnegbin(sum(ind0, na.rm = TRUE), size = size[ind0],
+                    prob = prob[ind0])
+    ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+  }
+
+  ans[pstr0 < deflat.limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+  ans
+}
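An end-to-end hedged sketch tying the simulators to the zinegbinomial() family defined below: simulate zero-inflated negative binomial counts with rzinegbin() and recover the three parameters. Names and values are illustrative; library(VGAM) is assumed attached.

# Simulate ZINB counts and fit an intercept-only zinegbinomial() model.
library(VGAM)
set.seed(42)
nn   <- 2000
zdat <- data.frame(y = rzinegbin(nn, munb = 4, size = 1.5, pstr0 = 0.25))
fit  <- vglm(y ~ 1, zinegbinomial(), data = zdat, trace = TRUE)
coef(fit, matrix = TRUE)   # logit(pstr0), log(munb), log(size)
head(fitted(fit))          # default "mean": (1 - pstr0) * munb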
+
+
+
+
+
+
+
+
+zinegbinomial.control <- function(save.weight = TRUE, ...) {
+  list(save.weight = save.weight)
+}
+
+
+ zinegbinomial <-
+  function(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           ipstr0 = NULL,                    isize = NULL,
+           zero = -3,  # 20130917; used to be c(-1, -3)
+           imethod = 1, shrinkage.init = 0.95,
+           nsimEIM = 250) {
+
+
+  lpstr0 <- as.list(substitute(lpstr0))
+  epstr0 <- link2list(lpstr0)
+  lpstr0 <- attr(epstr0, "function.name")
+
+  lmunb <- as.list(substitute(lmunb))
+  emunb <- link2list(lmunb)
+  lmunb <- attr(emunb, "function.name")
+
+  lsize <- as.list(substitute(lsize))
+  esize <- link2list(lsize)
+  lsize <- attr(esize, "function.name")
+
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+
+
+  if (length(ipstr0) &&
+     (!is.Numeric(ipstr0, positive = TRUE) ||
+      any(ipstr0 >= 1)))
+    stop("argument 'ipstr0' must contain values in (0,1)")
+  if (length(isize) && !is.Numeric(isize, positive = TRUE))
+    stop("argument 'isize' must contain positive values only")
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+    stop("argument 'imethod' must be 1, 2 or 3")
+
+  if (!is.Numeric(nsimEIM, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE))
+    stop("argument 'nsimEIM' must be a positive integer")
+  if (nsimEIM <= 50)
+    warning("argument 'nsimEIM' should be greater than 50, say")
+
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
+      shrinkage.init < 0 ||
+      shrinkage.init > 1)
+    stop("bad input for argument 'shrinkage.init'")
+
+
+
+
+  new("vglmff",
+  blurb = c("Zero-inflated negative binomial\n\n",
+            "Links:    ",
+            namesof("pstr0", lpstr0, earg = epstr0, tag = FALSE), ", ",
+            namesof("munb",  lmunb,  earg = emunb,  tag = FALSE), ", ",
+            namesof("size",  lsize,  earg = esize,  tag = FALSE), "\n",
+            "Mean:     (1 - pstr0) * munb"),
+  constraints = eval(substitute(expression({
+
+    dotzero <- .zero
+    Musual <- 3
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 3,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
+      
+  initialize = eval(substitute(expression({
+    Musual <- 3
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$type.fitted      <- .type.fitted
+    extra$dimnamesy <- dimnames(y)
+
+
+    
+    mynames1 <- if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
+    mynames2 <- if (NOS == 1) "munb"  else paste("munb",  1:NOS, sep = "")
+    mynames3 <- if (NOS == 1) "size"  else paste("size",  1:NOS, sep = "")
+    predictors.names <-
+      c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
+        namesof(mynames2, .lmunb  , earg = .emunb  , tag = FALSE),
+        namesof(mynames3, .lsize  , earg = .esize  , tag = FALSE))[
+        interleave.VGAM(Musual*NOS, M = Musual)]
+
+    if (!length(etastart)) {
+      mum.init <- if ( .imethod == 3) {
+        y + 1/16
+      } else {
+        mum.init <- y
+        for (iii in 1:ncol(y)) {
+          index <- (y[, iii] > 0)
+          mum.init[, iii] <- if ( .imethod == 2)
+              weighted.mean(y[index, iii], w     = w[index, iii]) else
+                 median(rep(y[index, iii], times = w[index, iii])) + 1/8
+        }
+        (1 - .sinit) * (y + 1/16) + .sinit * mum.init
+      }
+
+
+      pstr0.init <- if (length( .ipstr0 )) {
+        matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+      } else {
+        pstr0.init <- y
+        for (iii in 1:ncol(y))
+          pstr0.init[, iii] <- sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
+        pstr0.init[pstr0.init <= 0.02] <- 0.02 # Last resort
+        pstr0.init[pstr0.init >= 0.98] <- 0.98 # Last resort
+        pstr0.init
+      }
+
+        kay.init <-
+        if ( is.Numeric( .isize )) {
+          matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
+        } else {
+          zinegbin.Loglikfun <- function(kval, y, x, w, extraargs) {
+            index0 <- (y == 0)
+            pstr0vec <- extraargs$pstr0
+            muvec <- extraargs$mu
+
+            ans1 <- 0.0
+            if (any( index0))
+              ans1 <- ans1 + sum(w[ index0] *
+                     dzinegbin(x = y[ index0], size = kval,
+                               munb = muvec[ index0],
+                               pstr0 = pstr0vec[ index0], log = TRUE))
+            if (any(!index0))
+              ans1 <- ans1 + sum(w[!index0] *
+                     dzinegbin(x = y[!index0], size = kval,
+                               munb = muvec[!index0],
+                               pstr0 = pstr0vec[!index0], log = TRUE))
+            ans1
+          }
+          k.grid <- 2^((-6):6)
+          kay.init <- matrix(0, nrow = n, ncol = NOS)
+          for (spp. in 1:NOS) {
+            kay.init[, spp.] <- getMaxMin(k.grid,
+                              objfun = zinegbin.Loglikfun,
+                              y = y[, spp.], x = x, w = w[, spp.],
+                              extraargs = list(pstr0 = pstr0.init[, spp.],
+                                               mu  = mum.init[, spp.]))
+          }
+          kay.init
+        }
+
+        etastart <-
+          cbind(theta2eta(pstr0.init, .lpstr0 , earg = .epstr0 ),
+                theta2eta(mum.init,   .lmunb  , earg = .emunb  ),
+                theta2eta(kay.init,   .lsize  , earg = .esize  ))
+        etastart <-
+          etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+    }
+  }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+            .epstr0 = epstr0, .emunb = emunb, .esize = esize,
+            .ipstr0 = ipstr0,                 .isize = isize,
+            .type.fitted = type.fitted,
+            .sinit = shrinkage.init,
+            .imethod = imethod ))),
+      
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+    Musual <- 3
+    NOS <- extra$NOS
+    pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                       .lpstr0 , earg = .epstr0 )
+    if (type.fitted %in% c("mean", "pobs0"))
+      munb  <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                         .lmunb  , earg = .emunb  )
+    if (type.fitted %in% c("pobs0"))
+      kmat  <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                        .lsize , earg = .esize )
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - pstr0) * munb,
+                  "pobs0"     = pstr0 + (1 - pstr0) *
+                                (kmat / (kmat + munb))^kmat,  # P(Y=0)
+                  "pstr0"     =     pstr0,
+                  "onempstr0" = 1 - pstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
+           .epstr0 = epstr0, .esize = esize, .emunb = emunb,
+           .type.fitted = type.fitted ))),
+      
+  last = eval(substitute(expression({
+    misc$link <-
+      c(rep( .lpstr0 , length = NOS),
+        rep( .lmunb  , length = NOS),
+        rep( .lsize  , length = NOS))[interleave.VGAM(Musual*NOS,
+                                                      M = Musual)]
+    temp.names <-
+      c(mynames1,
+        mynames2,
+        mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", Musual*NOS)
+    names(misc$earg) <- temp.names
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-2]] <- .epstr0
+      misc$earg[[Musual*ii-1]] <- .emunb
+      misc$earg[[Musual*ii  ]] <- .esize
+    }
+
+    misc$imethod <- .imethod
+    misc$nsimEIM <- .nsimEIM
+    misc$expected <- TRUE
+    misc$Musual <- Musual
+    misc$ipstr0  <- .ipstr0
+    misc$isize <- .isize
+    misc$multipleResponses <- TRUE
+
+
+  }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+            .epstr0 = epstr0, .emunb = emunb, .esize = esize,
+            .ipstr0 = ipstr0,                 .isize = isize,
+            .nsimEIM = nsimEIM, .imethod = imethod ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    Musual <- 3
+    NOS <- extra$NOS
+    pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                      .lpstr0 , earg = .epstr0 )
+    munb  <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                      .lmunb , earg = .emunb )
+    kmat  <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                      .lsize , earg = .esize )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(c(w) * dzinegbin(x = y, size = kmat, munb = munb,
+                           pstr0 = pstr0, log = TRUE))
+    }
+  }, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+           .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+  vfamily = c("zinegbinomial"),
+  deriv = eval(substitute(expression({
+    Musual <- 3
+    NOS <- extra$NOS
+
+    pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                      .lpstr0 , earg = .epstr0 )
+    munb  <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                      .lmunb  , earg = .emunb  )
+    kmat  <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                      .lsize  , earg = .esize  )
+
+    dpstr0.deta <- dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
+    dmunb.deta  <- dtheta.deta(munb , .lmunb  , earg = .emunb  )
+    dsize.deta  <- dtheta.deta(kmat , .lsize  , earg = .esize  )
+    dthetas.detas <-
+        (cbind(dpstr0.deta,
+               dmunb.deta,
+               dsize.deta))[, interleave.VGAM(Musual*NOS, M = Musual)]
+
+
+
+    dl.dpstr0 <- -1 / (1 - pstr0)
+    dl.dmunb <- y / munb - (y + kmat) / (munb + kmat)
+    dl.dsize <- digamma(y + kmat) - digamma(kmat) -
+               (y + kmat) / (munb + kmat) + 1 +
+               log(kmat / (kmat + munb))
+
+
 
+    for (spp. in 1:NOS) {
+      index0 <- (y[, spp.] == 0)
+      if (all(index0) || all(!index0))
+        stop("must have some 0s AND some positive counts in the data")
 
-  ans
-}
+      kmat.  <-  kmat[index0, spp.]
+      munb.  <-  munb[index0, spp.]
+      pstr0. <- pstr0[index0, spp.]
 
 
-pzinegbin <- function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
-  if (length(munb)) {
-    if (length(prob))
-      stop("arguments 'prob' and 'munb' both specified")
-    prob <- size / (size + munb)
-  }
+      tempk. <- kmat. / (kmat. + munb.)
+      tempm. <- munb. / (kmat. + munb.)
+      prob0. <- tempk.^kmat.
+      df0.dmunb.  <- -tempk.* prob0.
+      df0.dkmat.  <- prob0. * (tempm. + log(tempk.))
 
-  LLL <- max(length(pstr0), length(size), length(prob), length(q))
-  if (length(q)      != LLL) q      <- rep(q,      len = LLL);
-  if (length(size)   != LLL) size   <- rep(size,   len = LLL);
-  if (length(prob)   != LLL) prob   <- rep(prob,   len = LLL);
-  if (length(pstr0)  != LLL) pstr0  <- rep(pstr0,  len = LLL);
+      denom. <- pstr0. + (1 - pstr0.) * prob0.
+     dl.dpstr0[index0, spp.]  <- (1 - prob0.) / denom.
+      dl.dmunb[index0, spp.]  <- (1 - pstr0.) * df0.dmunb. / denom.
+      dl.dsize[index0, spp.]  <- (1 - pstr0.) * df0.dkmat. / denom.
+    }  # of spp.
 
 
+    dl.dthetas <-
+      cbind(dl.dpstr0,
+            dl.dmunb,
+            dl.dsize)[, interleave.VGAM(Musual*NOS, M = Musual)]
 
-  ans <- pnbinom(q = q, size = size, prob = prob)
-  ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
 
+      c(w) * dl.dthetas * dthetas.detas
+  }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+            .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
 
+  weight = eval(substitute(expression({
 
-  prob0 <- prob^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
 
-  ans
-}
 
+    wz <- matrix(0, n, Musual*M - Musual)
 
-qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
-  if (length(munb)) {
-    if (length(prob))
-      stop("arguments 'prob' and 'munb' both specified")
-    prob <- size/(size + munb)
-  }
-  LLL <- max(length(p), length(prob), length(pstr0), length(size))
-  if (length(p)     != LLL) p      <- rep(p,     len = LLL)
-  if (length(pstr0) != LLL) pstr0  <- rep(pstr0, len = LLL);
-  if (length(prob)  != LLL) prob   <- rep(prob,  len = LLL)
-  if (length(size)  != LLL) size   <- rep(size,  len = LLL);
+    ind3 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
 
-  ans <- p 
-  ind4 <- (p > pstr0)
-  ans[!ind4] <- 0
-  ans[ ind4] <- qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
-                       size = size[ind4], prob = prob[ind4])
+    run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
 
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rzinegbin(n = n*NOS, pstr0 = pstr0,
+                        size = kmat, mu = munb)
+      dim(ysim) <- c(n, NOS)
+      index0 <- (ysim[, spp.] == 0)
 
+      dl.dpstr0 <- -1 / (1 - pstr0)
+      dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat)
+      dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
+                 (ysim + kmat) / (munb + kmat) + 1 +
+                 log(kmat / (kmat + munb))
 
-  prob0 <- prob^size
-  deflat_limit <- -prob0 / (1 - prob0)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
-  if (any(ind0)) {
-    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
-    ans[p[ind0] <= pobs0] <- 0 
-    pindex <- (1:LLL)[ind0 & (p > pobs0)]
-    Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
-    ans[pindex] <- qposnegbin((p[pindex] - Pobs0) / (1 - Pobs0),
-                              size = size[pindex],
-                              prob = prob[pindex])
-  }
 
+      for (spp. in 1:NOS) {
+        index0 <- (ysim[, spp.] == 0)
+        if (all(index0) || all(!index0)) {
+          repeat {
+            ysim[, spp.] <- rzinegbin(n = n,
+                                      pstr0 = pstr0[, spp.],
+                                      size  =  kmat[, spp.],
+                                      mu    =  munb[, spp.])
+            index0 <- (ysim[, spp.] == 0)
+            if (any(!index0) && any(index0))
+              break
+          }
+        }
 
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
+        kmat.  <-  kmat[index0, spp.]
+        munb.  <-  munb[index0, spp.]
+        pstr0. <- pstr0[index0, spp.]
 
 
+        tempk. <- kmat. / (kmat. + munb.)
+        tempm. <- munb. / (kmat. + munb.)
+        prob0.  <- tempk.^kmat.
+        df0.dmunb.  <- -tempk.* prob0.
+        df0.dkmat.  <- prob0. * (tempm. + log(tempk.))
 
-  ans
-}
+        denom. <- pstr0. + (1 - pstr0.) * prob0.
+       dl.dpstr0[index0, spp.] <- (1 - prob0.) / denom.
+        dl.dmunb[index0, spp.] <- (1 - pstr0.) * df0.dmunb. / denom.
+        dl.dsize[index0, spp.] <- (1 - pstr0.) * df0.dkmat. / denom.
 
 
-rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
-  if (length(munb)) {
-    if (length(prob))
-      stop("arguments 'prob' and 'munb' both specified")
-    prob <- size / (size + munb)
-  }
+        sdl.dthetas <- cbind(dl.dpstr0[, spp.],
+                             dl.dmunb[, spp.],
+                             dl.dsize[, spp.])
 
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
-               stop("bad input for argument 'n'") else n
+        temp3 <- sdl.dthetas
+        run.varcov[,, spp.] <- run.varcov[,, spp.] +
+                              temp3[, ind3$row.index] *
+                              temp3[, ind3$col.index]
 
 
-  pstr0 <- rep(pstr0, len = use.n)
-  size  <- rep(size,  len = use.n)
-  prob  <- rep(prob,  len = use.n)
+      }  # End of for (spp.) loop
+    }  # End of ii nsimEIM loop
 
+    run.varcov <- run.varcov / .nsimEIM
 
-  ans <- rnbinom(n = use.n, size = size, prob = prob)
-  ans <- ifelse(runif(use.n) < pstr0, rep(0, use.n), ans)
+    wz1 <- if (intercept.only) {
+      for (spp. in 1:NOS) {
+        for (jay in 1:length(ind3$row.index)) {
+          run.varcov[, jay, spp.] <- mean(run.varcov[, jay, spp.])
+        }
+      }
+      run.varcov
+    } else {
+      run.varcov
+    }
 
+    for (spp. in 1:NOS) {
+      wz1[,, spp.] <- wz1[,, spp.] *
+                     dthetas.detas[, Musual * (spp. - 1) + ind3$row] *
+                     dthetas.detas[, Musual * (spp. - 1) + ind3$col]
+    }
 
+    for (spp. in 1:NOS) {
+      for (jay in 1:Musual) {
+        for (kay in jay:Musual) {
+          cptr <- iam((spp. - 1) * Musual + jay,
+                     (spp. - 1) * Musual + kay, M = M)
+          temp.wz1 <- wz1[,, spp.]
+          wz[, cptr] <- temp.wz1[, iam(jay, kay, M = Musual)]
+        }
+      }
+    }
 
-  prob0 <- rep(prob^size, len = use.n)
-  deflat_limit <- -prob0 / (1 - prob0)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
-  if (any(ind0, na.rm = TRUE)) {
-    pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
-    ans[ind0] <- rposnegbin(sum(ind0, na.rm = TRUE), size = size[ind0],
-                    prob = prob[ind0])
-    ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
-  }
 
-  ans[pstr0 < deflat_limit] <- NaN
-  ans[pstr0 > 1] <- NaN
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+  }), list( .lpstr0 = lpstr0,
+            .epstr0 = epstr0, .nsimEIM = nsimEIM ))))
+}  # End of zinegbinomial
 
-  ans
-}
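
For orientation, a minimal sketch of the dzinegbin()/rzinegbin() pair used
by the family above (argument names as in the calls and signatures in this
file; the parameter values are illustrative only):

    set.seed(1)
    ## structural zeros with probability pstr0, otherwise NegBin(size, munb)
    yy <- rzinegbin(n = 1000, size = 2, munb = 5, pstr0 = 0.3)
    mean(yy == 0)   # roughly pstr0 + (1 - pstr0) * (size/(size+munb))^size
    dzinegbin(x = 0, size = 2, munb = 5, pstr0 = 0.3)  # same quantity, exactly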
 
 
 
@@ -1881,22 +3446,20 @@ rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
 
 
 
-zinegbinomial.control <- function(save.weight = TRUE, ...) {
+zinegbinomialff.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
 
 
- zinegbinomial <-
-  function(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
-           ipstr0 = NULL,                    isize = NULL,
-           zero = c(-1, -3),
+ zinegbinomialff <-
+  function(lmunb = "loge", lsize = "loge", lonempstr0 = "logit", 
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           isize = NULL, ionempstr0 = NULL,  
+           zero = c(-2, -3),
            imethod = 1, shrinkage.init = 0.95,
            nsimEIM = 250) {
 
 
-  lpstr0 <- as.list(substitute(lpstr0))
-  epstr0 <- link2list(lpstr0)
-  lpstr0 <- attr(epstr0, "function.name")
 
   lmunb <- as.list(substitute(lmunb))
   emunb <- link2list(lmunb)
@@ -1906,28 +3469,34 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
   esize <- link2list(lsize)
   lsize <- attr(esize, "function.name")
 
+  lonempstr0 <- as.list(substitute(lonempstr0))
+  eonempstr0 <- link2list(lonempstr0)
+  lonempstr0 <- attr(eonempstr0, "function.name")
 
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
 
 
-  if (length(ipstr0) &&
-     (!is.Numeric(ipstr0, positive = TRUE) ||
-      any(ipstr0 >= 1)))
-    stop("argument 'ipstr0' must contain values in (0,1)")
+
+  if (length(ionempstr0) &&
+     (!is.Numeric(ionempstr0, positive = TRUE) ||
+      any(ionempstr0 >= 1)))
+    stop("argument 'ionempstr0' must contain values in (0,1)")
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
     stop("argument 'isize' must contain positive values only")
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1, 2 or 3")
 
-  if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
+  if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
     stop("argument 'nsimEIM' must be a positive integer")
   if (nsimEIM <= 50)
     warning("argument 'nsimEIM' should be greater than 50, say")
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
       shrinkage.init < 0 ||
       shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -1938,9 +3507,10 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Zero-inflated negative binomial\n\n",
             "Links:    ",
-            namesof("pstr0", lpstr0, earg = epstr0, tag = FALSE), ", ",
             namesof("munb",  lmunb,  earg = emunb,  tag = FALSE), ", ",
-            namesof("size",  lsize,  earg = esize,  tag = FALSE), "\n",
+            namesof("size",  lsize,  earg = esize,  tag = FALSE), ", ",
+            namesof("onempstr0", lonempstr0, earg = eonempstr0, tag = FALSE),
+            "\n",
             "Mean:     (1 - pstr0) * munb"),
   constraints = eval(substitute(expression({
 
@@ -1948,6 +3518,17 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
     Musual <- 3
     eval(negzero.expression)
   }), list( .zero = zero ))),
+
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 3,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
+      
   initialize = eval(substitute(expression({
     Musual <- 3
 
@@ -1963,19 +3544,20 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
     y <- temp5$y
 
 
-
-
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$type.fitted <- .type.fitted
+    extra$dimnamesy   <- dimnames(y)
 
-    mynames1 <- if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
-    mynames2 <- if (NOS == 1) "munb"  else paste("munb",  1:NOS, sep = "")
-    mynames3 <- if (NOS == 1) "size"  else paste("size",  1:NOS, sep = "")
+
+    
+    mynames1 <- if (NOS == 1) "munb"  else paste("munb",  1:NOS, sep = "")
+    mynames2 <- if (NOS == 1) "size"  else paste("size",  1:NOS, sep = "")
+    mynames3 <- if (NOS == 1) "onempstr0" else paste("onempstr0", 1:NOS,
+                                                     sep = "")
     predictors.names <-
-      c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
-        namesof(mynames2, .lmunb  , earg = .emunb  , tag = FALSE),
-        namesof(mynames3, .lsize  , earg = .esize  , tag = FALSE))[
+      c(namesof(mynames1, .lmunb  , earg = .emunb  , tag = FALSE),
+        namesof(mynames2, .lsize  , earg = .esize  , tag = FALSE),
+        namesof(mynames3, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
         interleave.VGAM(Musual*NOS, M = Musual)]
 
     if (!length(etastart)) {
@@ -1983,7 +3565,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
         y + 1/16
       } else {
         mum.init <- y
-        for(iii in 1:ncol(y)) {
+        for (iii in 1:ncol(y)) {
           index <- (y[, iii] > 0)
           mum.init[, iii] <- if ( .imethod == 2)
               weighted.mean(y[index, iii], w     = w[index, iii]) else
@@ -1993,18 +3575,18 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
       }
 
 
-      pstr0.init <- if (length( .ipstr0 )) {
-        matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+      onempstr0.init <- if (length( .ionempstr0 )) {
+        matrix( .ionempstr0 , n, ncoly, byrow = TRUE)
       } else {
         pstr0.init <- y
-        for(iii in 1:ncol(y))
+        for (iii in 1:ncol(y))
           pstr0.init[, iii] <- sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
-        pstr0.init[pstr0.init <= 0.02] <- 0.02 # Last resort
-        pstr0.init[pstr0.init >= 0.98] <- 0.98 # Last resort
-        pstr0.init
+        pstr0.init[pstr0.init <= 0.02] <- 0.02  # Last resort
+        pstr0.init[pstr0.init >= 0.98] <- 0.98  # Last resort
+        1 - pstr0.init
       }
 
-        kay.init =
+        kay.init <-
         if ( is.Numeric( .isize )) {
           matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
         } else {
@@ -2013,7 +3595,6 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
             pstr0vec <- extraargs$pstr0
             muvec <- extraargs$mu
 
-
             ans1 <- 0.0
             if (any( index0))
               ans1 <- ans1 + sum(w[ index0] *
@@ -2029,47 +3610,80 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
           }
           k.grid <- 2^((-6):6)
           kay.init <- matrix(0, nrow = n, ncol = NOS)
-          for(spp. in 1:NOS) {
+          for (spp. in 1:NOS) {
             kay.init[, spp.] <- getMaxMin(k.grid,
-                              objfun = zinegbin.Loglikfun,
-                              y = y[, spp.], x = x, w = w[, spp.],
-                              extraargs = list(pstr0 = pstr0.init[, spp.],
-                                               mu  = mum.init[, spp.]))
+                  objfun = zinegbin.Loglikfun,
+                  y = y[, spp.], x = x, w = w[, spp.],
+                  extraargs = list(pstr0 = 1 - onempstr0.init[, spp.],
+                                   mu    = mum.init[, spp.]))
           }
           kay.init
         }
 
-        etastart <- cbind(theta2eta(pstr0.init, .lpstr0 , earg = .epstr0 ),
-                         theta2eta(mum.init,   .lmunb  , earg = .emunb  ),
-                         theta2eta(kay.init,   .lsize  , earg = .esize  ))
+        etastart <-
+          cbind(theta2eta(mum.init,   .lmunb  , earg = .emunb  ),
+                theta2eta(kay.init,   .lsize  , earg = .esize  ),
+                theta2eta(onempstr0.init, .lonempstr0 ,
+                          earg = .eonempstr0 ))
         etastart <-
           etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
     }
-  }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
-            .epstr0 = epstr0, .emunb = emunb, .esize = esize,
-            .ipstr0 = ipstr0,                 .isize = isize,
+  }), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
+            .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
+            .ionempstr0 = ionempstr0,                 .isize = isize,
+            .type.fitted = type.fitted,
             .sinit = shrinkage.init,
-            .imethod = imethod ))), 
+            .imethod = imethod ))),
+      
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
     Musual <- 3
     NOS <- extra$NOS
-    pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
-                       .lpstr0 , earg = .epstr0 )
-    munb  <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
-                       .lmunb  , earg = .emunb  )
-    fv.matrix <- (1 - pstr0) * munb
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(dimnames(pstr0)[[1]], extra$dimnamesy2)
-    fv.matrix
-  }, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
-           .epstr0 = epstr0, .esize = esize, .emunb = emunb ))),
+    if (type.fitted %in% c("mean", "pobs0"))
+      munb    <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                           .lmunb  , earg = .emunb  )
+    if (type.fitted %in% c("pobs0"))
+      kmat    <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                           .lsize , earg = .esize )
+    onempstr0 <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                           .lonempstr0 , earg = .eonempstr0 )
+
+    ans <- switch(type.fitted,
+                  "mean"      = (onempstr0) * munb,
+                  "pobs0"     = 1 -  onempstr0 + (onempstr0) *
+                                (kmat / (kmat + munb))^kmat,  # P(Y=0)
+                  "pstr0"     = 1 - onempstr0,
+                  "onempstr0" =     onempstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lonempstr0 = lonempstr0, .lsize = lsize, .lmunb = lmunb,
+           .eonempstr0 = eonempstr0, .esize = esize, .emunb = emunb,
+           .type.fitted = type.fitted ))),
+      
   last = eval(substitute(expression({
     misc$link <-
-      c(rep( .lpstr0 , length = NOS),
-        rep( .lmunb  , length = NOS),
-        rep( .lsize  , length = NOS))[interleave.VGAM(Musual*NOS,
-                                                      M = Musual)]
-    temp.names =
+      c(rep( .lmunb      , length = NOS),
+        rep( .lsize      , length = NOS),
+        rep( .lonempstr0 , length = NOS))[interleave.VGAM(Musual*NOS,
+                                                          M = Musual)]
+    temp.names <-
       c(mynames1,
         mynames2,
         mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
@@ -2077,83 +3691,81 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
     misc$earg <- vector("list", Musual*NOS)
     names(misc$earg) <- temp.names
-    for(ii in 1:NOS) {
-      misc$earg[[Musual*ii-2]] <- .epstr0
-      misc$earg[[Musual*ii-1]] <- .emunb
-      misc$earg[[Musual*ii  ]] <- .esize
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-2]] <- .emunb
+      misc$earg[[Musual*ii-1]] <- .esize
+      misc$earg[[Musual*ii  ]] <- .eonempstr0
     }
 
     misc$imethod <- .imethod
     misc$nsimEIM <- .nsimEIM
     misc$expected <- TRUE
     misc$Musual <- Musual
-    misc$ipstr0  <- .ipstr0
+    misc$ionempstr0  <- .ionempstr0
     misc$isize <- .isize
     misc$multipleResponses <- TRUE
 
-
-
-   misc$pobs0 <- pstr0 + (1 - pstr0) * (kmat / (kmat + munb))^kmat  # P(Y=0)
-   misc$pstr0 <- pstr0
-  }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
-            .epstr0 = epstr0, .emunb = emunb, .esize = esize,
-            .ipstr0 = ipstr0,                 .isize = isize,
+  }), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
+            .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
+            .ionempstr0 = ionempstr0,                 .isize = isize,
             .nsimEIM = nsimEIM, .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     Musual <- 3
     NOS <- extra$NOS
-    pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
-                      .lpstr0 , earg = .epstr0 )
-    munb  <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
-                      .lmunb , earg = .emunb )
-    kmat  <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
-                      .lsize , earg = .esize )
+    munb      <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                           .lmunb , earg = .emunb )
+    kmat      <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                           .lsize , earg = .esize )
+    onempstr0 <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                           .lonempstr0 , earg = .eonempstr0 )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
       sum(c(w) * dzinegbin(x = y, size = kmat, munb = munb,
-                        pstr0 = pstr0, log = TRUE))
+                           pstr0 = 1 - onempstr0, log = TRUE))
     }
-  }, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
-           .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
-  vfamily = c("zinegbinomial"),
+  }, list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
+           .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
+  vfamily = c("zinegbinomialff"),
   deriv = eval(substitute(expression({
     Musual <- 3
     NOS <- extra$NOS
 
-    pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
-                      .lpstr0 , earg = .epstr0 )
-    munb  <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
-                      .lmunb  , earg = .emunb  )
-    kmat  <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
-                      .lsize  , earg = .esize  )
+    munb      <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+                           .lmunb  , earg = .emunb  )
+    kmat      <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                           .lsize  , earg = .esize  )
+    onempstr0 <- eta2theta(eta[, Musual*(1:NOS)  , drop = FALSE],
+                           .lonempstr0 , earg = .eonempstr0 )
 
-    dpstr0.deta <- dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
+    donempstr0.deta <- dtheta.deta(onempstr0, .lonempstr0 ,
+                                   earg = .eonempstr0 )
     dmunb.deta  <- dtheta.deta(munb , .lmunb  , earg = .emunb  )
     dsize.deta  <- dtheta.deta(kmat , .lsize  , earg = .esize  )
-    dthetas.detas =
-        (cbind(dpstr0.deta,
-               dmunb.deta,
-               dsize.deta))[, interleave.VGAM(Musual*NOS, M = Musual)]
+    dthetas.detas <-
+        (cbind(dmunb.deta,
+               dsize.deta,
+               donempstr0.deta))[, interleave.VGAM(Musual*NOS,
+                                                   M = Musual)]
 
 
 
-    dl.dpstr0 <- -1 / (1 - pstr0)
     dl.dmunb <- y / munb - (y + kmat) / (munb + kmat)
     dl.dsize <- digamma(y + kmat) - digamma(kmat) -
                (y + kmat) / (munb + kmat) + 1 +
                log(kmat / (kmat + munb))
+    dl.donempstr0 <- +1 / (onempstr0)
 
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       index0 <- (y[, spp.] == 0)
-      if (!any(index0) || !any(!index0))
+      if (all(index0) || all(!index0))
         stop("must have some 0s AND some positive counts in the data")
 
-      kmat.  <-  kmat[index0, spp.]
-      munb.  <-  munb[index0, spp.]
-      pstr0. <- pstr0[index0, spp.]
+      kmat.      <-      kmat[index0, spp.]
+      munb.      <-      munb[index0, spp.]
+      onempstr0. <- onempstr0[index0, spp.]
 
 
       tempk. <- kmat. / (kmat. + munb.)
@@ -2162,23 +3774,22 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
       df0.dmunb.  <- -tempk.* prob0.
       df0.dkmat.  <- prob0. * (tempm. + log(tempk.))
 
-
-      denom. <- pstr0. + (1 - pstr0.) * prob0.
-     dl.dpstr0[index0, spp.]  <- (1 - prob0.) / denom.
-      dl.dmunb[index0, spp.]  <- (1 - pstr0.) * df0.dmunb. / denom.
-      dl.dsize[index0, spp.]  <- (1 - pstr0.) * df0.dkmat. / denom.
-    } # of spp.
+      denom. <- 1 - onempstr0. + (onempstr0.) * prob0.
+     dl.donempstr0[index0, spp.]  <- -(1 - prob0.) / denom.  # note "-"
+          dl.dmunb[index0, spp.]  <- (onempstr0.) * df0.dmunb. / denom.
+          dl.dsize[index0, spp.]  <- (onempstr0.) * df0.dkmat. / denom.
+    }  # of spp.
 
 
     dl.dthetas <-
-      cbind(dl.dpstr0,
-            dl.dmunb,
-            dl.dsize)[, interleave.VGAM(Musual*NOS, M = Musual)]
+      cbind(dl.dmunb,
+            dl.dsize,
+            dl.donempstr0)[, interleave.VGAM(Musual*NOS, M = Musual)]
 
 
       c(w) * dl.dthetas * dthetas.detas
-  }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
-            .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+  }), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
+            .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
 
   weight = eval(substitute(expression({
 
@@ -2190,27 +3801,36 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
     run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
 
-    for(ii in 1:( .nsimEIM )) {
-      ysim <- rzinegbin(n = n*NOS, pstr0 = pstr0,
-                       size = kmat, mu = munb)
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rzinegbin(n = n*NOS, pstr0 = 1 - onempstr0,
+                        size = kmat, mu = munb)
       dim(ysim) <- c(n, NOS)
       index0 <- (ysim[, spp.] == 0)
 
-      dl.dpstr0 <- -1 / (1 - pstr0)
       dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat)
       dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
-                 (ysim + kmat) / (munb + kmat) + 1 +
-                 log(kmat / (kmat + munb))
+                  (ysim + kmat) / (munb + kmat) + 1 +
+                  log(kmat / (kmat + munb))
+      dl.donempstr0 <- +1 / (onempstr0)
 
 
-      for(spp. in 1:NOS) {
+      for (spp. in 1:NOS) {
         index0 <- (ysim[, spp.] == 0)
-        if (!any(index0) || !any(!index0))
-          stop("must have some 0s AND some positive counts in the data")
+        if (all(index0) || all(!index0)) {
+          repeat {
+            ysim[, spp.] <- rzinegbin(n = n,
+                                      pstr0 = 1 - onempstr0[, spp.],
+                                      size  =  kmat[, spp.],
+                                      mu    =  munb[, spp.])
+            index0 <- (ysim[, spp.] == 0)
+            if (any(!index0) && any(index0))
+              break
+          }
+        }
 
-        kmat.  <-  kmat[index0, spp.]
-        munb.  <-  munb[index0, spp.]
-        pstr0. <- pstr0[index0, spp.]
+        munb.      <-      munb[index0, spp.]
+        kmat.      <-      kmat[index0, spp.]
+        onempstr0. <- onempstr0[index0, spp.]
 
 
         tempk. <- kmat. / (kmat. + munb.)
@@ -2219,16 +3839,15 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
         df0.dmunb.  <- -tempk.* prob0.
         df0.dkmat.  <- prob0. * (tempm. + log(tempk.))
 
-
-        denom. <- pstr0. + (1 - pstr0.) * prob0.
-       dl.dpstr0[index0, spp.] <- (1 - prob0.) / denom.
-        dl.dmunb[index0, spp.] <- (1 - pstr0.) * df0.dmunb. / denom.
-        dl.dsize[index0, spp.] <- (1 - pstr0.) * df0.dkmat. / denom.
+        denom. <- 1 - onempstr0. + (onempstr0.) * prob0.
+       dl.donempstr0[index0, spp.] <- -(1 - prob0.) / denom.  # note "-"
+        dl.dmunb[index0, spp.] <- (onempstr0.) * df0.dmunb. / denom.
+        dl.dsize[index0, spp.] <- (onempstr0.) * df0.dkmat. / denom.
 
 
-        sdl.dthetas <- cbind(dl.dpstr0[, spp.],
-                             dl.dmunb[, spp.],
-                             dl.dsize[, spp.])
+        sdl.dthetas <- cbind(dl.dmunb[, spp.],
+                             dl.dsize[, spp.],
+                             dl.donempstr0[, spp.])
 
         temp3 <- sdl.dthetas
         run.varcov[,, spp.] <- run.varcov[,, spp.] +
@@ -2236,14 +3855,14 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
                               temp3[, ind3$col.index]
 
 
-      } # End of for(spp.) loop
-    } # End of ii nsimEIM loop
+      }  # End of for (spp.) loop
+    }  # End of ii nsimEIM loop
 
     run.varcov <- run.varcov / .nsimEIM
 
     wz1 <- if (intercept.only) {
-      for(spp. in 1:NOS) {
-        for(jay in 1:length(ind3$row.index)) {
+      for (spp. in 1:NOS) {
+        for (jay in 1:length(ind3$row.index)) {
           run.varcov[, jay, spp.] <- mean(run.varcov[, jay, spp.])
         }
       }
@@ -2252,17 +3871,17 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
       run.varcov
     }
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       wz1[,, spp.] <- wz1[,, spp.] *
                      dthetas.detas[, Musual * (spp. - 1) + ind3$row] *
                      dthetas.detas[, Musual * (spp. - 1) + ind3$col]
     }
 
-    for(spp. in 1:NOS) {
-      for(jay in 1:Musual) {
-        for(kay in jay:Musual) {
+    for (spp. in 1:NOS) {
+      for (jay in 1:Musual) {
+        for (kay in jay:Musual) {
           cptr <- iam((spp. - 1) * Musual + jay,
-                     (spp. - 1) * Musual + kay, M = M)
+                      (spp. - 1) * Musual + kay, M = M)
           temp.wz1 <- wz1[,, spp.]
           wz[, cptr] <- temp.wz1[, iam(jay, kay, M = Musual)]
         }
@@ -2271,9 +3890,10 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
 
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
-  }), list( .lpstr0 = lpstr0,
-            .epstr0 = epstr0, .nsimEIM = nsimEIM ))))
-} # End of zinegbinomial
+  }), list( .lonempstr0 = lonempstr0,
+            .eonempstr0 = eonempstr0, .nsimEIM = nsimEIM ))))
+}  # End of zinegbinomialff
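
A minimal usage sketch for the zinegbinomialff() parameterisation above,
assuming the usual vglm() fitting interface (the data frame, formula and
values are illustrative only):

    set.seed(2)
    zdata <- data.frame(x2 = runif(500))
    zdata <- transform(zdata,
                       y1 = rzinegbin(500, size = exp(1),
                                      munb = exp(1 + x2), pstr0 = 0.25))
    fit <- vglm(y1 ~ x2, zinegbinomialff(), data = zdata)
    coef(fit, matrix = TRUE)  # munb, size, onempstr0 linear predictors
    head(fitted(fit))         # default type.fitted = "mean"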
+
 
 
 
@@ -2282,11 +3902,16 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
- zipoissonff <- function(llambda = "loge", lprobp = "logit",
-                         ilambda = NULL,   iprobp = NULL, imethod = 1,
-                         shrinkage.init = 0.8, zero = -2) {
-  lprobp. <- lprobp
-  iprobp. <- iprobp
+
+ zipoissonff <-
+  function(llambda = "loge", lonempstr0 = "logit",
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           ilambda = NULL,   ionempstr0 = NULL, imethod = 1,
+           shrinkage.init = 0.8, zero = -2) {
+
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
 
 
 
@@ -2294,27 +3919,27 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
   elambda <- link2list(llambda)
   llambda <- attr(elambda, "function.name")
 
-  lprobp <- as.list(substitute(lprobp))
-  eprobp. <- link2list(lprobp)
-  lprobp. <- attr(eprobp., "function.name")
+  lonempstr0 <- as.list(substitute(lonempstr0))
+  eonempstr0 <- link2list(lonempstr0)
+  lonempstr0 <- attr(eonempstr0, "function.name")
 
 
 
   if (length(ilambda))
     if (!is.Numeric(ilambda, positive = TRUE))
       stop("'ilambda' values must be positive")
-  if (length(iprobp.))
-    if (!is.Numeric(iprobp., positive = TRUE) ||
-      any(iprobp. >= 1))
-      stop("'iprobp' values must be inside the interval (0,1)")
+  if (length(ionempstr0))
+    if (!is.Numeric(ionempstr0, positive = TRUE) ||
+      any(ionempstr0 >= 1))
+      stop("'ionempstr0' values must be inside the interval (0,1)")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
-  if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
     shrinkage.init < 0 ||
     shrinkage.init > 1)
     stop("bad input for argument 'shrinkage.init'")
@@ -2324,18 +3949,23 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Zero-inflated Poisson\n\n",
             "Links:    ",
-            namesof("lambda", llambda, earg = elambda), ", ",
-            namesof("probp",  lprobp., earg = eprobp.), "\n",
-            "Mean:     probp * lambda"),
+            namesof("lambda",    llambda,    earg = elambda), ", ",
+            namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
+            "Mean:     onempstr0 * lambda"),
   constraints = eval(substitute(expression({
     dotzero <- .zero
     Musual <- 2
     eval(negzero.expression)
   }), list( .zero = zero ))),
+
   infos = eval(substitute(function(...) {
     list(Musual = 2,
+         type.fitted  = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
   initialize = eval(substitute(expression({
 
 
@@ -2358,13 +3988,15 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
     extra$ncoly <- ncoly
     extra$Musual <- Musual
     M <- Musual * ncoly
+    extra$type.fitted      <- .type.fitted
+    extra$dimnamesy <- dimnames(y)
 
 
-    mynames1 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("probp",  if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1 <- paste("lambda",    if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("onempstr0", if (ncoly > 1) 1:ncoly else "", sep = "")
     predictors.names <-
-        c(namesof(mynames1, .llambda, earg = .elambda, tag = FALSE),
-          namesof(mynames2, .lprobp., earg = .eprobp., tag = FALSE))[
+      c(namesof(mynames1, .llambda    , earg = .elambda    , tag = FALSE),
+        namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
           interleave.VGAM(M, M = Musual)]
 
 
@@ -2372,18 +4004,18 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
         matL <- matrix(if (length( .ilambda )) .ilambda else 0,
                        n, ncoly, byrow = TRUE)
-        matP <- matrix(if (length( .iprobp. )) .iprobp. else 0,
+        matP <- matrix(if (length( .ionempstr0 )) .ionempstr0 else 0,
                        n, ncoly, byrow = TRUE)
 
         for (jay in 1:ncoly) {
           yjay <- y[, jay]
 
           Phi0.init <- 1 - 0.85 * sum(w[yjay > 0]) / sum(w)
-          Phi0.init[Phi0.init <= 0.02] <- 0.02 # Last resort
-          Phi0.init[Phi0.init >= 0.98] <- 0.98 # Last resort
+          Phi0.init[Phi0.init <= 0.02] <- 0.02  # Last resort
+          Phi0.init[Phi0.init >= 0.98] <- 0.98  # Last resort
 
           if ( length(mustart)) {
-            mustart <- matrix(mustart, n, ncoly) # Make sure right size
+            mustart <- matrix(mustart, n, ncoly)  # Make sure right size
             Lambda.init <- mustart / (1 - Phi0.init)
           } else if ( .imethod == 2) {
             mymean <- weighted.mean(yjay[yjay > 0],
@@ -2410,44 +4042,74 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
         if (!length( .ilambda ))
           matL[, jay] <- Lambda.init
-        if (!length( .iprobp. ))
+        if (!length( .ionempstr0 ))
           matP[, jay] <- Phi0mat.init
       }
 
-      etastart <- cbind(theta2eta(    matL, .llambda , earg = .elambda ),
-                        theta2eta(1 - matP, .lprobp. , earg = .eprobp. ))[,
+      etastart <-
+        cbind(theta2eta(    matL, .llambda    , earg = .elambda    ),
+              theta2eta(1 - matP, .lonempstr0 , earg = .eonempstr0 ))[,
                         interleave.VGAM(M, M = Musual)]
 
       mustart <- NULL  # Since etastart has been computed.
     }
-  }), list( .lprobp. = lprobp., .llambda = llambda,
-            .eprobp. = eprobp., .elambda = elambda,
-            .iprobp. = iprobp., .ilambda = ilambda,
+  }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
+            .eonempstr0 = eonempstr0, .elambda = elambda,
+            .ionempstr0 = ionempstr0, .ilambda = ilambda,
+            .type.fitted = type.fitted,
             .imethod = imethod, .sinit = shrinkage.init ))),
+
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
     Musual <- 2
     ncoly <- extra$ncoly
-    lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda,
-                        earg = .elambda )
-    probp. <- eta2theta(eta[, Musual*(1:ncoly)    ], .lprobp.,
-                        earg = .eprobp. )
-    probp. * lambda
-  }, list( .lprobp. = lprobp., .llambda = llambda,
-           .eprobp. = eprobp., .elambda = elambda ))),
+    lambda    <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda ,
+                           earg = .elambda )
+    onempstr0 <- eta2theta(eta[, Musual*(1:ncoly)    ], .lonempstr0 ,
+                           earg = .eonempstr0 )
+
+
+    ans <- switch(type.fitted,
+                  "mean"      = onempstr0 * lambda,
+                  "pobs0"     = 1 + onempstr0 * expm1(-lambda),  # P(Y=0)
+                  "pstr0"     = 1 - onempstr0,
+                  "onempstr0" =     onempstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+           .eonempstr0 = eonempstr0, .elambda = elambda,
+           .type.fitted = type.fitted ))),
   last = eval(substitute(expression({
     Musual <- extra$Musual
     misc$link <-
-      c(rep( .llambda, length = ncoly),
-        rep( .lprobp., length = ncoly))[interleave.VGAM(M, M = Musual)]
+      c(rep( .llambda    , length = ncoly),
+        rep( .lonempstr0 , length = ncoly))[interleave.VGAM(M, M = Musual)]
     temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
     names(misc$link) <- temp.names
 
 
     misc$earg <- vector("list", Musual * ncoly)
     names(misc$earg) <- temp.names
-    for(ii in 1:ncoly) {
+    for (ii in 1:ncoly) {
       misc$earg[[Musual*ii-1]] <- .elambda
-      misc$earg[[Musual*ii  ]] <- .eprobp.
+      misc$earg[[Musual*ii  ]] <- .eonempstr0
     }
 
     misc$Musual <- Musual
@@ -2455,64 +4117,66 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
     misc$expected <- TRUE
     misc$multipleResponses <- TRUE
 
-      misc$pobs0 <- (1 - probp.) + probp. * exp(-lambda)  # P(Y=0)
+      misc$pobs0 <- (1 - onempstr0) + onempstr0 * exp(-lambda)  # P(Y=0)
       misc$pobs0 <- as.matrix(misc$pobs0)
       if (length(dimnames(y)[[2]]) > 0)
         dimnames(misc$pobs0) <- dimnames(y)
 
-      misc$pstr0 <- (1 - probp.)
+      misc$pstr0 <- (1 - onempstr0)
       misc$pstr0 <- as.matrix(misc$pstr0)
       if (length(dimnames(y)[[2]]) > 0)
         dimnames(misc$pstr0) <- dimnames(y)
-  }), list( .lprobp. = lprobp., .llambda = llambda,
-            .eprobp. = eprobp., .elambda = elambda,
+  }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
+            .eonempstr0 = eonempstr0, .elambda = elambda,
             .imethod = imethod ))),
   loglikelihood = eval(substitute( 
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     Musual <- 2
     ncoly <- extra$ncoly
-    lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda,
-                        earg = .elambda )
-    probp. <- eta2theta(eta[, Musual*(1:ncoly)    ], .lprobp.,
-                        earg = .eprobp. )
+    lambda    <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda    ,
+                           earg = .elambda )
+    onempstr0 <- eta2theta(eta[, Musual*(1:ncoly)    ], .lonempstr0 ,
+                           earg = .eonempstr0 )
 
 
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-      sum(c(w) * dzipois(x = y, pstr0 = 1 - probp., lambda = lambda,
+      sum(c(w) * dzipois(x = y, pstr0 = 1 - onempstr0, lambda = lambda,
                          log = TRUE))
     }
-  }, list( .lprobp. = lprobp., .llambda = llambda,
-           .eprobp. = eprobp., .elambda = elambda ))),
+  }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+           .eonempstr0 = eonempstr0, .elambda = elambda ))),
   vfamily = c("zipoissonff"),
   deriv = eval(substitute(expression({
     Musual <- 2
     ncoly <- extra$ncoly
-    lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda,
-                        earg = .elambda )
-    probp. <- eta2theta(eta[, Musual*(1:ncoly)    ], .lprobp.,
-                        earg = .eprobp. )
+    lambda    <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda    ,
+                           earg = .elambda )
+    onempstr0 <- eta2theta(eta[, Musual*(1:ncoly)    ], .lonempstr0 ,
+                           earg = .eonempstr0 )
 
 
-    dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda )
-    dprobp..deta <- dtheta.deta(probp., .lprobp., earg = .eprobp. )
+    dlambda.deta    <- dtheta.deta(lambda   , .llambda    ,
+                                   earg = .elambda )
+    donempstr0.deta <- dtheta.deta(onempstr0, .lonempstr0 ,
+                                   earg = .eonempstr0 )
 
-    denom <- 1 + probp. * expm1(-lambda)
+    denom <- 1 + onempstr0 * expm1(-lambda)
     ind0 <- (y == 0)
-    dl.dlambda <- -probp. * exp(-lambda) / denom
+    dl.dlambda <- -onempstr0 * exp(-lambda) / denom
     dl.dlambda[!ind0] <- (y[!ind0] - lambda[!ind0]) / lambda[!ind0]
-    dl.dprobp. <- expm1(-lambda) / denom
-    dl.dprobp.[!ind0] <- 1 / probp.[!ind0]
+    dl.donempstr0 <- expm1(-lambda) / denom
+    dl.donempstr0[!ind0] <- 1 / onempstr0[!ind0]
 
-    ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
-                        dl.dprobp. * dprobp..deta)
+    ans <- c(w) * cbind(dl.dlambda    * dlambda.deta,
+                        dl.donempstr0 * donempstr0.deta)
     ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
 
 
     if ( .llambda == "loge" && is.empty.list( .elambda ) &&
        any(lambda[!ind0] < .Machine$double.eps)) {
-      for(spp. in 1:ncoly) {
-        ans[!ind0[, spp.], Musual * spp.] =
+      for (spp. in 1:ncoly) {
+        ans[!ind0[, spp.], Musual * spp.] <-
           w[!ind0[, spp.]] *
          (y[!ind0[, spp.], spp.] - lambda[!ind0[, spp.], spp.])
       }
@@ -2521,20 +4185,20 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
 
     ans
-  }), list( .lprobp. = lprobp., .llambda = llambda,
-            .eprobp. = eprobp., .elambda = elambda ))),
+  }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
+            .eonempstr0 = eonempstr0, .elambda = elambda ))),
   weight = eval(substitute(expression({
 
 
-    ned2l.dlambda2 <-  (    probp.) / lambda -
-                    probp. * (1 - probp.) * exp(-lambda) / denom
-    ned2l.dprobp.2 <- -expm1(-lambda) / ((  probp.) * denom)
+    ned2l.dlambda2 <-  (    onempstr0) / lambda -
+                    onempstr0 * (1 - onempstr0) * exp(-lambda) / denom
+    ned2l.donempstr0.2 <- -expm1(-lambda) / ((onempstr0) * denom)
     ned2l.dphilambda <- +exp(-lambda) / denom
 
 
     wz <- array(c(c(w) * ned2l.dlambda2 * dlambda.deta^2,
-                  c(w) * ned2l.dprobp.2 * dprobp..deta^2,
-                  c(w) * ned2l.dphilambda * dprobp..deta * dlambda.deta),
+                  c(w) * ned2l.donempstr0.2 * donempstr0.deta^2,
+                  c(w) * ned2l.dphilambda * donempstr0.deta * dlambda.deta),
                 dim = c(n, M / Musual, 3))
     wz <- arwz2wz(wz, M = M, Musual = Musual)
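
Likewise, a small sketch for the zipoissonff() family above; the zero
inflation is simulated with base R here so that only the fitting call is
assumed (names and values illustrative only):

    set.seed(3)
    zdata <- data.frame(x2 = runif(500))
    zdata <- transform(zdata, lambda = exp(1 + 2 * x2))
    zdata <- transform(zdata,
                       y1 = ifelse(runif(500) < 0.2, 0, rpois(500, lambda)))
    fit <- vglm(y1 ~ x2, zipoissonff(), data = zdata)
    coef(fit, matrix = TRUE)   # loge(lambda) and logit(onempstr0) columns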
 
@@ -2573,8 +4237,8 @@ dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
 
 
   prob0 <- prob
-  deflat_limit <- -prob0 / (1 - prob0)
-  ans[pstr0 < deflat_limit] <- NaN
+  deflat.limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
   ans
@@ -2595,8 +4259,8 @@ pzigeom <- function(q, prob, pstr0 = 0) {
 
 
   prob0 <- prob
-  deflat_limit <- -prob0 / (1 - prob0)
-  ans[pstr0 < deflat_limit] <- NaN
+  deflat.limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
   ans
@@ -2617,8 +4281,8 @@ qzigeom <- function(p, prob, pstr0 = 0) {
 
 
   prob0 <- prob
-  deflat_limit <- -prob0 / (1 - prob0)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
+  deflat.limit <- -prob0 / (1 - prob0)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
   if (any(ind0)) {
     pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
     ans[p[ind0] <= pobs0] <- 0 
@@ -2628,7 +4292,7 @@ qzigeom <- function(p, prob, pstr0 = 0) {
                             prob = prob[pindex])
   }
 
-  ans[pstr0 < deflat_limit] <- NaN
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
 
   ans
@@ -2639,7 +4303,7 @@ qzigeom <- function(p, prob, pstr0 = 0) {
 rzigeom <- function(n, prob, pstr0 = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
 
@@ -2652,15 +4316,15 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
 
   prob0 <- prob
-  deflat_limit <- -prob0 / (1 - prob0)
-  ind0 <- (deflat_limit <= pstr0) & (pstr0 <  0)
+  deflat.limit <- -prob0 / (1 - prob0)
+  ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
   if (any(ind0)) {
     pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
     ans[ind0] <- 1 + rgeom(sum(ind0), prob = prob[ind0])
     ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
   }
 
-  ans[pstr0 < deflat_limit] <- NaN
+  ans[pstr0 < deflat.limit] <- NaN
   ans[pstr0 > 1] <- NaN
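
The dzigeom()/pzigeom()/qzigeom()/rzigeom() functions above follow the same
pattern; for instance (values illustrative only):

    set.seed(4)
    yy <- rzigeom(n = 1000, prob = 0.4, pstr0 = 0.2)
    mean(yy == 0)                         # approx. pstr0 + (1 - pstr0) * prob
    dzigeom(0, prob = 0.4, pstr0 = 0.2)   # the same probability, exactly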
 
 
@@ -2670,13 +4334,314 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
 
 
-
- zigeometric <- function(lprob = "logit",
-                         lpstr0  = "logit",
-                         iprob = NULL,    ipstr0  = NULL,
-                         imethod = 1,
-                         bias.red = 0.5,
-                         zero = 2) {
+ zigeometric <-
+  function(
+           lpstr0 = "logit",
+           lprob  = "logit",
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           ipstr0  = NULL, iprob = NULL,
+           imethod = 1,
+           bias.red = 0.5,
+           zero = NULL) {
+
+
+  expected <- TRUE
+
+
+
+  lpstr0 <- as.list(substitute(lpstr0))
+  epstr0 <- link2list(lpstr0)
+  lpstr0 <- attr(epstr0, "function.name")
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+
+  if (length(ipstr0))
+    if (!is.Numeric(ipstr0, positive = TRUE) ||
+        ipstr0 >= 1)
+      stop("argument 'ipstr0' is out of range")
+
+  if (length(iprob))
+    if (!is.Numeric(iprob, positive = TRUE) ||
+      iprob >= 1)
+    stop("argument 'iprob' is out of range")
+
+  if (!is.Numeric(bias.red, length.arg = 1, positive = TRUE) ||
+     bias.red > 1)
+    stop("argument 'bias.red' must be between 0 and 1")
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+  new("vglmff",
+  blurb = c("Zero-inflated geometric distribution,\n",
+            "P[Y = 0] = pstr0 + (1 - pstr0) * prob,\n",
+            "P[Y = y] = (1 - pstr0) * prob * (1 - prob)^y, ",
+            "y = 1, 2, ...\n\n",
+            "Link:     ",
+            namesof("pstr0",  lpstr0,  earg = epstr0), ", ",
+            namesof("prob",   lprob,   earg = eprob ), "\n",
+            "Mean:     (1 - pstr0) * (1 - prob) / prob"),
+  constraints = eval(substitute(expression({
+
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted  = type.fitted ))),
+  initialize = eval(substitute(expression({
+
+    Musual <- 2
+    if (any(y < 0))
+      stop("the response must not have negative values")
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.nonnegative.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$type.fitted      <- .type.fitted
+    extra$dimnamesy <- dimnames(y)
+
+
+    mynames1 <- if (ncoly == 1) "pstr0" else
+                paste("pstr0", 1:ncoly, sep = "")
+    mynames2 <- if (ncoly == 1) "prob"  else
+                paste("prob",  1:ncoly, sep = "")
+
+    predictors.names <-
+            c(namesof(mynames1, .lpstr0,  earg = .epstr0, tag = FALSE),
+              namesof(mynames2, .lprob,   earg = .eprob,  tag = FALSE))[
+          interleave.VGAM(Musual * NOS, M = Musual)]
+
+
+    if (!length(etastart)) {
+      prob.init <- if ( .imethod == 3)
+                       .bias.red / (1 + y + 1/8) else
+                   if ( .imethod == 2)
+                       .bias.red / (1 +
+                   matrix(colMeans(y) + 1/8,
+                          n, ncoly, byrow = TRUE)) else
+                       .bias.red / (1 +
+                   matrix(colSums(y * w) / colSums(w) + 1/8,
+                          n, ncoly, byrow = TRUE))
+
+      prob.init <- if (length( .iprob )) {
+        matrix( .iprob , n, ncoly, byrow = TRUE)
+      } else {
+        prob.init # Already a matrix
+      }
+
+
+      prob0.est <- psze.init <- matrix(0, n, NOS)
+      for (jlocal in 1:NOS) {
+        prob0.est[, jlocal] <-
+          sum(w[y[, jlocal] == 0, jlocal]) / sum(w[, jlocal])
+        psze.init[, jlocal] <- if ( .imethod == 3)
+                         prob0.est[, jlocal] / 2 else
+                     if ( .imethod == 1)
+                         pmax(0.05, (prob0.est[, jlocal] -
+                                     median(prob.init[, jlocal]))) else
+                         prob0.est[, jlocal] / 5
+      }
+      psze.init <- if (length( .ipstr0 )) {
+        matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+      } else {
+        psze.init # Already a matrix
+      }
+
+
+
+      etastart <-
+        cbind(theta2eta(psze.init, .lpstr0, earg = .epstr0),
+              theta2eta(prob.init, .lprob , earg = .eprob ))
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+    }
+  }), list( .lprob = lprob, .lpstr0 = lpstr0,
+            .eprob = eprob, .epstr0 = epstr0,
+            .iprob = iprob, .ipstr0 = ipstr0,
+            .type.fitted = type.fitted,
+            .bias.red = bias.red,
+            .imethod = imethod ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    pstr0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+    prob   <- eta2theta(eta[, c(FALSE, TRUE)], .lprob  , earg = .eprob  )
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - pstr0) * (1 - prob) / prob,
+                  "pobs0"     = pstr0 + (1 - pstr0) * prob,  # P(Y=0)
+                  "pstr0"     =     pstr0,
+                  "onempstr0" = 1 - pstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lprob = lprob, .lpstr0 = lpstr0,
+           .eprob = eprob, .epstr0 = epstr0,
+           .type.fitted = type.fitted ))),
+  last = eval(substitute(expression({
+    temp.names <- c(rep( .lpstr0 , len = NOS),
+                    rep( .lprob  , len = NOS))
+    temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+    misc$link  <- temp.names
+
+
+    misc$earg <- vector("list", Musual * NOS)
+    names(misc$link) <-
+    names(misc$earg) <-
+        c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-1]] <- .epstr0
+      misc$earg[[Musual*ii  ]] <- .eprob
+    }
+
+
+    misc$imethod <- .imethod
+    misc$zero <- .zero
+    misc$bias.red <- .bias.red
+    misc$expected <- .expected
+    misc$ipstr0 <- .ipstr0
+    misc$type.fitted <- .type.fitted
+
+
+    misc$pobs0 <- pobs0 
+    if (length(dimnames(y)[[2]]) > 0)
+      dimnames(misc$pobs0) <- dimnames(y)
+    misc$pstr0 <- pstr0
+    if (length(dimnames(y)[[2]]) > 0)
+      dimnames(misc$pstr0) <- dimnames(y)
+  }), list( .lprob = lprob, .lpstr0 = lpstr0,
+            .eprob = eprob, .epstr0 = epstr0,
+                            .ipstr0 = ipstr0,
+            .zero = zero,
+            .expected = expected,
+            .type.fitted = type.fitted,
+            .bias.red = bias.red,
+            .imethod = imethod ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    pstr0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+    prob   <- eta2theta(eta[, c(FALSE, TRUE)], .lprob  , earg = .eprob  )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
+    }
+  }, list( .lprob = lprob, .lpstr0 = lpstr0,
+           .eprob = eprob, .epstr0 = epstr0 ))),
+  vfamily = c("zigeometric"),
+
+  deriv = eval(substitute(expression({
+    Musual <- 2
+    pstr0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+    prob   <- eta2theta(eta[, c(FALSE, TRUE)], .lprob  , earg = .eprob  )
+
+
+    prob0 <- prob  # P(Y == 0) from parent distribution
+    pobs0 <- pstr0 + (1 - pstr0) * prob0  # P(Y == 0)
+    index0 <- (y == 0)
+
+    dl.dpstr0 <- (1 - prob0) / pobs0
+    dl.dpstr0[!index0] <- -1 / (1 - pstr0[!index0])
+
+    dl.dprob <- (1 - pstr0) / pobs0
+    dl.dprob[!index0]   <- 1 / prob[!index0] -
+                           y[!index0] / (1 - prob[!index0])
+
+    dpstr0.deta  <- dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
+    dprob.deta   <- dtheta.deta(prob,   .lprob  , earg = .eprob  )
+
+    dl.deta12 <- c(w) * cbind(dl.dpstr0 * dpstr0.deta,
+                              dl.dprob  * dprob.deta)
+
+    dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = Musual)]
+    dl.deta12
+  }), list( .lprob = lprob, .lpstr0 = lpstr0,
+            .eprob = eprob, .epstr0 = epstr0 ))),
+  weight = eval(substitute(expression({
+    if ( .expected ) {
+      ned2l.dprob2 <- (1 - pstr0) * (1 / (prob^2 * (1 - prob)) +
+                                    (1 - pstr0) / pobs0)
+      ned2l.dpstr0.prob <- 1 / pobs0
+      ned2l.dpstr02 <- (1 - prob0) / ((1 - pstr0) * pobs0)
+    } else {
+      od2l.dprob2 <- ((1 - pstr0) / pobs0)^2
+      od2l.dprob2[!index0] <- 1 / (prob[!index0])^2 +
+                              y[!index0] / (1 - prob[!index0])^2
+      od2l.dpstr0.prob <- (pobs0 + (1 - prob0) * (1 - pstr0)) / pobs0^2
+      od2l.dpstr0.prob[!index0] <- 0
+
+      od2l.dpstr02 <- ((1 - prob0) / pobs0)^2
+      od2l.dpstr02[!index0] <- 1 / (1 - pstr0[!index0])^2
+    }
+
+
+    allvals <- if ( .expected )
+                 c(c(w) * ned2l.dpstr02 * dpstr0.deta^2,
+                   c(w) * ned2l.dprob2  *  dprob.deta^2,
+                   c(w) * ned2l.dpstr0.prob * dprob.deta * dpstr0.deta) else
+                 c(c(w) *  od2l.dpstr02 * dpstr0.deta^2,
+                   c(w) *  od2l.dprob2  *  dprob.deta^2,
+                   c(w) *  od2l.dpstr0.prob * dprob.deta * dpstr0.deta)
+    wz <- array(allvals, dim = c(n, M / Musual, 3))
+    wz <- arwz2wz(wz, M = M, Musual = Musual)
+
+
+    wz
+  }), list( .lprob = lprob, .lpstr0 = lpstr0,
+            .eprob = eprob, .epstr0 = epstr0,
+            .expected = expected ))))
+}
+
+
+
+
+ zigeometricff <-
+  function(lprob       = "logit",
+           lonempstr0  = "logit",
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           iprob = NULL,   ionempstr0  = NULL,
+           imethod = 1,
+           bias.red = 0.5,
+           zero = -2) {
 
 
   expected <- TRUE
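
A corresponding fitting sketch for the zigeometric() family defined earlier
(zigeometricff() being analogous, with prob and onempstr0 as the linear
predictors); all names and values here are illustrative only:

    set.seed(5)
    zdata <- data.frame(x2 = runif(1000))
    zdata <- transform(zdata, prob = plogis(-1 + 2 * x2))
    zdata <- transform(zdata, y1 = rzigeom(1000, prob = prob, pstr0 = 0.15))
    fit <- vglm(y1 ~ x2, zigeometric(), data = zdata)
    coef(fit, matrix = TRUE)   # logit(pstr0) and logit(prob) columns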
@@ -2687,28 +4652,31 @@ rzigeom <- function(n, prob, pstr0 = 0) {
   eprob <- link2list(lprob)
   lprob <- attr(eprob, "function.name")
 
-  lpstr0 <- as.list(substitute(lpstr0))
-  epstr0 <- link2list(lpstr0)
-  lpstr0 <- attr(epstr0, "function.name")
+  lonempstr0 <- as.list(substitute(lonempstr0))
+  eonempstr0 <- link2list(lonempstr0)
+  lonempstr0 <- attr(eonempstr0, "function.name")
 
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "pstr0", "onempstr0"))[1]
 
 
   if (length(iprob))
     if (!is.Numeric(iprob, positive = TRUE) ||
       iprob >= 1)
     stop("argument 'iprob' is out of range")
-  if (length(ipstr0))
-    if (!is.Numeric(ipstr0, positive = TRUE) ||
-        ipstr0 >= 1)
-      stop("argument 'ipstr0' is out of range")
 
-  if (!is.Numeric(bias.red, allowable.length = 1, positive = TRUE) ||
+  if (length(ionempstr0))
+    if (!is.Numeric(ionempstr0, positive = TRUE) ||
+        ionempstr0 >= 1)
+      stop("argument 'ionempstr0' is out of range")
+
+  if (!is.Numeric(bias.red, length.arg = 1, positive = TRUE) ||
      bias.red > 1)
     stop("argument 'bias.red' must be between 0 and 1")
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -2716,13 +4684,13 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
   new("vglmff",
   blurb = c("Zero-inflated geometric distribution,\n",
-            "P[Y = 0] = pstr0 + (1 - pstr0) * prob,\n",
-            "P[Y = y] = (1 - pstr0) * prob * (1 - prob)^y, ",
+            "P[Y = 0] = 1 - onempstr0 + onempstr0 * prob,\n",
+            "P[Y = y] = onempstr0 * prob * (1 - prob)^y, ",
             "y = 1, 2, ...\n\n",
             "Link:     ",
-            namesof("prob",   lprob,   earg = eprob ), ", ",
-            namesof("pstr0",  lpstr0,  earg = epstr0), "\n",
-            "Mean:     (1 - pstr0) * (1 - prob) / prob"),
+            namesof("prob",       lprob,       earg = eprob ), ", ",
+            namesof("onempstr0",  lonempstr0,  earg = eonempstr0), "\n",
+            "Mean:     onempstr0 * (1 - prob) / prob"),
   constraints = eval(substitute(expression({
 
     dotzero <- .zero
@@ -2732,8 +4700,10 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
   infos = eval(substitute(function(...) {
     list(Musual = 2,
+         type.fitted  = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .type.fitted  = type.fitted ))),
   initialize = eval(substitute(expression({
 
     Musual <- 2
@@ -2751,18 +4721,20 @@ rzigeom <- function(n, prob, pstr0 = 0) {
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
-    extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$type.fitted      <- .type.fitted
+    extra$dimnamesy <- dimnames(y)
 
 
-    mynames1 <- if (ncoly == 1) "prob" else
-                paste("prob", 1:ncoly, sep = "")
-    mynames2 <- if (ncoly == 1) "pobs0"  else
-                paste("pobs0",  1:ncoly, sep = "")
+    mynames1 <- if (ncoly == 1) "prob"      else
+                paste("prob",      1:ncoly, sep = "")
+    mynames2 <- if (ncoly == 1) "onempstr0" else
+                paste("onempstr0", 1:ncoly, sep = "")
 
     predictors.names <-
-            c(namesof(mynames1, .lprob,   earg = .eprob,  tag = FALSE),
-              namesof(mynames2, .lpstr0,  earg = .epstr0, tag = FALSE))[
-          interleave.VGAM(Musual*NOS, M = Musual)]
+      c(namesof(mynames1, .lprob      , earg = .eprob      , tag = FALSE),
+        namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
+        interleave.VGAM(Musual*NOS, M = Musual)]
 
 
     if (!length(etastart)) {
@@ -2779,7 +4751,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
       prob.init <- if (length( .iprob )) {
         matrix( .iprob , n, ncoly, byrow = TRUE)
       } else {
-        prob.init # Already a matrix
+        prob.init  # Already a matrix
       }
 
 
@@ -2794,8 +4766,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
                                      median(prob.init[, jlocal]))) else
                          prob0.est[, jlocal] / 5
       }
-      psze.init <- if (length( .ipstr0 )) {
-        matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+      psze.init <- if (length( .ionempstr0 )) {
+        matrix( 1 - .ionempstr0 , n, ncoly, byrow = TRUE)
       } else {
         psze.init # Already a matrix
       }
@@ -2803,24 +4775,53 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
 
       etastart <-
-        cbind(theta2eta(prob.init, .lprob , earg = .eprob ),
-              theta2eta(psze.init, .lpstr0, earg = .epstr0))
+        cbind(theta2eta(    prob.init, .lprob      , earg = .eprob      ),
+              theta2eta(1 - psze.init, .lonempstr0 , earg = .eonempstr0 ))
       etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
     }
-  }), list( .lprob = lprob, .lpstr0 = lpstr0,
-            .eprob = eprob, .epstr0 = epstr0,
-            .iprob = iprob, .ipstr0 = ipstr0,
+  }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+            .eprob = eprob, .eonempstr0 = eonempstr0,
+            .iprob = iprob, .ionempstr0 = ionempstr0,
+            .type.fitted = type.fitted,
             .bias.red = bias.red,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    prob   <- eta2theta(eta[, c(TRUE, FALSE)], .lprob  , earg = .eprob  )
-    pstr0  <- eta2theta(eta[, c(FALSE, TRUE)], .lpstr0 , earg = .epstr0 )
-    (1 - pstr0) * (1 - prob) / prob
-  }, list( .lprob = lprob, .lpstr0 = lpstr0,
-           .eprob = eprob, .epstr0 = epstr0 ))),
+    prob      <- eta2theta(eta[, c(TRUE, FALSE)], .lprob      ,
+                           earg = .eprob  )
+    onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+                           earg = .eonempstr0 )
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+
+    ans <- switch(type.fitted,
+                  "mean"      = onempstr0 * (1 - prob) / prob,
+                  "pobs0"     = 1 - onempstr0 + onempstr0 * prob,  # P(Y=0)
+                  "pstr0"     = 1 - onempstr0,
+                  "onempstr0" =     onempstr0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+           .eprob = eprob, .eonempstr0 = eonempstr0,
+           .type.fitted = type.fitted ))),
   last = eval(substitute(expression({
     temp.names <- c(rep( .lprob  , len = NOS),
-                    rep( .lpstr0 , len = NOS))
+                    rep( .lonempstr0 , len = NOS))
     temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
     misc$link  <- temp.names
 
@@ -2830,111 +4831,119 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     names(misc$earg) <-
         c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
 
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- .eprob
-      misc$earg[[Musual*ii  ]] <- .epstr0
+      misc$earg[[Musual*ii  ]] <- .eonempstr0
     }
 
 
-    misc$imethod <- .imethod
-    misc$zero <- .zero
+    misc$imethod  <- .imethod
+    misc$zero     <- .zero
     misc$bias.red <- .bias.red
     misc$expected <- .expected
-    misc$ipstr0 <- .ipstr0
+    misc$ionempstr0   <- .ionempstr0
 
 
     misc$pobs0 <- pobs0 
     if (length(dimnames(y)[[2]]) > 0)
       dimnames(misc$pobs0) <- dimnames(y)
-    misc$pstr0 <- pstr0
+    misc$onempstr0 <- onempstr0
     if (length(dimnames(y)[[2]]) > 0)
-      dimnames(misc$pstr0) <- dimnames(y)
-  }), list( .lprob = lprob, .lpstr0 = lpstr0,
-            .eprob = eprob, .epstr0 = epstr0,
-                            .ipstr0 = ipstr0,
+      dimnames(misc$onempstr0) <- dimnames(y)
+  }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+            .eprob = eprob, .eonempstr0 = eonempstr0,
+                            .ionempstr0 = ionempstr0,
             .zero = zero,
             .expected = expected,
             .bias.red = bias.red,
             .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    prob   <- eta2theta(eta[, c(TRUE, FALSE)], .lprob  , earg = .eprob  )
-    pstr0  <- eta2theta(eta[, c(FALSE, TRUE)], .lpstr0 , earg = .epstr0 )
+    prob       <- eta2theta(eta[, c(TRUE, FALSE)], .lprob      ,
+                            earg = .eprob )
+    onempstr0  <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+                            earg = .eonempstr0 )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-      sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
+      sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = 1 - onempstr0,
+                         log = TRUE))
     }
-  }, list( .lprob = lprob, .lpstr0 = lpstr0,
-           .eprob = eprob, .epstr0 = epstr0 ))),
-  vfamily = c("zigeometric"),
+  }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+           .eprob = eprob, .eonempstr0 = eonempstr0 ))),
+  vfamily = c("zigeometricff"),
 
   deriv = eval(substitute(expression({
     Musual <- 2
-    prob   <- eta2theta(eta[, c(TRUE, FALSE)], .lprob  , earg = .eprob  )
-    pstr0  <- eta2theta(eta[, c(FALSE, TRUE)], .lpstr0 , earg = .epstr0 )
+    prob      <- eta2theta(eta[, c(TRUE, FALSE)], .lprob      ,
+                           earg = .eprob  )
+    onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+                           earg = .eonempstr0 )
 
 
-    prob0 <- prob # P(Y == 0)
-    pobs0 <- pstr0 + (1 - pstr0) * prob0
+    prob0 <- prob  # P(Y == 0) from the parent distribution
+    pobs0 <- 1 - onempstr0 + (onempstr0) * prob0  # P(Y == 0)
     index0 <- (y == 0)
 
-    dl.dpstr0 <- (1 - prob0) / pobs0
-    dl.dpstr0[!index0] <- -1 / (1 - pstr0[!index0])
 
-    dl.dprob <- (1 - pstr0) / pobs0
+    dl.donempstr0 <- -(1 - prob0) / pobs0  # zz
+    dl.donempstr0[!index0] <-  1 / (onempstr0[!index0])  # zz
+
+    dl.dprob <- (onempstr0) / pobs0
     dl.dprob[!index0]   <- 1 / prob[!index0] -
                            y[!index0] / (1 - prob[!index0])
 
-    dprob.deta   <- dtheta.deta(prob,   .lprob,   earg = .eprob  )
-    dpstr0.deta  <- dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
+    dprob.deta       <- dtheta.deta(prob      , .lprob      ,
+                                    earg = .eprob )
+    donempstr0.deta  <- dtheta.deta(onempstr0 , .lonempstr0 ,
+                                    earg = .eonempstr0 )
 
-    dl.deta12 <- c(w) * cbind(dl.dprob   * dprob.deta,
-                              dl.dpstr0  *  dpstr0.deta)
+    dl.deta12 <- c(w) * cbind(dl.dprob      * dprob.deta,
+                              dl.donempstr0 *  donempstr0.deta)
 
     dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = Musual)]
     dl.deta12
-  }), list( .lprob = lprob, .lpstr0 = lpstr0,
-            .eprob = eprob, .epstr0 = epstr0 ))),
+  }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+            .eprob = eprob, .eonempstr0 = eonempstr0 ))),
   weight = eval(substitute(expression({
     if ( .expected ) {
-      ned2l.dprob2 <- (1 - pstr0) * (1 / (prob^2 * (1 - prob)) +
-                                    (1 - pstr0) / pobs0)
-      ned2l.dpstr0.prob <- 1 / pobs0
-      ned2l.dpstr02 <- (1 - prob0) / ((1 - pstr0) * pobs0)
+      ned2l.dprob2 <- (    onempstr0) * (1 / (prob^2 * (1 - prob)) +
+                                    ( onempstr0) / pobs0)
+      ned2l.donempstr0.prob <- -1 / pobs0
+      ned2l.donempstr02 <- (1 - prob0) / ((    onempstr0) * pobs0)
     } else {
-      od2l.dprob2 <- ((1 - pstr0) / pobs0)^2
+      od2l.dprob2 <- ((    onempstr0) / pobs0)^2
       od2l.dprob2[!index0] <- 1 / (prob[!index0])^2 +
                               y[!index0] / (1 - prob[!index0])^2
-      od2l.dpstr0.prob <- (pobs0 + (1 - prob0) * (1 - pstr0)) / pobs0^2
-      od2l.dpstr0.prob[!index0] <- 0
+      od2l.donempstr0.prob <- -(pobs0 + (1 - prob0) * (onempstr0)) / pobs0^2
+      od2l.donempstr0.prob[!index0] <- 0
 
-      od2l.dpstr02 <- ((1 - prob0) / pobs0)^2
-      od2l.dpstr02[!index0] <- 1 / (1 - pstr0[!index0])^2
+      od2l.donempstr02 <- ((1 - prob0) / pobs0)^2
+      od2l.donempstr02[!index0] <- 1 / (    onempstr0[!index0])^2
     }
 
 
     allvals <- if ( .expected )
                  c(c(w) * ned2l.dprob2  *  dprob.deta^2,
-                   c(w) * ned2l.dpstr02 * dpstr0.deta^2,
-                   c(w) * ned2l.dpstr0.prob * dprob.deta * dpstr0.deta) else
+                   c(w) * ned2l.donempstr02 * donempstr0.deta^2,
+                   c(w) * ned2l.donempstr0.prob * dprob.deta *
+                                                  donempstr0.deta) else
                  c(c(w) *  od2l.dprob2  *  dprob.deta^2,
-                   c(w) *  od2l.dpstr02 * dpstr0.deta^2,
-                   c(w) *  od2l.dpstr0.prob * dprob.deta * dpstr0.deta)
+                   c(w) *  od2l.donempstr02 * donempstr0.deta^2,
+                   c(w) *  od2l.donempstr0.prob * dprob.deta *
+                                                  donempstr0.deta)
     wz <- array(allvals, dim = c(n, M / Musual, 3))
     wz <- arwz2wz(wz, M = M, Musual = Musual)
 
 
     wz
-  }), list( .lprob = lprob, .lpstr0 = lpstr0,
-            .eprob = eprob, .epstr0 = epstr0,
+  }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+            .eprob = eprob, .eonempstr0 = eonempstr0,
             .expected = expected ))))
 }
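A minimal usage sketch of the reparameterised zigeometricff() family together with the type.fitted machinery added in this version; the data are simulated, so the estimates should only be close to the true values (assumes VGAM 0.9-3 is attached and the new family is exported):

library(VGAM)
set.seed(123)
zdata <- data.frame(y1 = rzigeom(1000, prob = 0.4, pstr0 = 0.25))
fit <- vglm(y1 ~ 1, zigeometricff, data = zdata)
coef(fit, matrix = TRUE)
head(fitted(fit, type.fitted = "pstr0"))  # estimated structural-zero probability
head(fitted(fit, type.fitted = "pobs0"))  # estimated P(Y = 0)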
 
 
 
 
-
-
 dzageom <- function(x, prob, pobs0 = 0, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -3006,7 +5015,7 @@ qzageom <- function(p, prob, pobs0 = 0) {
 rzageom <- function(n, prob, pobs0 = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
+                           length.arg = 1, positive = TRUE))
                stop("bad input for argument 'n'") else n
 
   ans <- rposgeom(use.n, prob)
@@ -3099,58 +5108,356 @@ qzabinom <- function(p, size, prob, pobs0 = 0) {
 }
 
 
-rzabinom <- function(n, size, prob, pobs0 = 0) {
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           allowable.length = 1, positive = TRUE))
-               stop("bad input for argument 'n'") else n
+rzabinom <- function(n, size, prob, pobs0 = 0) {
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+               stop("bad input for argument 'n'") else n
+
+  ans <- rposbinom(use.n, size, prob)
+  if (length(pobs0) != use.n)
+    pobs0 <- rep(pobs0, len = use.n)
+  if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+    stop("argument 'pobs0' must be between 0 and 1 inclusive")
+  ifelse(runif(use.n) < pobs0, 0, ans)
+}
+
+
+
+
+
+
+ zabinomial <-
+  function(lpobs0 = "logit",
+           lprob  = "logit",
+           type.fitted = c("mean", "pobs0"),
+           ipobs0 = NULL, iprob = NULL,
+           imethod = 1,
+           zero = NULL  # Was zero = 2 prior to 20130917
+          ) {
+
+
+
+  lpobs0 <- as.list(substitute(lpobs0))
+  epobs0 <- link2list(lpobs0)
+  lpobs0 <- attr(epobs0, "function.name")
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0"))[1]
+
+  if (length(ipobs0))
+    if (!is.Numeric(ipobs0, positive = TRUE) ||
+        ipobs0 >= 1)
+      stop("argument 'ipobs0' is out of range")
+
+  if (length(iprob))
+    if (!is.Numeric(iprob, positive = TRUE) ||
+      iprob >= 1)
+    stop("argument 'iprob' is out of range")
+
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+  new("vglmff",
+  blurb = c("Zero-altered binomial distribution ",
+            "(Bernoulli and positive-binomial conditional model)\n\n",
+            "P[Y = 0] = pobs0,\n",
+            "P[Y = y] = (1 - pobs0) * dposbinom(x = y, size, prob), ",
+            "y = 1, 2, ..., size,\n\n",
+            "Link:     ",
+            namesof("pobs0",   lpobs0, earg = epobs0), ", ",
+            namesof("prob" ,   lprob,  earg = eprob),  "\n",
+            "Mean:     (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
+  constraints = eval(substitute(expression({
+      constraints <- cm.zero.vgam(constraints, x, .zero , M)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted ))),
+
+  initialize = eval(substitute(expression({
+    if (!all(w == 1))
+      extra$orig.w <- w
+
+
+
+    if (NCOL(y) == 1) {
+      if (is.factor(y))
+        y <- y != levels(y)[1]
+      nn <- rep(1, n)
+      if (!all(y >= 0 & y <= 1))
+        stop("response values must be in [0, 1]")
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + w * y) / (1.0 + w)
+
+
+      no.successes <- y
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+        stop("Number of successes must be integer-valued")
+
+    } else if (NCOL(y) == 2) {
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(y - round(y)) > 1.0e-8))
+        stop("Count data must be integer-valued")
+      y <- round(y)
+      nvec <- y[, 1] + y[, 2]
+      y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+      w <- w * nvec
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + nvec * y) / (1 + nvec)
+    } else {
+      stop("for the binomialff family, response 'y' must be a ",
+           "vector of 0 and 1's\n",
+           "or a factor ",
+           "(first level = fail, other levels = success),\n",
+           "or a 2-column matrix where col 1 is the no. of ",
+           "successes and col 2 is the no. of failures")
+    }
+    if (!all(w == 1))
+      extra$new.w <- w
+
+
+    y <- as.matrix(y)
+    extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+
+    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted      <- .type.fitted
+
+
+    predictors.names <-
+        c(namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE),
+          namesof("prob" , .lprob  , earg = .eprob  , tag = FALSE))
+          
+
+
+    orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+    new.w  <- if (length(extra$new.w))  extra$new.w  else 1
+    Size <- new.w / orig.w
+
+    phi.init <- if (length( .ipobs0 )) .ipobs0 else {
+        prob0.est <- sum(Size[y == 0]) / sum(Size)
+        if ( .imethod == 1) {
+          (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
+        } else
+        if ( .imethod == 2) {
+          prob0.est
+        } else {
+          prob0.est * 0.5
+        }
+    }
+
+    phi.init[phi.init <= -0.10] <- 0.50  # Lots of sample variation
+    phi.init[phi.init <=  0.01] <- 0.05  # Last resort
+    phi.init[phi.init >=  0.99] <- 0.95  # Last resort
+
+
+
+
+    if (!length(etastart)) {
+      etastart <-
+        cbind(theta2eta(phi.init, .lpobs0, earg = .epobs0 ),
+              theta2eta( mustart, .lprob,  earg = .eprob  ))
+              
+
+      mustart <- NULL
+    }
+  }), list( .lprob = lprob, .lpobs0 = lpobs0,
+            .eprob = eprob, .epobs0 = epobs0,
+            .iprob = iprob, .ipobs0 = ipobs0,
+            .imethod = imethod,
+            .type.fitted = type.fitted ))),
+
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0"))[1]
+    
+    phi0  <- eta2theta(eta[, 1], .lpobs0, earg = .epobs0 )
+    prob  <- eta2theta(eta[, 2], .lprob,  earg = .eprob  )
+    orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+    new.w  <- if (length(extra$new.w))  extra$new.w  else 1
+    Size <- new.w / orig.w
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - phi0) * prob / (1 - (1 - prob)^Size),
+                  "pobs0"     = phi0)  # P(Y=0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lprob = lprob, .lpobs0 = lpobs0,
+           .eprob = eprob, .epobs0 = epobs0 ))),
+
+  last = eval(substitute(expression({
+    misc$link <-    c(prob = .lprob, pobs0 = .lpobs0 )
+    misc$earg <- list(prob = .eprob, pobs0 = .epobs0 )
+
+    misc$imethod  <- .imethod
+    misc$zero     <- .zero
+    misc$expected <- TRUE
+  }), list( .lprob = lprob, .lpobs0 = lpobs0,
+            .eprob = eprob, .epobs0 = epobs0,
+            .zero = zero,
+            .imethod = imethod ))),
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+    new.w  <- if (length(extra$new.w))  extra$new.w  else 1
+    Size <- new.w / orig.w
+    pobs0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
+    prob  <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(orig.w * dzabinom(x = round(y * Size), size = Size,
+                            prob = prob, pobs0 = pobs0,
+                            log = TRUE))
+    }
+  }, list( .lprob = lprob, .lpobs0 = lpobs0,
+           .eprob = eprob, .epobs0 = epobs0 ))),
+  vfamily = c("zabinomial"),
+
+  deriv = eval(substitute(expression({
+    NOS <- if (length(extra$NOS)) extra$NOS else 1
+    Musual <- 2
+
+    orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+    new.w  <- if (length(extra$new.w))  extra$new.w  else 1
+    Size <- new.w / orig.w
+
+    phi0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
+    prob <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
+
+    dphi0.deta <- dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
+    dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob  )
+
+    df0.dprob   <- -Size *              (1 -  prob)^(Size - 1)
+    df02.dprob2 <-  Size * (Size - 1) * (1 -  prob)^(Size - 2)
+    prob0  <- (1 -  prob)^(Size)
+    oneminusf0  <- 1 - prob0
+
+
+    dl.dphi0 <- -1 / (1 - phi0)
+    dl.dprob <-  c(w)      * (y / prob - (1 - y) / (1 - prob)) +
+                 c(orig.w) * df0.dprob / oneminusf0
+
+
+    dl.dphi0[y == 0] <- 1 / phi0[y == 0]  # Do it in one line
+    skip <- extra$skip.these
+    for (spp. in 1:NOS) {
+      dl.dprob[skip[, spp.], spp.] <- 0
+    }
+
+
+    ans <- cbind(c(orig.w) * dl.dphi0 * dphi0.deta,
+                             dl.dprob * dprob.deta)
+                 
+                 
+    ans
+  }), list( .lprob = lprob, .lpobs0 = lpobs0,
+            .eprob = eprob, .epobs0 = epobs0 ))),
+
+
+  weight = eval(substitute(expression({
+    wz <- matrix(0.0, n, Musual)
+
+    usualmeanY <-  prob
+    meanY <- (1 - phi0) * usualmeanY / oneminusf0
+
+
+    term1 <-  c(Size) * (meanY /      prob^2 -
+                         meanY / (1 - prob)^2) +
+             c(Size) * (1 - phi0) / (1 - prob)^2
 
-  ans <- rposbinom(use.n, size, prob)
-  if (length(pobs0) != use.n)
-    pobs0 <- rep(pobs0, len = use.n)
-  if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
-    stop("argument 'pobs0' must be between 0 and 1 inclusive")
-  ifelse(runif(use.n) < pobs0, 0, ans)
-}
+    term2 <-  -(1 - phi0) * df02.dprob2 / oneminusf0
+    term3 <-  -(1 - phi0) * (df0.dprob  / oneminusf0)^2
+    ned2l.dprob2 <- term1 + term2 + term3
+    wz[, iam(2, 2, M)] <- ned2l.dprob2 * dprob.deta^2
 
 
+    mu.phi0 <- phi0
+    tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+    tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+      tmp100
+    } else {
+      (dphi0.deta^2) / tmp100
+    }
+    wz[, iam(1, 1, M)] <- tmp200
 
 
+    c(orig.w) * wz
+  }), list( .lprob = lprob, .lpobs0 = lpobs0,
+            .eprob = eprob, .epobs0 = epobs0 ))))
+}
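A usage sketch for zabinomial() with the two-column (successes, failures) response form accepted by the initialize code above (assumes VGAM is attached; rzabinom() is the simulator defined earlier in this file):

library(VGAM)
set.seed(123)
zbdata <- data.frame(y = rzabinom(500, size = 10, prob = 0.35, pobs0 = 0.2))
zbdata$fails <- 10 - zbdata$y
fit <- vglm(cbind(y, fails) ~ 1, zabinomial, data = zbdata)
coef(fit, matrix = TRUE)
head(fitted(fit, type.fitted = "pobs0"))  # estimated P(Y = 0)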
 
- zabinomial <- function(lprob  = "logit",
-                        lpobs0 = "logit",
-                        iprob = NULL, ipobs0 = NULL,
-                        imethod = 1,
-                        zero = 2) {
 
 
 
 
+ zabinomialff <-
+  function(lprob  = "logit",
+           lonempobs0 = "logit",
+           type.fitted = c("mean", "pobs0", "onempobs0"),
+           iprob = NULL, ionempobs0 = NULL,
+           imethod = 1,
+           zero = 2) {
 
 
   lprob <- as.list(substitute(lprob))
   eprob <- link2list(lprob)
   lprob <- attr(eprob, "function.name")
 
-  lpobs0 <- as.list(substitute(lpobs0))
-  epobs0 <- link2list(lpobs0)
-  lpobs0 <- attr(epobs0, "function.name")
-
+  lonempobs0 <- as.list(substitute(lonempobs0))
+  eonempobs0 <- link2list(lonempobs0)
+  lonempobs0 <- attr(eonempobs0, "function.name")
 
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "onempobs0"))[1]
 
   if (length(iprob))
     if (!is.Numeric(iprob, positive = TRUE) ||
       iprob >= 1)
     stop("argument 'iprob' is out of range")
-  if (length(ipobs0))
-    if (!is.Numeric(ipobs0, positive = TRUE) ||
-        ipobs0 >= 1)
-      stop("argument 'ipobs0' is out of range")
+  if (length(ionempobs0))
+    if (!is.Numeric(ionempobs0, positive = TRUE) ||
+        ionempobs0 >= 1)
+      stop("argument 'ionempobs0' is out of range")
 
 
 
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
       imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3159,66 +5466,64 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
   new("vglmff",
   blurb = c("Zero-altered binomial distribution ",
             "(Bernoulli and positive-binomial conditional model)\n\n",
-            "P[Y = 0] = pobs0,\n",
-            "P[Y = y] = (1 - pobs0) * dposbinom(x = y, size, prob), ",
+            "P[Y = 0] = 1 - onempobs0,\n",
+            "P[Y = y] = onempobs0 * dposbinom(x = y, size, prob), ",
             "y = 1, 2, ..., size,\n\n",
             "Link:     ",
-            namesof("prob" ,   lprob,  earg = eprob), ", ",
-            namesof("pobs0",   lpobs0, earg = epobs0), "\n",
-            "Mean:     (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
+            namesof("prob"     , lprob     , earg = eprob     ), ", ",
+            namesof("onempobs0", lonempobs0, earg = eonempobs0), "\n",
+            "Mean:     onempobs0 * prob / (1 - (1 - prob)^size)"),
   constraints = eval(substitute(expression({
       constraints <- cm.zero.vgam(constraints, x, .zero , M)
   }), list( .zero = zero ))),
+
   infos = eval(substitute(function(...) {
     list(Musual = 2,
+         type.fitted  = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .type.fitted = type.fitted ))),
+
   initialize = eval(substitute(expression({
-            if (!all(w == 1))
-                extra$orig.w <- w
-
-
-
-    {
-        NCOL <- function (x)
-            if (is.array(x) && length(dim(x)) > 1 ||
-            is.data.frame(x)) ncol(x) else as.integer(1)
-
-        if (NCOL(y) == 1) {
-            if (is.factor(y)) y <- y != levels(y)[1]
-            nn <- rep(1, n)
-            if (!all(y >= 0 & y <= 1))
-                stop("response values must be in [0, 1]")
-            if (!length(mustart) && !length(etastart))
-                mustart <- (0.5 + w * y) / (1.0 + w)
-
-
-            no.successes <- y
-            if (min(y) < 0)
-                stop("Negative data not allowed!")
-            if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
-                stop("Number of successes must be integer-valued")
-
-        } else if (NCOL(y) == 2) {
-            if (min(y) < 0)
-                stop("Negative data not allowed!")
-            if (any(abs(y - round(y)) > 1.0e-8))
-                stop("Count data must be integer-valued")
-            y <- round(y)
-            nvec <- y[, 1] + y[, 2]
-            y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
-            w <- w * nvec
-            if (!length(mustart) && !length(etastart))
-              mustart <- (0.5 + nvec * y) / (1 + nvec)
-        } else {
-            stop("for the binomialff family, response 'y' must be a ",
-                 "vector of 0 and 1's\n",
-                 "or a factor ",
-                 "(first level = fail, other levels = success),\n",
-                 "or a 2-column matrix where col 1 is the no. of ",
-                 "successes and col 2 is the no. of failures")
-        }
+    if (!all(w == 1))
+      extra$orig.w <- w
+
 
+
+    if (NCOL(y) == 1) {
+      if (is.factor(y))
+        y <- y != levels(y)[1]
+      nn <- rep(1, n)
+      if (!all(y >= 0 & y <= 1))
+        stop("response values must be in [0, 1]")
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + w * y) / (1.0 + w)
+
+
+      no.successes <- y
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+        stop("Number of successes must be integer-valued")
+
+    } else if (NCOL(y) == 2) {
+      if (min(y) < 0)
+        stop("Negative data not allowed!")
+      if (any(abs(y - round(y)) > 1.0e-8))
+        stop("Count data must be integer-valued")
+      y <- round(y)
+      nvec <- y[, 1] + y[, 2]
+      y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+      w <- w * nvec
+      if (!length(mustart) && !length(etastart))
+        mustart <- (0.5 + nvec * y) / (1 + nvec)
+    } else {
+      stop("for the binomialff family, response 'y' must be a ",
+           "vector of 0 and 1's\n",
+           "or a factor ",
+           "(first level = fail, other levels = success),\n",
+           "or a 2-column matrix where col 1 is the no. of ",
+           "successes and col 2 is the no. of failures")
     }
     if (!all(w == 1))
       extra$new.w <- w
@@ -3229,18 +5534,20 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
+    extra$dimnamesy   <- dimnames(y)
+    extra$type.fitted <- .type.fitted
 
 
     predictors.names <-
-        c(namesof("prob" , .lprob  , earg = .eprob  , tag = FALSE),
-          namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE))
+    c(namesof("prob"     , .lprob      , earg = .eprob      , tag = FALSE),
+      namesof("onempobs0", .lonempobs0 , earg = .eonempobs0 , tag = FALSE))
 
 
     orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
     new.w  <- if (length(extra$new.w))  extra$new.w  else 1
     Size <- new.w / orig.w
 
-    phi.init <- if (length( .ipobs0 )) .ipobs0 else {
+    phi.init <- if (length( .ionempobs0 )) 1 - .ionempobs0 else {
         prob0.est <- sum(Size[y == 0]) / sum(Size)
         if ( .imethod == 1) {
           (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
@@ -3252,44 +5559,69 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
         }
     }
 
-    phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
-    phi.init[phi.init <=  0.01] <- 0.05 # Last resort
-    phi.init[phi.init >=  0.99] <- 0.95 # Last resort
+    phi.init[phi.init <= -0.10] <- 0.50  # Lots of sample variation
+    phi.init[phi.init <=  0.01] <- 0.05  # Last resort
+    phi.init[phi.init >=  0.99] <- 0.95  # Last resort
 
 
 
 
     if (!length(etastart)) {
       etastart <-
-        cbind(theta2eta( mustart, .lprob,  earg = .eprob  ),
-              theta2eta(phi.init, .lpobs0, earg = .epobs0 ))
+        cbind(theta2eta(     mustart, .lprob      , earg = .eprob      ),
+              theta2eta(1 - phi.init, .lonempobs0 , earg = .eonempobs0 ))
 
       mustart <- NULL
     }
-  }), list( .lprob = lprob, .lpobs0 = lpobs0,
-            .eprob = eprob, .epobs0 = epobs0,
-            .iprob = iprob, .ipobs0 = ipobs0,
-            .imethod = imethod ))),
+  }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+            .eprob = eprob, .eonempobs0 = eonempobs0,
+            .iprob = iprob, .ionempobs0 = ionempobs0,
+            .imethod = imethod,
+            .type.fitted = type.fitted ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    prob  <- eta2theta(eta[, 1], .lprob,  earg = .eprob  )
-    phi0  <- eta2theta(eta[, 2], .lpobs0, earg = .epobs0 )
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "onempobs0"))[1]
+    
+    prob      <- eta2theta(eta[, 1], .lprob      , earg = .eprob  )
+    onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
     orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
     new.w  <- if (length(extra$new.w))  extra$new.w  else 1
     Size <- new.w / orig.w
-    (1 - phi0) * prob / (1 - (1 - prob)^Size)
-  }, list( .lprob = lprob, .lpobs0 = lpobs0,
-           .eprob = eprob, .epobs0 = epobs0 ))),
+
+    ans <- switch(type.fitted,
+                  "mean"      = onempobs0 * prob / (1 - (1 - prob)^Size),
+                  "pobs0"     = 1 - onempobs0,  # P(Y=0)
+                  "onempobs0" =     onempobs0)  # P(Y>0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
+           .eprob = eprob, .eonempobs0 = eonempobs0 ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(prob = .lprob, pobs0 = .lpobs0 )
-    misc$earg <- list(prob = .eprob, pobs0 = .epobs0 )
+    misc$link <-    c(prob = .lprob, onempobs0 = .lonempobs0 )
+    misc$earg <- list(prob = .eprob, onempobs0 = .eonempobs0 )
 
-    misc$imethod <- .imethod
-    misc$zero <- .zero
+    misc$imethod  <- .imethod
+    misc$zero     <- .zero
     misc$expected <- TRUE
-  }), list( .lprob = lprob, .lpobs0 = lpobs0,
-            .eprob = eprob, .epobs0 = epobs0,
+  }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+            .eprob = eprob, .eonempobs0 = eonempobs0,
             .zero = zero,
             .imethod = imethod ))),
 
@@ -3298,17 +5630,17 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
     new.w  <- if (length(extra$new.w))  extra$new.w  else 1
     Size <- new.w / orig.w
-    prob  <- eta2theta(eta[, 1], .lprob  , earg = .eprob  )
-    pobs0 <- eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
+    prob      <- eta2theta(eta[, 1], .lprob      , earg = .eprob      )
+    onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
       sum(orig.w * dzabinom(x = round(y * Size), size = Size,
-                            prob = prob, pobs0 = pobs0,
+                            prob = prob, pobs0 = 1 - onempobs0,
                             log = TRUE))
     }
-  }, list( .lprob = lprob, .lpobs0 = lpobs0,
-           .eprob = eprob, .epobs0 = epobs0 ))),
-  vfamily = c("zabinomial"),
+  }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
+           .eprob = eprob, .eonempobs0 = eonempobs0 ))),
+  vfamily = c("zabinomialff"),
 
   deriv = eval(substitute(expression({
     NOS <- if (length(extra$NOS)) extra$NOS else 1
@@ -3318,11 +5650,14 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     new.w  <- if (length(extra$new.w))  extra$new.w  else 1
     Size <- new.w / orig.w
 
-    prob <- eta2theta(eta[, 1], .lprob  , earg = .eprob  )
-    phi0 <- eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
+    prob      <- eta2theta(eta[, 1], .lprob      , earg = .eprob      )
+    onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
+    phi0 <- 1 - onempobs0
 
-    dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob  )
-    dphi0.deta <- dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
+    dprob.deta      <- dtheta.deta(prob     , .lprob      ,
+                                   earg = .eprob      )
+    donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
+                                   earg = .eonempobs0 )
 
     df0.dprob   <- -Size *              (1 -  prob)^(Size - 1)
     df02.dprob2 <-  Size * (Size - 1) * (1 -  prob)^(Size - 2)
@@ -3331,23 +5666,24 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
 
     dl.dprob <-  c(w)      * (y / prob - (1 - y) / (1 - prob)) +
-                c(orig.w) * df0.dprob / oneminusf0
-    dl.dphi0 <- -1 / (1 - phi0)
+                 c(orig.w) * df0.dprob / oneminusf0
+    dl.donempobs0 <- +1 / (onempobs0)
 
 
-    dl.dphi0[y == 0] <- 1 / phi0[y == 0]  # Do it in one line
+    dl.donempobs0[y == 0] <-
+      -1 / (1 - onempobs0[y == 0])  # Do it in 1 line
     skip <- extra$skip.these
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       dl.dprob[skip[, spp.], spp.] <- 0
     }
 
 
-    ans <- cbind(            dl.dprob * dprob.deta,
-                 c(orig.w) * dl.dphi0 * dphi0.deta)
+    ans <- cbind(            dl.dprob      * dprob.deta,
+                 c(orig.w) * dl.donempobs0 * donempobs0.deta)
                  
     ans
-  }), list( .lprob = lprob, .lpobs0 = lpobs0,
-            .eprob = eprob, .epobs0 = epobs0 ))),
+  }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+            .eprob = eprob, .eonempobs0 = eonempobs0 ))),
 
 
   weight = eval(substitute(expression({
@@ -3358,7 +5694,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
 
     term1 <-  c(Size) * (meanY /      prob^2 -
-                        meanY / (1 - prob)^2) +
+                         meanY / (1 - prob)^2) +
              c(Size) * (1 - phi0) / (1 - prob)^2
 
     term2 <-  -(1 - phi0) * df02.dprob2 / oneminusf0
@@ -3369,24 +5705,28 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     mu.phi0 <- phi0
     tmp100 <- mu.phi0 * (1.0 - mu.phi0)
-    tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+    tmp200 <- if (FALSE &&
+                  .lonempobs0 == "logit" &&
+                  is.empty.list( .eonempobs0 )) {
       tmp100
     } else {
-      (dphi0.deta^2) / tmp100
+      (donempobs0.deta^2) / tmp100
     }
     wz[, iam(2, 2, M)] <- tmp200
 
 
     c(orig.w) * wz
-  }), list( .lprob = lprob, .lpobs0 = lpobs0,
-            .eprob = eprob, .epobs0 = epobs0 ))))
+  }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+            .eprob = eprob, .eonempobs0 = eonempobs0 ))))
 }
 
 
 
 
 
+
  zageometric <- function(lpobs0 = "logit", lprob = "logit",
+                         type.fitted = c("mean", "pobs0", "onempobs0"),
                          imethod = 1,
                          ipobs0 = NULL, iprob = NULL,
                          zero = NULL) {
@@ -3401,10 +5741,11 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
   eprob <- link2list(lprob)
   lprob <- attr(eprob, "function.name")
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "onempobs0"))[1]
 
 
-
-  if (!is.Numeric(imethod, allowable.length = 1,
+  if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3432,6 +5773,15 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     Musual <- 2
     eval(negzero.expression)
   }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
   initialize = eval(substitute(expression({
     Musual <- 2
     if (any(y < 0))
@@ -3455,6 +5805,10 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
+    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted      <- .type.fitted
+
+    
     mynames1 <- if (ncoly == 1) "pobs0"  else
                 paste("pobs0",  1:ncoly, sep = "")
     mynames2 <- if (ncoly == 1) "prob" else
@@ -3472,7 +5826,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
         phi0.init <- matrix( .ipobs0 , n, ncoly, byrow = TRUE)
 
 
-      prob.init =
+      prob.init <-
         if ( .imethod == 2)
           1 / (1 + y + 1/16) else
         if ( .imethod == 1)
@@ -3495,8 +5849,18 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
   }), list( .lpobs0 = lpobs0, .lprob = lprob,
             .epobs0 = epobs0, .eprob = eprob,
             .ipobs0 = ipobs0, .iprob = iprob,
-            .imethod = imethod ))), 
+            .imethod = imethod,
+            .type.fitted = type.fitted ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "onempobs0"))[1]
+
     NOS <- extra$NOS
     Musual <- 2
 
@@ -3505,7 +5869,22 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
                              .lprob  , earg = .eprob ))
 
-    (1 - phi0) / prob
+
+    ans <- switch(type.fitted,
+                  "mean"      = (1 - phi0) / prob,
+                  "pobs0"     =      phi0,  # P(Y=0)
+                  "onempobs0" =  1 - phi0)  # P(Y>0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
   }, list( .lpobs0 = lpobs0, .lprob = lprob,
            .epobs0 = epobs0, .eprob = eprob ))),
   last = eval(substitute(expression({
@@ -3520,7 +5899,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     names(misc$earg) <-
         c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M <- Musual)]
 
-    for(ii in 1:NOS) {
+    for (ii in 1:NOS) {
       misc$earg[[Musual*ii-1]] <- .epobs0
       misc$earg[[Musual*ii  ]] <- .eprob
     }
@@ -3568,7 +5947,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     dl.dphi0 <- -1 / (1 - phi0)
 
 
-    for(spp. in 1:NOS) {
+    for (spp. in 1:NOS) {
       dl.dphi0[skip[, spp.], spp.] <- 1 / phi0[skip[, spp.], spp.]
       dl.dprob[skip[, spp.], spp.] <- 0
     }
@@ -3608,7 +5987,283 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     wz
   }), list( .lpobs0 = lpobs0,
             .epobs0 = epobs0 ))))
-} # End of zageometric
+}  # End of zageometric
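A usage sketch for zageometric() on simulated zero-altered counts (assumes VGAM is attached; rzageom() is defined earlier in this file):

library(VGAM)
set.seed(123)
gdata <- data.frame(y1 = rzageom(800, prob = 0.35, pobs0 = 0.15))
fit <- vglm(y1 ~ 1, zageometric, data = gdata)
coef(fit, matrix = TRUE)
head(fitted(fit, type.fitted = "onempobs0"))  # estimated P(Y > 0)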
+
+
+
+
+ zageometricff <- function(lprob = "logit", lonempobs0 = "logit",
+                           type.fitted = c("mean", "pobs0", "onempobs0"),
+                           imethod = 1,
+                           iprob = NULL, ionempobs0 = NULL,
+                           zero = -2) {
+
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+  lonempobs0 <- as.list(substitute(lonempobs0))
+  eonempobs0 <- link2list(lonempobs0)
+  lonempobs0 <- attr(eonempobs0, "function.name")
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "pobs0", "onempobs0"))[1]
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
+  if (length(iprob))
+    if (!is.Numeric(iprob, positive = TRUE) ||
+       max(iprob) >= 1)
+    stop("argument 'iprob' out of range")
+
+  if (length(ionempobs0))
+    if (!is.Numeric(ionempobs0, positive = TRUE) ||
+       max(ionempobs0) >= 1)
+      stop("argument 'ionempobs0' out of range")
+
+
+  new("vglmff",
+  blurb = c("Zero-altered geometric ",
+            "(Bernoulli and positive-geometric conditional model)\n\n",
+            "Links:    ",
+            namesof("prob"     , lprob     , earg = eprob     , tag = FALSE), ", ",
+            namesof("onempobs0", lonempobs0, earg = eonempobs0, tag = FALSE), "\n",
+            "Mean:     onempobs0 / prob"),
+
+  constraints = eval(substitute(expression({
+
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+
+  initialize = eval(substitute(expression({
+    Musual <- 2
+    if (any(y < 0))
+      stop("the response must not have negative values")
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+
+    extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+
+    extra$dimnamesy   <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+
+    
+    mynames1 <- if (ncoly == 1) "prob"       else
+                paste("prob",       1:ncoly, sep = "")
+    mynames2 <- if (ncoly == 1) "onempobs0"  else
+                paste("onempobs0",  1:ncoly, sep = "")
+    predictors.names <-
+        c(namesof(mynames1, .lprob      , earg = .eprob      , tag = FALSE),
+          namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
+          interleave.VGAM(Musual*NOS, M = Musual)]
+
+    if (!length(etastart)) {
+
+      foo <- function(x) mean(as.numeric(x == 0))
+      phi0.init <- matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
+      if (length( .ionempobs0 ))
+        phi0.init <- matrix( 1 - .ionempobs0 , n, ncoly, byrow = TRUE)
+
+
+      prob.init <-
+        if ( .imethod == 2)
+          1 / (1 + y + 1/16) else
+        if ( .imethod == 1)
+          (1 - phi0.init) / (1 +
+          matrix(colSums(y * w) / colSums(w) + 1/16,
+                 n, ncoly, byrow = TRUE)) else
+          (1 - phi0.init) / (1 +
+          matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) + 1/16)
+
+
+      if (length( .iprob ))
+        prob.init <- matrix( .iprob , n, ncoly, byrow = TRUE)
+
+
+
+      etastart <-
+        cbind(theta2eta(    prob.init, .lprob      , earg = .eprob      ),
+              theta2eta(1 - phi0.init, .lonempobs0 , earg = .eonempobs0 ))
+                        
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+    }
+  }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
+            .eonempobs0 = eonempobs0, .eprob = eprob,
+            .ionempobs0 = ionempobs0, .iprob = iprob,
+            .imethod = imethod,
+            .type.fitted = type.fitted ))), 
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "pobs0", "onempobs0"))[1]
+
+    NOS <- extra$NOS
+    Musual <- 2
+
+    prob      <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                                 .lprob  , earg = .eprob ))
+    onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+                                 .lonempobs0 , earg = .eonempobs0 ))
+
+
+    ans <- switch(type.fitted,
+                  "mean"      =     (onempobs0) / prob,
+                  "pobs0"     =  1 - onempobs0,  # P(Y=0)
+                  "onempobs0" =      onempobs0)  # P(Y>0)
+    if (length(extra$dimnamesy) &&
+        is.matrix(ans) &&
+        length(extra$dimnamesy[[2]]) == ncol(ans) &&
+        length(extra$dimnamesy[[2]]) > 0) {
+      dimnames(ans) <- extra$dimnamesy
+    } else
+    if (NCOL(ans) == 1 &&
+        is.matrix(ans)) {
+      colnames(ans) <- NULL
+    }
+    ans
+  }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+           .eonempobs0 = eonempobs0, .eprob = eprob ))),
+  last = eval(substitute(expression({
+    temp.names <- c(rep( .lprob      , len = NOS),
+                    rep( .lonempobs0 , len = NOS))
+    temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+    misc$link  <- temp.names
+
+    misc$earg <- vector("list", Musual * NOS)
+
+    names(misc$link) <-
+    names(misc$earg) <-
+        c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+
+    for (ii in 1:NOS) {
+      misc$earg[[Musual*ii-1]] <- .eprob
+      misc$earg[[Musual*ii  ]] <- .eonempobs0
+    }
+
+
+    misc$expected <- TRUE
+    misc$imethod <- .imethod
+    misc$ionempobs0  <- .ionempobs0
+    misc$iprob   <- .iprob
+    misc$multipleResponses <- TRUE
+  }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
+            .eonempobs0 = eonempobs0, .eprob = eprob,
+            .ionempobs0 = ionempobs0, .iprob = iprob,
+            .imethod = imethod ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    NOS <- extra$NOS
+    Musual <- 2
+
+    prob      <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                                 .lprob      , earg = .eprob      ))
+    onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+                                 .lonempobs0 , earg = .eonempobs0 ))
+
+    if (residuals)
+      stop("loglikelihood residuals not implemented yet") else {
+      sum(c(w) * dzageom(x = y, pobs0 = 1 - onempobs0, prob = prob,
+                         log = TRUE))
+    }
+  }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+           .eonempobs0 = eonempobs0, .eprob = eprob ))),
+  vfamily = c("zageometricff"),
+  deriv = eval(substitute(expression({
+    Musual <- 2
+    NOS <- extra$NOS
+    y0 <- extra$y0
+    skip <- extra$skip.these
+
+    prob      <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+                       .lprob      , earg = .eprob      ))
+    onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+                       .lonempobs0 , earg = .eonempobs0 ))
+    pobs0 <- 1 - onempobs0
+
+
+    dl.dprob      <-  1 / prob - (y - 1) / (1 - prob)
+    dl.donempobs0 <- +1 / (onempobs0)
+
+
+    for (spp. in 1:NOS) {
+      dl.donempobs0[skip[, spp.], spp.] <- -1 / pobs0[skip[, spp.], spp.]
+      dl.dprob[skip[, spp.], spp.] <- 0
+    }
+    dprob.deta      <- dtheta.deta(prob,      .lprob  , earg = .eprob  )
+    donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
+                                   earg = .eonempobs0 )
+
+
+    ans <- c(w) * cbind(dl.dprob      * dprob.deta,
+                        dl.donempobs0 * donempobs0.deta)
+    ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+    ans
+  }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
+            .eonempobs0 = eonempobs0, .eprob = eprob ))),
+  weight = eval(substitute(expression({
+
+    wz <- matrix(0.0, n, Musual*NOS)
+
+
+    ned2l.dprob2 <- (1 - pobs0) / (prob^2 * (1 - prob))
+
+    wz[, (1:NOS)] <- c(w) * ned2l.dprob2 * dprob.deta^2
+
+
+    mu.phi0 <- pobs0  # phi0
+    tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+    tmp200 <- if ( FALSE &&
+                  .lonempobs0 == "logit" &&
+                  is.empty.list( .eonempobs0 )) {
+
+      cbind(c(w) * tmp100)
+    } else {
+      cbind(c(w) * (donempobs0.deta^2) / tmp100)
+    }
+    wz[, NOS+(1:NOS)] <- tmp200
+
+
+    wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
+
+
+    wz
+  }), list( .lonempobs0 = lonempobs0,
+            .eonempobs0 = eonempobs0 ))))
+}  # End of zageometricff
 
 
 
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index a1b2213..5422ccd 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -13,33 +13,52 @@
 
 
 
-fittedvlm <- function(object, matrix.arg = TRUE, ...)
-{
-
-    answer = 
-    if (matrix.arg)
-        object@fitted.values else
-    {
-        if (!is.matrix(object@fitted.values) || !length(object@fitted.values))
-            stop("object@fitted.values is not a matrix or is empty")
-        if (ncol(object@fitted.values) == 1)
-            c(object@fitted.values) else {
-                warning("ncol(object@fitted.values) is not 1")
-                c(object@fitted.values)
-            }
-    }
+fittedvlm <- function(object, matrix.arg = TRUE,
+                      type.fitted = NULL,
+                      ...) {
+
 
-    if (length(answer) && length(object@na.action)) {
-        napredict(object@na.action[[1]], answer)
+  if (is.null(type.fitted)) {
+    answer <- if (matrix.arg) {
+        object@fitted.values
+    } else {
+      if (!is.matrix(object@fitted.values) ||
+          !length(object@fitted.values))
+        stop("object@fitted.values is not a matrix or is empty")
+  
+      if (ncol(object@fitted.values) == 1) {
+        c(object@fitted.values)
+      } else {
+        warning("ncol(object@fitted.values) is not 1")
+        c(object@fitted.values)
+      }
+    }
+  } else {
+    linkinv <- object@family@linkinv
+    new.extra <- object@extra
+    new.extra$type.fitted <- type.fitted
+    answer <- linkinv(eta = predict(object), extra = new.extra)
+
+    answer <- if (matrix.arg) {
+      as.matrix(answer)
     } else {
-        answer
+      c(answer)
     }
+  }
+
+  if (length(answer) && length(object@na.action)) {
+    napredict(object@na.action[[1]], answer)
+  } else {
+    answer
+  }
 }
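The new type.fitted argument recomputes fitted values after the fact by calling the family's linkinv with a modified copy of the extra slot, exactly as in the branch above. A sketch of that equivalence, assuming a fit like the zigeometricff example earlier and no missing values in the data:

fit <- vglm(y1 ~ 1, zigeometricff, data = zdata)  # zdata as simulated earlier
new.extra <- fit@extra
new.extra$type.fitted <- "pobs0"
by.hand <- fit@family@linkinv(eta = predict(fit), extra = new.extra)
all.equal(c(by.hand), c(fitted(fit, type.fitted = "pobs0")))  # TRUE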
 
 
 
-if(!isGeneric("fitted")) 
-    setGeneric("fitted", function(object, ...) standardGeneric("fitted"))
+if (!isGeneric("fitted")) 
+    setGeneric("fitted",
+    function(object, ...)
+      standardGeneric("fitted"))
 
 
 
@@ -63,31 +82,36 @@ setMethod("fitted",  "vglm",
     fittedvlm(object, ...))
 
 
-predictors.vglm <- function(object, matrix = TRUE, ...)
-{
-    answer = 
-    if (matrix)
-        object@predictors else
-    {
-        if (!is.matrix(object@predictors) || !length(object@predictors))
-            stop("object@predictors is not a matrix or is empty")
-        if (ncol(object@predictors) == 1)
-            c(object@predictors) else {
-                warning("ncol(object@predictors) is not 1")
-                c(object@predictors)
-            }
-    }
 
-    if (length(answer) && length(object@na.action)) {
-        napredict(object@na.action[[1]], answer)
+
+predictors.vglm <- function(object, matrix = TRUE, ...) {
+  answer <- if (matrix) {
+    object@predictors
+  }  else {
+    if (!is.matrix(object@predictors) || !length(object@predictors))
+      stop("object@predictors is not a matrix or is empty")
+
+    if (ncol(object@predictors) == 1) {
+      c(object@predictors)
     } else {
-        warning("ncol(object@predictors) is not 1")
-        c(object@predictors)
+      warning("ncol(object@predictors) is not 1")
+      c(object@predictors)
+      }
+  }
+
+  if (length(answer) && length(object@na.action)) {
+    napredict(object@na.action[[1]], answer)
+  } else {
+    answer
+  }
 }
 
 
-if(!isGeneric("predictors")) 
-    setGeneric("predictors", function(object, ...) standardGeneric("predictors"))
+if (!isGeneric("predictors")) 
+    setGeneric("predictors",
+      function(object, ...)
+        standardGeneric("predictors"))
+
 
 setMethod("predictors",  "vglm",
     function(object, ...)
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 96b7a0b..50d1e3b 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -10,7 +10,7 @@
 
 formulavlm = function(x, fnumber=1, ...) {
   if (!is.Numeric(fnumber, integer.valued = TRUE,
-                  allowable.length = 1, positive = TRUE) ||
+                  length.arg = 1, positive = TRUE) ||
       fnumber > 2)
     stop("argument 'fnumber' must be 1 or 2")
 
diff --git a/R/links.q b/R/links.q
index ffb0b97..515fd11 100644
--- a/R/links.q
+++ b/R/links.q
@@ -20,11 +20,21 @@ ToString <- function(x)
  TypicalVGAMfamilyFunction <-
   function(lsigma = "loge",
            isigma = NULL,
+           link.list = list("(Default)" = "identity", 
+                            x2          = "loge", 
+                            x3          = "logoff",
+                            x4          = "mlogit",
+                            x5          = "mlogit"),
+           earg.list = list("(Default)" = list(),
+                            x2          = list(),
+                            x3          = list(offset = -1),
+                            x4          = list(),
+                            x5          = list()),
            gsigma = exp(-5:5),
            parallel = TRUE,
-           apply.parint = FALSE,
            shrinkage.init = 0.95,
            nointercept = NULL, imethod = 1,
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            probs.x = c(0.15, 0.85),
            probs.y = c(0.25, 0.50, 0.75),
            mv = FALSE, earg.link = FALSE,
@@ -38,7 +48,7 @@ ToString <- function(x)
 TypicalVGAMlinkFunction <-
   function(theta,
            someParameter = 0,
-           bvalue = NULL, # .Machine$double.xmin is an alternative
+           bvalue = NULL,  # .Machine$double.xmin is an alternative
            inverse = FALSE, deriv = 0,
            short = TRUE, tag = FALSE) {
   NULL
@@ -63,10 +73,9 @@ care.exp <- function(x,
 
 
  loge <- function(theta,
-                  bvalue = NULL, # .Machine$double.xmin is an alternative
+                  bvalue = NULL,  # .Machine$double.xmin is an alternative
                   inverse = FALSE, deriv = 0,
-                  short = TRUE, tag = FALSE)
-{
+                  short = TRUE, tag = FALSE) {
 
 
   if (is.character(theta)) {
@@ -99,6 +108,42 @@ care.exp <- function(x,
 
 
 
+ logneg <- function(theta,
+                    bvalue = NULL,  # .Machine$double.xmin is an alternative
+                    inverse = FALSE, deriv = 0,
+                    short = TRUE, tag = FALSE) {
+
+
+  if (is.character(theta)) {
+    string <- if (short)
+        paste("log(-(",  theta, "))", sep = "") else
+        paste("log(-(",  theta, "))", sep = "")
+    if (tag)
+      string <- paste("Log negative:", string)
+    return(string)
+  }
+
+  if (!inverse && length(bvalue))
+    theta[theta <= 0.0] <- bvalue
+
+  if (inverse) {
+    if (deriv > 0) {
+      1 / Recall(theta = theta,
+                 bvalue = bvalue,
+                 inverse = FALSE, deriv = deriv)
+    } else {
+      -exp(theta)
+    }
+  } else {
+    switch(deriv + 1, {
+       log(-theta)},
+       theta,
+       theta)
+  }
+}
+
+
+
  logoff <- function(theta,
                     offset = 0,
                     inverse = FALSE, deriv = 0,
@@ -163,18 +208,17 @@ care.exp <- function(x,
   } else {
     switch(deriv+1,
        theta,
-       theta*0 + 1,
-       theta*0)
+       theta * 0 + 1,
+       theta * 0)
   }
 }
 
 
 
 
- nidentity <- function(theta,
+ negidentity <- function(theta,
                       inverse = FALSE, deriv = 0,
-                      short = TRUE, tag = FALSE)
-{
+                      short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- paste("-", theta, sep = "")
     if (tag) 
@@ -202,7 +246,7 @@ care.exp <- function(x,
 
 
  logit <- function(theta,
-                   bvalue = NULL, # .Machine$double.eps is an alternative
+                   bvalue = NULL,  # .Machine$double.eps is an alternative
                    inverse = FALSE, deriv = 0,
                    short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
@@ -215,8 +259,8 @@ care.exp <- function(x,
   }
 
   if (!inverse && length(bvalue)) {
-      theta[theta <= 0.0] <- bvalue
-      theta[theta >= 1.0] <- 1.0 - bvalue
+    theta[theta <= 0.0] <- bvalue
+    theta[theta >= 1.0] <- 1.0 - bvalue
   }
   if (inverse) {
     if (deriv > 0) {
@@ -244,10 +288,9 @@ care.exp <- function(x,
 
 
  loglog <- function(theta,
-                    bvalue = NULL, # .Machine$double.eps is an alternative
+                    bvalue = NULL,  # .Machine$double.eps is an alternative
                     inverse = FALSE, deriv = 0,
-                    short = TRUE, tag = FALSE)
-{
+                    short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("loglog(",  theta, ")",  sep = "") else
@@ -284,10 +327,9 @@ care.exp <- function(x,
 
 
  cloglog <- function(theta,
-                     bvalue = NULL, # .Machine$double.eps is an alternative
+                     bvalue = NULL,  # .Machine$double.eps is an alternative
                      inverse = FALSE, deriv = 0,
-                     short = TRUE, tag = FALSE)
-{
+                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("cloglog(",    theta, ")",  sep = "") else
@@ -326,10 +368,9 @@ care.exp <- function(x,
 
 
  probit <- function(theta,
-                    bvalue = NULL, # .Machine$double.eps is an alternative
+                    bvalue = NULL,  # .Machine$double.eps is an alternative
                     inverse = FALSE, deriv = 0,
-                    short = TRUE, tag = FALSE)
-{
+                    short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("probit(", theta, ")", sep = "") else
@@ -340,8 +381,8 @@ care.exp <- function(x,
   }
 
   if (!inverse && length(bvalue)) {
-      theta[theta <= 0.0] <- bvalue
-      theta[theta >= 1.0] <- 1 - bvalue
+    theta[theta <= 0.0] <- bvalue
+    theta[theta >= 1.0] <- 1 - bvalue
   }
 
   if (inverse) {
@@ -361,25 +402,23 @@ care.exp <- function(x,
         if (is.matrix(theta))
             dim(ans) <- dim(theta)
         ans
-    },
-    {
-     if (is.matrix(theta)) {
+     }, {
+       if (is.matrix(theta)) {
          ans <- dnorm(qnorm(theta))
          dim(ans) <- dim(theta)
          ans
        } else dnorm(qnorm(as.vector(theta)))
-      }, 
-      {
+      }, {
         junk <- qnorm(theta)
         ans <- -junk * dnorm(junk)
         if (is.vector(theta)) ans else
         if (is.matrix(theta)) {
-            dim(ans) <- dim(theta)
-            ans
+          dim(ans) <- dim(theta)
+          ans
         } else {
-            warning("can only handle vectors and matrices;",
-                    " converting to vector")
-            ans
+          warning("can only handle vectors and matrices;",
+                  " converting to vector")
+          ans
         }
       })
   }
@@ -393,10 +432,9 @@ care.exp <- function(x,
 
 
  explink <- function(theta,
-                     bvalue = NULL, # .Machine$double.eps is an alternative
+                     bvalue = NULL,  # .Machine$double.eps is an alternative
                      inverse = FALSE, deriv = 0,
-                     short = TRUE, tag = FALSE)
-{
+                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("explink(", theta, ")", sep = "") else
@@ -429,10 +467,9 @@ care.exp <- function(x,
 
 
  reciprocal <- function(theta,
-                        bvalue = NULL, # .Machine$double.eps is an alternative
+                        bvalue = NULL,  # .Machine$double.eps is an alternative
                         inverse = FALSE, deriv = 0,
-                        short = TRUE, tag = FALSE)
-{
+                        short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- paste("1/", theta, sep = "")
     if (tag) 
@@ -462,13 +499,14 @@ care.exp <- function(x,
 
 
 
- nloge <- function(theta,
-                   bvalue = NULL, # .Machine$double.eps is an alternative
+
+ negloge <- function(theta,
+                   bvalue = NULL,  # .Machine$double.eps is an alternative
                    inverse = FALSE, deriv = 0,
                    short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
       string <- if (short) 
-          paste("nloge(", theta, ")", sep = "") else
+          paste("negloge(", theta, ")", sep = "") else
           paste("-log(",  theta, ")", sep = "")
       if (tag) 
         string <- paste("Negative log:", string) 
@@ -497,12 +535,11 @@ care.exp <- function(x,
 
 
 
- nreciprocal <-
+ negreciprocal <-
   function(theta,
-           bvalue = NULL, # .Machine$double.eps is an alternative
+           bvalue = NULL,  # .Machine$double.eps is an alternative
            inverse = FALSE,
-           deriv = 0, short = TRUE, tag = FALSE)
-{
+           deriv = 0, short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- paste("-1/", theta, sep = "")
     if (tag) 
@@ -534,10 +571,9 @@ care.exp <- function(x,
 
  natural.ig <-
   function(theta,
-           bvalue = NULL, # .Machine$double.eps is an alternative
+           bvalue = NULL,  # .Machine$double.eps is an alternative
            inverse = FALSE, deriv = 0,
-           short = TRUE, tag = FALSE)
-{
+           short = TRUE, tag = FALSE) {
 
   if (is.character(theta)) {
     string <- paste("-1/", theta, sep = "")
@@ -548,9 +584,9 @@ care.exp <- function(x,
 
   if (inverse) {
     if (deriv > 0) {
-      1 / nreciprocal(theta,
-                      bvalue = bvalue,
-                      inverse = FALSE, deriv = deriv)
+      1 / negreciprocal(theta,
+                        bvalue = bvalue,
+                        inverse = FALSE, deriv = deriv)
     } else {
       1 / sqrt(-2*theta)
     }
@@ -571,8 +607,7 @@ care.exp <- function(x,
                     bminvalue = NULL,
                     bmaxvalue = NULL,
                     inverse = FALSE, deriv = 0,
-                    short = TRUE, tag = FALSE)
-{
+                    short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("rhobit(", theta, ")", sep = "") else
@@ -595,7 +630,7 @@ care.exp <- function(x,
                  inverse = FALSE, deriv = deriv)
     } else {
       junk <- exp(theta)
-      expm1(theta) / (junk+1.0)
+      expm1(theta) / (junk + 1.0)
     }
   } else {
       switch(deriv+1, {
@@ -612,8 +647,7 @@ care.exp <- function(x,
                      bminvalue = NULL,
                      bmaxvalue = NULL,
                      inverse = FALSE, deriv = 0,
-                     short = TRUE, tag = FALSE)
-{
+                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("fisherz(", theta, ")", sep = "") else
@@ -654,7 +688,7 @@ care.exp <- function(x,
  mlogit <-
   function(theta,
            refLevel = "last",
-           M = NULL, # stop("argument 'M' not specified"),
+           M = NULL,  # stop("argument 'M' not specified"),
            whitespace = FALSE,
            bvalue = NULL,
            inverse = FALSE, deriv = 0,
@@ -676,11 +710,11 @@ care.exp <- function(x,
       warning("argument 'refLevel' is from an ordered factor")
     refLevel <- as.character(refLevel) == levels(refLevel)
     refLevel <- (1:length(refLevel))[refLevel]
-    if (!is.Numeric(refLevel, allowable.length = 1,
+    if (!is.Numeric(refLevel, length.arg = 1,
                     integer.valued = TRUE, positive = TRUE))
       stop("could not coerce 'refLevel' into a single positive integer")
   } else
-  if (!is.Numeric(refLevel, allowable.length = 1,
+  if (!is.Numeric(refLevel, length.arg = 1,
                   integer.valued = TRUE))
     stop("'refLevel' must be a single (positive?) integer")
 
@@ -732,7 +766,7 @@ care.exp <- function(x,
 
   M.orig <- M
   M <- if (inverse) ncol(cbind(theta)) else
-       ncol(cbind(theta)) - 1
+                    ncol(cbind(theta)) - 1
   if (M < 1)
     ifelse(inverse,
            stop("argument 'eta' should have at least one column"),
@@ -776,7 +810,7 @@ care.exp <- function(x,
                  bvalue = bvalue,
                  inverse = FALSE, deriv = deriv)
     } else {
-       foo(theta, refLevel, M = M) # log(theta[, -jay] / theta[, jay])
+       foo(theta, refLevel, M = M)  # log(theta[, -jay] / theta[, jay])
     }
   } else {
     switch(deriv + 1, {
@@ -792,7 +826,7 @@ care.exp <- function(x,
       care.exp(log(theta) + log1p(-theta)),
       care.exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
   }
-} # end of mlogit
+}  # end of mlogit
 
 
 
@@ -800,15 +834,15 @@ care.exp <- function(x,
 
 
 
-fsqrt <- function(theta, #  = NA  , = NULL,
+fsqrt <- function(theta,  #  = NA  , = NULL,
                   min = 0, max = 1, mux = sqrt(2),
                   inverse = FALSE, deriv = 0,
                   short = TRUE, tag = FALSE) {
-  if (!is.Numeric(min, allowable.length = 1))
+  if (!is.Numeric(min, length.arg = 1))
     stop("bad input for 'min' component")
-  if (!is.Numeric(max, allowable.length = 1))
+  if (!is.Numeric(max, length.arg = 1))
     stop("bad input for 'max' component")
-  if (!is.Numeric(mux, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(mux, length.arg = 1, positive = TRUE))
     stop("bad input for 'mux' component")
   if (min >= max)
     stop("'min' >= 'max' is not allowed")
@@ -858,17 +892,17 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
 
 
- powl <- function(theta,
-                  power = 1,
-                  inverse = FALSE, deriv = 0,
-                  short = TRUE, tag = FALSE) {
+ powerlink <- function(theta,
+                       power = 1,
+                       inverse = FALSE, deriv = 0,
+                       short = TRUE, tag = FALSE) {
     exponent <- power
     if (exponent == 0)
       stop("use the 'loge' link")
 
   if (is.character(theta)) {
     string <- if (short) 
-        paste("powl(", theta, ", power = ",
+        paste("powerlink(", theta, ", power = ",
               as.character(exponent), ")",
               sep = "") else
         paste(theta, "^(", as.character(exponent), ")", sep = "")
@@ -889,11 +923,9 @@ fsqrt <- function(theta, #  = NA  , = NULL,
     switch(deriv+1,
     {
       theta^exponent
-    },
-    {
+    }, {
       (theta^(1-exponent)) / exponent
-    },
-    {
+    }, {
       (theta^(2-exponent)) / (exponent * (exponent-1))
     })
   }
@@ -910,8 +942,8 @@ fsqrt <- function(theta, #  = NA  , = NULL,
                     inverse = FALSE, deriv = 0,
                     short = TRUE, tag = FALSE) {
 
-    A = min
-    B = max
+    A <- min
+    B <- max
    if (!inverse && length(bminvalue)) theta[theta <= A] <- bminvalue
    if (!inverse && length(bmaxvalue)) theta[theta >= B] <- bmaxvalue
 
@@ -957,7 +989,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
 
  logc <- function(theta,
-                  bvalue = NULL, # .Machine$double.xmin is an alternative
+                  bvalue = NULL,  # .Machine$double.xmin is an alternative
                   inverse = FALSE, deriv = 0,
                   short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
@@ -999,8 +1031,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
  cauchit <- function(theta,
                      bvalue = .Machine$double.eps,
                      inverse = FALSE, deriv = 0,
-                     short = TRUE, tag = FALSE)
-{
+                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
         paste("cauchit(", theta, ")", sep = "") else
@@ -1039,8 +1070,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
                   lambda = 1,
                   cutpoint = NULL,
                   inverse = FALSE, deriv = 0,
-                  short = TRUE, tag = FALSE)
-{
+                  short = TRUE, tag = FALSE) {
 
 
 
@@ -1085,16 +1115,16 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
 
   thmat <- cbind(theta)
-  lambda <- rep(lambda, len = ncol(thmat)) # Allow recycling for lambda
+  lambda <- rep(lambda, len = ncol(thmat))  # Allow recycling for lambda
   if (is.Numeric(cutpoint))
     cutpoint <- rep(cutpoint, len = ncol(thmat))
   if (ncol(thmat) > 1) {
     answer <- thmat
-    for(ii in 1:ncol(thmat))
-      answer[,ii] <- Recall(theta = thmat[,ii],
-                            lambda = lambda[ii],
-                            cutpoint = if (is.Numeric(cutpoint))
-                                       cutpoint[ii] else NULL,
+    for (ii in 1:ncol(thmat))
+      answer[, ii] <- Recall(theta = thmat[, ii],
+                             lambda = lambda[ii],
+                             cutpoint = if (is.Numeric(cutpoint))
+                                        cutpoint[ii] else NULL,
                             inverse = inverse, deriv = deriv)
     return(answer)
   }
@@ -1116,8 +1146,8 @@ fsqrt <- function(theta, #  = NA  , = NULL,
   } else {
     smallno <- 1 * .Machine$double.eps
     Theta <- theta
-    Theta <- pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
-    Theta <- pmax(Theta, smallno) # Since theta == 0 is a possibility
+    Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
+    Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
     Ql <- qnorm(Theta)
     switch(deriv+1, {
         temp <- Ql / (3*sqrt(lambda))
@@ -1137,7 +1167,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
 
 
- polf <- function(theta, # = 1,
+ polf <- function(theta,  # = 1,
                   cutpoint = NULL,
                   inverse = FALSE, deriv = 0,
                   short = TRUE, tag = FALSE) {
@@ -1151,7 +1181,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
   if (is.character(theta)) {
     string <- if (short) {
-      lenc = length(cutpoint) > 1
+      lenc <- length(cutpoint) > 1
       paste("polf(", theta,
              ", cutpoint = ",
             if (lenc) "c(" else "",
@@ -1168,18 +1198,18 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
 
 
-    thmat = cbind(theta)
+    thmat <- cbind(theta)
     if (ncol(thmat) > 1) {
-        answer = thmat
-        cutpoint = rep(cutpoint, len = ncol(thmat)) # Reqd for the for loop
-        for(ii in 1:ncol(thmat))
-            answer[,ii] = Recall(theta = thmat[,ii],
+        answer <- thmat
+        cutpoint <- rep(cutpoint, len = ncol(thmat))  # Reqd for the for loop
+        for (ii in 1:ncol(thmat))
+            answer[, ii] <- Recall(theta = thmat[, ii],
                                  cutpoint = cutpoint,
                                  inverse = inverse, deriv = deriv)
         return(answer)
     }
 
-  answer =
+  answer <-
   if (inverse) {
       if (deriv > 0) {
           1 / Recall(theta = theta,
@@ -1204,15 +1234,15 @@ fsqrt <- function(theta, #  = NA  , = NULL,
         cloglog(theta = theta,
                 inverse = inverse, deriv = deriv)
     } else {
-      smallno = 1 * .Machine$double.eps
-      SMALLNO = 1 * .Machine$double.xmin
-      Theta = theta
-      Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
-      Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
-      Ql = qnorm(Theta)
+      smallno <- 1 * .Machine$double.eps
+      SMALLNO <- 1 * .Machine$double.xmin
+      Theta <- theta
+      Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
+      Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
+      Ql <- qnorm(Theta)
       switch(deriv+1, {
-      temp = 0.5 * Ql + sqrt(cutpoint + 7/8)
-      temp = pmax(temp, SMALLNO)
+      temp <- 0.5 * Ql + sqrt(cutpoint + 7/8)
+      temp <- pmax(temp, SMALLNO)
       2 * log(temp)},
       (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql),
       {  stop('cannot handle deriv = 2') },
@@ -1234,7 +1264,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
                    inverse = FALSE, deriv = 0,
                    short = TRUE, tag = FALSE) {
 
-  kay = k
+  kay <- k
   if (!is.Numeric(kay, positive = TRUE))
     stop("could not determine 'k' or it is not positive-valued")
   if (!is.Numeric(cutpoint))
@@ -1246,8 +1276,8 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
   if (is.character(theta)) {
     string <- if (short) {
-        lenc = length(cutpoint) > 1
-        lenk = length(kay) > 1
+        lenc <- length(cutpoint) > 1
+        lenk <- length(kay) > 1
         paste("nbolf(", theta,
               ", cutpoint = ",
               if (lenc) "c(" else "",
@@ -1269,60 +1299,60 @@ fsqrt <- function(theta, #  = NA  , = NULL,
   }
 
 
-    thmat = cbind(theta)
-    kay = rep(kay, len = ncol(thmat)) # Allow recycling for kay
-    cutpoint = rep(cutpoint, len = ncol(thmat)) # Allow recycling for cutpoint
+    thmat <- cbind(theta)
+    kay <- rep(kay, len = ncol(thmat))  # Allow recycling for kay
+    cutpoint <- rep(cutpoint, len = ncol(thmat))  # Allow recycling for cutpoint
     if (ncol(thmat) > 1) {
-      answer = thmat
-      for(ii in 1:ncol(thmat))
-          answer[,ii] = Recall(theta = thmat[,ii],
+      answer <- thmat
+      for (ii in 1:ncol(thmat))
+          answer[, ii] <- Recall(theta = thmat[, ii],
                                cutpoint = cutpoint[ii],
                                k = kay[ii],
                                inverse = inverse, deriv = deriv)
       return(answer)
     }
 
-    answer =
+    answer <-
     if (inverse) {
-        if (deriv > 0) {
-            1 / Recall(theta = theta,
-                       cutpoint = cutpoint,
-                       k = kay,
-                       inverse = FALSE, deriv = deriv)
-        } else {
-            if (cutpoint == 0) {
-                1.0 - (kay / (kay + care.exp(theta)))^kay
-            } else {
-                pnorm((asinh(exp(theta/2)/sqrt(kay)) -
-                       asinh(sqrt(cutpoint/kay))) * 2 * sqrt(kay))
-            }
-        }
-    } else {
-        smallno = 1 * .Machine$double.eps
-        SMALLNO = 1 * .Machine$double.xmin
-        Theta = theta
-        Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
-        Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
+      if (deriv > 0) {
+        1 / Recall(theta = theta,
+                   cutpoint = cutpoint,
+                   k = kay,
+                   inverse = FALSE, deriv = deriv)
+      } else {
         if (cutpoint == 0) {
-            switch(deriv+1, {
-            temp = (1 - Theta)^(-1/kay) - 1
-            temp = pmax(temp, SMALLNO)
-            log(kay) + log(temp)},
-            (kay / (1 - Theta)^(1/kay) - kay) * (1 - Theta)^(kay+1/kay),
-            {  stop('cannot handle deriv = 2') },
-            stop("argument 'deriv' unmatched"))
+          1.0 - (kay / (kay + care.exp(theta)))^kay
         } else {
-            Ql = qnorm(Theta)
-            switch(deriv+1, {
-                temp = sqrt(kay) * sinh(Ql/(2*sqrt(kay)) +
-                       asinh(sqrt(cutpoint/kay)))
-                temp = pmax(temp, SMALLNO)
-                2 * log(temp)}, {
-                arg1 = (Ql/(2*sqrt(kay)) + asinh(sqrt(cutpoint/kay)))
-                sqrt(kay) * tanh(arg1) * dnorm(Ql) },
-                {  stop('cannot handle deriv = 2') },
-                stop("argument 'deriv' unmatched"))
+            pnorm((asinh(exp(theta/2)/sqrt(kay)) -
+                   asinh(sqrt(cutpoint/kay))) * 2 * sqrt(kay))
         }
+      }
+    } else {
+      smallno <- 1 * .Machine$double.eps
+      SMALLNO <- 1 * .Machine$double.xmin
+      Theta <- theta
+      Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
+      Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
+      if (cutpoint == 0) {
+        switch(deriv+1, {
+        temp <- (1 - Theta)^(-1/kay) - 1
+        temp <- pmax(temp, SMALLNO)
+        log(kay) + log(temp)},
+        (kay / (1 - Theta)^(1/kay) - kay) * (1 - Theta)^(kay+1/kay),
+        {  stop('cannot handle deriv = 2') },
+        stop("argument 'deriv' unmatched"))
+      } else {
+        Ql <- qnorm(Theta)
+        switch(deriv+1, {
+              temp <- sqrt(kay) * sinh(Ql/(2*sqrt(kay)) +
+                     asinh(sqrt(cutpoint/kay)))
+              temp <- pmax(temp, SMALLNO)
+              2 * log(temp)}, {
+              arg1 <- (Ql/(2*sqrt(kay)) + asinh(sqrt(cutpoint/kay)))
+              sqrt(kay) * tanh(arg1) * dnorm(Ql) },
+              {  stop('cannot handle deriv = 2') },
+              stop("argument 'deriv' unmatched"))
+      }
     }
     if (!is.Numeric(answer)) stop("the answer contains some NAs")
     answer
@@ -1339,54 +1369,54 @@ fsqrt <- function(theta, #  = NA  , = NULL,
                     inverse = FALSE, deriv = 0,
                     short = TRUE, tag = FALSE) {
 
-    kay <- k
-    if (!is.Numeric(kay, positive = TRUE))
-      stop("could not determine argument 'k' or ",
-           "it is not positive-valued")
-    if (!is.Numeric(cutpoint))
-      stop("could not determine the cutpoint")
-    if (any(cutpoint < 0) ||
-        !is.Numeric(cutpoint, integer.valued = TRUE))
-      warning("argument 'cutpoint' should ",
-              "contain non-negative integer values")
-
-    if (is.character(theta)) {
-        string <- if (short) {
-            lenc = length(cutpoint) > 1
-            lenk = length(kay) > 1
-            paste("nbolf2(", theta,
-                  ", earg = list(cutpoint = ",
-                  if (lenc) "c(" else "",
-                  ToString(cutpoint),
-                  if (lenc) ")" else "",
-                  ", k = ",
-                  if (lenk) "c(" else "",
-                  ToString(kay),
-                  if (lenk) ")" else "",
-                  "))", sep = "")
-    } else {
-       paste("3*log(<a complicated expression>)", sep = "")
-    }
-    if (tag) 
-      string = paste("Negative binomial-ordinal link function 2:",
-                     string)
-    return(string)
+  kay <- k
+  if (!is.Numeric(kay, positive = TRUE))
+    stop("could not determine argument 'k' or ",
+         "it is not positive-valued")
+  if (!is.Numeric(cutpoint))
+    stop("could not determine the cutpoint")
+  if (any(cutpoint < 0) ||
+      !is.Numeric(cutpoint, integer.valued = TRUE))
+    warning("argument 'cutpoint' should ",
+            "contain non-negative integer values")
+
+  if (is.character(theta)) {
+    string <- if (short) {
+      lenc <- length(cutpoint) > 1
+      lenk <- length(kay) > 1
+      paste("nbolf2(", theta,
+            ", earg = list(cutpoint = ",
+            if (lenc) "c(" else "",
+            ToString(cutpoint),
+            if (lenc) ")" else "",
+            ", k = ",
+            if (lenk) "c(" else "",
+            ToString(kay),
+            if (lenk) ")" else "",
+            "))", sep = "")
+  } else {
+    paste("3*log(<a complicated expression>)", sep = "")
+  }
+  if (tag) 
+    string <- paste("Negative binomial-ordinal link function 2:",
+                   string)
+  return(string)
   }
 
 
-    thmat = cbind(theta)
-    kay = rep(kay, len = ncol(thmat)) # Allow recycling for kay
+    thmat <- cbind(theta)
+    kay <- rep(kay, len = ncol(thmat))  # Allow recycling for kay
     if (ncol(thmat) > 1) {
-        answer = thmat
-        for(ii in 1:ncol(thmat))
-            answer[,ii] = Recall(theta = thmat[,ii],
+        answer <- thmat
+        for (ii in 1:ncol(thmat))
+            answer[, ii] <- Recall(theta = thmat[, ii],
                                  cutpoint = cutpoint[ii],
                                  k = kay[ii],
                                  inverse = inverse, deriv = deriv)
         return(answer)
     }
 
-    answer =
+    answer <-
     if (inverse) {
         if (deriv > 0) {
             1 / Recall(theta = theta,
@@ -1398,65 +1428,65 @@ fsqrt <- function(theta, #  = NA  , = NULL,
                 1.0 - (kay / (kay + care.exp(theta)))^kay
             } else {
 
-            a1 = -(9*cutpoint+8) / (cutpoint+1)
-            a2 = (9*kay-1) / (kay * (cutpoint+1)^(1/3))
-            a3 = 9 / (kay * (cutpoint+1)^(2/3))
-            a4 = 9 / (cutpoint+1)
-            B = exp(theta/3)
-            mymat = rbind(a1^2*a2^2 + 2*a1*a2^3*B + B^2*a2^4, 0,
+            a1 <- -(9*cutpoint+8) / (cutpoint+1)
+            a2 <- (9*kay-1) / (kay * (cutpoint+1)^(1/3))
+            a3 <- 9 / (kay * (cutpoint+1)^(2/3))
+            a4 <- 9 / (cutpoint+1)
+            B <- exp(theta/3)
+            mymat <- rbind(a1^2*a2^2 + 2*a1*a2^3*B + B^2*a2^4, 0,
                     -2*a1*a2*a3*B - 2*a2^2*a3*B^2 - a1^2*a3 - a2^2*a4, 0,
                     B^2 * a3^2 + a3 * a4)
-            ans = Re(t(apply(mymat, 2, polyroot)))
-            theta2 = invfun = pnorm(-ans)  # pnorm(-x) = 1-pnorm(x)
-            for(ii in 1:4) {
-              theta2[,ii] =
-                Recall(theta = theta2[,ii],
+            ans <- Re(t(apply(mymat, 2, polyroot)))
+            theta2 <- invfun <- pnorm(-ans)  # pnorm(-x) = 1-pnorm(x)
+            for (ii in 1:4) {
+              theta2[, ii] <-
+                Recall(theta = theta2[, ii],
                        cutpoint = cutpoint,
                        k = kay,
                        inverse = FALSE, deriv = deriv)
             }
-            rankmat = t(apply(abs(theta2 - theta), 1, rank))
-            for(ii in 2:4) {
-                if (any(index4 <- (rankmat[,ii] == 1))) {
-                    invfun[index4,1] = invfun[index4,ii]
-                }
+            rankmat <- t(apply(abs(theta2 - theta), 1, rank))
+            for (ii in 2:4) {
+              if (any(index4 <- (rankmat[, ii] == 1))) {
+                invfun[index4, 1] <- invfun[index4, ii]
+              }
             }
-            invfun[,1]
+            invfun[, 1]
             }
         }
     } else {
-        smallno = 1 * .Machine$double.eps
-        SMALLNO = 1 * .Machine$double.xmin
-        Theta = theta
-        Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
-        Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
+        smallno <- 1 * .Machine$double.eps
+        SMALLNO <- 1 * .Machine$double.xmin
+        Theta <- theta
+        Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
+        Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
         if (cutpoint == 0) {
             switch(deriv+1, {
-            temp = (1 - Theta)^(-1/kay) - 1
-            temp = pmax(temp, SMALLNO)
+            temp <- (1 - Theta)^(-1/kay) - 1
+            temp <- pmax(temp, SMALLNO)
             log(kay) + log(temp)},
             (kay / (1 - Theta)^(1/kay) - kay) * (1 - Theta)^(kay+1/kay),
             {  stop("cannot handle 'deriv = 2'") },
             stop("argument 'deriv' unmatched"))
         } else {
-            Ql = qnorm(Theta)
-            a1 = -(9*cutpoint+8) / (cutpoint+1)
-            a2 = (9*kay-1) / (kay * (cutpoint+1)^(1/3))
-            a3 = 9 / (kay * (cutpoint+1)^(2/3))
-            a4 = 9 / (cutpoint+1)
-            discrim = a1^2 * a3 + a2^2 * a4 - Ql^2 * a3 * a4
-            denomin = Ql^2 * a3 - a2^2
-            numerat = (a1*a2 - Ql * sqrt(discrim))
-            argmax1 = numerat / denomin
+            Ql <- qnorm(Theta)
+            a1 <- -(9*cutpoint+8) / (cutpoint+1)
+            a2 <- (9*kay-1) / (kay * (cutpoint+1)^(1/3))
+            a3 <- 9 / (kay * (cutpoint+1)^(2/3))
+            a4 <- 9 / (cutpoint+1)
+            discrim <- a1^2 * a3 + a2^2 * a4 - Ql^2 * a3 * a4
+            denomin <- Ql^2 * a3 - a2^2
+            numerat <- (a1*a2 - Ql * sqrt(discrim))
+            argmax1 <- numerat / denomin
             switch(deriv+1, {
-                argmax2 = (a1*a2 + Ql * sqrt(discrim)) / denomin
-                temp = ifelse(argmax1 > 0, argmax1, argmax2)
-                temp = pmax(temp, SMALLNO)
+                argmax2 <- (a1*a2 + Ql * sqrt(discrim)) / denomin
+                temp <- ifelse(argmax1 > 0, argmax1, argmax2)
+                temp <- pmax(temp, SMALLNO)
                 3 * log(temp)}, {
-                 BB = (sqrt(discrim) - Ql^2 * a3 *
+                 BB <- (sqrt(discrim) - Ql^2 * a3 *
                        a4 / sqrt(discrim)) / dnorm(Ql)
-                 CC = 2 * Ql * a3 / dnorm(Ql)
-                 dA.dtheta = (-denomin * BB - numerat * CC) / denomin^2
+                 CC <- 2 * Ql * a3 / dnorm(Ql)
+                 dA.dtheta <- (-denomin * BB - numerat * CC) / denomin^2
                  argmax1 / (3 * dA.dtheta)
                 },
                 {  stop('cannot handle deriv = 2') },
@@ -1474,7 +1504,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
 
 
   temp <- cut(y, breaks = breaks, labels = FALSE)
-  temp <- c(temp) # integer vector of integers
+  temp <- c(temp)  # integer vector of integers
   if (any(is.na(temp)))
     stop("there are NAs")
   answer <- if (ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
@@ -1499,7 +1529,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
   oklevels <- 1:L
   if (L == 1)
     stop("only one unique value")
-  for(ii in oklevels) {
+  for (ii in oklevels) {
     if (all(ii != uy))
       stop("there is no ", ii, " value")
   }
@@ -1519,8 +1549,7 @@ fsqrt <- function(theta, #  = NA  , = NULL,
                        wrt.eta = NULL,
                        bvalue = NULL,
                        inverse = FALSE, deriv = 0,
-                       short = TRUE, tag = FALSE)
-{
+                       short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short)
       paste("nbcanlink(", theta, ")", sep = "") else
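
For illustration (not part of the patch): the new logneg() link added above
maps a negative parameter to eta = log(-theta), with inverse theta = -exp(eta).
A quick sketch of the round trip, assuming the package is loaded:

    theta <- c(-0.5, -2, -10)
    eta <- logneg(theta)          # log(-theta)
    logneg(eta, inverse = TRUE)   # -exp(eta); recovers theta
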
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index 300e962..acd9c61 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -8,10 +8,43 @@
 
 
 
+
+ 
 logLik.vlm <- function(object, ...)
   object@criterion$loglikelihood
 
 
+
+ 
+logLik.qrrvglm <- function(object, ...) {
+
+  ff.code <- object@family
+  ll.ff.code <- ff.code@loglikelihood
+
+  prior.weights <- weights(object, type = "prior")
+  if (is.matrix(prior.weights) &&
+      ncol(prior.weights) == 1)
+    prior.weights <- c(prior.weights)
+
+  loglik.try <-
+    ll.ff.code(mu = fitted(object),
+               y = depvar(object),
+               w = prior.weights,
+               residuals = FALSE,
+               eta = predict(object),
+               extra = object@extra)
+  if (!is.numeric(loglik.try))
+    loglik.try <- NULL
+
+  loglik.try
+}
+
+
+
+
+
+
+
 if (!isGeneric("logLik"))
   setGeneric("logLik", function(object, ...)
              standardGeneric("logLik"),
@@ -34,20 +67,48 @@ setMethod("logLik",  "vgam", function(object, ...)
 
 
 
+setMethod("logLik",  "qrrvglm", function(object, ...)
+    logLik.qrrvglm(object, ...))
+
+
+setMethod("logLik",  "cao", function(object, ...)
+    logLik.qrrvglm(object, ...))
+
+
+
+
+
+
 
 
 
-constraints.vlm <- function(object,
-                            type = c("lm", "term"),
-                            all = TRUE, which,
-                            matrix.out = FALSE,
-                            ...) {
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+constraints.vlm <-
+  function(object,
+           type = c("lm", "term"),
+           all = TRUE, which,
+           matrix.out = FALSE,
+           colnames.arg = TRUE,  # 20130827
+           ...) {
 
 
   type <- match.arg(type, c("lm", "term"))[1]
 
 
-  Hlist <- ans <- slot(object, "constraints") # For "lm" (formerly "vlm")
+  Hlist <- ans <- slot(object, "constraints")  # For "lm" (formerly "vlm")
 
   if (type == "term") {
     oassign.LM <- object@misc$orig.assign
@@ -57,14 +118,17 @@ constraints.vlm <- function(object,
     names.att.x.LM <- names(att.x.LM)
     ppp <- length(names.att.x.LM)
 
+
     ans <- vector("list", ppp)
     for (ii in 1:ppp) {
-      col.ptr <- (oassign.LM[[ii]])[1] # 20110114
+      col.ptr <- (oassign.LM[[ii]])[1]  # 20110114
       ans[[ii]] <- (Hlist[[col.ptr]])
     }
     names(ans) <- names.att.x.LM
   } # End of "term"
 
+
+
   if (matrix.out) {
     if (all) {
       M <- npred(object)
@@ -73,6 +137,14 @@ constraints.vlm <- function(object,
         rownames(mat.ans) <- object@misc$predictors.names
       if (length(object@misc$colnames.X_vlm) == ncol(mat.ans))
         colnames(mat.ans) <- object@misc$colnames.X_vlm
+
+
+      if (colnames.arg)
+        dimnames(mat.ans) <-
+          list(NULL,
+               colnames(model.matrix(object, type = "vlm")))
+
+
       mat.ans
     } else {
       ans[[which]]
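
For illustration (not part of the patch): logLik() now also works for "qrrvglm"
and "cao" fits, by re-evaluating the family's @loglikelihood slot at the fitted
values.  A hedged sketch, assuming a hypothetical data frame 'mydata' with
species counts sp1-sp3 and predictors x2, x3:

    # p1 <- cqo(cbind(sp1, sp2, sp3) ~ x2 + x3, poissonff, data = mydata)
    # logLik(p1)   # dispatches to logLik.qrrvglm()
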
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index 4b4991c..2ef246b 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -112,7 +112,7 @@ lrtest_vglm <- function(object, ..., name = NULL) {
 
   cls <- class(object)[1]
 
-  nobs <- function(x) x@misc$nrow_X_vlm
+  nobs <- function(x) x@misc$nrow.X.vlm
 
 
   tlab <- function(x) attr(terms(x), "term.labels")
@@ -173,7 +173,7 @@ lrtest_vglm <- function(object, ..., name = NULL) {
   
   no.update <- sapply(objects, function(obj) inherits(obj, cls))
   
-  for(i in 2:nmodels) {
+  for (i in 2:nmodels) {
     objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
   }
 
@@ -182,7 +182,7 @@ lrtest_vglm <- function(object, ..., name = NULL) {
 
 
   if (any(ns != ns[1])) {
-    for(i in 2:nmodels) {
+    for (i in 2:nmodels) {
       if (ns[1] != ns[i]) {
         if (no.update[i])
           stop("models were not all fitted to ",
@@ -245,9 +245,13 @@ setMethod("lrtest", "vglm",
 
 
 
+
+
+
  setMethod("show", "VGAManova",
            function(object)
-           stats::print.anova(object@Body))
+             getS3method("print", "anova")(object@Body))
+
 
 
 
@@ -343,7 +347,7 @@ lrtest.default <- function(object, ..., name = NULL) {
  print("no.update")
  print( no.update )
   
-  for(i in 2:nmodels)
+  for (i in 2:nmodels)
     objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
 
  print("objects i")
@@ -351,7 +355,7 @@ lrtest.default <- function(object, ..., name = NULL) {
 
   ns <- sapply(objects, nobs)
   if (any(ns != ns[1])) {
-    for(i in 2:nmodels) {
+    for (i in 2:nmodels) {
       if (ns[1] != ns[i]) {
         if (no.update[i])
           stop("models were not all fitted to ",
@@ -510,7 +514,7 @@ waldtest_default <- function(object, ..., vcov = NULL,
   
   no.update <- sapply(objects, function(obj) inherits(obj, cls))
   
-  for(i in 2:nmodels)
+  for (i in 2:nmodels)
     objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
 
   responses <- as.character(lapply(objects,
@@ -524,7 +528,7 @@ waldtest_default <- function(object, ..., vcov = NULL,
 
   ns <- sapply(objects, nobs)
   if (any(ns != ns[1])) {
-    for(i in 2:nmodels) {
+    for (i in 2:nmodels) {
       if (ns[1] != ns[i]) {
         if (no.update[i])
           stop("models were not all fitted to the ",
@@ -550,16 +554,18 @@ waldtest_default <- function(object, ..., vcov = NULL,
                       paste("Pr(>", test, ")", sep = ""))
   rownames(rval) <- 1:nmodels
   rval[,1] <- as.numeric(sapply(objects, df.residual))
-  for(i in 2:nmodels)
+  for (i in 2:nmodels)
     rval[i, 2:3] <- modelCompare(objects[[i-1]], objects[[i]],
                                  vfun = vcov.)
   if (test == "Chisq") {
     rval[,4] <- pchisq(rval[,3], round(abs(rval[,2])), lower.tail = FALSE)
   } else {
     df <- rval[,1]
-    for(i in 2:nmodels) if (rval[i,2] < 0) df[i] <- rval[i-1,1]
-    rval[,3] <- rval[,3]/abs(rval[,2])
-    rval[,4] <- pf(rval[,3], abs(rval[,2]), df, lower.tail = FALSE)
+    for (i in 2:nmodels)
+      if (rval[i, 2] < 0)
+        df[i] <- rval[i-1, 1]
+    rval[, 3] <- rval[, 3] / abs(rval[, 2])
+    rval[, 4] <- pf(rval[, 3], abs(rval[, 2]), df, lower.tail = FALSE)
   }
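
For illustration (not part of the patch): lrtest() compares nested vglm() fits.
A sketch using the pneumo data shipped with VGAM (names assumed unchanged in
this version):

    # fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    # fit0 <- vglm(cbind(normal, mild, severe) ~ 1,   propodds, data = pneumo)
    # lrtest(fit1, fit0)   # likelihood ratio test for dropping 'let'
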
 
 
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index c45807a..35208b9 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -12,50 +12,50 @@
 
 
 
- attrassigndefault <- function(mmat, tt) {
-    if (!inherits(tt, "terms"))
-        stop("need terms object")
-    aa <- attr(mmat, "assign")
-    if (is.null(aa))
-        stop("argument is not really a model matrix")
-    ll <- attr(tt, "term.labels")
-    if (attr(tt, "intercept") > 0)
-        ll <- c("(Intercept)", ll)
-    aaa <- factor(aa, labels = ll)
-    split(order(aa), aaa)
+attrassigndefault <- function(mmat, tt) {
+  if (!inherits(tt, "terms"))
+    stop("need terms object")
+  aa <- attr(mmat, "assign")
+  if (is.null(aa))
+    stop("argument is not really a model matrix")
+  ll <- attr(tt, "term.labels")
+  if (attr(tt, "intercept") > 0)
+    ll <- c("(Intercept)", ll)
+  aaa <- factor(aa, labels = ll)
+  split(order(aa), aaa)
 }
 
 
- attrassignlm <- function(object, ...)
-     attrassigndefault(model.matrix(object), object@terms)
+attrassignlm <- function(object, ...)
+  attrassigndefault(model.matrix(object), object@terms)
 
 
 
  vlabel <- function(xn, ncolBlist, M, separator = ":") {
 
-    if (length(xn) != length(ncolBlist))
-        stop("length of first two arguments not equal")
-
-    n1 <- rep(xn, ncolBlist)
-    if (M == 1)
-        return(n1)
-    n2 <- as.list(ncolBlist)
-    n2 <- lapply(n2, seq)
-    n2 <- unlist(n2)
-    n2 <- as.character(n2)
-    n2 <- paste(separator, n2, sep = "")
-    n3 <- rep(ncolBlist, ncolBlist)
-    n2[n3 == 1] <- ""
-    n1n2 <- paste(n1, n2, sep = "")
-    n1n2
+  if (length(xn) != length(ncolBlist))
+    stop("length of first two arguments not equal")
+
+  n1 <- rep(xn, ncolBlist)
+  if (M == 1)
+    return(n1)
+  n2 <- as.list(ncolBlist)
+  n2 <- lapply(n2, seq)
+  n2 <- unlist(n2)
+  n2 <- as.character(n2)
+  n2 <- paste(separator, n2, sep = "")
+  n3 <- rep(ncolBlist, ncolBlist)
+  n2[n3 == 1] <- ""
+  n1n2 <- paste(n1, n2, sep = "")
+  n1n2
 }
 
 
 
 
  vlm2lm.model.matrix <-
-  function(x_vlm, Blist = NULL,
-           which.lp = 1,
+  function(x.vlm, Blist = NULL,
+           which.linpred = 1,
            M = NULL) {
 
  
@@ -73,105 +73,107 @@
 
 
   Hmatrices <- matrix(c(unlist(Blist)), nrow = M)
-  if (ncol(Hmatrices) != ncol(x_vlm))
-    stop("ncol(Hmatrices) != ncol(x_vlm)")
-
-
-  n_lm <- nrow(x_vlm) / M
-  if (round(n_lm) != n_lm)
-    stop("'n_lm' does not seem to be an integer")
-    lapred.index <- which.lp
-    vecTF <- Hmatrices[lapred.index, ] != 0
-    X_lm_jay <- x_vlm[(0:(n_lm - 1)) * M + lapred.index, vecTF,
-                      drop = FALSE]
-  X_lm_jay
+  if (ncol(Hmatrices) != ncol(x.vlm))
+    stop("ncol(Hmatrices) != ncol(x.vlm)")
+
+
+  n.lm <- nrow(x.vlm) / M
+  if (round(n.lm) != n.lm)
+    stop("'n.lm' does not seem to be an integer")
+  linpred.index <- which.linpred
+  vecTF <- Hmatrices[linpred.index, ] != 0
+  X.lm.jay <- x.vlm[(0:(n.lm - 1)) * M + linpred.index, vecTF,
+                    drop = FALSE]
+  X.lm.jay
 }
 
 
 
 
 
- lm2vlm.model.matrix <- function(x, Blist = NULL, assign.attributes = TRUE,
-                                 M = NULL, xij = NULL, Xm2 = NULL) {
+ lm2vlm.model.matrix <-
+  function(x, Blist = NULL, assign.attributes = TRUE,
+           M = NULL, xij = NULL, Xm2 = NULL) {
 
 
 
 
-    if (length(Blist) != ncol(x))
-        stop("length(Blist) != ncol(x)")
+  if (length(Blist) != ncol(x))
+    stop("length(Blist) != ncol(x)")
 
-    if (length(xij)) {
-        if (inherits(xij, "formula"))
-            xij <- list(xij)
-        if (!is.list(xij))
-            stop("'xij' is not a list of formulae")
-    }
+  if (length(xij)) {
+    if (inherits(xij, "formula"))
+      xij <- list(xij)
+    if (!is.list(xij))
+      stop("'xij' is not a list of formulae")
+  }
 
-    if (!is.numeric(M))
-        M <- nrow(Blist[[1]])
-
-    nrow_X_lm <- nrow(x)
-    if (all(trivial.constraints(Blist) == 1)) {
-        X_vlm <- if (M > 1) kronecker(x, diag(M)) else x
-        ncolBlist <- rep(M, ncol(x))
-    } else {
-        allB <- matrix(unlist(Blist), nrow = M)
-        ncolBlist <- unlist(lapply(Blist, ncol))
-        Rsum <- sum(ncolBlist)
-
-        X1 <- rep(c(t(x)), rep(ncolBlist, nrow_X_lm))
-        dim(X1) <- c(Rsum, nrow_X_lm)
-        X_vlm <- kronecker(t(X1), matrix(1, M, 1)) *
-                 kronecker(matrix(1, nrow_X_lm, 1), allB)
-        rm(X1)
-    }
+  if (!is.numeric(M))
+    M <- nrow(Blist[[1]])
 
-    dn <- labels(x)
-    yn <- dn[[1]]
-    xn <- dn[[2]]
-    dimnames(X_vlm) <- list(vlabel(yn, rep(M, nrow_X_lm), M), 
-                            vlabel(xn, ncolBlist, M))
-
-    if (assign.attributes) {
-        attr(X_vlm, "contrasts")   <- attr(x, "contrasts")
-        attr(X_vlm, "factors")     <- attr(x, "factors")
-        attr(X_vlm, "formula")     <- attr(x, "formula")
-        attr(X_vlm, "class")       <- attr(x, "class")
-        attr(X_vlm, "order")       <- attr(x, "order")
-        attr(X_vlm, "term.labels") <- attr(x, "term.labels")
+  nrow.X.lm <- nrow(x)
+  if (all(trivial.constraints(Blist) == 1)) {
+    X.vlm <- if (M > 1) kronecker(x, diag(M)) else x
+    ncolBlist <- rep(M, ncol(x))
+  } else {
+    allB <- matrix(unlist(Blist), nrow = M)
+    ncolBlist <- unlist(lapply(Blist, ncol))
+    Rsum <- sum(ncolBlist)
+
+    X1 <- rep(c(t(x)), rep(ncolBlist, nrow.X.lm))
+    dim(X1) <- c(Rsum, nrow.X.lm)
+    X.vlm <- kronecker(t(X1), matrix(1, M, 1)) *
+             kronecker(matrix(1, nrow.X.lm, 1), allB)
+    rm(X1)
+  }
+
+  dn <- labels(x)
+  yn <- dn[[1]]
+  xn <- dn[[2]]
+  dimnames(X.vlm) <- list(vlabel(yn, rep(M, nrow.X.lm), M), 
+                          vlabel(xn, ncolBlist, M))
+
+  if (assign.attributes) {
+      attr(X.vlm, "contrasts")   <- attr(x, "contrasts")
+      attr(X.vlm, "factors")     <- attr(x, "factors")
+      attr(X.vlm, "formula")     <- attr(x, "formula")
+      attr(X.vlm, "class")       <- attr(x, "class")
+      attr(X.vlm, "order")       <- attr(x, "order")
+      attr(X.vlm, "term.labels") <- attr(x, "term.labels")
     
-        nasgn <- oasgn <- attr(x, "assign")
-        lowind <- 0
-        for(ii in 1:length(oasgn)) {
-            mylen <- length(oasgn[[ii]]) * ncolBlist[oasgn[[ii]][1]]
-            nasgn[[ii]] <- (lowind+1):(lowind+mylen)
-            lowind <- lowind + mylen
-        } # End of ii
-        if (lowind != ncol(X_vlm))
-            stop("something gone wrong")
-        attr(X_vlm, "assign") <- nasgn
+      nasgn <- oasgn <- attr(x, "assign")
+      lowind <- 0
+      for (ii in 1:length(oasgn)) {
+          mylen <- length(oasgn[[ii]]) * ncolBlist[oasgn[[ii]][1]]
+          nasgn[[ii]] <- (lowind+1):(lowind+mylen)
+          lowind <- lowind + mylen
+      } # End of ii
+      if (lowind != ncol(X.vlm))
+        stop("something gone wrong")
+      attr(X.vlm, "assign") <- nasgn
     
 
-        fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
-        vasgn <- vector("list", sum(fred))
-        kk <- 0
-        for(ii in 1:length(oasgn)) {
-            temp <- matrix(nasgn[[ii]], ncol = length(oasgn[[ii]]))
-            for(jloc in 1:nrow(temp)) {
-                kk <- kk + 1
-                vasgn[[kk]] <- temp[jloc,]
-            }
+      fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
+      vasgn <- vector("list", sum(fred))
+      kk <- 0
+      for (ii in 1:length(oasgn)) {
+        temp <- matrix(nasgn[[ii]], ncol = length(oasgn[[ii]]))
+        for (jloc in 1:nrow(temp)) {
+          kk <- kk + 1
+          vasgn[[kk]] <- temp[jloc,]
         }
-        names(vasgn) <- vlabel(names(oasgn), fred, M)
-        attr(X_vlm, "vassign") <- vasgn
+      }
+      names(vasgn) <- vlabel(names(oasgn), fred, M)
+      attr(X.vlm, "vassign") <- vasgn
 
-        attr(X_vlm, "constraints") <- Blist
-    } # End of if (assign.attributes)
+      attr(X.vlm, "constraints") <- Blist
+  } # End of if (assign.attributes)
 
 
 
 
-    if (!length(xij)) return(X_vlm)
+  if (!length(xij))
+    return(X.vlm)
 
 
 
@@ -179,52 +181,52 @@
 
 
 
-    at.x <- attr(x, "assign")
-    at.vlmx <- attr(X_vlm, "assign")
-    at.Xm2 <- attr(Xm2, "assign")
+  at.x <- attr(x, "assign")
+  at.vlmx <- attr(X.vlm, "assign")
+  at.Xm2 <- attr(Xm2, "assign")
 
-    for(ii in 1:length(xij)) {
-        form.xij <- xij[[ii]]
-        if (length(form.xij) != 3) 
-            stop("xij[[", ii, "]] is not a formula with a response")
-        tform.xij <- terms(form.xij)
-        aterm.form <- attr(tform.xij, "term.labels") # Does not include response
-        if (length(aterm.form) != M)
-            stop("xij[[", ii, "]] does not contain ", M, " terms")
+  for (ii in 1:length(xij)) {
+      form.xij <- xij[[ii]]
+      if (length(form.xij) != 3) 
+        stop("xij[[", ii, "]] is not a formula with a response")
+      tform.xij <- terms(form.xij)
+      aterm.form <- attr(tform.xij, "term.labels")  # Does not include response
+      if (length(aterm.form) != M)
+        stop("xij[[", ii, "]] does not contain ", M, " terms")
 
-        name.term.y <- as.character(form.xij)[2]
-        cols.X_vlm <- at.vlmx[[name.term.y]]  # May be > 1 in length.
+      name.term.y <- as.character(form.xij)[2]
+      cols.X.vlm <- at.vlmx[[name.term.y]]  # May be > 1 in length.
 
-        x.name.term.2 <- aterm.form[1]   # Choose the first one
-        One.such.term <- at.Xm2[[x.name.term.2]]
-        for(bbb in 1:length(One.such.term)) {
-            use.cols.Xm2 <- NULL
-            for(sss in 1:M) {
-                x.name.term.2 <- aterm.form[sss]
-                one.such.term <- at.Xm2[[x.name.term.2]]
-                use.cols.Xm2 <- c(use.cols.Xm2, one.such.term[bbb])
-            } # End of sss
+      x.name.term.2 <- aterm.form[1]   # Choose the first one
+      One.such.term <- at.Xm2[[x.name.term.2]]
+      for (bbb in 1:length(One.such.term)) {
+        use.cols.Xm2 <- NULL
+        for (sss in 1:M) {
+          x.name.term.2 <- aterm.form[sss]
+          one.such.term <- at.Xm2[[x.name.term.2]]
+          use.cols.Xm2 <- c(use.cols.Xm2, one.such.term[bbb])
+        } # End of sss
 
-            allXk <- Xm2[,use.cols.Xm2,drop=FALSE]
-            cmat.no <- (at.x[[name.term.y]])[1] # First one will do (all the same).
-            cmat <- Blist[[cmat.no]]
-            Rsum.k <- ncol(cmat)
-            tmp44 <- kronecker(matrix(1, nrow_X_lm, 1), t(cmat)) *
-                    kronecker(allXk, matrix(1,ncol(cmat), 1)) # n*Rsum.k x M
+      allXk <- Xm2[,use.cols.Xm2,drop=FALSE]
+      cmat.no <- (at.x[[name.term.y]])[1]  # First one will do (all the same).
+      cmat <- Blist[[cmat.no]]
+      Rsum.k <- ncol(cmat)
+      tmp44 <- kronecker(matrix(1, nrow.X.lm, 1), t(cmat)) *
+               kronecker(allXk, matrix(1,ncol(cmat), 1))  # n*Rsum.k x M
 
-            tmp44 <- array(t(tmp44), c(M, Rsum.k, nrow_X_lm))
-            tmp44 <- aperm(tmp44, c(1,3,2)) # c(M, n, Rsum.k)
-            rep.index <- cols.X_vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
-            X_vlm[,rep.index] <- c(tmp44) 
-        } # End of bbb
-    } # End of for(ii in 1:length(xij))
+      tmp44 <- array(t(tmp44), c(M, Rsum.k, nrow.X.lm))
+      tmp44 <- aperm(tmp44, c(1,3,2))  # c(M, n, Rsum.k)
+      rep.index <- cols.X.vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
+      X.vlm[,rep.index] <- c(tmp44) 
+    } # End of bbb
+  } # End of for (ii in 1:length(xij))
 
-    if (assign.attributes) {
-        attr(X_vlm, "vassign") <- vasgn
-        attr(X_vlm, "assign") <- nasgn
-        attr(X_vlm, "xij") <- xij
-    }
-    X_vlm
+  if (assign.attributes) {
+    attr(X.vlm, "vassign") <- vasgn
+    attr(X.vlm, "assign") <- nasgn
+    attr(X.vlm, "xij") <- xij
+  }
+  X.vlm
 }
 
 
@@ -234,95 +236,95 @@
 
  model.matrixvlm <- function(object,
                             type = c("vlm", "lm", "lm2", "bothlmlm2"),
-                            lapred.index = NULL,
+                            linpred.index = NULL,
                             ...) {
 
 
 
-    if (mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
-    type <- match.arg(type, c("vlm", "lm", "lm2", "bothlmlm2"))[1]
+  type <- match.arg(type, c("vlm", "lm", "lm2", "bothlmlm2"))[1]
 
-    if (length(lapred.index) &&
-        type != "lm")
-      stop("Must set 'type = \"lm\"' when 'lapred.index' is ",
-           "assigned a value")
-    if (length(lapred.index) &&
-        length(object@control$xij))
-      stop("Currently cannot handle 'xij' models when 'lapred.index' is ",
-           "assigned a value")
+  if (length(linpred.index) &&
+      type != "lm")
+    stop("Must set 'type = \"lm\"' when 'linpred.index' is ",
+         "assigned a value")
+  if (length(linpred.index) &&
+      length(object@control$xij))
+    stop("Currently cannot handle 'xij' models when 'linpred.index' is ",
+         "assigned a value")
 
 
-    x   <- slot(object, "x")
+  x   <- slot(object, "x")
 
 
-    Xm2 <- if (any(slotNames(object) == "Xm2")) slot(object, "Xm2") else
-          numeric(0)
+  Xm2 <- if (any(slotNames(object) == "Xm2")) slot(object, "Xm2") else
+         numeric(0)
 
-    if (!length(x)) {
-        data <- model.frame(object, xlev = object@xlevels, ...) 
+  if (!length(x)) {
+    data <- model.frame(object, xlev = object@xlevels, ...) 
 
-        kill.con <- if (length(object@contrasts)) object@contrasts else NULL
+    kill.con <- if (length(object@contrasts)) object@contrasts else NULL
 
-        x <- vmodel.matrix.default(object, data = data,
-                                  contrasts.arg = kill.con)
-        tt <- terms(object)
-        attr(x, "assign") <- attrassigndefault(x, tt)
-    }
+    x <- vmodel.matrix.default(object, data = data,
+                               contrasts.arg = kill.con)
+    tt <- terms(object)
+    attr(x, "assign") <- attrassigndefault(x, tt)
+  }
 
-    if ((type == "lm2" || type == "bothlmlm2") &&
-        !length(Xm2)) {
-      object.copy2 <- object
-      data <- model.frame(object.copy2, xlev = object.copy2@xlevels, ...) 
+  if ((type == "lm2" || type == "bothlmlm2") &&
+      !length(Xm2)) {
+    object.copy2 <- object
+    data <- model.frame(object.copy2, xlev = object.copy2@xlevels, ...) 
 
-      kill.con <- if (length(object.copy2@contrasts))
-                 object.copy2@contrasts else NULL
+    kill.con <- if (length(object.copy2@contrasts))
+                object.copy2@contrasts else NULL
 
-      Xm2 <- vmodel.matrix.default(object.copy2, data = data,
-                                  contrasts.arg = kill.con)
-      ttXm2 <- terms(object.copy2@misc$form2)
-      attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
-    }
+    Xm2 <- vmodel.matrix.default(object.copy2, data = data,
+                                 contrasts.arg = kill.con)
+    ttXm2 <- terms(object.copy2@misc$form2)
+    attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
+  }
 
 
 
 
 
-    if (type == "lm" && is.null(lapred.index)) {
-      return(x)
-    } else if (type == "lm2") {
-      return(Xm2)
-    } else if (type == "bothlmlm2") {
-      return(list(X = x, Xm2 = Xm2))
-    }
+  if (type == "lm" && is.null(linpred.index)) {
+    return(x)
+  } else if (type == "lm2") {
+    return(Xm2)
+  } else if (type == "bothlmlm2") {
+    return(list(X = x, Xm2 = Xm2))
+  }
 
 
-    M <- object@misc$M  
-    Blist <- object@constraints # == constraints(object, type = "lm")
-    X_vlm <- lm2vlm.model.matrix(x = x, Blist = Blist,
-                                 xij = object@control$xij, Xm2 = Xm2)
-
-    if (type == "vlm") {
-      return(X_vlm)
-    } else if (type == "lm" && length(lapred.index)) {
-      if (!is.Numeric(lapred.index, integer.valued = TRUE, positive = TRUE,
-                      allowable.length = 1))
-        stop("bad input for argument 'lapred.index'")
-      if (!length(intersect(lapred.index, 1:M)))
-        stop("argument 'lapred.index' should have ",
-             "a single value from the set 1:", M)
-
-      Hlist <- Blist
-      n_lm <- nobs(object)  # Number of rows of the LM matrix
-      M <- object@misc$M  # Number of linear/additive predictors
-      Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
-      jay <- lapred.index
-      index0 <- Hmatrices[jay, ] != 0
-      X_lm_jay <- X_vlm[(0:(n_lm - 1)) * M + jay, index0, drop = FALSE]
-      X_lm_jay
-    } else {
-      stop("am confused. Do not know what to return")
-    }
+  M <- object@misc$M  
+  Blist <- object@constraints # == constraints(object, type = "lm")
+  X.vlm <- lm2vlm.model.matrix(x = x, Blist = Blist,
+                               xij = object@control$xij, Xm2 = Xm2)
+
+  if (type == "vlm") {
+    return(X.vlm)
+  } else if (type == "lm" && length(linpred.index)) {
+    if (!is.Numeric(linpred.index, integer.valued = TRUE, positive = TRUE,
+                    length.arg = 1))
+      stop("bad input for argument 'linpred.index'")
+    if (!length(intersect(linpred.index, 1:M)))
+      stop("argument 'linpred.index' should have ",
+           "a single value from the set 1:", M)
+
+    Hlist <- Blist
+    n.lm <- nobs(object)  # Number of rows of the LM matrix
+    M <- object@misc$M  # Number of linear/additive predictors
+    Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
+    jay <- linpred.index
+    index0 <- Hmatrices[jay, ] != 0
+    X.lm.jay <- X.vlm[(0:(n.lm - 1)) * M + jay, index0, drop = FALSE]
+    X.lm.jay
+  } else {
+    stop("am confused. Do not know what to return")
+  }
 }
 
 
@@ -334,11 +336,26 @@ setMethod("model.matrix",  "vlm", function(object, ...)
 
 
 
+ model.matrixvgam <-
+  function(object,
+           type = c("lm", "vlm", "lm", "lm2", "bothlmlm2"),
+           linpred.index = NULL,
+           ...) {
+  model.matrixvlm(object = object, 
+                  type = type[1],
+                  linpred.index = linpred.index, ...)
+}
+setMethod("model.matrix",  "vgam", function(object, ...)
+           model.matrixvgam(object, ...))
+
+
+
 
 
 
  model.framevlm <- function(object, 
-                            setupsmart = TRUE, wrapupsmart = TRUE, ...) {
+                            setupsmart = TRUE,
+                            wrapupsmart = TRUE, ...) {
 
   dots <- list(...)
   nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0)]
@@ -353,7 +370,7 @@ setMethod("model.matrix",  "vlm", function(object, ...)
     }
 
     fcall[names(nargs)] <- nargs
-    env <- environment(object@terms$terms) # @terms or @terms$terms ??
+    env <- environment(object@terms$terms)  # @terms or @terms$terms ??
     if (is.null(env)) 
       env <- parent.frame()
     ans <- eval(fcall, env, parent.frame())
@@ -371,14 +388,15 @@ if (!isGeneric("model.frame"))
         standardGeneric("model.frame"))
 
 setMethod("model.frame",  "vlm", function(formula, ...)
-           model.framevlm(object=formula, ...))
+           model.framevlm(object = formula, ...))
 
 
 
 
 
- vmodel.matrix.default <- function(object, data = environment(object),
-                                   contrasts.arg = NULL, xlev = NULL, ...) {
+ vmodel.matrix.default <-
+  function(object, data = environment(object),
+           contrasts.arg = NULL, xlev = NULL, ...) {
 
   t <- if (missing(data)) terms(object) else terms(object, data = data)
   if (is.null(attr(data, "terms")))
@@ -420,8 +438,8 @@ setMethod("model.frame",  "vlm", function(formula, ...)
       }
     }
   } else {
-      isF <- FALSE
-      data <- list(x = rep(0, nrow(data)))
+    isF <- FALSE
+    data <- list(x = rep(0, nrow(data)))
   }
 
 
@@ -439,14 +457,22 @@ setMethod("model.frame",  "vlm", function(formula, ...)
 
 
 
-depvar.vlm <- function(object, ...) {
-  object@y
+
+depvar.vlm <- function(object, type = c("lm", "lm2"), ...) {
+  type <- match.arg(type, c("lm", "lm2"))[1]
+  if (type == "lm") {
+    object@y
+  } else {
+    object@Ym2
+  }
 }
 
 
 
 if (!isGeneric("depvar"))
-    setGeneric("depvar", function(object, ...) standardGeneric("depvar"),
+    setGeneric("depvar",
+               function(object, ...)
+                 standardGeneric("depvar"),
                package = "VGAM")
 
 
@@ -494,8 +520,9 @@ setMethod("npred",  "rcim", function(object, ...)
 
 
 
-hatvaluesvlm <- function(model,
-                         type = c("diagonal", "matrix", "centralBlocks"), ...) {
+hatvaluesvlm <-
+  function(model,
+           type = c("diagonal", "matrix", "centralBlocks"), ...) {
 
 
   if(!missing(type))
@@ -514,7 +541,7 @@ hatvaluesvlm <- function(model,
   if (is.empty.list(qrSlot)) {
 
     wzedd <- weights(model, type = "working")
-    UU <- vchol(wzedd, M = M, n = nn, silent = TRUE) # Few rows, many cols
+    UU <- vchol(wzedd, M = M, n = nn, silent = TRUE)  # Few rows, many cols
     X.vlm <- model.matrix(model, type = "vlm")
     UU.X.vlm <- mux111(cc = UU, xmat = X.vlm, M = M)
     qrSlot <- qr(UU.X.vlm)
@@ -536,7 +563,7 @@ hatvaluesvlm <- function(model,
       rownames(Diag.Elts) <- rownames(model.matrix(model, type = "lm"))
 
     attr(Diag.Elts, "predictors.names") <- model at misc$predictors.names
-    attr(Diag.Elts, "ncol_X_vlm") <- model at misc$ncol_X_vlm
+    attr(Diag.Elts, "ncol.X.vlm") <- model at misc$ncol.X.vlm
 
     Diag.Elts
   } else if (type.arg == "matrix") {
@@ -547,7 +574,7 @@ hatvaluesvlm <- function(model,
 
     attr(all.mat, "M") <- M
     attr(all.mat, "predictors.names") <- model at misc$predictors.names
-    attr(all.mat, "ncol_X_vlm") <- model at misc$ncol_X_vlm
+    attr(all.mat, "ncol.X.vlm") <- model at misc$ncol.X.vlm
 
     all.mat
   } else {
@@ -556,18 +583,18 @@ hatvaluesvlm <- function(model,
     all.rows.index <- rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$row.index
     all.cols.index <- rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$col.index
 
-    H_ss <- rowSums(Q.S3[all.rows.index, ] *
-                   Q.S3[all.cols.index, ])
+    H.ss <- rowSums(Q.S3[all.rows.index, ] *
+                    Q.S3[all.cols.index, ])
 
-    H_ss <- matrix(H_ss, nn, MM12, byrow = TRUE)
-    H_ss
+    H.ss <- matrix(H.ss, nn, MM12, byrow = TRUE)
+    H.ss
   }
 }
 
 
 if (!isGeneric("hatvalues"))
     setGeneric("hatvalues", function(model, ...)
-      standardGeneric("hatvalues"), package = "VGAM")
+  standardGeneric("hatvalues"), package = "VGAM")
 
 
 setMethod("hatvalues",  "vlm", function(model, ...)
@@ -606,7 +633,7 @@ hatplot.vlm <-
   if (!is.matrix(hatval))
     stop("argument 'model' seems neither a vglm() object or a matrix")
 
-  ncol_X_vlm <- attr(hatval, "ncol_X_vlm")
+  ncol.X.vlm <- attr(hatval, "ncol.X.vlm")
   M <- attr(hatval, "M")
   predictors.names <- attr(hatval, "predictors.names")
   if (!length(predictors.names)) {
@@ -628,7 +655,7 @@ hatplot.vlm <-
          ylim = ylim, xlab = xlab, ylab = ylab,
          ...)
     points(1:N, hatval[, jay], ...)
-    abline(h = multiplier * ncol_X_vlm / (N * M), lty = lty, ...)
+    abline(h = multiplier * ncol.X.vlm / (N * M), lty = lty, ...)
   }
 }
 
@@ -674,14 +701,14 @@ dfbetavlm <-
   if (!is(model, "vlm"))
     stop("argument 'model' does not seem to be a vglm() object")
 
-  n_lm <- nobs(model, type = "lm")
-  X_lm <- model.matrix(model, type = "lm")
-  X_vlm <- model.matrix(model, type = "vlm")
-  p_vlm <- ncol(X_vlm) # nvar(model, type = "vlm")
+  n.lm <- nobs(model, type = "lm")
+  X.lm <- model.matrix(model, type = "lm")
+  X.vlm <- model.matrix(model, type = "vlm")
+  p.vlm <- ncol(X.vlm)  # nvar(model, type = "vlm")
   M    <- npred(model)
-  wz <- weights(model, type = "work") # zz unused!!!!!!!
+  wz <- weights(model, type = "work")  # zz unused!!!!!!!
   etastart <- predict(model)
-  offset <- matrix(model@offset, n_lm, M)
+  offset <- matrix(model@offset, n.lm, M)
   new.control <- model@control
   pweights <- weights(model, type = "prior")
   orig.w <- if (is.numeric(model@extra$orig.w))
@@ -693,7 +720,7 @@ dfbetavlm <-
   new.control$trace <- trace.new
   new.control$maxit <- maxit.new
 
-  dfbeta <- matrix(0, n_lm, p_vlm)
+  dfbeta <- matrix(0, n.lm, p.vlm)
 
   Terms.zz <- NULL
 
@@ -701,29 +728,29 @@ dfbetavlm <-
 
 
 
-  for (ii in 1:n_lm) {
+  for (ii in 1:n.lm) {
     if (trace.new) {
       cat("\n", "Observation ", ii, "\n")
       flush.console()
     }
 
-    w.orig <- if (length(orig.w) != n_lm)
-               rep(orig.w, length.out = n_lm) else
-               orig.w
+    w.orig <- if (length(orig.w) != n.lm)
+                rep(orig.w, length.out = n.lm) else
+                orig.w
     w.orig[ii] <- w.orig[ii] * smallno # Relative
 
-    fit <- vglm.fit(x = X_lm,
-                    X_vlm_arg = X_vlm, # Should be more efficient
+    fit <- vglm.fit(x = X.lm,
+                    X.vlm.arg = X.vlm,  # Should be more efficient
                     y = if (y.integer)
                       round(depvar(model) * c(pweights) / c(orig.w)) else
                            (depvar(model) * c(pweights) / c(orig.w)),
-                    w = w.orig, # Set to zero so that it is 'deleted'.
+                    w = w.orig,  # Set to zero so that it is 'deleted'.
                     Xm2 = NULL, Ym2 = NULL,
-                    etastart = etastart, # coefstart = NULL,
+                    etastart = etastart,  # coefstart = NULL,
                     offset = offset,
                     family = model@family,
                     control = new.control,
-                    criterion =  new.control$criterion, # "coefficients",
+                    criterion =  new.control$criterion,  # "coefficients",
                     qr.arg = FALSE,
                     constraints = constraints(model, type = "term"),
                     extra = model@extra,
@@ -734,7 +761,7 @@ dfbetavlm <-
   }
 
 
-  dimnames(dfbeta) <- list(rownames(X_lm), names(coef(model)))
+  dimnames(dfbeta) <- list(rownames(X.lm), names(coef(model)))
   dfbeta
 }
 
@@ -767,7 +794,7 @@ setMethod("dfbeta",  "rcim", function(model, ...)
 
 
 
-hatvaluesbasic <- function(X_vlm,
+hatvaluesbasic <- function(X.vlm,
                            diagWm,
                            M = 1) {
 
@@ -775,31 +802,31 @@ hatvaluesbasic <- function(X_vlm,
   if (M  > 1)
     stop("currently argument 'M' must be 1")
 
-  nn <- nrow(X_vlm)
-  ncol_X_vlm <- ncol(X_vlm)
+  nn <- nrow(X.vlm)
+  ncol.X.vlm <- ncol(X.vlm)
 
-  XtW <- t(c(diagWm) * X_vlm)
+  XtW <- t(c(diagWm) * X.vlm)
 
 
-    UU <- sqrt(diagWm) # Only for M == 1
-    UU.X_vlm <- c(UU) * X_vlm # c(UU) okay for M==1
+  UU <- sqrt(diagWm)  # Only for M == 1
+  UU.X.vlm <- c(UU) * X.vlm # c(UU) okay for M==1
 
-    qrSlot <- qr(UU.X_vlm)
-    Rmat <- qr.R(qrSlot)
+  qrSlot <- qr(UU.X.vlm)
+  Rmat <- qr.R(qrSlot)
 
-    rinv <- diag(ncol_X_vlm)
-    rinv <- backsolve(Rmat, rinv)
+  rinv <- diag(ncol.X.vlm)
+  rinv <- backsolve(Rmat, rinv)
 
 
-    Diag.Hat <- if (FALSE) {
-      covun <- rinv %*% t(rinv)
-      rhs.mat <- covun %*% XtW
-      colSums(t(X_vlm) * rhs.mat)
-    } else {
-      mymat <- X_vlm %*% rinv
-      rowSums(diagWm * mymat^2)
-    }
-    Diag.Hat
+  Diag.Hat <- if (FALSE) {
+    covun <- rinv %*% t(rinv)
+    rhs.mat <- covun %*% XtW
+    colSums(t(X.vlm) * rhs.mat)
+  } else {
+    mymat <- X.vlm %*% rinv
+    rowSums(diagWm * mymat^2)
+  }
+  Diag.Hat
 }
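
Note on the model.matrix/hatvalues/dfbeta hunks above: model.matrixvlm() renames its 'lapred.index' argument to 'linpred.index', the underscore identifiers (X_vlm, n_lm, ncol_X_vlm, H_ss) move to dotted names, and a model.matrix() method is added for "vgam" objects. A minimal usage sketch against the new interface (the pneumo/cumulative() fit is only an assumed example object, shown for illustration):

  library(VGAM)
  pneumo <- transform(pneumo, let = log(exposure.time))
  fit <- vglm(cbind(normal, mild, severe) ~ let,
              cumulative(parallel = TRUE), data = pneumo)
  model.matrix(fit, type = "lm")    # ordinary n x p LM design matrix
  model.matrix(fit, type = "vlm")   # big (n*M) x p.vlm VLM design matrix
  model.matrix(fit, type = "lm", linpred.index = 2)  # columns acting on the 2nd linear predictor
  hatvalues(fit, type = "diagonal")                  # hat values per observation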
 
 
diff --git a/R/mux.q b/R/mux.q
index 892c26c..3825dec 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -17,10 +17,10 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
     cc <- matrix(cc, 1, 1)
   if (!is.matrix(cc))
     stop("'cc' is not a matrix")
-  c(dotC(name = "VGAM_C_mux34", as.double(xmat), as.double(cc),
+  c( .C("VGAM_C_mux34", as.double(xmat), as.double(cc),
          as.integer(nnn), as.integer(RRR),
          as.integer(symmetric), ans = as.double(rep(0.0, nnn)),
-         NAOK = TRUE)$ans)
+         NAOK = TRUE, PACKAGE = "VGAM")$ans)
 }
 
 
@@ -34,12 +34,14 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
   d <- dim(xmat)
   n <- d[1]
   R <- d[2]
-  if (length(cc) == 1) cc = matrix(cc, 1, 1)
-  if (!is.matrix(cc)) stop("'cc' is not a matrix")
-  c(dotFortran(name = "vgamf90mux34", as.double(xmat), as.double(cc),
+  if (length(cc) == 1)
+    cc <- matrix(cc, 1, 1)
+  if (!is.matrix(cc))
+    stop("'cc' is not a matrix")
+  c( .Fortran("vgamf90mux34", as.double(xmat), as.double(cc),
                as.integer(n), as.integer(R),
                as.integer(symmetric), ans = as.double(rep(0.0, n)),
-               NAOK = TRUE)$ans)
+               NAOK = TRUE, PACKAGE = "VGAM")$ans)
 }
 
 
@@ -61,9 +63,9 @@ mux2 <- function(cc, xmat) {
   if (d[2] != p || d[3] != n)
     stop("dimension size inconformable")
   ans <- rep(as.numeric(NA), n*M)
-  fred <- dotC(name = "mux2", as.double(cc), as.double(t(xmat)),
+  fred <- .C("mux2", as.double(cc), as.double(t(xmat)),
                ans = as.double(ans), as.integer(p), as.integer(n),
-               as.integer(M), NAOK = TRUE)
+               as.integer(M), NAOK = TRUE, PACKAGE = "VGAM")
   matrix(fred$ans, n, M, byrow = TRUE)
 }
 
@@ -77,14 +79,14 @@ mux22 <- function(cc, xmat, M, upper = FALSE, as.matrix = FALSE) {
   n <- ncol(cc)
 
   index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
-  dimm.value <- nrow(cc) # Usually M or M(M+1)/2
+  dimm.value <- nrow(cc)  # Usually M or M(M+1)/2
 
   ans <- rep(as.numeric(NA), n*M)
-  fred <- dotC(name = "mux22", as.double(cc), as.double(t(xmat)),
+  fred <- .C("mux22", as.double(cc), as.double(t(xmat)),
                ans = as.double(ans), as.integer(dimm.value),
                as.integer(index$row), as.integer(index$col),
                as.integer(n), as.integer(M), wk = double(M*M),
-               as.integer(as.numeric(upper)), NAOK = TRUE)
+               as.integer(as.numeric(upper)), NAOK = TRUE, PACKAGE = "VGAM")
   if (!as.matrix) fred$ans else {
     dim(fred$ans) <- c(M, n)
     t(fred$ans)
@@ -122,7 +124,7 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
   index.r <- iam(NA, NA, r, both = TRUE, diag = TRUE)
 
   size <- if (matrix.arg) dimm(r)*n else r*r*n
-  fred <- dotC(name = "mux5", as.double(cc), as.double(x),
+  fred <- .C("mux5", as.double(cc), as.double(x),
                ans = double(size),
                as.integer(M), as.integer(n), as.integer(r),
                as.integer(neltscci),
@@ -131,7 +133,7 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
                double(M*M), double(r*r), 
                as.integer(index.M$row), as.integer(index.M$col),
                as.integer(index.r$row), as.integer(index.r$col), 
-               ok3 = as.integer(1), NAOK = TRUE)
+               ok3 = as.integer(1), NAOK = TRUE, PACKAGE = "VGAM")
   if (fred$ok3 == 0)
     stop("can only handle matrix.arg == 1")
  
@@ -155,14 +157,14 @@ mux55 <- function(evects, evals, M) {
   if (d[1] != M || d[2] != M || d[3] != n ||
       nrow(evals)!= M || ncol(evals) != n)
     stop("input nonconformable")
-  MM12 <- M*(M+1)/2 # The answer is a full-matrix
+  MM12 <- M*(M+1)/2  # The answer is a full-matrix
   index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
 
-  fred <- dotC(name = "mux55", as.double(evects), as.double(evals),
+  fred <- .C("mux55", as.double(evects), as.double(evals),
                ans = double(MM12 * n),
                double(M*M), double(M*M),
                as.integer(index$row), as.integer(index$col), 
-               as.integer(M), as.integer(n), NAOK = TRUE)
+               as.integer(M), as.integer(n), NAOK = TRUE, PACKAGE = "VGAM")
   dim(fred$ans) <- c(MM12, n)
   fred$ans
 }
@@ -182,13 +184,13 @@ mux7 <- function(cc, x) {
   n  <- dimcc[3]
   r <- dimx[2]
   if (is.matrix(x))
-    x <- array(x,c(qq,r, n))
+    x <- array(x, c(qq, r, n))
 
   ans <- array(NA, c(M, r, n))
-  fred <- dotC(name = "mux7", as.double(cc), as.double(x),
+  fred <- .C("mux7", as.double(cc), as.double(x),
                ans = as.double(ans),
                as.integer(M), as.integer(qq), as.integer(n),
-               as.integer(r), NAOK = TRUE)
+               as.integer(r), NAOK = TRUE, PACKAGE = "VGAM")
   array(fred$ans, c(M, r, n))
 }
 
@@ -211,9 +213,9 @@ mux9 <- function(cc, xmat) {
   n <- dimcc[3]
 
   ans <-  matrix(as.numeric(NA), n, M)
-  fred <- dotC(name = "mux9", as.double(cc), as.double(xmat),
+  fred <- .C("mux9", as.double(cc), as.double(xmat),
                ans = as.double(ans),
-               as.integer(M), as.integer(n), NAOK = TRUE)
+               as.integer(M), as.integer(n), NAOK = TRUE, PACKAGE = "VGAM")
   matrix(fred$ans, n, M)
 }
 
@@ -247,15 +249,15 @@ mux111 <- function(cc, xmat, M, upper = TRUE) {
   R <- ncol(xmat)
   n <- nrow(xmat) / M
   index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
-  dimm.value <- nrow(cc) # M or M(M+1)/2
+  dimm.value <- nrow(cc)  # M or M(M+1)/2
 
-  fred <- dotC(name = "mux111", as.double(cc),
+  fred <- .C("mux111", as.double(cc),
                b = as.double(t(xmat)),
                as.integer(M),
                as.integer(R), as.integer(n), wk = double(M * M),
                wk2 = double(M * R), as.integer(index$row),
                as.integer(index$col), as.integer(dimm.value),
-               as.integer(as.numeric(upper)), NAOK = TRUE)
+               as.integer(as.numeric(upper)), NAOK = TRUE, PACKAGE = "VGAM")
 
   ans <- fred$b
   dim(ans) <- c(R, nrow(xmat))
@@ -279,9 +281,9 @@ mux15 <- function(cc, xmat) {
     stop("argument 'cc' is not symmetric")
 
   ans <- rep(as.numeric(NA), n*M*M)
-  fred <- dotC(name = "mux15", as.double(cc), as.double(t(xmat)),
+  fred <- .C("mux15", as.double(cc), as.double(t(xmat)),
                ans = as.double(ans), as.integer(M),
-               as.integer(n), NAOK = TRUE)
+               as.integer(n), NAOK = TRUE, PACKAGE = "VGAM")
   array(fred$ans, c(M, M, n))
 }
 
@@ -294,13 +296,13 @@ vforsub <- function(cc, b, M, n) {
 
 
     index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
-    dimm.value <- nrow(cc) # M or M(M+1)/2
+    dimm.value <- nrow(cc)  # M or M(M+1)/2
 
 
-  fred <- dotC(name = "vforsub", as.double(cc), b = as.double(t(b)),
+  fred <- .C("vforsub", as.double(cc), b = as.double(t(b)),
                as.integer(M), as.integer(n), wk = double(M*M),
                as.integer(index$row), as.integer(index$col),
-               as.integer(dimm.value), NAOK = TRUE)
+               as.integer(dimm.value), NAOK = TRUE, PACKAGE = "VGAM")
 
   dim(fred$b) <- c(M, n)
   fred$b
@@ -315,10 +317,10 @@ vbacksub <- function(cc, b, M, n) {
   if (nrow(b) != M || ncol(b) != n)
     stop("dimension size inconformable")
 
-  fred <- dotC(name = "vbacksub", as.double(cc), b = as.double(b),
+  fred <- .C("vbacksub", as.double(cc), b = as.double(b),
                as.integer(M), as.integer(n), wk = double(M*M),
                as.integer(index$row), as.integer(index$col),
-               as.integer(dimm.value), NAOK = TRUE)
+               as.integer(dimm.value), NAOK = TRUE, PACKAGE = "VGAM")
 
   if (M == 1) {
     fred$b
@@ -340,12 +342,12 @@ vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
   cc <- t(cc)
   MM <- nrow(cc)    # cc is big enough to hold its Cholesky decom.
 
-  fred <- dotC(name = "vchol", cc = as.double(cc), as.integer(M),
+  fred <- .C("vchol", cc = as.double(cc), as.integer(M),
                as.integer(n), ok = integer(n),
                wk = double(M*M), as.integer(index$row),
                as.integer(index$col),
                as.integer(MM),
-               NAOK = TRUE)
+               NAOK = TRUE, PACKAGE = "VGAM")
 
   failed <- (fred$ok != 1)
   if ((correction.needed <- any(failed))) {
@@ -363,20 +365,20 @@ vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
   dim(ans) <- c(MM, n)
 
   if (correction.needed) {
-      temp <- cc[, index, drop = FALSE]
-      tmp777 <- vchol.greenstadt(temp, M = M, silent = silent,
-                                 callno = callno + 1)
+    temp <- cc[, index, drop = FALSE]
+    tmp777 <- vchol.greenstadt(temp, M = M, silent = silent,
+                               callno = callno + 1)
 
 
-      if (length(index) == n) {
-          ans <- tmp777[1:nrow(ans),,drop = FALSE]
-      } else {
+    if (length(index) == n) {
+      ans <- tmp777[1:nrow(ans), , drop = FALSE]
+    } else {
 
 
-          ans[,index] <- tmp777 # restored 16/10/03
-      }
+      ans[, index] <- tmp777  # restored 16/10/03
+    }
   }
-  dim(ans) <- c(MM, n) # Make sure
+  dim(ans) <- c(MM, n)  # Make sure
 
   ans 
 }
@@ -384,8 +386,7 @@ vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
 
 
 vchol.greenstadt <- function(cc, M, silent = FALSE,
-                             callno = 0)
-{
+                             callno = 0) {
 
 
 
@@ -402,7 +403,7 @@ vchol.greenstadt <- function(cc, M, silent = FALSE,
 
 
   temp <- veigen(cc, M = M)  # , mat = TRUE) 
-  dim(temp$vectors) <- c(M, M, n) # Make sure (when M = 1) for mux5
+  dim(temp$vectors) <- c(M, M, n)  # Make sure (when M = 1) for mux5
   dim(temp$values)  <- c(M, n)    # Make sure (when M = 1) for mux5
 
   is.neg <- (temp$values < .Machine$double.eps)
@@ -420,24 +421,22 @@ vchol.greenstadt <- function(cc, M, silent = FALSE,
 
 
       temp$values[zilch] <- small.value
-
   }
 
-
   if (callno > 9) {
-      warning("taking drastic action; setting all wz to ",
-              "scaled versions of the order-M identity matrix")
+    warning("taking drastic action; setting all wz to ",
+            "scaled versions of the order-M identity matrix")
 
-      cc2mean <- abs(colMeans(cc[1:M, , drop = FALSE]))
-      temp$values  <- matrix(cc2mean, M, n, byrow = TRUE)
-      temp$vectors <- array(c(diag(M)), c(M, M, n))
+    cc2mean <- abs(colMeans(cc[1:M, , drop = FALSE]))
+    temp$values  <- matrix(cc2mean, M, n, byrow = TRUE)
+    temp$vectors <- array(c(diag(M)), c(M, M, n))
   }
 
 
 
-  temp3 <- mux55(temp$vectors, temp$values, M = M) #, matrix.arg = TRUE)
+  temp3 <- mux55(temp$vectors, temp$values, M = M)  #, matrix.arg = TRUE)
   ans <- vchol(t(temp3), M = M, n = n, silent = silent,
-               callno = callno + 1) #, matrix.arg = TRUE)
+               callno = callno + 1)  #, matrix.arg = TRUE)
                                    
 
 
@@ -449,10 +448,10 @@ vchol.greenstadt <- function(cc, M, silent = FALSE,
 
 if (FALSE)
 myf <- function(x) {
-    dotFortran("VGAM_F90_fill9",
+    .Fortran("VGAM_F90_fill9",
                x = as.double(x), lenx = as.integer(length(x)),
                answer = as.double(x),
-               NAOK = TRUE)$answer
+               NAOK = TRUE, PACKAGE = "VGAM")$answer
 }
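
Note on the mux.q hunks: every change follows one mechanical pattern, replacing the package's old dotC()/dotFortran() wrappers with base R's .C()/.Fortran(), passing the routine name as the first argument and adding PACKAGE = "VGAM" so symbol lookup is restricted to VGAM's compiled code. Schematically (the routine name "some_routine" is a placeholder, not a real entry point):

  ## old style:
  ## fred <- dotC(name = "some_routine", as.double(x), ans = double(n), NAOK = TRUE)
  ## new style:
  fred <- .C("some_routine", as.double(x), ans = double(n),
             NAOK = TRUE, PACKAGE = "VGAM")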
 
 
diff --git a/R/nobs.R b/R/nobs.R
index 79407c2..17e8535 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -10,7 +10,7 @@
 nobs.vlm <- function(object, type = c("lm", "vlm"), ...) {
 
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("lm", "vlm"))[1]
@@ -19,7 +19,7 @@ nobs.vlm <- function(object, type = c("lm", "vlm"), ...) {
   if (type == "lm") {
     object@misc$n
   } else {
-    object@misc$nrow_X_vlm
+    object@misc$nrow.X.vlm
   }
 }
 
@@ -47,7 +47,7 @@ setMethod("nobs", "vlm",
 
 nvar.vlm <- function(object, type = c("vlm", "lm"), ...) {
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("vlm", "lm"))[1]
@@ -56,7 +56,7 @@ nvar.vlm <- function(object, type = c("vlm", "lm"), ...) {
   if (type == "lm") {
     object@misc$p
   } else {
-    object@misc$ncol_X_vlm
+    object@misc$ncol.X.vlm
   }
 }
 
@@ -64,7 +64,7 @@ nvar.vlm <- function(object, type = c("vlm", "lm"), ...) {
 
 nvar.vgam <- function(object, type = c("vgam", "zz"), ...) {
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("vgam", "zz"))[1]
@@ -74,14 +74,14 @@ nvar.vgam <- function(object, type = c("vgam", "zz"), ...) {
   if (type == "vgam") {
     object@misc$p
   } else {
-    object@misc$ncol_X_vlm
+    object@misc$ncol.X.vlm
   }
 }
 
 
 nvar.rrvglm <- function(object, type = c("rrvglm", "zz"), ...) {
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("rrvglm", "zz"))[1]
@@ -91,7 +91,7 @@ nvar.rrvglm <- function(object, type = c("rrvglm", "zz"), ...) {
   if (type == "vgam") {
     object@misc$p
   } else {
-    object@misc$ncol_X_vlm
+    object@misc$ncol.X.vlm
   }
 }
 
@@ -99,7 +99,7 @@ nvar.rrvglm <- function(object, type = c("rrvglm", "zz"), ...) {
 
 nvar.qrrvglm <- function(object, type = c("qrrvglm", "zz"), ...) {
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("qrrvglm", "zz"))[1]
@@ -109,7 +109,7 @@ nvar.qrrvglm <- function(object, type = c("qrrvglm", "zz"), ...) {
   if (type == "qrrvglm") {
     object@misc$p
   } else {
-    object@misc$ncol_X_vlm
+    object@misc$ncol.X.vlm
   }
 }
 
@@ -117,7 +117,7 @@ nvar.qrrvglm <- function(object, type = c("qrrvglm", "zz"), ...) {
 
 nvar.cao <- function(object, type = c("cao", "zz"), ...) {
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("rrvglm", "zz"))[1]
@@ -127,7 +127,7 @@ nvar.cao <- function(object, type = c("cao", "zz"), ...) {
   if (type == "cao") {
     object@misc$p
   } else {
-    object@misc$ncol_X_vlm
+    object@misc$ncol.X.vlm
   }
 }
 
@@ -135,7 +135,7 @@ nvar.cao <- function(object, type = c("cao", "zz"), ...) {
 
 nvar.rcim <- function(object, type = c("rcim", "zz"), ...) {
 
-  if(mode(type) != "character" && mode(type) != "name")
+  if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
   type <- match.arg(type,
                     c("rcim", "zz"))[1]
@@ -145,7 +145,7 @@ nvar.rcim <- function(object, type = c("rcim", "zz"), ...) {
   if (type == "rcim") {
     object@misc$p
   } else {
-    object@misc$ncol_X_vlm
+    object@misc$ncol.X.vlm
   }
 }
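
Note on the nobs.R hunks: these are pure renames of the @misc components nrow_X_vlm and ncol_X_vlm to nrow.X.vlm and ncol.X.vlm; the user-level accessors keep the same interface. Roughly, for a fitted object such as 'fit' in the earlier sketch:

  nobs(fit, type = "lm")    # n, rows of the LM design matrix
  nobs(fit, type = "vlm")   # n * M, rows of the big VLM design matrix
  nvar(fit, type = "vlm")   # p.vlm, columns of the VLM matrix (assuming nvar() is exported)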
 
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index e193459..7871b9e 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -26,10 +26,10 @@ plotvgam <-
   missing.control <- missing(control)
 
   na.act <- x@na.action
-  x@na.action <- list() # Don't want NAs returned from predict() or resid()
+  x@na.action <- list()
 
   if (!is.Numeric(varxij, integer.valued = TRUE,
-                  allowable.length = 1, positive = TRUE))
+                  length.arg = 1, positive = TRUE))
       stop("bad input for the 'varxij' argument")
   if (any(slotNames(x) == "control")) {
     x@control$varxij <- varxij
@@ -37,7 +37,8 @@ plotvgam <-
 
 
   missing.type.residuals <- missing(type.residuals)
-  if (mode(type.residuals) != "character" && mode(type.residuals) != "name")
+  if (mode(type.residuals) != "character" &&
+      mode(type.residuals) != "name")
     type.residuals <- as.character(substitute(type.residuals))
   if (!missing.type.residuals)
     type.residuals <- match.arg(type.residuals,
@@ -45,7 +46,7 @@ plotvgam <-
 
 
   if (!is.Numeric(deriv.arg, integer.valued = TRUE,
-                  allowable.length = 1) ||
+                  length.arg = 1) ||
       deriv.arg < 0)
     stop("bad input for the 'deriv' argument")
 
@@ -70,9 +71,9 @@ plotvgam <-
     if (residuals) {
       if (missing.type.residuals) {
         for (rtype in type.residuals)
-            if (!is.null(residuals <- resid(x, type = rtype))) break
+          if (!is.null(residuals <- resid(x, type = rtype))) break
       } else {
-       residuals = resid(x, typ = type.residuals)# Get the prespecified type
+       residuals = resid(x, type = type.residuals)
         if (!length(residuals))
           warning("residuals are NULL. Ignoring 'residuals = TRUE'")
       }
@@ -82,11 +83,11 @@ plotvgam <-
   }
 
   if (!missing.control) {
-      control <- c(plotvgam.control(.include.dots = FALSE, ...),
-                  control, plotvgam.control(...))
+    control <- c(plotvgam.control( .include.dots = FALSE, ...),
+                control, plotvgam.control(...))
   }
 
-  x@post$plotvgam.control <- control # Add it to the object 
+  x@post$plotvgam.control <- control  # Add it to the object 
 
   if (plot.arg)
     plotpreplotvgam(preplot.object, residuals = residuals,
@@ -97,7 +98,7 @@ plotvgam <-
                     which.term = which.term, which.cf = which.cf,
                     control = control)
 
-  x@na.action <- na.act # Restore its original value
+  x@na.action <- na.act  # Restore its original value
   invisible(x)
 }
 
@@ -123,9 +124,9 @@ getallresponses <- function(xij) {
 
   allterms <- lapply(xij, terms)
   allres <- NULL
-  for(ii in 1:length(xij))
+  for (ii in 1:length(xij))
     allres <- c(allres,
-                as.character(attr(allterms[[ii]],"variables"))[2])
+                as.character(attr(allterms[[ii]], "variables"))[2])
   allres
 }
 
@@ -156,26 +157,27 @@ getallresponses <- function(xij) {
   xnames <- as.list(terms)
   names(xnames) <- terms
   modes <- sapply(xvars, mode)
-  for(term in terms[modes != "name"]) {
-    evars <- all.names(xvars[term], functions= FALSE, unique = TRUE)
+  for (term in terms[modes != "name"]) {
+    evars <- all.names(xvars[term], functions = FALSE, unique = TRUE)
     if (!length(evars))
       next
     xnames[[term]] <- evars
     evars <- parse(text=evars)
     if (length(evars) == 1) {
       evars <- evars[[1]]
-    } else if (length(evars) > 1 &&
-               any(getallresponses(object@control$xij) == names(xnames)) ) {
+    } else
+    if (length(evars) > 1 &&
+        any(getallresponses(object@control$xij) == names(xnames)) ) {
 
 
 
 
-          evars <- evars[[varxij]]
-      } else {
-        evars <- c(as.name("list"), evars)
-        mode(evars) <- "call"
-      }
-      xvars[[term]] <- evars
+      evars <- evars[[varxij]]
+    } else {
+      evars <- c(as.name("list"), evars)
+      mode(evars) <- "call"
+    }
+    xvars[[term]] <- evars
   }
     
     
@@ -187,31 +189,31 @@ getallresponses <- function(xij) {
    Call <- object@call
     if (!is.null(Call$subset) | !is.null(Call$na.action) |
         !is.null(options("na.action")[[1]])) {
-        Rownames <- names(fitted(object))
-        if (!(Rl <- length(Rownames)))
-          Rownames <- dimnames(fitted(object))[[1]]
-
-        if (length(object@x) && !(Rl <- length(Rownames)))
-          Rownames <- (dimnames(object@x))[[1]]
-        if (length(object@y) && !(Rl <- length(Rownames)))
-          Rownames <- (dimnames(object@y))[[1]]
-
-        if (!(Rl <- length(Rownames)))
-          stop("need to have names for fitted.values ",
-               "when call has a 'subset' or 'na.action' argument")
-
-        form <- paste("~", unlist(xnames), collapse = "+")
-        Mcall <- c(as.name("model.frame"), list(formula =
-                   terms(as.formula(form)),
-                   subset = Rownames, na.action = function(x) x))
-        mode(Mcall) <- "call"
-        Mcall$data <- Call$data
-        xvars <- eval(xvars, eval(Mcall))
-      } else {
-        ecall <- substitute(eval(expression(xvars)))
-        ecall$local <- Call$data
-        xvars <- eval(ecall)
-      }
+      Rownames <- names(fitted(object))
+      if (!(Rl <- length(Rownames)))
+        Rownames <- dimnames(fitted(object))[[1]]
+
+      if (length(object@x) && !(Rl <- length(Rownames)))
+        Rownames <- (dimnames(object@x))[[1]]
+      if (length(object@y) && !(Rl <- length(Rownames)))
+        Rownames <- (dimnames(object@y))[[1]]
+
+      if (!(Rl <- length(Rownames)))
+        stop("need to have names for fitted.values ",
+             "when call has a 'subset' or 'na.action' argument")
+
+      form <- paste("~", unlist(xnames), collapse = "+")
+      Mcall <- c(as.name("model.frame"), list(formula =
+                 terms(as.formula(form)),
+                 subset = Rownames, na.action = function(x) x))
+      mode(Mcall) <- "call"
+      Mcall$data <- Call$data
+      xvars <- eval(xvars, eval(Mcall))
+    } else {
+      ecall <- substitute(eval(expression(xvars)))
+      ecall$local <- Call$data
+      xvars <- eval(ecall)
+    }
   }
   list(xnames = xnames, xvars = xvars)
 }
@@ -228,54 +230,55 @@ preplotvgam <- function(object, newdata = NULL,
                 raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
                 varxij = 1) {
 
-    result1 <- headpreplotvgam(object, newdata = newdata, terms = terms,
-                raw = raw, deriv.arg = deriv.arg, se = se,
-                varxij = varxij)
+  result1 <- headpreplotvgam(object, newdata = newdata, terms = terms,
+                             raw = raw, deriv.arg = deriv.arg, se = se,
+                             varxij = varxij)
 
-    xvars  <- result1$xvars
-    xnames <- result1$xnames
+  xvars  <- result1$xvars
+  xnames <- result1$xnames
 
 
 
-    if (FALSE && !is.null(object@control$jix)) {
+  if (FALSE && !is.null(object@control$jix)) {
 
 
 
 
-      myxij <- object@control$xij
-      if (length(myxij)) {
-      }
+    myxij <- object@control$xij
+    if (length(myxij)) {
+    }
+  }
 
+  pred <- if (length(newdata)) {
+    predict(object, newdata, type = "terms",
+            raw = raw, se.fit = se, deriv.arg = deriv.arg)
+  } else {
+    predict(object, type = "terms",
+            raw = raw, se.fit = se, deriv.arg = deriv.arg)
   }
 
-    pred <- if (length(newdata)) {
-      predict(object, newdata, type = "terms",
-              raw = raw, se.fit = se, deriv.arg = deriv.arg)
-    } else {
-      predict(object, type = "terms",
-              raw = raw, se.fit = se, deriv.arg = deriv.arg)
-    }
+  fits <- if (is.atomic(pred)) NULL else pred$fit
+  se.fit <- if (is.atomic(pred)) NULL else pred$se.fit
 
-    fits <- if (is.atomic(pred)) NULL else pred$fit
-    se.fit <- if (is.atomic(pred)) NULL else pred$se.fit
-    if (is.null(fits))
-      fits <- pred
-    fred <- attr(fits, "vterm.assign")   # NULL for M==1
-    Constant <- attr(fits, "constant")  # NULL if se = TRUE
+  if (is.null(fits))
+    fits <- pred
+  fred <- attr(fits, "vterm.assign")   # NULL for M==1
+  Constant <- attr(fits, "constant")  # NULL if se = TRUE
 
-    gamplot <- xnames
+  gamplot <- xnames
 
-    loop.var <- names(fred)
-    for(term in loop.var) {
-      .VGAM.x <- xvars[[term]]
+  loop.var <- names(fred)
+  for (term in loop.var) {
+    .VGAM.x <- xvars[[term]]
 
-    myylab <- if (all(substring(term, 1:nchar(term), 1:nchar(term)) != "("))
-               paste("partial for", term) else term
+    myylab <- if (all(substring(term, 1:nchar(term),
+                                      1:nchar(term)) != "("))
+              paste("partial for", term) else term
 
     TT <- list(x = .VGAM.x,
-               y = fits[,(if(is.null(fred)) term else fred[[term]])],
+               y = fits[,(if (is.null(fred)) term else fred[[term]])],
                se.y = if (is.null(se.fit)) NULL else
-                     se.fit[,(if(is.null(fred)) term else fred[[term]])],
+                     se.fit[, (if (is.null(fred)) term else fred[[term]])],
                xlab = xnames[[term]],
                ylab = myylab)
     class(TT) <- "preplotvgam"
@@ -293,17 +296,61 @@ plotvlm <- function(object, residuals = NULL, rugplot= FALSE, ...) {
 
 
 
-plotvglm <- function(x, residuals = NULL, smooths= FALSE,
-                     rugplot= FALSE, id.n= FALSE, ...) {
-  stop("this function hasn't been written yet")
+
+plotvglm <-
+  function(x,
+           type = c("vglm", "vgam"),
+           newdata = NULL, y = NULL,
+           residuals = NULL, rugplot = TRUE,
+           se = FALSE, scale = 0,
+           raw = TRUE, offset.arg = 0,
+           deriv.arg = 0, overlay = FALSE,
+           type.residuals = c("deviance", "working", "pearson", "response"),
+           plot.arg = TRUE,
+           which.term = NULL, which.cf = NULL,
+           control = plotvgam.control(...),
+           varxij = 1, ...) {
+
+  ptype <- match.arg(type, c("vglm", "vgam"))[1]
+
+  if (ptype == "vglm") {
+    stop("this function has not been written yet!")
+  }
+
+
+
+  if (length(newdata))
+    newdata <- newdata else newdata <- NULL
+
+
+
+  invisible(
+  plotvgam(x = x, newdata = newdata, y = y,
+           residuals = residuals, rugplot = rugplot,
+           se = se, scale = scale, 
+           raw = raw, offset.arg = offset.arg,
+           deriv.arg = deriv.arg, overlay = overlay,
+           type.residuals = type.residuals,
+           plot.arg = plot.arg,
+           which.term = which.term, which.cf = which.cf,
+           control = control,
+           varxij = varxij, ...)
+    )
 }
 
 
 
 
+
+
+
+
+
+
+
  plotpreplotvgam <-
   function(x, y = NULL, residuals = NULL,
-           rugplot= TRUE, se= FALSE, scale = 0,
+           rugplot = TRUE, se = FALSE, scale = 0,
            offset.arg = 0, deriv.arg = 0, overlay = FALSE,
            which.term = NULL, which.cf = NULL,
            control = NULL) {
@@ -311,12 +358,12 @@ plotvglm <- function(x, residuals = NULL, smooths= FALSE,
   if (listof) {
     TT <- names(x)
     if (is.null(which.term))
-      which.term <- TT # Plot them all
+      which.term <- TT  # Plot them all
     plot.no <- 0
-    for(ii in TT) {
+    for (ii in TT) {
       plot.no <- plot.no + 1
       if ((is.character(which.term) && any(which.term == ii)) ||
-         (is.numeric(which.term) && any(which.term == plot.no)))
+          (is.numeric(which.term) && any(which.term == plot.no)))
         plotpreplotvgam(x[[ii]], y = NULL,
                         residuals, rugplot = rugplot, se = se,
                         scale = scale,
@@ -326,22 +373,24 @@ plotvglm <- function(x, residuals = NULL, smooths= FALSE,
                         control = control)
     }
   } else {
-    dummy <- function(residuals = NULL, rugplot = TRUE, se = FALSE, scale = 0, 
+    dummy <- function(residuals = NULL, rugplot = TRUE,
+                      se = FALSE, scale = 0,
                       offset.arg = 0, deriv.arg = 0, overlay = FALSE, 
                       which.cf = NULL, control = plotvgam.control())
-     c(list(residuals=residuals, rugplot = rugplot, se = se, scale = scale,
-       offset.arg = offset.arg, deriv.arg = deriv.arg, overlay = overlay,
-       which.cf = which.cf), control)
-
-    d <- dummy(residuals = residuals, rugplot = rugplot,
-               se = se, scale = scale,
-               offset.arg = offset.arg, deriv.arg = deriv.arg,
-               overlay = overlay,
-               which.cf = which.cf, 
-               control = control)
-
-    uniq.comps <- unique(c(names(x), names(d)))
-    Call <- c(as.name("vplot"), c(d, x)[uniq.comps])
+     c(list(residuals = residuals, rugplot = rugplot,
+            se = se, scale = scale,
+            offset.arg = offset.arg, deriv.arg = deriv.arg,
+            overlay = overlay, which.cf = which.cf), control)
+
+    dd <- dummy(residuals = residuals, rugplot = rugplot,
+                se = se, scale = scale,
+                offset.arg = offset.arg, deriv.arg = deriv.arg,
+                overlay = overlay,
+                which.cf = which.cf, 
+                control = control)
+
+    uniq.comps <- unique(c(names(x), names(dd)))
+    Call <- c(as.name("vplot"), c(dd, x)[uniq.comps])
     mode(Call) <- "call"
     invisible(eval(Call))
   }
@@ -363,10 +412,10 @@ vplot.default <- function(x, y, se.y = NULL, xlab = "", ylab = "",
                          residuals, rugplot, scale, se,
                          offset.arg = offset.arg, overlay = overlay, ...)
          } else {
-           warning("The \"x\" component of \"", ylab, "\" has class \"",
-                   class(x), "\"; no vplot() methods available")
+           warning("The 'x' component of '", ylab, "' has class '",
+                   class(x), "'; no vplot() methods available")
          }
-        ) # End of switch
+        )  # End of switch
 }
 
 
@@ -426,7 +475,7 @@ vplot.list <-
   } else {
     default.vals <- plotvgam.control()
     return.list <- list()
-    for(ii in names(default.vals)) {
+    for (ii in names(default.vals)) {
       replace.val <- !((length(ans[[ii]]) == length(default.vals[[ii]])) &&
             (length(default.vals[[ii]]) > 0) &&
             (is.logical(all.equal(ans[[ii]], default.vals[[ii]]))) &&
@@ -449,26 +498,28 @@ vplot.list <-
 
 
 
-vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
-                   residuals = NULL, rugplot= FALSE, se= FALSE, scale = 0,
-                   offset.arg = 0, deriv.arg = 0, overlay= FALSE,
-                   which.cf = NULL,
-                   xlim = NULL, ylim = NULL,
-                   llty = par()$lty,
-                   slty = "dashed",
-                   pcex = par()$cex,
-                   pch = par()$pch,
-                   pcol = par()$col,
-                   lcol = par()$col,
-                   rcol = par()$col,
-                   scol = par()$col,
-                   llwd = par()$lwd,
-                   slwd = par()$lwd,
-                   add.arg= FALSE,
-                   one.at.a.time= FALSE, 
-                   noxmean = FALSE, 
-                   separator = ":",
-                   ...) {
+vplot.numeric <-
+  function(x, y, se.y = NULL, xlab, ylab,
+           residuals = NULL, rugplot = FALSE,
+           se = FALSE, scale = 0,
+           offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+           which.cf = NULL,
+           xlim = NULL, ylim = NULL,
+           llty = par()$lty,
+           slty = "dashed",
+           pcex = par()$cex,
+           pch = par()$pch,
+           pcol = par()$col,
+           lcol = par()$col,
+           rcol = par()$col,
+           scol = par()$col,
+           llwd = par()$lwd,
+           slwd = par()$lwd,
+           add.arg = FALSE,
+           one.at.a.time = FALSE, 
+           noxmean = FALSE, 
+           separator = ":",
+           ...) {
 
 
 
@@ -476,22 +527,22 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
     ylim0 <- ylim
 
     if (length(y)/length(x)  != round(length(y)/length(x)))
-        stop("length of 'x' and 'y' do not seem to match")
+      stop("length of 'x' and 'y' do not seem to match")
     y <- as.matrix(y) 
     if (!length(which.cf))
-        which.cf <- 1:ncol(y)  # Added 7/8/04
+      which.cf <- 1:ncol(y)  # Added 7/8/04
 
     if (!is.null(se.y))
-        se.y <- as.matrix(se.y)
+      se.y <- as.matrix(se.y)
     if (!is.null(se.y) && any(is.na(se.y)))
-        se.y <- NULL
+      se.y <- NULL
 
     if (!is.null(residuals))  {
-        residuals <- as.matrix(residuals)
-        if (ncol(residuals) != ncol(y)) {
-            warning("ncol(residuals) != ncol(y) so residuals are not plotted")
-            residuals <- NULL
-        }
+      residuals <- as.matrix(residuals)
+      if (ncol(residuals) != ncol(y)) {
+        warning("ncol(residuals) != ncol(y) so residuals are not plotted")
+        residuals <- NULL
+      }
     }
 
     offset.arg <- matrix(offset.arg, nrow(y), ncol(y), byrow = TRUE)
@@ -507,12 +558,12 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
       y <- rbind(y, 0 * y[1,])
       se.y <- rbind(se.y, 0 * se.y[1,])
       if (!is.null(residuals))
-        residuals <- rbind(residuals, NA*residuals[1,]) # NAs not plotted
+        residuals <- rbind(residuals, NA*residuals[1,])  # NAs not plotted
     }
 
     ux <- unique(sort(x))
-    o <- match(ux, x)
-    uy <- y[o, , drop = FALSE]
+    ooo <- match(ux, x)
+    uy <- y[ooo, , drop = FALSE]
     xlim <- range(xlim, ux)
     ylim <- range(ylim, uy[, which.cf], na.rm = TRUE)
     if (rugplot) {
@@ -522,15 +573,15 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
     }
 
     if (se && !is.null(se.y)) {
-      se.upper <- uy + 2 * se.y[o,,drop = FALSE]
-      se.lower <- uy - 2 * se.y[o,,drop = FALSE]
-      ylim <- range(c(ylim, se.upper[,which.cf], se.lower[,which.cf]))
+      se.upper <- uy + 2 * se.y[ooo, , drop = FALSE]
+      se.lower <- uy - 2 * se.y[ooo, , drop = FALSE]
+      ylim <- range(c(ylim, se.upper[, which.cf], se.lower[, which.cf]))
     }
 
     if (!is.null(residuals)) {
       if (length(residuals) == length(y)) {
         residuals <- as.matrix(y + residuals)
-        ylim <- range(c(ylim, residuals[,which.cf]), na.rm = TRUE)
+        ylim <- range(c(ylim, residuals[, which.cf]), na.rm = TRUE)
       } else {
         residuals <- NULL
         warning("Residuals do not match 'x' in \"", ylab, 
@@ -548,7 +599,7 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
 
   if (overlay) {
     if (!length(which.cf))
-      which.cf <- 1:ncol(uy) # Added 7/8/04
+      which.cf <- 1:ncol(uy)  # Added 20040807
     if (!add.arg) {
       matplot(ux, uy[,which.cf], type = "n", 
               xlim = xlim, ylim = ylim, 
@@ -561,66 +612,66 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
         points(x, residuals, pch = pch, col = pcol, cex = pcex) 
       } else {
         matpoints(x, residuals[,which.cf],
-                  pch = pch, col = pcol, cex = pcex) # add.arg = TRUE,
+                  pch = pch, col = pcol, cex = pcex)  # add.arg = TRUE,
       }
     if (rugplot)
       rug(jx, col = rcol)
     if (se && !is.null(se.y)) {
-      matlines(ux, se.upper[,which.cf], lty =  slty, lwd = slwd, col = scol)
-      matlines(ux, se.lower[,which.cf], lty =  slty, lwd = slwd, col = scol)
+     matlines(ux, se.upper[, which.cf], lty =  slty, lwd = slwd, col = scol)
+     matlines(ux, se.lower[, which.cf], lty =  slty, lwd = slwd, col = scol)
     }
   } else {
     YLAB <- ylab 
 
-    pcex <- rep(pcex, len = ncol(uy))
-    pch  <- rep(pch , len = ncol(uy))
-    pcol <- rep(pcol, len = ncol(uy))
-    lcol <- rep(lcol, len = ncol(uy))
+    pcex <- rep(pcex,  len = ncol(uy))
+    pch  <- rep(pch ,  len = ncol(uy))
+    pcol <- rep(pcol,  len = ncol(uy))
+    lcol <- rep(lcol,  len = ncol(uy))
     llty <- rep(llty,  len = ncol(uy))
     llwd <- rep(llwd,  len = ncol(uy))
-    slty <- rep(slty, len = ncol(uy))
-    rcol <- rep(rcol, len = ncol(uy))
-    scol <- rep(scol, len = ncol(uy))
-    slwd <- rep(slwd, len = ncol(uy))
+    slty <- rep(slty,  len = ncol(uy))
+    rcol <- rep(rcol,  len = ncol(uy))
+    scol <- rep(scol,  len = ncol(uy))
+    slwd <- rep(slwd,  len = ncol(uy))
 
-    for(ii in 1:ncol(uy)) {
+    for (ii in 1:ncol(uy)) {
       if (!length(which.cf) ||
          (length(which.cf) && any(which.cf == ii))) {
 
-          if (is.Numeric(ylim0, allowable.length = 2)) {
-              ylim <- ylim0
+          if (is.Numeric(ylim0, length.arg = 2)) {
+            ylim <- ylim0
           } else {
-              ylim <- range(ylim0, uy[, ii], na.rm = TRUE)
-              if (se && !is.null(se.y))
-                  ylim <- range(ylim0, se.lower[, ii], se.upper[, ii],
-                                      na.rm = TRUE)
-              if (!is.null(residuals))
-                  ylim <- range(c(ylim, residuals[, ii]), na.rm = TRUE)
-              ylim <- ylim.scale(ylim, scale)
+            ylim <- range(ylim0, uy[, ii], na.rm = TRUE)
+            if (se && !is.null(se.y))
+              ylim <- range(ylim0, se.lower[, ii], se.upper[, ii],
+                            na.rm = TRUE)
+            if (!is.null(residuals))
+              ylim <- range(c(ylim, residuals[, ii]), na.rm = TRUE)
+            ylim <- ylim.scale(ylim, scale)
           }
           if (ncol(uy)>1 && length(separator))
-              YLAB <- paste(ylab, separator, ii, sep = "")  
+            YLAB <- paste(ylab, separator, ii, sep = "")  
             if (!add.arg) {
               if (one.at.a.time) {
                 readline("Hit return for the next plot ")
               }
-                plot(ux, uy[, ii], type = "n", 
-                     xlim = xlim, ylim = ylim, 
-                     xlab = xlab, ylab = YLAB, ...)
+              plot(ux, uy[, ii], type = "n", 
+                   xlim = xlim, ylim = ylim, 
+                   xlab = xlab, ylab = YLAB, ...)
             }
             lines(ux, uy[, ii], 
                   lwd = llwd[ii], col = lcol[ii], lty = llty[ii])
             if (!is.null(residuals))
-                points(x, residuals[, ii], pch = pch[ii],
-                       col = pcol[ii], cex = pcex[ii]) 
+              points(x, residuals[, ii], pch = pch[ii],
+                     col = pcol[ii], cex = pcex[ii]) 
             if (rugplot)
-                rug(jx, col = rcol[ii])
+              rug(jx, col = rcol[ii])
 
             if (se && !is.null(se.y)) {
-                lines(ux, se.upper[, ii], lty = slty[ii], lwd = slwd[ii],
-                      col = scol[ii])
-                lines(ux, se.lower[, ii], lty = slty[ii], lwd = slwd[ii],
-                      col = scol[ii])
+              lines(ux, se.upper[, ii], lty = slty[ii], lwd = slwd[ii],
+                    col = scol[ii])
+              lines(ux, se.lower[, ii], lty = slty[ii], lwd = slwd[ii],
+                    col = scol[ii])
             }
         }
     }
@@ -641,7 +692,7 @@ vplot.matrix <-
 add.hookey <- function(ch, deriv.arg = 0) {
 
   if (!is.Numeric(deriv.arg, integer.valued = TRUE,
-                  allowable.length = 1) ||
+                  length.arg = 1) ||
       deriv.arg < 0)
       stop("bad input for the 'deriv' argument")
 
@@ -665,7 +716,7 @@ vplot.factor <-
   function(x, y, se.y = NULL, xlab, ylab, 
            residuals = NULL, rugplot = FALSE, scale = 0, 
            se = FALSE, xlim = NULL, ylim = NULL, 
-           offset.arg = 0, deriv.arg = 0, overlay= FALSE, 
+           offset.arg = 0, deriv.arg = 0, overlay = FALSE, 
            which.cf = NULL, ...) {
   if (deriv.arg > 0)
     return(NULL)
@@ -695,7 +746,7 @@ vplot.factor <-
                     rugplot = rugplot, scale = scale,
                     se = se, xlim = xlim, ylim = ylim, ...) 
   } else {
-    for(ii in 1:ncol(y)) {
+    for (ii in 1:ncol(y)) {
       ylab <- rep(ylab, len = ncol(y))
       if (ncol(y) > 1)
         ylab <- dimnames(y)[[2]]
@@ -727,13 +778,13 @@ vvplot.factor <-
   nn <- as.numeric(table(x))
   codex <- as.numeric(x)
   ucodex <- seq(nn)[nn > 0]
-  o <- match(ucodex, codex, 0)
+  ooo <- match(ucodex, codex, 0)
 
-  uy <- y[o,,drop = FALSE]
+  uy <- y[ooo, , drop = FALSE]
   ylim <- range(ylim, uy)
   xlim <- range(c(0, sum(nn), xlim))
   rightx <- cumsum(nn)
-  leftx <- c(0, rightx[ - length(nn)])
+  leftx <- c(0, rightx[ -length(nn)])
   ux <- (leftx + rightx)/2
   delta <- (rightx - leftx)/8
 
@@ -743,8 +794,8 @@ vvplot.factor <-
   if (rugplot)
     xlim <- range(c(xlim, nnajx))
   if (se && !is.null(se.y)) {
-    se.upper <- uy + 2 * se.y[o,, drop = FALSE]
-    se.lower <- uy - 2 * se.y[o,, drop = FALSE]
+    se.upper <- uy + 2 * se.y[ooo, , drop = FALSE]
+    se.lower <- uy - 2 * se.y[ooo, , drop = FALSE]
     ylim <- range(c(ylim, se.upper, se.lower))
   }
   if (!is.null(residuals)) {
@@ -774,7 +825,7 @@ vvplot.factor <-
     if (M == 1) return(cbind(ux))
     ans <- matrix(as.numeric(NA), length(ux), M)
     grid <- seq(-Delta, Delta, len = M)
-    for(ii in 1:M) {
+    for (ii in 1:M) {
       ans[, ii] <- ux + grid[ii]
     }
     ans
@@ -783,18 +834,18 @@ vvplot.factor <-
   uxx <- about(ux, M, Delta = min(delta))
   xlim <- range(c(xlim, uxx))
 
-  matplot(ux, uy, ylim = ylim, xlim = xlim, xlab = "", type = "n", 
+  matplot(ux, uy, ylim = ylim, xlim = xlim, xlab = "", type = "n",
           ylab = ylab, axes = FALSE, frame.plot = TRUE, ...)
   mtext(xlab, 1, 2, adj = 0.5)
   axis(side = 2)
   lpos <- par("mar")[3]
   mtext(Levels, side = 3, line = lpos/2, at = ux, adj = 0.5, srt = 45)
 
-  for(ii in 1:M)
+  for (ii in 1:M)
     segments(uxx[, ii] - 1.0 * delta, uy[, ii],
              uxx[, ii] + 1.0 * delta, uy[, ii])
   if (!is.null(residuals)) {
-    for(ii in 1:M) {
+    for (ii in 1:M) {
       jux <- uxx[, ii]
       jux <- jux[codex]
       jux <- jux + runif(length(jux), -0.7*min(delta), 0.7*min(delta))
@@ -805,20 +856,20 @@ vvplot.factor <-
   if (rugplot)
     rug(nnajx)
   if (se) {
-    for(ii in 1:M) {
-        segments(uxx[, ii]+0.5*delta, se.upper[, ii],
-                 uxx[, ii]-0.5*delta, se.upper[, ii])
-        segments(uxx[, ii]+0.5*delta, se.lower[, ii],
-                 uxx[, ii]-0.5*delta, se.lower[, ii])
-        segments(uxx[, ii], se.lower[, ii],
-                 uxx[, ii], se.upper[, ii], lty = 2)
+    for (ii in 1:M) {
+      segments(uxx[, ii] + 0.5*delta, se.upper[, ii],
+               uxx[, ii] - 0.5*delta, se.upper[, ii])
+      segments(uxx[, ii] + 0.5*delta, se.lower[, ii],
+               uxx[, ii] - 0.5*delta, se.lower[, ii])
+      segments(uxx[, ii], se.lower[, ii],
+               uxx[, ii], se.upper[, ii], lty = 2)
     }
   }
   invisible(diff(ylim))
 }
 
 
-if(!isGeneric("vplot"))
+if (!isGeneric("vplot"))
   setGeneric("vplot", function(x, ...) standardGeneric("vplot"))
 
 
@@ -842,12 +893,13 @@ setMethod("plot", "vglm",
            function(x, y, ...) {
            if (!missing(y))
              stop("cannot process the 'y' argument")
-           invisible(plotvglm(x, y, ...))})
+           invisible(plotvglm(x = x, y = y, ...))})
 setMethod("plot", "vgam",
            function(x, y, ...) {
            if (!missing(y))
              stop("cannot process the 'y' argument")
-           invisible(plotvgam(x, ...))})
+           invisible(plotvgam(x = x, y = y, ...))})
+
 
 
 
@@ -874,9 +926,10 @@ plotqrrvglm <- function(object,
                   deviance = "Deviance", working = "Working")
 
   done <- 0
-  for(rr in 1:Rank)
-    for(ii in 1:M) {
-      plot(Coef.object@lv[, rr], res[, ii],
+  for (rr in 1:Rank)
+    for (ii in 1:M) {
+      plot(Coef.object@latvar[, rr],
+           res[, ii],
            xlab = paste(xlab, if (Rank == 1) "" else rr, sep = ""),
            ylab = my.ylab[ii],
            main = main, ...)
@@ -889,6 +942,7 @@ plotqrrvglm <- function(object,
   object
 }
 
+
 setMethod("plot", "qrrvglm", function(x, y, ...)
          invisible(plotqrrvglm(object = x, ...)))
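
Note on the plot.vglm.q hunks: plotvglm() gains the full plotvgam() argument list and delegates to plotvgam() when type = "vgam", but the default type = "vglm" branch still stops with "this function has not been written yet!", so plotting remains a vgam-level feature. Roughly along these lines (an assumed smooth fit, for illustration only):

  fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
               cumulative(parallel = TRUE), data = pneumo)
  plot(fit2, se = TRUE, rugplot = TRUE)  # dispatches to plotvgam()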
 
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 7a5434e..329f123 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -6,13 +6,14 @@
 
 
 
-predict.vgam <- function(object, newdata = NULL,
-                         type = c("link", "response", "terms"),
-                         se.fit = FALSE, deriv.arg = 0, terms.arg = NULL,
-                         raw = FALSE,
-                         all = TRUE, offset = 0,
-                         untransform = FALSE,
-                         dispersion = NULL, ...) {
+predict.vgam <-
+  function(object, newdata = NULL,
+           type = c("link", "response", "terms"),
+           se.fit = FALSE, deriv.arg = 0, terms.arg = NULL,
+           raw = FALSE,
+           all = TRUE, offset = 0,
+           untransform = FALSE,
+           dispersion = NULL, ...) {
   newdata <- if (missing(newdata)) {
     NULL
   } else {
@@ -40,7 +41,7 @@ predict.vgam <- function(object, newdata = NULL,
      deriv.arg != round(deriv.arg) || length(deriv.arg) > 1)
     stop("bad input for the 'deriv' argument")
 
-  if (deriv.arg > 0 && type!="terms")
+  if (deriv.arg > 0 && type != "terms")
     stop("'deriv>0' can only be specified if 'type=\"terms\"'")
 
   if (deriv.arg != 0 && !(type != "response" && !se.fit))
@@ -52,7 +53,7 @@ predict.vgam <- function(object, newdata = NULL,
          "is assigned")
 
 
-  tt <- terms(object) # 11/8/03; object@terms$terms
+  tt <- terms(object)  # 20030811; object@terms$terms
 
   ttf <- attr(tt, "factors")
   tto <- attr(tt, "order")
@@ -98,12 +99,13 @@ predict.vgam <- function(object, newdata = NULL,
     }
 
     predictor <- predict.vlm(object,
-                     type = "terms",
-                     se.fit = se.fit,
-                     terms.arg = terms.arg,
-                     raw = raw,
-                     all = all, offset = offset, 
-                     dispersion = dispersion, ...) # deriv.arg = deriv.arg,
+                             type = "terms",
+                             se.fit = se.fit,
+                             terms.arg = terms.arg,
+                             raw = raw,
+                             all = all, offset = offset,
+                             dispersion = dispersion,
+                             ...)  # deriv.arg = deriv.arg,
 
     newdata <- model.matrixvlm(object, type = "lm")
 
@@ -114,12 +116,13 @@ predict.vgam <- function(object, newdata = NULL,
 
 
     predictor <- predict.vlm(object, newdata,
-                        type = temp.type,
-                        se.fit = se.fit,
-                        terms.arg = terms.arg,
-                        raw = raw,
-                        all = all, offset = offset, 
-                        dispersion = dispersion, ...) # deriv.arg = deriv.arg,
+                             type = temp.type,
+                             se.fit = se.fit,
+                             terms.arg = terms.arg,
+                             raw = raw,
+                             all = all, offset = offset, 
+                             dispersion = dispersion,
+                             ...)  # deriv.arg = deriv.arg,
   }
 
 
@@ -147,77 +150,74 @@ predict.vgam <- function(object, newdata = NULL,
 
 
 
-    if (is.null(tmp6 <- attr(if(se.fit) predictor$fitted.values else
+    if (is.null(tmp6 <- attr(if (se.fit) predictor$fitted.values else
                             predictor, "vterm.assign"))) {
 
-          Blist <- subconstraints(object@misc$orig.assign,
-                                  object@constraints)
-          ncolBlist <- unlist(lapply(Blist, ncol))
-          if (intercept)
-            ncolBlist <- ncolBlist[-1]
+      Blist <- subconstraints(object@misc$orig.assign,
+                              object@constraints)
+      ncolBlist <- unlist(lapply(Blist, ncol))
+      if (intercept)
+        ncolBlist <- ncolBlist[-1]
     
-          cs <- if (raw) cumsum(c(1, ncolBlist)) else
-                         cumsum(c(1, M + 0*ncolBlist))
-          tmp6 <- vector("list", length(ncolBlist))
-          for(ii in 1:length(tmp6))
-            tmp6[[ii]] <- cs[ii]:(cs[ii+1]-1)
-          names(tmp6) <- names(ncolBlist)
+      cs <- if (raw) cumsum(c(1, ncolBlist)) else
+                     cumsum(c(1, M + 0 * ncolBlist))
+      tmp6 <- vector("list", length(ncolBlist))
+      for (ii in 1:length(tmp6))
+        tmp6[[ii]] <- cs[ii]:(cs[ii+1]-1)
+      names(tmp6) <- names(ncolBlist)
     }
 
-    n.s.xargument <- names(s.xargument)   # e.g., c("s(x)", "s(x2)")
-      for(ii in n.s.xargument) {
+    n.s.xargument <- names(s.xargument)  # e.g., c("s(x)", "s(x2)")
+      for (ii in n.s.xargument) {
 
         fred <- s.xargument[ii]
         if (!any(dimnames(newdata)[[2]] == fred))
           fred <- ii
 
-            xx <- newdata[,fred] # [,s.xargument[ii]]   # [,nindex[ii]]   
-            ox <- order(xx)
+        xx <- newdata[, fred]  # [, s.xargument[ii]]  # [, nindex[ii]]
 
-            rawMat <- predictvsmooth.spline.fit(
-                                 object@Bspline[[ii]],
-                                 x = xx,
-                                 deriv = deriv.arg)$y
+        rawMat <- predictvsmooth.spline.fit(object@Bspline[[ii]],
+                                            x = xx,
+                                            deriv = deriv.arg)$y
 
 
-            eta.mat <- if (raw) rawMat else (rawMat %*% t(Blist[[ii]]))
+        eta.mat <- if (raw) rawMat else (rawMat %*% t(Blist[[ii]]))
 
-            if (type == "terms") {
-                hhh <- tmp6[[ii]]
-                if (se.fit) {
-                    predictor$fitted.values[,hhh] <- 
-                    predictor$fitted.values[,hhh] + eta.mat
+        if (type == "terms") {
+          hhh <- tmp6[[ii]]
+          if (se.fit) {
+            predictor$fitted.values[, hhh] <- 
+            predictor$fitted.values[, hhh] + eta.mat
 
-                        TS <- predictor$sigma^2
+            TS <- predictor$sigma^2
 
-                        temp.var <- if (raw) {
-                                        tmp7 <- object@misc$varassign
-                                        tmp7 <- tmp7[[ii]]
-                                        object@var[, tmp7, drop = FALSE]
-                                    } else
-                                   stop("cannot handle se's with raw = FALSE")
+            temp.var <- if (raw) {
+                          tmp7 <- object@misc$varassign
+                          tmp7 <- tmp7[[ii]]
+                          object@var[, tmp7, drop = FALSE]
+                        } else {
+                          stop("cannot handle se's with raw = FALSE")
+                        }
 
-                        predictor$se.fit[,hhh] <- (predictor$se.fit[,hhh]^2 +
-                           TS * temp.var)^0.5
+                        predictor$se.fit[, hhh] <-
+                       (predictor$se.fit[, hhh]^2 + TS * temp.var)^0.5
                 } else {
-                    predictor[,hhh] <- predictor[,hhh] + eta.mat
+                  predictor[, hhh] <- predictor[, hhh] + eta.mat
                 }
         } else {
-            if (se.fit) {
-                predictor$fitted.values <- predictor$fitted.values + eta.mat 
-
-                TS <- 1  # out$residual.scale^2
-                TS <- predictor$sigma^2
+          if (se.fit) {
+            predictor$fitted.values <- predictor$fitted.values + eta.mat 
 
-                TT <- ncol(object@var)
-                predictor$se.fit <- sqrt(predictor$se.fit^2 + TS *
-                                         object@var %*% rep(1, TT))
+            TS <- 1  # out$residual.scale^2
+            TS <- predictor$sigma^2
 
-
-            } else {
-                predictor <- predictor + eta.mat 
-            }
+            TT <- ncol(object@var)
+            predictor$se.fit <- sqrt(predictor$se.fit^2 +
+                                     TS * object@var %*% rep(1, TT))
+          } else {
+            predictor <- predictor + eta.mat 
           }
+        }
       }
   }
 
@@ -229,11 +229,11 @@ predict.vgam <- function(object, newdata = NULL,
     }
   } else
   if (type == "response") {
-    fv <- object@family@linkinv(if(se.fit) predictor$fitted.values else
+    fv <- object@family@linkinv(if (se.fit) predictor$fitted.values else
                                 predictor, object@extra)
     if (is.matrix(fv) && is.matrix(object@fitted.values))
       dimnames(fv) <- list(dimnames(fv)[[1]],
-                             dimnames(object@fitted.values)[[2]])
+                           dimnames(object@fitted.values)[[2]])
     if (is.matrix(fv) && ncol(fv) == 1)
       fv <- c(fv)
     if (no.newdata && length(na.act)) {
@@ -244,68 +244,70 @@ predict.vgam <- function(object, newdata = NULL,
       }
     }
     if (se.fit) {
-      return(list(fit = fv, se.fit = fv*NA))
+      return(list(fit = fv, se.fit = fv * NA))
     } else {
       return(fv)
     }
   } else {
-    if (deriv.arg >= 1)
+    if (deriv.arg >= 1) {
       if (se.fit) {
         attr(predictor$fitted.values, "constant") <- NULL
       } else {
         attr(predictor, "constant") <- NULL
       }
+    }
+
 
     if (deriv.arg >= 1) {
-      v <- attr(if(se.fit) predictor$fitted.values else 
+      v <- attr(if (se.fit) predictor$fitted.values else 
           predictor, "vterm.assign")
       is.lin <- is.linear.term(names(v))
         coefmat <- coefvlm(object, matrix.out = TRUE)
         ord <- 0
-        for(ii in names(v)) {
+        for (ii in names(v)) {
           ord <- ord + 1
           index <- v[[ii]]
           lindex <- length(index)
           if (is.lin[ii]) {
-            if (tto[ord] > 1 || (length(ttf) && ttf[ii,ii])) {
-                  if (se.fit) {
-                    predictor$fitted.values[,index] <- 
-                        if (tto[ord]>1) NA else NA
-                  } else {
-                    predictor[,index] <- if (tto[ord]>1) NA else NA
-                  }
+            if (tto[ord] > 1 || (length(ttf) && ttf[ii, ii])) {
+              if (se.fit) {
+                predictor$fitted.values[, index] <-
+                  if (tto[ord] > 1) NA else NA
               } else {
-                ans <- coefmat[ii, 1:lindex]
+                predictor[, index] <- if (tto[ord] > 1) NA else NA
+              }
+            } else {
+              ans <- coefmat[ii, 1:lindex]
                 if (se.fit) {
-                  predictor$fitted.values[,index] =
+                  predictor$fitted.values[, index] <-
                       if (deriv.arg == 1)
-                      matrix(ans, ncol <- lindex, byrow = TRUE) else 0
+                      matrix(ans, ncol = lindex, byrow = TRUE) else 0
                 } else {
-                  predictor[,index] <- if (deriv.arg == 1)
-                      matrix(ans, ncol <- lindex, byrow = TRUE) else 0
+                  predictor[, index] <- if (deriv.arg == 1)
+                      matrix(ans, ncol = lindex, byrow = TRUE) else 0
                 }
             }
           } else
             if (length(s.xargument) && any(n.s.xargument == ii)) {
-                ans <- coefmat[ii, 1:lindex]
+              ans <- coefmat[ii, 1:lindex]
               if (se.fit) {
-                predictor$fitted.values[,index] =
-                predictor$fitted.values[,index] + 
-                     (if(deriv.arg == 1)
+                predictor$fitted.values[, index] <-
+                predictor$fitted.values[, index] + 
+                     (if (deriv.arg == 1)
                       matrix(ans, nrow = nrow(predictor$fitted.values),
-                       ncol = lindex, byrow = TRUE) else 0)
+                             ncol = lindex, byrow = TRUE) else 0)
               } else {
                 predictor[, index] <- predictor[, index] +
-                     (if(deriv.arg == 1)
+                     (if (deriv.arg == 1)
                       matrix(ans, nrow = nrow(predictor), 
                        ncol = lindex, byrow = TRUE) else 0)
               }
           } else {
               cat("Derivatives of term ", ii, "are unknown\n")
               if (se.fit) {
-                predictor$fitted.values[,index] <- NA
+                predictor$fitted.values[, index] <- NA
               } else {
-                predictor[,index] <- NA
+                predictor[, index] <- NA
               }
           }
         }
@@ -332,9 +334,9 @@ predict.vgam <- function(object, newdata = NULL,
 }
 
 
-  setMethod("predict", "vgam",
-            function(object, ...)
-            predict.vgam(object, ...))
+setMethod("predict", "vgam",
+          function(object, ...)
+          predict.vgam(object, ...))
 
 
 
@@ -349,7 +351,7 @@ varassign <- function(constraints, n.s.xargument) {
 
   names(ans) <- n.s.xargument
   ptr <- 1
-  for(ii in n.s.xargument) {
+  for (ii in n.s.xargument) {
     temp <- ncolBlist[[ii]]
     ans[[ii]] <- ptr:(ptr + temp - 1)
     ptr <- ptr + temp
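
The hunks above only re-indent predict.vgam(); its interface (type = "link", "response" or "terms", se.fit, deriv.arg, raw, untransform) is unchanged. A minimal usage sketch follows; the simulated data and the poissonff() family are illustrative assumptions, not part of this commit:

    library(VGAM)
    set.seed(1)
    dat <- data.frame(x2 = runif(200))
    dat$y <- rpois(200, lambda = exp(1 + sin(2 * dat$x2)))
    fit <- vgam(y ~ s(x2), poissonff, data = dat)
    head(predict(fit, type = "link"))    # additive predictor eta
    head(predict(fit, type = "terms"))   # centred per-term contributions
    head(predict(fit, newdata = dat[1:5, ], type = "response"))  # fitted means
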
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index a6fc6f5..895a6e0 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -58,7 +58,7 @@ predictvglm <- function(object,
                predict.vlm(object, newdata = newdata,
                            type = type, se.fit = se.fit,
                            deriv = deriv, dispersion = dispersion, ...) 
-             }) # End of switch
+             })  # End of switch
   } else {
     if (is.null(newdata)) {
       switch(type, 
@@ -78,39 +78,39 @@ predictvglm <- function(object,
 
 
 
-                   predictor <- predict.vlm(object, newdata = newdata,
-                                           type = type, se.fit = se.fit,
-                                           deriv = deriv, 
-                                           dispersion = dispersion, ...)
+               predictor <- predict.vlm(object, newdata = newdata,
+                                        type = type, se.fit = se.fit,
+                                        deriv = deriv, 
+                                        dispersion = dispersion, ...)
 
 
 
-                   M <- object@misc$M
+               M <- object@misc$M
 
-                   fv <- object@family@linkinv(predictor, extra)
-                   if (M > 1 && is.matrix(fv)) {
-                       dimnames(fv) <- list(dimnames(fv)[[1]],
+               fv <- object@family@linkinv(predictor, extra)
+               if (M > 1 && is.matrix(fv)) {
+                 dimnames(fv) <- list(dimnames(fv)[[1]],
                                      dimnames(object@fitted.values)[[2]])
-                   } else {
-                   }
-                   fv
+               } else {
+               }
+                 fv
                },
                link = {
 
 
 
-                   predict.vlm(object, newdata = newdata,
-                               type = "response", se.fit = se.fit,
-                               deriv = deriv, dispersion = dispersion, ...)
+                 predict.vlm(object, newdata = newdata,
+                             type = "response", se.fit = se.fit,
+                             deriv = deriv, dispersion = dispersion, ...)
 
 
 
                },
                terms = {
-                   predict.vlm(object, newdata = newdata,
-                               type = type, se.fit = se.fit,
-                               deriv = deriv, dispersion = dispersion, ...) 
-               }) # End of switch
+                 predict.vlm(object, newdata = newdata,
+                             type = type, se.fit = se.fit,
+                             deriv = deriv, dispersion = dispersion, ...) 
+               })  # End of switch
         }
   }
 
@@ -128,7 +128,7 @@ predictvglm <- function(object,
 
 
 setMethod("predict", "vglm", function(object, ...) 
-    predictvglm(object, ...))
+  predictvglm(object, ...))
 
 
 
@@ -144,40 +144,40 @@ predict.rrvglm <- function(object,
                          extra = object@extra, ...) {
 
   if (se.fit) {
-      stop("20030811; predict.rrvglm(..., se.fit=TRUE) not complete yet") 
-      pred <- 
-      switch(type,
-             response = {
-                warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
-                        "together; setting 'se.fit = FALSE'")
-                se.fit <- FALSE
-                  predictor <- predict.vlm(object, newdata = newdata,
-                                           type = type, se.fit = se.fit,
-                                           deriv = deriv, 
-                                           dispersion = dispersion, ...) 
-                fv <- object@family@linkinv(predictor, extra)
-                dimnames(fv) <- list(dimnames(fv)[[1]],
-                                     dimnames(object@fitted.values)[[2]])
-                fv
-             },
-             link = {
-                   type <- "response"
-                   predict.vlm(object, newdata = newdata,
-                               type = type, se.fit = se.fit,
-                               deriv = deriv, dispersion = dispersion, ...) 
-             },
-              terms = {
-                predict.vlm(object, newdata = newdata,
-                            type = type, se.fit = se.fit,
-                            deriv = deriv, dispersion = dispersion, ...) 
-              }
-            )
+    stop("20030811; predict.rrvglm(..., se.fit=TRUE) not complete yet") 
+    pred <- 
+    switch(type,
+           response = {
+             warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
+                     "together; setting 'se.fit = FALSE'")
+             se.fit <- FALSE
+               predictor <- predict.vlm(object, newdata = newdata,
+                                        type = type, se.fit = se.fit,
+                                        deriv = deriv, 
+                                        dispersion = dispersion, ...) 
+             fv <- object@family@linkinv(predictor, extra)
+             dimnames(fv) <- list(dimnames(fv)[[1]],
+                                  dimnames(object@fitted.values)[[2]])
+             fv
+           },
+           link = {
+             type <- "response"
+             predict.vlm(object, newdata = newdata,
+                         type = type, se.fit = se.fit,
+                         deriv = deriv, dispersion = dispersion, ...) 
+           },
+           terms = {
+             predict.vlm(object, newdata = newdata,
+                         type = type, se.fit = se.fit,
+                         deriv = deriv, dispersion = dispersion, ...) 
+           }
+          )
   } else {
-        return(predictvglm(object, newdata = newdata,
-                            type = type, se.fit = se.fit,
-                            deriv = deriv, 
-                            dispersion = dispersion, ...))
-    }
+    return(predictvglm(object, newdata = newdata,
+                       type = type, se.fit = se.fit,
+                       deriv = deriv, 
+                       dispersion = dispersion, ...))
+  }
 
   na.act <- object@na.action
 
@@ -213,8 +213,8 @@ untransformVGAM <- function(object, pred) {
 
 
 
-  LINK <- object@misc$link # link.names # This should be a character vector.
-  EARG <- object@misc$earg # This could be a NULL
+  LINK <- object@misc$link  # link.names # This should be a character vector.
+  EARG <- object@misc$earg  # This could be a NULL
   if (is.null(EARG))
     EARG <- list(theta = NULL)
   if (!is.list(EARG))
@@ -251,8 +251,8 @@ untransformVGAM <- function(object, pred) {
 
 
 
-  for(ii in 1:M) {
-    TTheta <- pred[, ii] # Transformed theta
+  for (ii in 1:M) {
+    TTheta <- pred[, ii]  # Transformed theta
 
 
     use.earg      <-
@@ -261,8 +261,8 @@ untransformVGAM <- function(object, pred) {
       if (llink == 1) LINK else LINK[ii]
 
 
-      use.earg[["inverse"]] <- TRUE # New
-      use.earg[["theta"]] <- TTheta # New
+      use.earg[["inverse"]] <- TRUE  # New
+      use.earg[["theta"]] <- TTheta  # New
       Theta <- do.call(function.name, use.earg)
 
 
@@ -273,7 +273,8 @@ untransformVGAM <- function(object, pred) {
     upred[, ii] <- Theta
   }
 
-  dmn2 <- if (length(names(object@misc$link))) names(object@misc$link) else {
+  dmn2 <- if (length(names(object@misc$link)))
+    names(object@misc$link) else {
      if (length(object@misc$parameters)) object@misc$parameters else NULL
   }
   dimnames(upred) <- list(dimnames(upred)[[1]], dmn2)
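
The predictvglm() and predict.rrvglm() changes above are likewise indentation only; type = "link" returns the linear predictors, type = "response" passes them through object@family@linkinv(), and untransformVGAM() maps each column of the predictor back to the parameter scale via the inverse of its link. A hedged sketch; the binomialff() fit and data below are illustrative only:

    library(VGAM)
    set.seed(2)
    dat <- data.frame(x2 = rnorm(100))
    dat$y <- rbinom(100, size = 1, prob = plogis(-1 + 2 * dat$x2))
    fit <- vglm(y ~ x2, binomialff, data = dat)
    head(predict(fit, type = "link"))      # eta, here logit(prob)
    head(predict(fit, type = "response"))  # fitted probabilities
    str(predict(fit, se.fit = TRUE))       # predictors with standard errors
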
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 0a541cc..62a7524 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -37,7 +37,7 @@ predict.vlm <- function(object,
     }
   }
 
-  ttob <- terms(object)  # 11/8/03; object@terms$terms
+  ttob <- terms(object)  # 20030811; object@terms$terms
 
 
   if (!length(newdata)) {
@@ -139,7 +139,6 @@ predict.vlm <- function(object,
 
 
 
-
     coefs <- coefvlm(object)
     vasgn <- attr(X_vlm, "vassign")
 
@@ -163,10 +162,10 @@ predict.vlm <- function(object,
 
     dname2 <- object@misc$predictors.names
     if (se.fit) {
-      object <- as(object, "vlm") # Coerce
+      object <- as(object, "vlm")  # Coerce
       fit.summary <- summaryvlm(object, dispersion=dispersion)
       sigma <- if (is.numeric(fit.summary@sigma)) fit.summary@sigma else
-               sqrt(deviance(object) / object@df.residual) # was @rss
+               sqrt(deviance(object) / object@df.residual)  # was @res.ss
       pred <- Build.terms.vlm(x = X_vlm, coefs = coefs,
                               cov = sigma^2 * fit.summary@cov.unscaled,
                               assign = vasgn,
@@ -203,12 +202,12 @@ predict.vlm <- function(object,
       ncolBlist <- ncolBlist[-1]
 
     cs <- cumsum(c(1, ncolBlist))  # Like a pointer
-    for(ii in 1:(length(cs)-1))
+    for (ii in 1:(length(cs)-1))
       if (cs[ii+1] - cs[ii] > 1)
-        for(kk in (cs[ii]+1):(cs[ii+1]-1))
+        for (kk in (cs[ii]+1):(cs[ii+1]-1))
           if (se.fit) {
-            pred$fitted.values[, cs[ii]]<- pred$fitted.values[, cs[ii]] +
-                                           pred$fitted.values[, kk]
+            pred$fitted.values[, cs[ii]] <- pred$fitted.values[, cs[ii]] +
+                                            pred$fitted.values[, kk]
             pred$se.fit[, cs[ii]] <- pred$se.fit[, cs[ii]] +
                                      pred$se.fit[, kk]
           } else {
@@ -216,8 +215,8 @@ predict.vlm <- function(object,
           }
 
         if (se.fit) {
-          pred$fitted.values <- pred$fitted.values[, cs[-length(cs)],
-                                                   drop = FALSE]
+          pred$fitted.values <-
+          pred$fitted.values[, cs[-length(cs)], drop = FALSE]
           pred$se.fit <- pred$se.fit[, cs[-length(cs)], drop = FALSE]
         } else {
           pred <- pred[, cs[-length(cs)], drop = FALSE]
@@ -231,7 +230,7 @@ predict.vlm <- function(object,
           pred$se.fit <- aperm(pred$se.fit, c(2, 1, 3))
           dim(pred$fitted.values) <- dim(pred$se.fit) <- c(nn, M*pp)
         } else {
-          dimnames(pred) <- NULL # Saves a warning
+          dimnames(pred) <- NULL  # Saves a warning
           dim(pred) <- c(M, nn, pp)
           pred <- aperm(pred, c(2, 1, 3))
           dim(pred) <- c(nn, M*pp)
@@ -239,7 +238,7 @@ predict.vlm <- function(object,
 
       if (raw) {
         kindex <- NULL
-        for(ii in 1:pp) 
+        for (ii in 1:pp) 
           kindex <- c(kindex, (ii-1)*M + (1:ncolBlist[ii]))
         if (se.fit) {
           pred$fitted.values <- pred$fitted.values[, kindex, drop = FALSE]
@@ -253,10 +252,10 @@ predict.vlm <- function(object,
       dd <- vlabel(names(ncolBlist), temp, M)
       if (se.fit) {
         dimnames(pred$fitted.values) <- 
-        dimnames(pred$se.fit) <- list(if(length(newdata))
+        dimnames(pred$se.fit) <- list(if (length(newdata))
                                       dimnames(newdata)[[1]] else dx1, dd)
       } else {
-        dimnames(pred) <- list(if(length(newdata))
+        dimnames(pred) <- list(if (length(newdata))
                                dimnames(newdata)[[1]] else dx1, dd)
       }
 
@@ -270,9 +269,9 @@ predict.vlm <- function(object,
       }
 
     if (!raw)
-      cs <- cumsum(c(1, M + 0*ncolBlist))
+      cs <- cumsum(c(1, M + 0 * ncolBlist))
     fred <- vector("list", length(ncolBlist))
-    for(ii in 1:length(fred))
+    for (ii in 1:length(fred))
       fred[[ii]] <- cs[ii]:(cs[ii+1]-1)
     names(fred) <- names(ncolBlist)
     if (se.fit) {
@@ -281,7 +280,7 @@ predict.vlm <- function(object,
     } else {
       attr(pred,               "vterm.assign") <- fred
     }
-  } # End of if (type == "terms")
+  }  # End of if (type == "terms")
 
   if (!is.null(xbar)) {
     if (se.fit) {
@@ -310,7 +309,7 @@ setMethod("predict", "vlm",
 predict.vglm.se <- function(fit, ...) {
 
 
-  H_ss <- hatvalues(fit, type = "centralBlocks") # diag = FALSE
+  H_ss <- hatvalues(fit, type = "centralBlocks")  # diag = FALSE
 
   M <- npred(fit)
   nn <- nobs(fit, type = "lm")
@@ -336,9 +335,9 @@ predict.vglm.se <- function(fit, ...) {
     }
 
   var.boldeta_i <- mux5(H_ss, Utinv.array, M = M,
-                        matrix.arg = TRUE) # First M cols are SE^2
+                        matrix.arg = TRUE)  # First M cols are SE^2
 
-  sqrt(var.boldeta_i[, 1:M]) # SE(linear.predictor)
+  sqrt(var.boldeta_i[, 1:M])  # SE(linear.predictor)
 
 
 
@@ -359,7 +358,7 @@ subconstraints <- function(assign, constraints) {
   ans <- vector("list", length(assign))
   if (!length(assign) || !length(constraints))
     stop("assign and/or constraints is empty")
-  for(ii in 1:length(assign))
+  for (ii in 1:length(assign))
     ans[[ii]] <- constraints[[assign[[ii]][1]]]
   names(ans) <- names(assign)
   ans
@@ -369,7 +368,7 @@ subconstraints <- function(assign, constraints) {
 is.linear.term <- function(ch) {
   lchar <- length(ch)
   ans <- rep(FALSE, len = lchar)
-  for(ii in 1:lchar) {
+  for (ii in 1:lchar) {
     nc <- nchar(ch[ii])
     x <- substring(ch[ii], 1:nc, 1:nc)
     ans[ii] <- all(x != "(" & x != "+" & x != "-" &
@@ -381,9 +380,9 @@ is.linear.term <- function(ch) {
 
 
 canonical.Blist <- function(Blist) {
-  for(ii in 1:length(Blist)) {
+  for (ii in 1:length(Blist)) {
     temp <- Blist[[ii]] * 0
-    temp[cbind(1:ncol(temp),1:ncol(temp))] <- 1
+    temp[cbind(1:ncol(temp), 1:ncol(temp))] <- 1
     Blist[[ii]] <- temp
   }
   Blist
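
predict.vlm() is the workhorse underneath both methods. For type = "terms" the hunk above attaches a "vterm.assign" attribute mapping each term to its block of columns, and (without se.fit) a "constant" attribute carrying the centring constant that predict.vgam() strips when derivatives are requested. A small sketch with made-up data, assuming a plain vglm() fit:

    library(VGAM)
    set.seed(3)
    dat <- data.frame(x2 = rnorm(120), x3 = runif(120))
    dat$y <- rpois(120, lambda = exp(0.5 + 0.3 * dat$x2 - 0.2 * dat$x3))
    fit <- vglm(y ~ x2 + x3, poissonff, data = dat)
    tt <- predict(fit, type = "terms")
    colnames(tt)                # one block of columns per term
    attr(tt, "vterm.assign")    # term -> column indices
    attr(tt, "constant")        # centring constant(s)
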
diff --git a/R/print.vglm.q b/R/print.vglm.q
index f42fb83..2fdb2e1 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -44,7 +44,7 @@ show.vglm <- function(object) {
 
   if (length(object@criterion)) {
     ncrit <- names(object@criterion)
-    for(ii in ncrit)
+    for (ii in ncrit)
       if (ii != "loglikelihood" &&
           ii != "deviance")
           cat(paste(ii, ":", sep = ""),
@@ -155,7 +155,7 @@ print.vglm <- function(x, ...) {
 
   if (length(x@criterion)) {
     ncrit <- names(x@criterion)
-    for(ii in ncrit)
+    for (ii in ncrit)
       if (ii != "loglikelihood" && ii != "deviance")
           cat(paste(ii, ":", sep = ""),
               format(x@criterion[[ii]]), "\n")
diff --git a/R/print.vlm.q b/R/print.vlm.q
index c65bd44..aa67391 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -34,9 +34,9 @@ show.vlm <- function(object) {
   if (length(deviance(object)) &&
       is.finite(deviance(object)))
     cat("Deviance:", format(deviance(object)), "\n")
-  if (length(object@rss) &&
-      is.finite(object@rss))
-    cat("Residual Sum of Squares:", format(object@rss), "\n")
+  if (length(object@res.ss) &&
+      is.finite(object@res.ss))
+    cat("Residual Sum of Squares:", format(object@res.ss), "\n")
 
   invisible(object)
 }
@@ -79,9 +79,9 @@ print.vlm <- function(x, ...) {
   if (length(deviance(x)) &&
       is.finite(deviance(x)))
     cat("Deviance:", format(deviance(x)), "\n")
-  if (length(x@rss) &&
-      is.finite(x@rss))
-    cat("Residual Sum of Squares:", format(x@rss), "\n")
+  if (length(x@res.ss) &&
+      is.finite(x@res.ss))
+    cat("Residual Sum of Squares:", format(x@res.ss), "\n")
 
   invisible(x)
 }
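
The only substantive change in print.vlm.q is the slot rename from 'rss' to 'res.ss' (mirrored in the summaryvlm()/predict.vlm() comment above); code that reads the slot directly needs the new name. A hedged sketch, assuming vlm() is called directly on toy data:

    library(VGAM)
    set.seed(4)
    dat <- data.frame(x2 = 1:20)
    dat$y <- 2 + 3 * dat$x2 + rnorm(20)
    fit <- vlm(y ~ x2, data = dat)
    slot(fit, "res.ss")   # was slot(fit, "rss") in earlier versions
    fit                   # show() reports "Residual Sum of Squares: ..."
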
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index bfe7214..ca32a97 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -3,7 +3,8 @@
 # All rights reserved.
 
 
-qrrvglm.control = function(Rank = 1,
+
+qrrvglm.control <- function(Rank = 1,
           Bestof = if (length(Cinit)) 1 else 10,
           checkwz = TRUE,
           Cinit = NULL,
@@ -14,7 +15,8 @@ qrrvglm.control = function(Rank = 1,
           FastAlgorithm = TRUE,
           GradientFunction = TRUE,
           Hstep = 0.001,
-          isdlv = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
+          isd.latvar = rep(c(2, 1, rep(0.5, length = Rank)),
+                           length = Rank),
           iKvector = 0.1,
           iShape = 0.1,
           ITolerances = FALSE,
@@ -26,7 +28,7 @@ qrrvglm.control = function(Rank = 1,
           Norrr = NA,
           optim.maxit = 20,
           Parscale = if (ITolerances) 0.001 else 1.0,
-          SD.Cinit = 0.02,
+          sd.Cinit = 0.02,
           SmallNo = 5.0e-13,
           trace = TRUE,
           Use.Init.Poisson.QO = TRUE,
@@ -50,28 +52,28 @@ qrrvglm.control = function(Rank = 1,
       stop("bad input for 'iShape'")
     if (!is.Numeric(iKvector, positive = TRUE))
       stop("bad input for 'iKvector'")
-    if (!is.Numeric(isdlv, positive = TRUE))
-      stop("bad input for 'isdlv'")
-    if (any(isdlv < 0.2 |
-            isdlv > 10))
-        stop("isdlv values must lie between 0.2 and 10")
-    if (length(isdlv) > 1 && any(diff(isdlv) > 0))
-        stop("successive isdlv values must not increase")
+    if (!is.Numeric(isd.latvar, positive = TRUE))
+      stop("bad input for 'isd.latvar'")
+    if (any(isd.latvar < 0.2 |
+            isd.latvar > 10))
+        stop("isd.latvar values must lie between 0.2 and 10")
+    if (length(isd.latvar) > 1 && any(diff(isd.latvar) > 0))
+        stop("successive isd.latvar values must not increase")
     if (!is.Numeric(epsilon, positive = TRUE,
-                    allowable.length = 1)) 
+                    length.arg = 1)) 
         stop("bad input for 'epsilon'")
     if (!is.Numeric(Etamat.colmax, positive = TRUE,
-                    allowable.length = 1) ||
+                    length.arg = 1) ||
         Etamat.colmax < Rank)
         stop("bad input for 'Etamat.colmax'")
     if (!is.Numeric(Hstep, positive = TRUE, 
-                   allowable.length = 1)) 
+                   length.arg = 1)) 
         stop("bad input for 'Hstep'")
     if (!is.Numeric(maxitl, positive = TRUE,
-                    allowable.length = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE)) 
         stop("bad input for 'maxitl'")
     if (!is.Numeric(imethod, positive = TRUE,
-                    allowable.length = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE)) 
         stop("bad input for 'imethod'")
     if (!is.Numeric(Maxit.optim, integer.valued = TRUE, positive = TRUE))
         stop("Bad input for 'Maxit.optim'")
@@ -79,19 +81,19 @@ qrrvglm.control = function(Rank = 1,
         stop("bad input for 'MUXfactor'")
     if (any(MUXfactor < 1 | MUXfactor > 10))
         stop("MUXfactor values must lie between 1 and 10")
-    if (!is.Numeric(optim.maxit, allowable.length = 1,
+    if (!is.Numeric(optim.maxit, length.arg = 1,
                     integer.valued = TRUE, positive = TRUE))
         stop("Bad input for 'optim.maxit'")
     if (!is.Numeric(Rank, positive = TRUE,
-                    allowable.length = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE)) 
         stop("bad input for 'Rank'")
-    if (!is.Numeric(SD.Cinit, positive = TRUE,
-                    allowable.length = 1)) 
-        stop("bad input for 'SD.Cinit'")
+    if (!is.Numeric(sd.Cinit, positive = TRUE,
+                    length.arg = 1)) 
+        stop("bad input for 'sd.Cinit'")
     if (ITolerances && !EqualTolerances)
         stop("'EqualTolerances' must be TRUE if 'ITolerances' is TRUE")
     if (!is.Numeric(Bestof, positive = TRUE,
-                    allowable.length = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE)) 
         stop("bad input for 'Bestof'")
 
 
@@ -109,16 +111,16 @@ qrrvglm.control = function(Rank = 1,
         length(checkwz) != 1)
         stop("bad input for 'checkwz'")
     if (!is.Numeric(wzepsilon,
-                    allowable.length = 1, positive = TRUE))
+                    length.arg = 1, positive = TRUE))
         stop("bad input for 'wzepsilon'")
 
     ans <- list(
            Bestof = Bestof,
-           checkwz=checkwz,
+           checkwz = checkwz,
            Cinit = Cinit,
            Crow1positive=as.logical(rep(Crow1positive, len = Rank)),
-           ConstrainedQO = TRUE, # A constant, not a control parameter
-           Corner = FALSE, # Needed for valt.1iter()
+           ConstrainedQO = TRUE,  # A constant, not a control parameter
+           Corner = FALSE,  # Needed for valt.1iter()
            Dzero = NULL,
            epsilon = epsilon,
            EqualTolerances = EqualTolerances,
@@ -126,14 +128,14 @@ qrrvglm.control = function(Rank = 1,
            FastAlgorithm = FastAlgorithm,
            GradientFunction = GradientFunction,
            Hstep = Hstep,
-           isdlv = rep(isdlv, len = Rank),
+           isd.latvar = rep(isd.latvar, len = Rank),
            iKvector = as.numeric(iKvector),
            iShape = as.numeric(iShape),
            ITolerances = ITolerances,
            maxitl = maxitl,
            imethod = imethod,
            Maxit.optim = Maxit.optim,
-           min.criterion = TRUE, # needed for calibrate 
+           min.criterion = TRUE,  # needed for calibrate 
            MUXfactor = rep(MUXfactor, length = Rank),
            noRRR = noRRR,
            optim.maxit = optim.maxit,
@@ -142,13 +144,15 @@ qrrvglm.control = function(Rank = 1,
            Quadratic = TRUE,
            Rank = Rank,
            save.weight = FALSE,
-           SD.Cinit = SD.Cinit,
+           sd.Cinit = sd.Cinit,
            SmallNo = SmallNo,
-           szero = NULL,
-           Svd.arg = TRUE, Alpha = 0.5, Uncorrelated.lv = TRUE,
+           str0 = NULL,
+           Svd.arg = TRUE, Alpha = 0.5, Uncorrelated.latvar = TRUE,
            trace = trace,
            Use.Init.Poisson.QO = as.logical(Use.Init.Poisson.QO)[1],
            wzepsilon = wzepsilon)
     ans
 }
 
+
+
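Besides switching '=' to '<-', this hunk renames several qrrvglm.control() arguments and list components: isdlv becomes isd.latvar, SD.Cinit becomes sd.Cinit, the is.Numeric() checks use length.arg instead of allowable.length, and the returned list carries str0 and Uncorrelated.latvar in place of szero and Uncorrelated.lv. A short sketch of the renamed arguments; the values are arbitrary:

    library(VGAM)
    ctrl <- qrrvglm.control(Rank = 1, isd.latvar = c(2, 1), sd.Cinit = 0.05)
    ctrl$isd.latvar   # replaces the old 'isdlv' component
    ctrl$sd.Cinit     # replaces the old 'SD.Cinit' component
    ctrl$str0         # NULL; replaces the old 'szero' component
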
diff --git a/R/qtplot.q b/R/qtplot.q
index 0bcdc54..404641a 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -13,35 +13,33 @@
 
 
  
-qtplot.lms.bcn <- function(percentiles = c(25,50,75),
-                           eta = NULL, yoffset = 0)
-{
-
-    lp = length(percentiles)
-    answer <- matrix(as.numeric(NA), nrow(eta), lp,
-                     dimnames = list(dimnames(eta)[[1]],
-                     paste(as.character(percentiles), "%", sep = "")))
-    for(ii in 1:lp) {
-        answer[, ii] <- eta[, 2] * (1+eta[, 1] * eta[, 3] *
-                        qnorm(percentiles[ii]/100))^(1/eta[, 1])
-    }
-    answer 
+qtplot.lms.bcn <- function(percentiles = c(25, 50, 75),
+                           eta = NULL, yoffset = 0) {
+
+  lp <- length(percentiles)
+  answer <- matrix(as.numeric(NA), nrow(eta), lp,
+                   dimnames = list(dimnames(eta)[[1]],
+                   paste(as.character(percentiles), "%", sep = "")))
+  for (ii in 1:lp) {
+    answer[, ii] <- eta[, 2] * (1+eta[, 1] * eta[, 3] *
+                    qnorm(percentiles[ii]/100))^(1/eta[, 1])
+  }
+  answer 
 }
  
  
 qtplot.lms.bcg <- function(percentiles = c(25,50,75),
-                           eta = NULL, yoffset = 0)
-{
+                           eta = NULL, yoffset = 0) {
 
   cc <- percentiles
-  lp = length(percentiles)
+  lp <- length(percentiles)
   answer <- matrix(as.numeric(NA), nrow(eta), lp,
                    dimnames = list(dimnames(eta)[[1]],
                    paste(as.character(percentiles), "%", sep = "")))
   lambda <- eta[, 1]
   sigma <- eta[, 3]
   shape <- 1 / (lambda * sigma)^2
-  for(ii in 1:lp) {
+  for (ii in 1:lp) {
     ccc <- rep(cc[ii]/100, len=nrow(eta))
     ccc <- ifelse(lambda > 0, ccc, 1-ccc)
     answer[, ii] <- eta[, 2] *
@@ -53,18 +51,17 @@ qtplot.lms.bcg <- function(percentiles = c(25,50,75),
  
 qtplot.lms.yjn2 <- 
 qtplot.lms.yjn <- function(percentiles = c(25,50,75),
-                           eta = NULL, yoffset = 0)
-{
+                           eta = NULL, yoffset = 0) {
 
   cc <- percentiles
-  lp = length(percentiles)
+  lp <- length(percentiles)
   answer <- matrix(as.numeric(NA), nrow(eta), lp,
                    dimnames = list(dimnames(eta)[[1]],
                    paste(as.character(percentiles), "%", sep = "")))
   lambda <- eta[, 1]
   mu <- eta[, 2]
   sigma <- eta[, 3]  # Link function already taken care of above
-  for(ii in 1:lp) {
+  for (ii in 1:lp) {
     ccc <- mu + sigma * qnorm(cc[ii]/100)
     answer[, ii] <- yeo.johnson(ccc, lambda, inverse= TRUE) - yoffset
   }
@@ -74,193 +71,192 @@ qtplot.lms.yjn <- function(percentiles = c(25,50,75),
  
 qtplot.default <- function(object, ...) {
 
-    warning("no methods function. Returning the object")
-    invisible(object)
+  warning("no methods function. Returning the object")
+  invisible(object)
 }
 
 
 
 "qtplot.vglm" <- function(object, Attach= TRUE, ...) {
 
-    LL <- length(object@family@vfamily)
-    newcall = paste("qtplot.", object@family@vfamily[LL], 
-                    "(object, ...)", sep = "")
-    newcall = parse(text = newcall)[[1]]
+  LL <- length(object@family@vfamily)
+  newcall <- paste("qtplot.", object@family@vfamily[LL], 
+                   "(object, ...)", sep = "")
+  newcall <- parse(text = newcall)[[1]]
 
-    if (Attach) {
-        object@post$qtplot = eval(newcall)
-        invisible(object)
-    } else 
-        eval(newcall)
+  if (Attach) {
+    object@post$qtplot <- eval(newcall)
+    invisible(object)
+  } else 
+    eval(newcall)
 }
 
 
 qtplot.lmscreg <- function(object,
                            newdata = NULL,
-                           percentiles=object@misc$percentiles,
-                           plot.it= TRUE, ...) {
-
-    same <- length(percentiles) == length(object@misc$percentiles) &&
-            all(percentiles==object@misc$percentiles)
-
-    lp <- length(percentiles)
-    if (same) {
-        fitted.values <- if (!length(newdata))
-          object@fitted.values else {
-          predict(object, newdata = newdata, type = "response") 
-        }
-        fitted.values <- as.matrix(fitted.values)
-    } else {
-        if (!is.numeric(percentiles))
-            stop("'percentiles' must be specified")
+                           percentiles = object@misc$percentiles,
+                           show.plot = TRUE, ...) {
 
-        eta <- if (length(newdata))
-                 predict(object, newdata = newdata, type = "link") else
-                 object@predictors
+  same <- length(percentiles) == length(object@misc$percentiles) &&
+          all(percentiles == object@misc$percentiles)
 
+  lp <- length(percentiles)
+  if (same) {
+    fitted.values <- if (!length(newdata))
+      object@fitted.values else {
+      predict(object, newdata = newdata, type = "response") 
+    }
+    fitted.values <- as.matrix(fitted.values)
+  } else {
+    if (!is.numeric(percentiles))
+        stop("'percentiles' must be specified")
 
-        if (!length(double.check.earg <- object@misc$earg))
-          double.check.earg <- list(theta = NULL)
-        eta  <- eta2theta(eta, link = object@misc$link,
-                          earg = double.check.earg) # lambda, mu, sigma
+    eta <- if (length(newdata))
+             predict(object, newdata = newdata, type = "link") else
+             object@predictors
 
 
+    if (!length(double.check.earg <- object@misc$earg))
+      double.check.earg <- list(theta = NULL)
+      eta  <- eta2theta(eta, link = object@misc$link,
+                        earg = double.check.earg)  # lambda, mu, sigma
 
-        if (!is.logical(expectiles <- object@misc$expectiles)) {
-            expectiles <- FALSE
-        }
 
-        newcall = paste(if (expectiles) "explot." else "qtplot.",
-                        object@family@vfamily[1],
-                        "(percentiles = percentiles",
-                        ", eta = eta, yoffset=object@misc$yoffset)",
-                        sep = "")
-        newcall = parse(text = newcall)[[1]]
-        fitted.values = as.matrix( eval(newcall) )
-        dimnames(fitted.values) <-
-          list(dimnames(eta)[[1]],
-               paste(as.character(percentiles), "%", sep = ""))
-    }
 
-    if (plot.it) {
-        plotqtplot.lmscreg(fitted.values = fitted.values,
-                           object = object,
-                           newdata = newdata,
-                           lp = lp,
-                           percentiles = percentiles, ...)
+    if (!is.logical(expectiles <- object@misc$expectiles)) {
+      expectiles <- FALSE
     }
 
-    list(fitted.values = fitted.values, percentiles = percentiles)
+    newcall <- paste(if (expectiles) "explot." else "qtplot.",
+                    object@family@vfamily[1],
+                    "(percentiles = percentiles",
+                    ", eta = eta, yoffset=object@misc$yoffset)",
+                    sep = "")
+    newcall <- parse(text = newcall)[[1]]
+    fitted.values <- as.matrix( eval(newcall) )
+    dimnames(fitted.values) <-
+      list(dimnames(eta)[[1]],
+           paste(as.character(percentiles), "%", sep = ""))
+  }
+
+  if (show.plot) {
+    plotqtplot.lmscreg(fitted.values = fitted.values,
+                       object = object,
+                       newdata = newdata,
+                       lp = lp,
+                       percentiles = percentiles, ...)
+  }
+
+  list(fitted.values = fitted.values, percentiles = percentiles)
 }
     
  
 
-plotqtplot.lmscreg <- function(fitted.values, object,
-                          newdata = NULL,
-                          percentiles = object@misc$percentiles, 
-                          lp = NULL,
-                          add.arg = FALSE,
-                          y = if (length(newdata)) FALSE else TRUE,
-                          spline.fit = FALSE,
-                          label = TRUE,
-                          size.label = 0.06,
-                          xlab = NULL, ylab = "",
-                          pch = par()$pch, pcex = par()$cex,
-                          pcol.arg = par()$col,
-                          xlim = NULL, ylim = NULL,
-                          llty.arg = par()$lty,
-                          lcol.arg = par()$col, llwd.arg = par()$lwd,
-                          tcol.arg = par()$col, 
-                          tadj = 1, ...)
-{
-
-
-
-    if (!length(newdata)) {
-        X <- model.matrixvlm(object, type = "lm")
-        if (is.matrix(X) && length(object@y) && ncol(X)==2 && 
-           dimnames(X)[[2]][1] == "(Intercept)")
-        {
-            xx <- X[, 2]
-            if (is.null(xlab)) {
-                xlab <- if (object@misc$nonparametric)
-                        as.vector(slot(object, "s.xargument")) else
-                        names(object@assign)[2]
-                }
-
-            if (!add.arg) {
-                if (!is.numeric(xlim))
-                    xlim <- if (label)
-                        c(min(xx), max(xx)+size.label*diff(range(xx))) else
-                        c(min(xx), max(xx))
-                fred <- cbind(object@y, fitted.values)
-                if (!is.numeric(ylim))
-                    ylim <- c(min(fred), max(fred))
-                matplot(x=xx, y=fred,
-                        xlab = xlab, ylab = ylab, type = "n", 
-                        xlim=xlim, ylim=ylim, ...)
-            }
-
-            if (y && length(object@y))
-                matpoints(x=xx, y=object@y, pch = pch, cex = pcex,
-                          col = pcol.arg)
-        } else {
-                    warning(paste("there is not a single covariate.",
-                                  "Returning the object."))
-                    return(fitted.values) 
-        }
+plotqtplot.lmscreg <-
+  function(fitted.values, object,
+           newdata = NULL,
+           percentiles = object@misc$percentiles, 
+           lp = NULL,
+           add.arg = FALSE,
+           y = if (length(newdata)) FALSE else TRUE,
+           spline.fit = FALSE,
+           label = TRUE,
+           size.label = 0.06,
+           xlab = NULL, ylab = "",
+           pch = par()$pch, pcex = par()$cex,
+           pcol.arg = par()$col,
+           xlim = NULL, ylim = NULL,
+           llty.arg = par()$lty,
+           lcol.arg = par()$col, llwd.arg = par()$lwd,
+           tcol.arg = par()$col, 
+           tadj = 1, ...) {
+
+
+
+  if (!length(newdata)) {
+    X <- model.matrixvlm(object, type = "lm")
+    if (is.matrix(X) && length(object@y) && ncol(X)==2 && 
+       dimnames(X)[[2]][1] == "(Intercept)") {
+      xx <- X[, 2]
+      if (is.null(xlab)) {
+        xlab <- if (object@misc$nonparametric)
+                as.vector(slot(object, "s.xargument")) else
+                names(object@assign)[2]
+      }
+
+      if (!add.arg) {
+        if (!is.numeric(xlim))
+          xlim <- if (label)
+              c(min(xx), max(xx) + size.label*diff(range(xx))) else
+              c(min(xx), max(xx))
+        fred <- cbind(object@y, fitted.values)
+        if (!is.numeric(ylim))
+          ylim <- c(min(fred), max(fred))
+        matplot(x = xx, y = fred,
+                xlab = xlab, ylab = ylab, type = "n", 
+                xlim = xlim, ylim = ylim, ...)
+      }
+
+      if (y && length(object@y))
+        matpoints(x = xx, y = object@y, pch = pch, cex = pcex,
+                  col = pcol.arg)
     } else {
+      warning("there is not a single covariate. ",
+              "Returning the object.")
+      return(fitted.values) 
+    }
+  } else {
+
+    firstterm <- attr(terms(object), "term.labels")[1]
 
-        firstterm = attr(terms(object), "term.labels")[1]
-
-        if (object@misc$nonparametric &&
-           length(object@s.xargument[firstterm]))
-            firstterm <-  object@s.xargument[firstterm]
-
-        xx <- newdata[[firstterm]] 
-        if (!is.numeric(xx))
-            stop("couldn't extract the 'primary' variable from newdata")
-
-        if (!add.arg) {
-            if (is.null(xlab)) 
-                xlab <- firstterm 
-            if (!is.numeric(xlim))
-                xlim <- if (label)
-                    c(min(xx), max(xx)+size.label*diff(range(xx))) else
-                    c(min(xx), max(xx))
-            if (!is.numeric(ylim))
-                ylim <- c(min(fitted.values), max(fitted.values))
-            matplot(x=xx, y=fitted.values,
-                    xlab = xlab, ylab = ylab, type = "n", 
-                        xlim=xlim, ylim=ylim, col = pcol.arg)
-        }
-        if (y && length(object@y))
-            matpoints(x=xx, y=object@y, pch = pch, cex = pcex,
-                      col = pcol.arg)
+    if (object@misc$nonparametric &&
+       length(object@s.xargument[firstterm]))
+      firstterm <-  object@s.xargument[firstterm]
 
+    xx <- newdata[[firstterm]] 
+    if (!is.numeric(xx))
+      stop("couldn't extract the 'primary' variable from newdata")
+
+    if (!add.arg) {
+      if (is.null(xlab)) 
+        xlab <- firstterm 
+      if (!is.numeric(xlim))
+        xlim <- if (label)
+            c(min(xx), max(xx)+size.label*diff(range(xx))) else
+            c(min(xx), max(xx))
+      if (!is.numeric(ylim))
+        ylim <- c(min(fitted.values), max(fitted.values))
+      matplot(x = xx, y = fitted.values,
+              xlab = xlab, ylab = ylab, type = "n", 
+                  xlim = xlim, ylim = ylim, col = pcol.arg)
     }
+    if (y && length(object@y))
+      matpoints(x = xx, y = object@y, pch = pch, cex = pcex,
+                col = pcol.arg)
+
+  }
 
-    tcol.arg = rep(tcol.arg, length = lp)
-    lcol.arg = rep(lcol.arg, length = lp)
-    llwd.arg  = rep(llwd.arg,  length = lp)
-    llty.arg  = rep(llty.arg,  length = lp)
-    for(ii in 1:lp) {
-        temp <- cbind(xx, fitted.values[, ii])
-        temp <- temp[sort.list(temp[, 1]),]
-        index <- !duplicated(temp[, 1])
-        if (spline.fit) {
-          lines(spline(temp[index, 1], temp[index, 2]),
-                lty = llty.arg[ii], col = lcol.arg[ii], err = -1,
-                lwd = llwd.arg[ii])
-        } else {
-            lines(temp[index, 1], temp[index, 2],
-                  lty = llty.arg[ii], col = lcol.arg[ii], err = -1,
-                  lwd = llwd.arg[ii])
-        }
-        if (label)
-            text(par()$usr[2], temp[nrow(temp), 2],
-                 paste( percentiles[ii], "%", sep = ""),
-                 adj = tadj, col = tcol.arg[ii], err = -1)
+    tcol.arg <- rep(tcol.arg, length = lp)
+    lcol.arg <- rep(lcol.arg, length = lp)
+    llwd.arg <- rep(llwd.arg, length = lp)
+    llty.arg <- rep(llty.arg, length = lp)
+    for (ii in 1:lp) {
+      temp <- cbind(xx, fitted.values[, ii])
+      temp <- temp[sort.list(temp[, 1]), ]
+      index <- !duplicated(temp[, 1])
+      if (spline.fit) {
+        lines(spline(temp[index, 1], temp[index, 2]),
+              lty = llty.arg[ii], col = lcol.arg[ii], err = -1,
+              lwd = llwd.arg[ii])
+      } else {
+        lines(temp[index, 1], temp[index, 2],
+              lty = llty.arg[ii], col = lcol.arg[ii], err = -1,
+              lwd = llwd.arg[ii])
+      }
+      if (label)
+        text(par()$usr[2], temp[nrow(temp), 2],
+             paste( percentiles[ii], "%", sep = ""),
+             adj = tadj, col = tcol.arg[ii], err = -1)
     }
 
     invisible(fitted.values)
@@ -276,9 +272,9 @@ if (TRUE) {
   setMethod("qtplot", signature(object = "vglm"),
             function(object, ...) 
             invisible(qtplot.vglm(object, ...)))
-     setMethod("qtplot", signature(object = "vgam"),
-               function(object, ...) 
-               invisible(qtplot.vglm(object, ...)))
+  setMethod("qtplot", signature(object = "vgam"),
+            function(object, ...) 
+            invisible(qtplot.vglm(object, ...)))
 }
 
 
@@ -290,16 +286,16 @@ if (TRUE) {
 "qtplot.vextremes" <- function(object, ...) {
 
 
-    newcall = paste("qtplot.", object@family@vfamily[1],
-                    "(object = object, ... )", sep = "")
-    newcall = parse(text = newcall)[[1]]
-    eval(newcall)
+  newcall <- paste("qtplot.", object@family@vfamily[1],
+                  "(object = object, ... )", sep = "")
+  newcall <- parse(text = newcall)[[1]]
+  eval(newcall)
 }
     
  
 qtplot.egumbel <-
 qtplot.gumbel <-
-    function(object, plot.it = TRUE, y.arg = TRUE,
+    function(object, show.plot = TRUE, y.arg = TRUE,
              spline.fit = FALSE, label = TRUE,
              R = object at misc$R,
              percentiles = object at misc$percentiles,
@@ -309,89 +305,86 @@ qtplot.gumbel <-
              pch = par()$pch, pcol.arg = par()$col,
              llty.arg = par()$lty, lcol.arg = par()$col,
              llwd.arg = par()$lwd,
-             tcol.arg = par()$col, tadj = 1, ...)
-{
-    if (!is.logical(mpv) || length(mpv) != 1)
-      stop("bad input for 'mpv'")
-    if (!length(percentiles) ||
-       (!is.Numeric(percentiles, positive = TRUE) ||
-        max(percentiles) >= 100))
-      stop("bad input for 'percentiles'")
+             tcol.arg = par()$col, tadj = 1, ...) {
+  if (!is.logical(mpv) || length(mpv) != 1)
+    stop("bad input for 'mpv'")
+  if (!length(percentiles) ||
+     (!is.Numeric(percentiles, positive = TRUE) ||
+      max(percentiles) >= 100))
+    stop("bad input for 'percentiles'")
 
 
 
-    eta <- predict(object)
+  eta <- predict(object)
 
 
-    if (is.Numeric(R))
-        R <- rep(R, length=nrow(eta))
+  if (is.Numeric(R))
+    R <- rep(R, length=nrow(eta))
 
-    if (!is.Numeric(percentiles))
-        stop("the 'percentiles' argument needs to be assigned a value")
+  if (!is.Numeric(percentiles))
+    stop("the 'percentiles' argument needs to be assigned a value")
 
 
-    extra = object@extra
-    extra$mpv = mpv  # Overwrite if necessary
-    extra$R = R
-    extra$percentiles = percentiles
-    fitted.values = object@family@linkinv(eta = eta, extra = extra) 
+  extra <- object@extra
+  extra$mpv <- mpv  # Overwrite if necessary
+  extra$R <- R
+  extra$percentiles <- percentiles
+  fitted.values <- object@family@linkinv(eta = eta, extra = extra) 
 
-    answer = list(fitted.values = fitted.values,
-                  percentiles = percentiles)
+  answer <- list(fitted.values = fitted.values,
+                 percentiles = percentiles)
 
-    if (!plot.it)
-        return(answer)
+  if (!show.plot)
+    return(answer)
 
 
 
-    lp = length(percentiles)  # Does not include mpv
-    tcol.arg = rep(tcol.arg, length = lp+mpv)
-    lcol.arg = rep(lcol.arg, length = lp+mpv)
-    llwd.arg  = rep(llwd.arg,  length = lp+mpv)
-    llty.arg  = rep(llty.arg,  length = lp+mpv)
+  lp <- length(percentiles)  # Does not include mpv
+  tcol.arg <- rep(tcol.arg, length = lp+mpv)
+  lcol.arg <- rep(lcol.arg, length = lp+mpv)
+  llwd.arg <- rep(llwd.arg, length = lp+mpv)
+  llty.arg <- rep(llty.arg, length = lp+mpv)
 
-    X <- model.matrixvlm(object, type = "lm")
-    if (is.matrix(X) && length(object@y) && ncol(X)==2 && 
-       dimnames(X)[[2]][1] == "(Intercept)")
-    {
-        xx <- X[, 2]
-        if (!length(xlab)) 
-            xlab <- if (object@misc$nonparametric &&
-                       length(object@s.xargument))
-                        object@s.xargument else names(object@assign)[2]
-
-        if (!add.arg)
-            matplot(x=xx, y=cbind(object@y, fitted.values), main = main,
-                    xlab = xlab, ylab = ylab, type = "n", ...)
-
-        if (y.arg) {
-               matpoints(x=xx, y=object@y, pch = pch, col = pcol.arg) 
-        }
-    } else {
-        warning("there is not a single covariate.")
-        return(answer)
+  X <- model.matrixvlm(object, type = "lm")
+  if (is.matrix(X) && length(object@y) && ncol(X)==2 && 
+     dimnames(X)[[2]][1] == "(Intercept)") {
+    xx <- X[, 2]
+    if (!length(xlab)) 
+      xlab <- if (object@misc$nonparametric &&
+                 length(object@s.xargument))
+                  object@s.xargument else names(object@assign)[2]
+
+    if (!add.arg)
+      matplot(x = xx, y = cbind(object@y, fitted.values), main = main,
+              xlab = xlab, ylab = ylab, type = "n", ...)
+
+    if (y.arg) {
+       matpoints(x = xx, y = object@y, pch = pch, col = pcol.arg) 
     }
+  } else {
+    warning("there is not a single covariate.")
+    return(answer)
+  }
 
-    for(ii in 1:(lp+mpv))
-    {
-        temp <- cbind(xx, fitted.values[, ii])
-        temp <- temp[sort.list(temp[, 1]),]
-        index <- !duplicated(temp[, 1])
-        if (spline.fit) {
-            lines(spline(temp[index, 1], temp[index, 2]),
-                  lty = llty.arg[ii], col = lcol.arg[ii], lwd = llwd.arg[ii])
-        } else {
-            lines(temp[index, 1], temp[index, 2],
-                  lty = llty.arg[ii], col = lcol.arg[ii], lwd = llwd.arg[ii])
-        }
-        if (label) {
-            mylabel = (dimnames(answer$fitted)[[2]])[ii]
-            text(par()$usr[2], temp[nrow(temp), 2],
-                 mylabel, adj=tadj, col=tcol.arg[ii], err = -1)
-        }
+    for (ii in 1:(lp+mpv)) {
+      temp <- cbind(xx, fitted.values[, ii])
+      temp <- temp[sort.list(temp[, 1]), ]
+      index <- !duplicated(temp[, 1])
+      if (spline.fit) {
+        lines(spline(temp[index, 1], temp[index, 2]),
+              lty = llty.arg[ii], col = lcol.arg[ii], lwd = llwd.arg[ii])
+      } else {
+        lines(temp[index, 1], temp[index, 2],
+              lty = llty.arg[ii], col = lcol.arg[ii], lwd = llwd.arg[ii])
+      }
+      if (label) {
+        mylabel <- (dimnames(answer$fitted)[[2]])[ii]
+        text(par()$usr[2], temp[nrow(temp), 2],
+             mylabel, adj = tadj, col = tcol.arg[ii], err = -1)
     }
+  }
 
-    invisible(answer)
+  invisible(answer)
 }
 
 
@@ -401,16 +394,15 @@ qtplot.gumbel <-
 deplot.lms.bcn <- function(object,
                            newdata,
                            y.arg, 
-                           eta0)
-{
-    if (!any(object@family@vfamily == "lms.bcn")) 
-        warning("I think you've called the wrong function")
+                           eta0) {
+  if (!any(object@family@vfamily == "lms.bcn")) 
+    warning("I think you've called the wrong function")
 
-    Zvec <- ((y.arg/eta0[, 2])^(eta0[, 1]) -1) / (eta0[, 1] * eta0[, 3])
-    dZ.dy <- ((y.arg/eta0[, 2])^(eta0[, 1]-1)) / (eta0[, 2] * eta0[, 3])
-    yvec <- dnorm(Zvec) * abs(dZ.dy) 
+  Zvec <- ((y.arg/eta0[, 2])^(eta0[, 1]) -1) / (eta0[, 1] * eta0[, 3])
+  dZ.dy <- ((y.arg/eta0[, 2])^(eta0[, 1]-1)) / (eta0[, 2] * eta0[, 3])
+  yvec <- dnorm(Zvec) * abs(dZ.dy) 
 
-    list(newdata=newdata, y=y.arg, density=yvec)
+  list(newdata = newdata, y = y.arg, density = yvec)
 }
 
 
@@ -418,19 +410,18 @@ deplot.lms.bcn <- function(object,
 deplot.lms.bcg <- function(object,
                            newdata,
                            y.arg, 
-                           eta0)
-{
-    if (!any(object@family@vfamily == "lms.bcg")) 
-        warning("I think you've called the wrong function")
-
-    Zvec <- (y.arg/eta0[, 2])^(eta0[, 1])  # different from lms.bcn
-    dZ.dy <- ((y.arg/eta0[, 2])^(eta0[, 1]-1)) * eta0[, 1] / eta0[, 2]
-    lambda <- eta0[, 1]
-    sigma <- eta0[, 3]
-    shape <- 1 / (lambda * sigma)^2
-    yvec <- dgamma(Zvec, shape=shape, rate=shape) * abs(dZ.dy)
-
-    list(newdata=newdata, y=y.arg, density=yvec)
+                           eta0) {
+  if (!any(object@family@vfamily == "lms.bcg")) 
+    warning("I think you've called the wrong function")
+
+  Zvec <- (y.arg/eta0[, 2])^(eta0[, 1])  # different from lms.bcn
+  dZ.dy <- ((y.arg/eta0[, 2])^(eta0[, 1]-1)) * eta0[, 1] / eta0[, 2]
+  lambda <- eta0[, 1]
+  sigma <- eta0[, 3]
+  shape <- 1 / (lambda * sigma)^2
+  yvec <- dgamma(Zvec, shape = shape, rate = shape) * abs(dZ.dy)
+
+  list(newdata = newdata, y = y.arg, density = yvec)
 }
 
 
@@ -438,83 +429,83 @@ deplot.lms.yjn2 <-
 deplot.lms.yjn <- function(object,
                            newdata,
                            y.arg, 
-                           eta0)
-{
+                           eta0) {
 
-    if (!length(intersect(object@family@vfamily, c("lms.yjn","lms.yjn2"))))
-        warning("I think you've called the wrong function")
+  if (!length(intersect(object@family@vfamily, c("lms.yjn","lms.yjn2"))))
+    warning("I think you've called the wrong function")
 
-    lambda <- eta0[, 1]
-    Zvec <- (yeo.johnson(y.arg+object@misc$yoffset, lambda = eta0[, 1]) -
-                 eta0[, 2]) / eta0[, 3]
-    dZ.dy <- dyj.dy.yeojohnson(y.arg+object@misc$yoffset,
-                               lambda = eta0[, 1]) / eta0[, 3]
-    yvec <- dnorm(Zvec) * abs(dZ.dy) 
+  lambda <- eta0[, 1]
+  Zvec <- (yeo.johnson(y.arg+object@misc$yoffset, lambda = eta0[, 1]) -
+               eta0[, 2]) / eta0[, 3]
+  dZ.dy <- dyj.dy.yeojohnson(y.arg+object@misc$yoffset,
+                             lambda = eta0[, 1]) / eta0[, 3]
+  yvec <- dnorm(Zvec) * abs(dZ.dy) 
 
-    list(newdata=newdata, y=y.arg, density=yvec)
+  list(newdata = newdata, y = y.arg, density = yvec)
 }
 
  
 deplot.default <- function(object, ...) {
 
-    warning("no methods function. Returning the object")
-    invisible(object)
+  warning("no methods function. Returning the object")
+  invisible(object)
 }
 
 
 
 
 "deplot.vglm" <- function(object, Attach= TRUE, ...) {
-    LL <- length(object@family@vfamily)
-    newcall = paste("deplot.", object@family@vfamily[LL], 
-                    "(object, ...)", sep = "")
-    newcall = parse(text = newcall)[[1]]
-
-    if (Attach) {
-        object@post$deplot = eval(newcall)
-        invisible(object)
-    } else 
-        eval(newcall)
+  LL <- length(object@family@vfamily)
+  newcall <- paste("deplot.", object@family@vfamily[LL], 
+                   "(object, ...)", sep = "")
+  newcall <- parse(text = newcall)[[1]]
+
+  if (Attach) {
+    object@post$deplot <- eval(newcall)
+    invisible(object)
+  } else {
+    eval(newcall)
+  }
 }
 
 
 
 "deplot.lmscreg" <- function(object,
-                       newdata = NULL,
-                       x0,
-                       y.arg, plot.it= TRUE, ...) {
+                             newdata = NULL,
+                             x0,
+                             y.arg, show.plot = TRUE, ...) {
 
 
-    if (!length(newdata)) {
-        newdata <- data.frame(x0=x0)
-        var1name <- attr(terms(object), "term.labels")[1] 
-        names(newdata) <- var1name
+  if (!length(newdata)) {
+    newdata <- data.frame(x0=x0)
+    var1name <- attr(terms(object), "term.labels")[1] 
+    names(newdata) <- var1name
 
-        ii <- if (object@misc$nonparametric) 
-                slot(object, "s.xargument") else NULL
-        if (length(ii) && any(logic.vec <-
-          names(slot(object, "s.xargument")) == var1name))
-          names(newdata) <- ii[logic.vec] # should be the first one 
-    }
+    ii <- if (object@misc$nonparametric) 
+          slot(object, "s.xargument") else NULL
+    if (length(ii) && any(logic.vec <-
+        names(slot(object, "s.xargument")) == var1name))
+      names(newdata) <- ii[logic.vec]  # should be the first one
+  }
 
-    eta0 <- if (length(newdata)) predict(object, newdata) else
-                                 predict(object)
+  eta0 <- if (length(newdata)) predict(object, newdata) else
+                               predict(object)
 
-    if (!length(double.check.earg <- object@misc$earg))
-      double.check.earg <- list(theta = NULL)
-    eta0 <- eta2theta(eta0, link = object@misc$link,
-                      earg = double.check.earg) # lambda, mu, sigma
+  if (!length(double.check.earg <- object@misc$earg))
+    double.check.earg <- list(theta = NULL)
+  eta0 <- eta2theta(eta0, link = object@misc$link,
+                    earg = double.check.earg)  # lambda, mu, sigma
 
-    newcall = paste("deplot.", object at family@vfamily[1], 
-                    "(object, newdata, y.arg = y.arg, eta0 = eta0)",
-                    sep = "")
-    newcall = parse(text = newcall)[[1]]
-    answer = eval(newcall)
+  newcall <- paste("deplot.", object at family@vfamily[1], 
+                   "(object, newdata, y.arg = y.arg, eta0 = eta0)",
+                   sep = "")
+  newcall <- parse(text = newcall)[[1]]
+  answer <- eval(newcall)
 
-    if (plot.it) 
-        plotdeplot.lmscreg(answer, y.arg=y.arg, ...)
+  if (show.plot) 
+    plotdeplot.lmscreg(answer, y.arg=y.arg, ...)
 
-    invisible(answer) 
+  invisible(answer) 
 }
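
For orientation, a minimal usage sketch of the deplot() path refactored above; the bmi.nz data, the smoothing degrees of freedom, and the y grid are illustrative assumptions rather than part of this commit:

    ## Hypothetical usage sketch (assumes VGAM's bmi.nz data and lms.bcn()).
    library(VGAM)
    fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = bmi.nz)
    ygrid <- seq(15, 43, by = 0.25)            # assumed grid of BMI values
    dd <- deplot(fit, x0 = 20, y.arg = ygrid)  # density of BMI at age = 20
    str(dd@post$deplot)                        # list(newdata, y, density)
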
 
 
@@ -525,29 +516,28 @@ plotdeplot.lmscreg <- function(answer,
                            xlab = "", ylab = "density",
                            xlim = NULL, ylim = NULL,
                            llty.arg = par()$lty, col.arg = par()$col,
-                           llwd.arg = par()$lwd, ...)
-{
-
-    yvec <- answer$density
-    xx <- y.arg
-
-    if (!add.arg) {
-        if (!is.numeric(xlim))
-            xlim <- c(min(xx), max(xx))
-        if (!is.numeric(ylim))
-            ylim <- c(min(yvec), max(yvec))
-        matplot(x=xx, y=yvec,
-                xlab = xlab, ylab = ylab, type = "n", 
-                xlim=xlim, ylim=ylim, ...)
-    }
+                           llwd.arg = par()$lwd, ...) {
+
+  yvec <- answer$density
+  xx <- y.arg
+
+  if (!add.arg) {
+    if (!is.numeric(xlim))
+      xlim <- c(min(xx), max(xx))
+    if (!is.numeric(ylim))
+      ylim <- c(min(yvec), max(yvec))
+    matplot(x = xx, y = yvec,
+            xlab = xlab, ylab = ylab, type = "n", 
+            xlim = xlim, ylim = ylim, ...)
+  }
 
-    temp <- cbind(xx, yvec)
-    temp <- temp[sort.list(temp[, 1]),]
-    index <- !duplicated(temp[, 1])
-    lines(temp[index, 1], temp[index, 2],
-          lty = llty.arg, col = col.arg, err = -1, lwd = llwd.arg)
+  temp <- cbind(xx, yvec)
+  temp <- temp[sort.list(temp[, 1]), ]
+  index <- !duplicated(temp[, 1])
+  lines(temp[index, 1], temp[index, 2],
+        lty = llty.arg, col = col.arg, err = -1, lwd = llwd.arg)
 
-    invisible(answer)
+  invisible(answer)
 }
  
  
@@ -555,15 +545,15 @@ plotdeplot.lmscreg <- function(answer,
 
 if (TRUE) {
 
-    if (!isGeneric("deplot"))
-    setGeneric("deplot", function(object, ...) standardGeneric("deplot"))
+  if (!isGeneric("deplot"))
+  setGeneric("deplot", function(object, ...) standardGeneric("deplot"))
 
-    setMethod("deplot", signature(object = "vglm"),
-              function(object, ...) 
-              invisible(deplot.vglm(object, ...)))
-    setMethod("deplot", signature(object = "vgam"),
-              function(object, ...) 
-              invisible(deplot.vglm(object, ...)))
+  setMethod("deplot", signature(object = "vglm"),
+            function(object, ...) 
+            invisible(deplot.vglm(object, ...)))
+  setMethod("deplot", signature(object = "vgam"),
+            function(object, ...) 
+            invisible(deplot.vglm(object, ...)))
 }
 
 
@@ -571,30 +561,31 @@ if (TRUE) {
 
 if (TRUE) {
 
-    if (!isGeneric("cdf"))
+  if (!isGeneric("cdf"))
     setGeneric("cdf", function(object, ...) standardGeneric("cdf"))
 
-    setMethod("cdf", signature(object = "vglm"),
-              function(object, ...) 
-              cdf.vglm(object, ...))
+  setMethod("cdf", signature(object = "vglm"),
+            function(object, ...) 
+            cdf.vglm(object, ...))
 
-    setMethod("cdf", signature(object = "vgam"),
-              function(object, ...) 
-              cdf.vglm(object, ...))
+  setMethod("cdf", signature(object = "vgam"),
+            function(object, ...) 
+            cdf.vglm(object, ...))
 }
 
 
-"cdf.vglm" <- function(object, newdata = NULL, Attach= FALSE, ...) {
-    LL <- length(object@family@vfamily)
-    newcall = paste("cdf.", object@family@vfamily[LL], 
-                    "(object, newdata, ...)", sep = "")
-    newcall = parse(text = newcall)[[1]]
+"cdf.vglm" <- function(object, newdata = NULL, Attach = FALSE, ...) {
+  LL <- length(object@family@vfamily)
+  newcall <- paste("cdf.", object@family@vfamily[LL], 
+                  "(object, newdata, ...)", sep = "")
+  newcall <- parse(text = newcall)[[1]]
 
-    if (Attach) {
-        object@post$cdf = eval(newcall)
-        object
-    } else 
-        eval(newcall)
+  if (Attach) {
+    object@post$cdf <- eval(newcall)
+    object
+  } else {
+    eval(newcall)
+  }
 }
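
A companion sketch for the cdf() generic defined above, continuing the assumed lms.bcn fit from the deplot() example; the variable names BMI and age are again assumptions, and it is assumed the fitted family stores its CDF values in @post:

    ## Hypothetical: fitted probabilities P(Y <= y_i | x_i) from the LMS fit.
    head(cdf(fit))                       # uses the values stored in fit@post$cdf
    ## With newdata, both the covariate and the response must be supplied:
    cdf(fit, newdata = data.frame(age = 20, BMI = 30))  # P(BMI <= 30 | age = 20)
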
 
 
@@ -607,66 +598,63 @@ if (TRUE) {
   if (!length(newdata))
    return(object@post$cdf)
 
-  eta0 = if (length(newdata)) predict(object, newdata) else predict(object)
+  eta0 <- if (length(newdata)) predict(object, newdata) else predict(object)
 
  if (!length(double.check.earg <- object@misc$earg))
     double.check.earg <- list(theta = NULL)
  eta0 <- eta2theta(eta0, link = object@misc$link,
-                    earg = double.check.earg) # lambda, mu, sigma
+                    earg = double.check.earg)  # lambda, mu, sigma
 
-  y = vgety(object, newdata)   # Includes yoffset 
+  y <- vgety(object, newdata)   # Includes yoffset 
 
-  newcall = paste("cdf.", object at family@vfamily[1], 
-                  "(y, eta0, ... )", sep = "")
-  newcall = parse(text = newcall)[[1]]
+  newcall <- paste("cdf.", object at family@vfamily[1], 
+                   "(y, eta0, ... )", sep = "")
+  newcall <- parse(text = newcall)[[1]]
   eval(newcall)
 }
 
 
 
-cdf.lms.bcn <- function(y, eta0)
-{
+cdf.lms.bcn <- function(y, eta0) {
     Zvec <- ((y/eta0[, 2])^(eta0[, 1]) -1) / (eta0[, 1] * eta0[, 3])
-    Zvec[abs(eta0[, 3]) < 1e-5] = log(y/eta0[, 2]) / eta0[, 3] # Singularity at 0
-    ans = c(pnorm(Zvec))
-    names(ans) = dimnames(eta0)[[1]]
+    Zvec[abs(eta0[, 3]) < 1e-5] <- log(y/eta0[, 2]) / eta0[, 3]
+    ans <- c(pnorm(Zvec))
+    names(ans) <- dimnames(eta0)[[1]]
     ans
 }
 
 
-cdf.lms.bcg <- function(y, eta0)
-{
-    shape = 1 / (eta0[, 1] * eta0[, 3])^2
-    Gvec = shape * (y/eta0[, 2])^(eta0[, 1])
-    ans = c(pgamma(Gvec, shape = shape))
-    ans[eta0[, 1] < 0] = 1-ans
-    names(ans) = dimnames(eta0)[[1]]
+cdf.lms.bcg <- function(y, eta0) {
+    shape <- 1 / (eta0[, 1] * eta0[, 3])^2
+    Gvec <- shape * (y/eta0[, 2])^(eta0[, 1])
+    ans <- c(pgamma(Gvec, shape = shape))
+    ans[eta0[, 1] < 0] <- 1-ans
+    names(ans) <- dimnames(eta0)[[1]]
     ans
 }
 
 
-cdf.lms.yjn <- function(y, eta0)
-{
+cdf.lms.yjn <- function(y, eta0) {
 
 
-    Zvec = (yeo.johnson(y, eta0[, 1]) - eta0[, 2])/eta0[, 3]
-    ans = c(pnorm(Zvec))
-    names(ans) = dimnames(eta0)[[1]]
-    ans
+  Zvec <- (yeo.johnson(y, eta0[, 1]) - eta0[, 2])/eta0[, 3]
+  ans <- c(pnorm(Zvec))
+  names(ans) <- dimnames(eta0)[[1]]
+  ans
 }
 
 
-vgety = function(object, newdata = NULL) {
+vgety <- function(object, newdata = NULL) {
 
-    y = if (length(newdata)) {
-        yname = dimnames(attr(terms(object@terms),"factors"))[[1]][1]
-        newdata[[yname]]
-    } else {
-        object@y
-    }
-    if (length(object@misc$yoffset))
-        y = y + object@misc$yoffset
-    y
+  y <- if (length(newdata)) {
+    yname <- dimnames(attr(terms(object@terms),"factors"))[[1]][1]
+    newdata[[yname]]
+  } else {
+    object@y
+  }
+  if (length(object@misc$yoffset))
+    y <- y + object@misc$yoffset
+  y
 }
 
 
@@ -676,16 +664,17 @@ vgety = function(object, newdata = NULL) {
 
 "rlplot.vglm" <- function(object, Attach = TRUE, ...) {
 
-    LL <- length(object@family@vfamily)
-    newcall = paste("rlplot.", object@family@vfamily[LL],
-                    "(object, ...)", sep = "")
-    newcall = parse(text = newcall)[[1]]
+  LL <- length(object@family@vfamily)
+  newcall <- paste("rlplot.", object@family@vfamily[LL],
+                   "(object, ...)", sep = "")
+  newcall <- parse(text = newcall)[[1]]
 
-    if (Attach) {
-        object@post$rlplot = eval(newcall)
-        invisible(object)
-    } else
-        eval(newcall)
+  if (Attach) {
+    object@post$rlplot <- eval(newcall)
+    invisible(object)
+  } else {
+    eval(newcall)
+  }
 }
 
 
@@ -695,135 +684,136 @@ vgety = function(object, newdata = NULL) {
 "rlplot.vextremes" <- function(object, ...) {
 
 
-    newcall = paste("rlplot.", object at family@vfamily[1],
-                    "(object = object, ... )", sep = "")
-    newcall = parse(text = newcall)[[1]]
-    eval(newcall)
+  newcall <- paste("rlplot.", object at family@vfamily[1],
+                   "(object = object, ... )", sep = "")
+  newcall <- parse(text = newcall)[[1]]
+  eval(newcall)
 }
     
     
  
 rlplot.egev <-
 rlplot.gev <-
-  function(object, plot.it = TRUE,
-    probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
-    add.arg = FALSE,
-    xlab = "Return Period",
-    ylab = "Return Level",
-    main = "Return Level Plot",
-    pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
-    llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
-    slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
-    ylim = NULL,
-    log = TRUE,
-    CI = TRUE,
-    epsilon = 1.0e-05,
-    ...)
-{
-    log.arg = log
-    rm(log)
-    if (!is.Numeric(epsilon, allowable.length = 1) ||
-        abs(epsilon) > 0.10)
-      stop("bad input for 'epsilon'")
-    if (!is.Numeric(probability, positive = TRUE) ||
-        max(probability) >= 1 ||
-        length(probability) < 5)
-      stop("bad input for 'probability'")
-
-    if (!is.logical(log.arg) || length(log.arg) != 1)
-        stop("bad input for argument 'log'")
-    if (!is.logical(CI) || length(CI) != 1)
-        stop("bad input for argument 'CI'")
-
-    if (!object@misc$intercept.only)
-       stop("object must be an intercept-only fit, ",
-            "i.e., y ~ 1 is the response")
-
-    extra2 = object@extra
-    extra2$percentiles = 100 * probability  # Overwrite
-    zp = object@family@linkinv(eta = predict(object)[1:2,],
-                               extra = extra2)[1,]
-    yp = -log(probability)
-    ydata = sort(object@y[, 1])
-    n = object@misc$n
-    if (log.arg) {
-        if (!add.arg)
-            plot(log(1/yp), zp, log = "", type = "n",
-                 ylim = if (length(ylim)) ylim else
-                      c(min(c(ydata, zp)), max(c(ydata, zp))),
-                 xlab = xlab, ylab = ylab, main = main, ...)
-        points(log(-1/log((1:n)/(n+1))), ydata, col = pcol.arg,
-               pch = pch, cex = pcex)
-        lines(log(1/yp), zp,
-              lwd = llwd.arg, col = lcol.arg, lty = llty.arg)
-    } else {
-        if (!add.arg)
-            plot(1/yp, zp, log = "x", type = "n",
-                 ylim = if (length(ylim)) ylim else
-                      c(min(c(ydata, zp)), max(c(ydata, zp))),
-                 xlab = xlab, ylab = ylab, main = main, ...)
-        points(-1/log((1:n)/(n+1)), ydata, col = pcol.arg,
-               pch = pch, cex = pcex)
-        lines(1/yp, zp, lwd = llwd.arg, col = lcol.arg, lty = llty.arg)
-    }
+  function(object, show.plot = TRUE,
+           probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
+           add.arg = FALSE,
+           xlab = "Return Period",
+           ylab = "Return Level",
+           main = "Return Level Plot",
+           pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
+           llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
+           slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
+           ylim = NULL,
+           log = TRUE,
+           CI = TRUE,
+           epsilon = 1.0e-05,
+           ...) {
+
+  log.arg <- log
+  rm(log)
+  if (!is.Numeric(epsilon, length.arg = 1) ||
+      abs(epsilon) > 0.10)
+    stop("bad input for 'epsilon'")
+  if (!is.Numeric(probability, positive = TRUE) ||
+      max(probability) >= 1 ||
+      length(probability) < 5)
+    stop("bad input for 'probability'")
+
+  if (!is.logical(log.arg) || length(log.arg) != 1)
+    stop("bad input for argument 'log'")
+  if (!is.logical(CI) || length(CI) != 1)
+    stop("bad input for argument 'CI'")
+
+  if (!object@misc$intercept.only)
+    stop("object must be an intercept-only fit, ",
+         "i.e., y ~ 1 is the response")
+
+  extra2 <- object@extra
+  extra2$percentiles <- 100 * probability  # Overwrite
+  zp <- object@family@linkinv(eta = predict(object)[1:2, ],
+                              extra = extra2)[1, ]
+  yp <- -log(probability)
+  ydata <- sort(object@y[, 1])
+  n <- object@misc$n
+  if (log.arg) {
+    if (!add.arg)
+      plot(log(1/yp), zp, log = "", type = "n",
+           ylim = if (length(ylim)) ylim else
+                c(min(c(ydata, zp)), max(c(ydata, zp))),
+           xlab = xlab, ylab = ylab, main = main, ...)
+    points(log(-1/log((1:n)/(n+1))), ydata, col = pcol.arg,
+           pch = pch, cex = pcex)
+    lines(log(1/yp), zp,
+          lwd = llwd.arg, col = lcol.arg, lty = llty.arg)
+  } else {
+    if (!add.arg)
+      plot(1/yp, zp, log = "x", type = "n",
+           ylim = if (length(ylim)) ylim else
+                  c(min(c(ydata, zp)),
+                    max(c(ydata, zp))),
+           xlab = xlab, ylab = ylab, main = main, ...)
+    points(-1/log((1:n)/(n+1)), ydata, col = pcol.arg,
+           pch = pch, cex = pcex)
+    lines(1/yp, zp, lwd = llwd.arg, col = lcol.arg, lty = llty.arg)
+  }
 
-    if (CI) {
-        zpp = cbind(zp, zp, zp)  # lp x 3
-        eta = predict(object)
-        Links = object@misc$link
-        earg = object@misc$earg
-        M = object@misc$M
-        for(ii in 1:M) {
-            TTheta = eta[, ii]
-            use.earg = earg[[ii]]
-            newcall = paste(Links[ii],
-                      "(theta=TTheta, earg=use.earg, inverse = TRUE)",
-                       sep = "")
-            newcall = parse(text = newcall)[[1]]
-            uteta = eval(newcall) # Theta, the untransformed parameter
-            uteta = uteta + epsilon  # perturb it
-            newcall = paste(Links[ii],
-                            "(theta=uteta, earg=use.earg)", sep = "")
-            newcall = parse(text = newcall)[[1]]
-            teta = eval(newcall) # The transformed parameter
-            peta = eta
-            peta[, ii] = teta
-            zpp[, ii] = object@family@linkinv(eta = peta,
-                                              extra = extra2)[1,]
-            zpp[, ii] = (zpp[, ii] - zp) / epsilon # On the transformed scale
-        }
-        VCOV = vcovvlm(object, untransform = TRUE)
-        v = numeric(nrow(zpp))
-        for(ii in 1:nrow(zpp))
-            v[ii] = t(as.matrix(zpp[ii,])) %*% VCOV %*% as.matrix(zpp[ii,])
-        if (log.arg) {
-            lines(log(1/yp), zp - 1.96 * sqrt(v),
-                  lwd=slwd.arg, col=scol.arg, lty=slty.arg)
-            lines(log(1/yp), zp + 1.96 * sqrt(v),
-                  lwd=slwd.arg, col=scol.arg, lty=slty.arg)
-        } else {
-            lines(1/yp, zp - 1.96 * sqrt(v),
-                  lwd=slwd.arg, col=scol.arg, lty=slty.arg)
-            lines(1/yp, zp + 1.96 * sqrt(v),
-                  lwd=slwd.arg, col=scol.arg, lty=slty.arg)
-        }
+  if (CI) {
+    zpp <- cbind(zp, zp, zp)  # lp x 3
+    eta <- predict(object)
+    Links <- object@misc$link
+    earg <- object@misc$earg
+    M <- object@misc$M
+    for (ii in 1:M) {
+      TTheta <- eta[, ii]
+      use.earg <- earg[[ii]]
+      newcall <- paste(Links[ii],
+                "(theta = TTheta, earg = use.earg, inverse = TRUE)",
+                 sep = "")
+      newcall <- parse(text = newcall)[[1]]
+      uteta <- eval(newcall)  # Theta, the untransformed parameter
+      uteta <- uteta + epsilon  # Perturb it
+      newcall <- paste(Links[ii],
+                       "(theta = uteta, earg = use.earg)", sep = "")
+      newcall <- parse(text = newcall)[[1]]
+      teta <- eval(newcall)  # The transformed parameter
+      peta <- eta
+      peta[, ii] <- teta
+      zpp[, ii] <- object@family@linkinv(eta = peta,
+                                         extra = extra2)[1, ]
+      zpp[, ii] <- (zpp[, ii] - zp) / epsilon  # On the transformed scale
     }
-    answer = list(yp = yp,
-                  zp = zp)
-    if (CI) {
-        answer$lower = zp - 1.96 * sqrt(v)
-        answer$upper = zp + 1.96 * sqrt(v)
+    VCOV <- vcovvlm(object, untransform = TRUE)
+    vv <- numeric(nrow(zpp))
+    for (ii in 1:nrow(zpp))
+      vv[ii] <- t(as.matrix(zpp[ii, ])) %*% VCOV %*% as.matrix(zpp[ii, ])
+    if (log.arg) {
+      lines(log(1/yp), zp - 1.96 * sqrt(vv),
+            lwd = slwd.arg, col = scol.arg, lty = slty.arg)
+      lines(log(1/yp), zp + 1.96 * sqrt(vv),
+            lwd = slwd.arg, col = scol.arg, lty = slty.arg)
+    } else {
+      lines(1/yp, zp - 1.96 * sqrt(vv),
+            lwd = slwd.arg, col = scol.arg, lty = slty.arg)
+      lines(1/yp, zp + 1.96 * sqrt(vv),
+            lwd = slwd.arg, col = scol.arg, lty = slty.arg)
     }
-    invisible(answer)
+  }
+  answer <- list(yp = yp,
+                zp = zp)
+  if (CI) {
+    answer$lower <- zp - 1.96 * sqrt(vv)
+    answer$upper <- zp + 1.96 * sqrt(vv)
+  }
+  invisible(answer)
 }
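
A hedged sketch of how rlplot() above is meant to be driven; the fit must be intercept-only, and the rgev() and egev() functions are assumed to be available in this version of VGAM:

    ## Hypothetical return-level plot for an intercept-only GEV-type fit.
    set.seed(123)
    zdata <- data.frame(y = rgev(100, location = 10, scale = 2, shape = 0.1))
    fit.gev <- vglm(y ~ 1, egev, data = zdata)
    rl <- rlplot(fit.gev, CI = TRUE, log = TRUE)  # curve plus ~95% bands
    names(rl@post$rlplot)                         # yp, zp, lower, upper
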
 
 
 if (!isGeneric("rlplot"))
-    setGeneric("rlplot",
-               function(object, ...) standardGeneric("rlplot"))
+  setGeneric("rlplot",
+             function(object, ...) standardGeneric("rlplot"))
 
 setMethod("rlplot",  "vglm", function(object, ...)
-        rlplot.vglm(object, ...))
+          rlplot.vglm(object, ...))
 
 
 
@@ -831,19 +821,18 @@ setMethod("rlplot",  "vglm", function(object, ...)
 
 
 
-explot.lms.bcn <- function(percentiles = c(25,50,75),
-                           eta = NULL, yoffset = 0)
-{
+explot.lms.bcn <- function(percentiles = c(25, 50, 75),
+                           eta = NULL, yoffset = 0) {
 
-    lp = length(percentiles)
-    answer <- matrix(as.numeric(NA), nrow(eta), lp,
-                     dimnames = list(dimnames(eta)[[1]],
-                     paste(as.character(percentiles), "%", sep = "")))
-    for(ii in 1:lp) {
-        answer[, ii] <- eta[, 2] * (1+eta[, 1] * eta[, 3] *
-                        qenorm(percentiles[ii]/100))^(1/eta[, 1])
-    }
-    answer 
+  lp <- length(percentiles)
+  answer <- matrix(as.numeric(NA), nrow(eta), lp,
+                   dimnames = list(dimnames(eta)[[1]],
+                   paste(as.character(percentiles), "%", sep = "")))
+  for (ii in 1:lp) {
+    answer[, ii] <- eta[, 2] * (1 + eta[, 1] * eta[, 3] *
+                    qenorm(percentiles[ii]/100))^(1/eta[, 1])
+  }
+  answer 
 }
  
 
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 7fbf9d1..4efce8c 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -9,13 +9,16 @@
 
 
 
+
+
 residualsvlm  <-
   function(object,
            type = c("response", "deviance", "pearson", "working")) {
 
   if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
-  type <- match.arg(type, c("response", "deviance", "pearson", "working"))[1]
+  type <- match.arg(type,
+                    c("response", "deviance", "pearson", "working"))[1]
 
  na.act <- object@na.action
  object@na.action <- list()
@@ -26,40 +29,39 @@ residualsvlm  <-
 
   answer <- 
   switch(type,
-      working = if (pooled.weight) NULL else object@residuals,
-      pearson = {
-          if (pooled.weight) return(NULL)
-          n <- object@misc$n
-          M <- object@misc$M
-          wz <- weights(object, type = "w") # $weights
-          if (!length(wz))
-            wz <- if (M == 1) rep(1, n) else matrix(1, n, M)
-
-          if (M == 1) {
-            if (any(wz < 0))
-              warning(paste("some weights are negative.",
-                            "Their residual will be assigned NA"))
-            ans <- sqrt(c(wz)) * c(object@residuals)
-            names(ans) <- names(object@residuals)
-            ans 
-          } else {
-            wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
-            ans <- mux22(wz.sqrt, object@residuals,
-                           M = M, upper = FALSE)
-            dim(ans) <- c(M, n) 
-            ans <- t(ans) 
-            dimnames(ans) <- dimnames(object@residuals) # n x M
-            ans
-          }
-      },
-      deviance = {
+    working = if (pooled.weight) NULL else object@residuals,
+    pearson = {
+        if (pooled.weight) return(NULL)
+        n <- object@misc$n
         M <- object@misc$M
-        if (M > 1)
-          return(NULL)
-        ans <- residualsvlm(object, type = "pearson")
-        ans
-      },
-      response = object at residuals
+        wz <- weights(object, type = "w")  # $weights
+        if (!length(wz))
+          wz <- if (M == 1) rep(1, n) else matrix(1, n, M)
+
+        if (M == 1) {
+          if (any(wz < 0))
+            warning(paste("some weights are negative.",
+                          "Their residual will be assigned NA"))
+          ans <- sqrt(c(wz)) * c(object@residuals)
+          names(ans) <- names(object@residuals)
+          ans 
+        } else {
+          wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
+          ans <- mux22(wz.sqrt, object@residuals,
+                       M = M, upper = FALSE)
+          dim(ans) <- c(M, n) 
+          ans <- t(ans) 
+          dimnames(ans) <- dimnames(object@residuals)  # n x M
+          ans
+        }
+    },
+    deviance = {
+      M <- object@misc$M
+      if (M > 1)
+        return(NULL)
+      residualsvlm(object, type = "pearson")
+    },
+    response = object@residuals
   )
 
   if (length(answer) && length(na.act)) {
@@ -71,11 +73,11 @@ residualsvlm  <-
 
 
 
+
 residualsvglm  <-
   function(object,
            type = c("working", "pearson", "response", "deviance", "ldot"),
-           matrix.arg = TRUE)
-{
+           matrix.arg = TRUE) {
 
   if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
@@ -91,108 +93,110 @@ residualsvglm  <-
 
   answer <- 
   switch(type,
-      working = if (pooled.weight) NULL else object@residuals,
-      pearson = {
-          if (pooled.weight) return(NULL)
-
-          n <- object@misc$n
-          M <- object@misc$M
-          wz <- weights(object, type = "w")   # $weights
-
-          if (M == 1) {
-            if (any(wz < 0))
-              warning(paste("some weights are negative.",
-                            "Their residual will be assigned NA"))
-            ans <- sqrt(c(wz)) * c(object@residuals)
-            names(ans) <- names(object@residuals)
-            ans 
+    working = if (pooled.weight) NULL else object@residuals,
+    pearson = {
+      if (pooled.weight) return(NULL)
+
+      n <- object@misc$n
+      M <- object@misc$M
+      wz <- weights(object, type = "w")   # $weights
+
+      if (M == 1) {
+        if (any(wz < 0))
+          warning(paste("some weights are negative.",
+                        "Their residual will be assigned NA"))
+        ans <- sqrt(c(wz)) * c(object@residuals)
+        names(ans) <- names(object@residuals)
+        ans 
+      } else {
+        wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
+        ans <- mux22(wz.sqrt, object@residuals,
+                     M = M, upper = FALSE)
+        dim(ans) <- c(M, n)
+        ans <- t(ans) 
+        dimnames(ans) <- dimnames(object@residuals)   # n x M
+        ans
+      }
+    },
+    deviance = {
+      n <- object@misc$n
+
+      y <- as.matrix(object@y)
+      mu <- object@fitted.values
+
+
+      w <- object@prior.weights
+      if (!length(w))
+        w <- rep(1, n)
+      eta <- object@predictors
+
+      dev.fn <- object@family@deviance  # May not 'exist' for that model
+      if (length(body(dev.fn)) > 0) {
+        extra <- object@extra
+        ans <- dev.fn(mu = mu,y = y, w = w,
+                      residuals = TRUE, eta = eta, extra)
+        if (length(ans)) {
+          lob <- labels(object@residuals)
+          if (is.list(lob)) {
+            if (is.matrix(ans))
+              dimnames(ans) <- lob else
+              names(ans) <- lob[[1]]
           } else {
-            wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
-            ans <- mux22(wz.sqrt, object@residuals,
-                         M = M, upper = FALSE)
-            dim(ans) <- c(M, n)
-            ans <- t(ans) 
-            dimnames(ans) <- dimnames(object@residuals)   # n x M
-            ans
+            names(ans) <- lob
           }
-      },
-      deviance = {
-        n <- object@misc$n
+        }
+        ans
+      } else {
+        NULL 
+      }
+    },
+    ldot = {
+      n <- object@misc$n
+      y <- as.matrix(object@y)
+      mu <- object@fitted
+      w <- object@prior.weights
+      if (is.null(w))
+          w <- rep(1, n)
+      eta <- object@predictors
+      if (!is.null(ll.fn <- object@family@loglikelihood)) {
+        extra <- object at extra
+        ans <- ll.fn(mu = mu,y = y,w = w,
+                     residuals = TRUE, eta = eta, extra)
+        if (!is.null(ans)) {
+          ans <- c(ans)  # ldot residuals can only be a vector
+          names(ans) <- labels(object@residuals)
+        }
+        ans
+      } else {
+        NULL 
+      }
+    },
+    response = {
+      y <- object@y
 
-        y <- as.matrix(object@y)
-        mu <- object@fitted.values
+      mu <- fitted(object)
 
+      true.mu <- object@misc$true.mu
+      if (is.null(true.mu))
+        true.mu <- TRUE
 
-        w <- object@prior.weights
-        if (!length(w))
-          w <- rep(1, n)
-        eta <- object@predictors
-
-        dev.fn <- object@family@deviance # May not 'exist' for that model
-        if (length(body(dev.fn)) > 0) {
-          extra <- object@extra
-          ans <- dev.fn(mu = mu,y = y, w = w,
-                        residuals = TRUE, eta = eta, extra)
-          if (length(ans)) {
-            lob <- labels(object@residuals)
-            if (is.list(lob)) {
-              if (is.matrix(ans))
-                dimnames(ans) <- lob else
-                names(ans) <- lob[[1]]
-            } else {
-              names(ans) <- lob
-            }
-          }
+      ans <- if (true.mu) y - mu else NULL
+
+
+      if (!matrix.arg && length(ans)) {
+        if (ncol(ans) == 1) {
+          names.ans <- dimnames(ans)[[1]] 
+          ans <- c(ans) 
+          names(ans) <- names.ans
           ans
         } else {
-          NULL 
+          warning("ncol(ans) is not 1")
+          ans
         }
-      },
-      ldot = {
-          n <- object@misc$n
-          y <- as.matrix(object@y)
-          mu <- object@fitted
-          w <- object@prior.weights
-          if (is.null(w))
-              w <- rep(1, n)
-          eta <- object@predictors
-          if (!is.null(ll.fn <- object@family@loglikelihood)) {
-              extra <- object@extra
-              ans <- ll.fn(mu = mu,y = y,w = w,
-                           residuals = TRUE, eta = eta, extra)
-              if (!is.null(ans)) {
-                ans <- c(ans) # ldot residuals can only be a vector
-                names(ans) <- labels(object@residuals)
-              }
-              ans
-          } else NULL 
-      },
-      response = {
-          y <- object@y
-
-          mu <- fitted(object)
-
-          true.mu <- object@misc$true.mu
-          if (is.null(true.mu))
-            true.mu <- TRUE
-
-          ans <- if (true.mu) y - mu else NULL
-
-
-          if (!matrix.arg && length(ans)) {
-            if (ncol(ans) == 1) {
-              names.ans <- dimnames(ans)[[1]] 
-              ans <- c(ans) 
-              names(ans) <- names.ans
-              ans
-            } else {
-              warning("ncol(ans) is not 1")
-              ans
-            }
-          } else {
-            ans
-          }
-      })
+      } else {
+        ans
+      }
+    })
 
   if (length(answer) && length(na.act)) {
     napredict(na.act[[1]], answer)
@@ -207,8 +211,7 @@ residualsvglm  <-
 
 residualsqrrvglm  <- function(object,
                               type = c("response"),
-                              matrix.arg = TRUE)
-{
+                              matrix.arg = TRUE) {
 
 
   if (mode(type) != "character" && mode(type) != "name")
@@ -255,7 +258,9 @@ residualsqrrvglm  <- function(object,
           warning("ncol(ans) is not 1")
           ans
         }
-      } else ans
+      } else {
+        ans
+      }
   })
 
   if (length(answer) && length(na.act)) {
@@ -268,32 +273,31 @@ residualsqrrvglm  <- function(object,
 
 
 
-
-    setMethod("residuals",  "vlm",
-              function(object, ...) 
-              residualsvlm(object, ...))
-    setMethod("residuals",  "vglm",
-              function(object, ...) 
-              residualsvglm(object, ...))
-    setMethod("residuals",  "vgam",
-              function(object, ...) 
-              residualsvglm(object, ...))
-    setMethod("residuals",  "qrrvglm",
-              function(object, ...) 
-              residualsqrrvglm(object, ...))
-
-    setMethod("resid",  "vlm",
-              function(object, ...) 
-              residualsvlm(object, ...))
-    setMethod("resid",  "vglm",
-              function(object, ...) 
-              residualsvglm(object, ...))
-    setMethod("resid",  "vgam",
-              function(object, ...) 
-              residualsvglm(object, ...))
-    setMethod("resid",  "qrrvglm",
-              function(object, ...) 
-              residualsqrrvglm(object, ...))
+setMethod("residuals",  "vlm",
+          function(object, ...) 
+          residualsvlm(object, ...))
+setMethod("residuals",  "vglm",
+          function(object, ...) 
+          residualsvglm(object, ...))
+setMethod("residuals",  "vgam",
+          function(object, ...) 
+          residualsvglm(object, ...))
+setMethod("residuals",  "qrrvglm",
+          function(object, ...) 
+          residualsqrrvglm(object, ...))
+
+setMethod("resid",  "vlm",
+          function(object, ...) 
+          residualsvlm(object, ...))
+setMethod("resid",  "vglm",
+          function(object, ...) 
+          residualsvglm(object, ...))
+setMethod("resid",  "vgam",
+          function(object, ...) 
+          residualsvglm(object, ...))
+setMethod("resid",  "qrrvglm",
+          function(object, ...) 
+          residualsqrrvglm(object, ...))
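
For the method registrations above, a small hedged usage sketch; the pneumo data and the cumulative() family are assumptions chosen only to illustrate the residual types:

    ## Hypothetical: extracting residuals from a vglm() fit (M = 2 here).
    pneumo <- transform(pneumo, let = log(exposure.time))
    fit.cum <- vglm(cbind(normal, mild, severe) ~ let,
                    cumulative(parallel = TRUE), data = pneumo)
    head(residuals(fit.cum, type = "pearson"))   # n x M matrix
    head(resid(fit.cum, type = "response"))      # y minus fitted values
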
 
 
 
diff --git a/R/rrvglm.R b/R/rrvglm.R
index cdc5ac5..2f220ca 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -19,8 +19,7 @@ rrvglm <- function(formula,
                  contrasts = NULL, 
                  constraints = NULL,
                  extra = NULL, 
-                 qr.arg = FALSE, smart = TRUE, ...)
-{
+                 qr.arg = FALSE, smart = TRUE, ...) {
     dataname <- as.character(substitute(data))  # "list" if no data=
     function.name <- "rrvglm"
 
@@ -34,8 +33,9 @@ rrvglm <- function(formula,
         data <- environment(formula)
 
     mf <- match.call(expand.dots = FALSE)
-    mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
-        mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
+    mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
+    mf$control <- mf$contrasts <- mf$constraints <- mf$extra <-
+    mf$qr.arg <- NULL
     mf$coefstart <- mf$etastart <- mf$... <- NULL
     mf$smart <- NULL
     mf$drop.unused.levels <- TRUE 
@@ -90,47 +90,49 @@ rrvglm <- function(formula,
 
     rrvglm.fitter <- get(method)
 
-    fit <- rrvglm.fitter(x=x, y=y, w=w, offset=offset, 
-                       etastart=etastart, mustart=mustart, coefstart=coefstart,
-                       family=family, 
-                       control=control,
-                       constraints=constraints,
-                       criterion=control$criterion,
-                       extra=extra,
-                       qr.arg = qr.arg,
-                       Terms=mt, function.name=function.name, ...)
+    fit <- rrvglm.fitter(x = x, y = y, w = w, offset = offset,
+                        etastart = etastart, mustart = mustart,
+                        coefstart = coefstart,
+                        family = family, 
+                        control = control,
+                        constraints = constraints,
+                        criterion = control$criterion,
+                        extra = extra,
+                        qr.arg  =  qr.arg,
+                        Terms = mt, function.name = function.name, ...)
 
     if (control$Bestof > 1) {
-        deviance.Bestof = rep(fit$crit.list$deviance, len= control$Bestof)
-        for(tries in 2:control$Bestof) {
-             if (control$trace && (control$Bestof>1))
-             cat(paste("\n========================= Fitting model", tries,
-                         "=========================\n\n"))
-             it <- rrvglm.fitter(x=x, y=y, w=w, offset=offset, 
-                       etastart=etastart, mustart=mustart, coefstart=coefstart,
-                       family=family, 
-                       control=control,
-                       constraints=constraints,
-                       criterion=control$criterion,
-                       extra=extra,
-                       qr.arg = qr.arg,
-                       Terms=mt, function.name=function.name, ...)
-            deviance.Bestof[tries] = it$crit.list$deviance
-            if (min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
-                fit = it
-        }
-        fit$misc$deviance.Bestof = deviance.Bestof
+      deviance.Bestof <- rep(fit$crit.list$deviance, len= control$Bestof)
+      for (tries in 2:control$Bestof) {
+         if (control$trace && (control$Bestof>1))
+           cat(paste("\n========================= Fitting model", tries,
+                       "=========================\n\n"))
+         it <- rrvglm.fitter(x = x, y = y, w = w, offset = offset, 
+                   etastart = etastart, mustart = mustart,
+                   coefstart = coefstart,
+                   family = family, 
+                   control = control,
+                   constraints = constraints,
+                   criterion = control$criterion,
+                   extra = extra,
+                   qr.arg = qr.arg,
+                   Terms = mt, function.name = function.name, ...)
+        deviance.Bestof[tries] <- it$crit.list$deviance
+        if (min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+          fit <- it
+      }
+      fit$misc$deviance.Bestof = deviance.Bestof
     }
 
     fit$misc$dataname <- dataname
 
     if (smart) {
-        fit$smart.prediction <- get.smart.prediction()
-        wrapup.smart()
+      fit$smart.prediction <- get.smart.prediction()
+      wrapup.smart()
     }
 
     answer <-
-    new(if(control$Quadratic) "qrrvglm" else "rrvglm",
+    new(if (control$Quadratic) "qrrvglm" else "rrvglm",
       "assign"       = attr(x, "assign"),
       "call"         = ocall,
       "coefficients" = fit$coefficients,
@@ -146,9 +148,9 @@ rrvglm <- function(formula,
       "R"            = fit$R,
       "rank"         = fit$rank,
       "residuals"    = as.matrix(fit$residuals),
-      "rss"          = fit$rss,
+      "res.ss"       = fit$res.ss,
       "smart.prediction" = as.list(fit$smart.prediction),
-      "terms"        = list(terms=mt))
+      "terms"        = list(terms = mt))
 
    if (!smart) answer@smart.prediction <- list(smart.arg = FALSE)
 
@@ -180,7 +182,7 @@ rrvglm <- function(formula,
             warning("\"extra\" is not a list, therefore placing \"extra\" into a list")
             list(fit$extra)
         }
-    } else list() # R-1.5.0
+    } else list()  # R-1.5.0
 
     slot(answer, "iter") = fit$iter
     fit$predictors = as.matrix(fit$predictors)  # Must be a matrix 
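
To make the fitting path above concrete, a minimal hedged sketch of an rrvglm() call on simulated counts; Rank and Bestof are passed through ... to rrvglm.control():

    ## Hypothetical rank-1 reduced-rank Poisson regression on simulated counts.
    set.seed(1)
    rdata <- data.frame(x2 = rnorm(200), x3 = rnorm(200))
    rdata <- transform(rdata,
                       y1 = rpois(200, exp(1.0 + 0.5 * x2)),
                       y2 = rpois(200, exp(1.0 - 0.5 * x2 + 0.2 * x3)),
                       y3 = rpois(200, exp(0.5 + 0.3 * x3)))
    rrfit <- rrvglm(cbind(y1, y2, y3) ~ x2 + x3, poissonff, data = rdata,
                    Rank = 1, Bestof = 2)
    coef(rrfit, matrix = TRUE)   # rank-1 structure on the x2, x3 rows
    Coef(rrfit)                  # estimated A, C and B1
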
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index 0696d4d..51fd403 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -5,29 +5,37 @@
 
 
 
-rrvglm.control <- function(Rank = 1,
-                          Algorithm = c("alternating", "derivative"),
-                          Corner = TRUE,
-                          Uncorrelated.lv = FALSE,
-                          Wmat = NULL,
-                          Svd.arg = FALSE,
-                          Index.corner = if (length(szero)) 
-                          head((1:1000)[-szero], Rank) else 1:Rank,
-                          Ainit = NULL,
-                          Alpha = 0.5, 
-                          Bestof = 1,
-                          Cinit = NULL,
-                          Etamat.colmax = 10,
-                          SD.Ainit = 0.02,
-                          SD.Cinit = 0.02,
-                          szero = NULL,
-                          noRRR = ~ 1, 
-                          Norrr = NA,
-                          trace = FALSE,
-                          Use.Init.Poisson.QO = FALSE,
-                          checkwz = TRUE,
-                          wzepsilon = .Machine$double.eps^0.75,
-                          ...) {
+
+
+rrvglm.control <-
+  function(Rank = 1,
+           Algorithm = c("alternating", "derivative"),
+           Corner = TRUE,
+           Uncorrelated.latvar = FALSE,
+           Wmat = NULL,
+           Svd.arg = FALSE,
+           Index.corner = if (length(str0)) 
+           head((1:1000)[-str0], Rank) else 1:Rank,
+           Ainit = NULL,
+           Alpha = 0.5, 
+           Bestof = 1,
+           Cinit = NULL,
+           Etamat.colmax = 10,
+           sd.Ainit = 0.02,
+           sd.Cinit = 0.02,
+           str0 = NULL,
+
+           noRRR = ~ 1, 
+           Norrr = NA,
+
+           noWarning = FALSE,
+ 
+           trace = FALSE,
+           Use.Init.Poisson.QO = FALSE,
+           checkwz = TRUE,
+           Check.rank = TRUE,
+           wzepsilon = .Machine$double.eps^0.75,
+           ...) {
 
 
 
@@ -35,62 +43,61 @@ rrvglm.control <- function(Rank = 1,
 
   if (length(Norrr) != 1 || !is.na(Norrr)) {
     warning("argument 'Norrr' has been replaced by 'noRRR'. ",
-            "Assigning the latter but using 'Norrr' will become an error in ",
-            "the next VGAM version soon.")
+            "Assigning the latter but using 'Norrr' will become an ",
+            "error in the next VGAM version soon.")
     noRRR <- Norrr
   }
 
 
   if (mode(Algorithm) != "character" && mode(Algorithm) != "name")
-      Algorithm <- as.character(substitute(Algorithm))
+    Algorithm <- as.character(substitute(Algorithm))
   Algorithm <- match.arg(Algorithm, c("alternating", "derivative"))[1]
 
-    if (Svd.arg)
-      Corner <- FALSE 
+  if (Svd.arg)
+    Corner <- FALSE 
 
   if (!is.Numeric(Rank, positive = TRUE,
-                  allowable.length = 1, integer.valued = TRUE))
+                  length.arg = 1, integer.valued = TRUE))
     stop("bad input for 'Rank'")
   if (!is.Numeric(Alpha, positive = TRUE,
-                  allowable.length = 1) || Alpha > 1)
+                  length.arg = 1) || Alpha > 1)
     stop("bad input for 'Alpha'")
   if (!is.Numeric(Bestof, positive = TRUE,
-                  allowable.length = 1, integer.valued = TRUE))
+                  length.arg = 1, integer.valued = TRUE))
     stop("bad input for 'Bestof'")
-  if (!is.Numeric(SD.Ainit, positive = TRUE,
-                  allowable.length = 1))
-    stop("bad input for 'SD.Ainit'")
-  if (!is.Numeric(SD.Cinit, positive = TRUE,
-                  allowable.length = 1))
-    stop("bad input for 'SD.Cinit'")
+  if (!is.Numeric(sd.Ainit, positive = TRUE,
+                  length.arg = 1))
+    stop("bad input for 'sd.Ainit'")
+  if (!is.Numeric(sd.Cinit, positive = TRUE,
+                  length.arg = 1))
+    stop("bad input for 'sd.Cinit'")
   if (!is.Numeric(Etamat.colmax, positive = TRUE,
-                  allowable.length = 1) ||
+                  length.arg = 1) ||
       Etamat.colmax < Rank)
     stop("bad input for 'Etamat.colmax'")
 
-  if (length(szero) &&
-     (any(round(szero) != szero) ||
-     any(szero < 1)))
-    stop("bad input for the argument 'szero'")
+  if (length(str0) &&
+     (any(round(str0) != str0) || any(str0 < 1)))
+    stop("bad input for the argument 'str0'")
 
 
   Quadratic <- FALSE
   if (!Quadratic && Algorithm == "derivative" && !Corner) {
-      dd <- "derivative algorithm only supports corner constraints"
-      if (length(Wmat) || Uncorrelated.lv || Svd.arg)
-        stop(dd)
-      warning(dd)
-      Corner <- TRUE
+    dd <- "derivative algorithm only supports corner constraints"
+    if (length(Wmat) || Uncorrelated.latvar || Svd.arg)
+      stop(dd)
+    warning(dd)
+    Corner <- TRUE
   }
   if (Quadratic && Algorithm != "derivative")
-      stop("Quadratic model can only be fitted using the derivative algorithm")
+   stop("Quadratic model can only be fitted using the derivative algorithm")
 
-  if (Corner && (Svd.arg || Uncorrelated.lv || length(Wmat)))
+  if (Corner && (Svd.arg || Uncorrelated.latvar || length(Wmat)))
       stop("cannot have 'Corner = TRUE' and either 'Svd = TRUE' or ",
-           "'Uncorrelated.lv = TRUE' or Wmat")
+           "'Uncorrelated.latvar = TRUE' or Wmat")
 
-  if (Corner && length(intersect(szero, Index.corner)))
-    stop("cannot have 'szero' and 'Index.corner' having ",
+  if (Corner && length(intersect(str0, Index.corner)))
+    stop("cannot have arguments 'str0' and 'Index.corner' having ",
          "common values")
 
   if (length(Index.corner) != Rank)
@@ -100,15 +107,23 @@ rrvglm.control <- function(Rank = 1,
       length(checkwz) != 1)
     stop("bad input for 'checkwz'")
 
-  if (!is.Numeric(wzepsilon, allowable.length = 1,
+  if (!is.Numeric(wzepsilon, length.arg = 1,
                   positive = TRUE))
     stop("bad input for 'wzepsilon'")
 
   if (class(noRRR) != "formula" && !is.null(noRRR))
     stop("argument 'noRRR' should be a formula or a NULL")
 
+
   ans <-
-  c(vglm.control(trace = trace, ...),
+  c(vglm.control(
+                 trace = trace,
+                 checkwz = checkwz,
+                 Check.rank = Check.rank,
+                 wzepsilon = wzepsilon,
+                 noWarning = noWarning,
+                 ...),
+
     switch(Algorithm,
            "alternating" = valt.control(...),
            "derivative" = rrvglm.optim.control(...)),
@@ -120,28 +135,29 @@ rrvglm.control <- function(Rank = 1,
          Cinit = Cinit,
          Index.corner = Index.corner,
          noRRR = noRRR,
+
          Corner = Corner,
-         Uncorrelated.lv = Uncorrelated.lv,
+         Uncorrelated.latvar = Uncorrelated.latvar,
          Wmat = Wmat,
-         OptimizeWrtC = TRUE, # OptimizeWrtC,
-         Quadratic = FALSE, # A constant now, here.
-         SD.Ainit = SD.Ainit,
-         SD.Cinit = SD.Cinit,
+         OptimizeWrtC = TRUE,  # OptimizeWrtC,
+         Quadratic = FALSE,  # A constant now, here.
+         sd.Ainit = sd.Ainit,
+         sd.Cinit = sd.Cinit,
          Etamat.colmax = Etamat.colmax,
-         szero = szero,
+         str0 = str0,
          Svd.arg = Svd.arg,
          Use.Init.Poisson.QO = Use.Init.Poisson.QO),
-         checkwz = checkwz,
-         wzepsilon = wzepsilon,
     if (Quadratic) qrrvglm.control(Rank = Rank, ...) else NULL)
 
+
   if (Quadratic && ans$ITolerances) {
       ans$Svd.arg <- FALSE
-      ans$Uncorrelated.lv <- FALSE
+      ans$Uncorrelated.latvar <- FALSE
       ans$Corner <- FALSE
   }
 
-  ans$half.stepsizing <- FALSE # Turn it off 
+
+  ans$half.stepsizing <- FALSE  # Turn it off 
   ans
 }
 
@@ -182,6 +198,7 @@ show.summary.rrvglm <- function(x, digits = NULL,
 
 
 
+
  setMethod("show", "summary.rrvglm",
            function(object)
              show.summary.rrvglm(x = object))
@@ -189,5 +206,10 @@ show.summary.rrvglm <- function(x, digits = NULL,
 
 
 
+setMethod("coefficients", "summary.rrvglm", function(object, ...)
+          object@coef3)
+setMethod("coef",         "summary.rrvglm", function(object, ...)
+          object@coef3)
+
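
Given the argument renamings earlier in this file (szero to str0, SD.Ainit/SD.Cinit to sd.Ainit/sd.Cinit, Uncorrelated.lv to Uncorrelated.latvar), a hedged sketch of a call using the new names:

    ## Hypothetical: control list built with the renamed arguments.
    ctrl <- rrvglm.control(Rank = 1, str0 = 1, sd.Cinit = 0.05,
                           Uncorrelated.latvar = FALSE, noRRR = ~ 1)
    ctrl$Index.corner   # corner index skips the structural-zero row, i.e. 2
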
 
 
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 73fdddf..2062637 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -12,16 +12,19 @@ rrvglm.fit <-
   function(x, y, w = rep(1, length(x[, 1])),
            etastart = NULL, mustart = NULL, coefstart = NULL,
            offset = 0, family,
-           control=rrvglm.control(...),
+           control = rrvglm.control(...),
            criterion = "coefficients",
            qr.arg = FALSE,
            constraints = NULL,
            extra = NULL,
            Terms = Terms, function.name = "rrvglm", ...) {
 
+    eff.n <- nrow(x)  # + sum(abs(w[1:nrow(x)]))
+
     specialCM <- NULL
     post <- list()
-    check.rank <- TRUE # !control$Quadratic
+    check.rank <- TRUE  # !control$Quadratic
+    check.rank <- control$Check.rank
     nonparametric <- FALSE
     epsilon <- control$epsilon
     maxit <- control$maxit
@@ -40,158 +43,173 @@ rrvglm.fit <-
     n <- dim(x)[1]
 
     new.s.call <- expression({
-        if (c.list$one.more) {
-            fv <- c.list$fit
-            new.coeffs <- c.list$coeff
+      if (c.list$one.more) {
+        fv <- c.list$fit
+        new.coeffs <- c.list$coeff
+
+        if (length(family@middle))
+          eval(family@middle)
+
+        eta <- fv + offset
+
+        mu <- family@linkinv(eta, extra)
+
+        if (length(family@middle2))
+          eval(family@middle2)
+
+        old.crit <- new.crit
+        new.crit <- 
+          switch(criterion,
+                 coefficients = new.coeffs,
+                 tfun(mu = mu, y = y, w = w,
+                      res = FALSE, eta = eta, extra))
+
+
+
+        if (trace && orig.stepsize == 1) {
+          cat(if (control$Quadratic) "QRR-VGLM" else "RR-VGLM",
+              "   linear loop ", iter, ": ", criterion, "= ")
+          UUUU <- switch(criterion,
+                         coefficients =
+                           format(new.crit,
+                                  dig = round(1 - log10(epsilon))),
+                           format(new.crit, 
+                                  dig = max(4, 
+                                            round(-0 - log10(epsilon) +
+                                                  log10(sqrt(eff.n))))))
+
+          switch(criterion,
+              coefficients = {if (length(new.crit) > 2) cat("\n");
+                 cat(UUUU, fill = TRUE, sep = ", ")},
+              cat(UUUU, fill = TRUE, sep = ", "))
+           }
+
+        take.half.step <- (control$half.stepsizing &&
+                           length(old.coeffs)) &&
+                          !control$Quadratic &&
+                           ((orig.stepsize != 1) ||
+                            (criterion != "coefficients" &&
+                            (if (minimize.criterion)
+                               new.crit > old.crit else
+                            new.crit < old.crit)))
+        if (!is.logical(take.half.step))
+          take.half.step <- TRUE
+        if (take.half.step) {
+          stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+          new.coeffs.save <- new.coeffs
+          if (trace) 
+            cat("Taking a modified step")
+          repeat {
+            if (trace) {
+              cat(".")
+              flush.console()
+            }
+            stepsize <- stepsize / 2
+            if (too.small <- stepsize < 0.001)
+              break
+            new.coeffs <- (1 - stepsize) * old.coeffs +
+                               stepsize  * new.coeffs.save
 
            if (length(family@middle))
-                eval(family@middle)
+              eval(family@middle)
+
+            fv <- X.vlm.save %*% new.coeffs
+            if (M > 1)
+              fv <- matrix(fv, n, M, byrow = TRUE)
 
             eta <- fv + offset
 
            mu <- family@linkinv(eta, extra)

            if (length(family@middle2))
-                eval(family@middle2)
-
-            old.crit <- new.crit
-            new.crit <- 
-                switch(criterion,
-                    coefficients = new.coeffs,
-                    tfun(mu = mu, y = y, w = w, res = FALSE, eta = eta, extra))
+              eval(family@middle2)
 
 
+            new.crit <- 
+              switch(criterion,
+                     coefficients = new.coeffs,
+                     tfun(mu = mu,y = y,w = w,res = FALSE,
+                          eta = eta,extra))
+
+            if ((criterion == "coefficients") || 
+                ( minimize.criterion && new.crit < old.crit) ||
+                (!minimize.criterion && new.crit > old.crit))
+              break
+            }
 
-            if (trace && orig.stepsize == 1) {
-                cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
+            if (trace) 
+              cat("\n")
+            if (too.small) {
+              warning("iterations terminated because ",
+                      "half-step sizes are very small")
+              one.more <- FALSE
+            } else {
+              if (trace) {
+                cat(if (control$Quadratic) "QRR-VGLM" else "RR-VGLM",
                     "   linear loop ", iter, ": ", criterion, "= ")
-                UUUU <- switch(criterion, coefficients=
-                       format(new.crit, dig=round(2-log10(epsilon))),
-                       format(round(new.crit, 4)))
+                UUUU <-
+                  switch(criterion,
+                         coefficients =
+                           format(new.crit,
+                                  dig = round(1 - log10(epsilon))),
+                           format(new.crit, 
+                                  dig = max(4, 
+                                            round(-0 - log10(epsilon) +
+                                                  log10(sqrt(eff.n))))))
+
                 switch(criterion,
-                    coefficients = {if(length(new.crit) > 2) cat("\n");
-                       cat(UUUU, fill = TRUE, sep = ", ")},
-                    cat(UUUU, fill = TRUE, sep = ", "))
-           }
+                       coefficients = {if (length(new.crit) > 2)
+                                         cat("\n");
+                                       cat(UUUU, fill = TRUE, sep = ", ")},
+                       cat(UUUU, fill = TRUE, sep = ", "))
+              }
 
-            {
-                take.half.step <- (control$half.stepsizing && length(old.coeffs)) && 
-                             !control$Quadratic &&
-                             ((orig.stepsize != 1) ||
-                              (criterion != "coefficients" &&
-                             (if(minimize.criterion) new.crit > old.crit else
-                             new.crit < old.crit)))
-                if (!is.logical(take.half.step))
-                    take.half.step <- TRUE
-                if (take.half.step) {
-                    stepsize <- 2 * min(orig.stepsize, 2*stepsize)
-                    new.coeffs.save <- new.coeffs
-                    if (trace) 
-                        cat("Taking a modified step")
-                    repeat {
-                        if (trace) {
-                            cat(".")
-                            flush.console()
-                        }
-                        stepsize <- stepsize / 2
-                        if (too.small <- stepsize < 0.001)
-                            break
-                        new.coeffs <- (1-stepsize)*old.coeffs +
-                                       stepsize*new.coeffs.save
-
-                        if (length(family@middle))
-                            eval(family@middle)
-
-                        fv <- X_vlm_save %*% new.coeffs
-                        if (M > 1)
-                            fv <- matrix(fv, n, M, byrow = TRUE)
-
-                        eta <- fv + offset
-
-                        mu <- family@linkinv(eta, extra)
-
-                        if (length(family@middle2))
-                            eval(family@middle2)
-
-
-                        new.crit <- 
-                            switch(criterion,
-                                coefficients = new.coeffs,
-                                tfun(mu = mu,y = y,w = w,res = FALSE,eta = eta,extra))
-
-                        if ((criterion == "coefficients") || 
-                           ( minimize.criterion && new.crit < old.crit) ||
-                           (!minimize.criterion && new.crit > old.crit))
-                            break
-                    }
-
-                    if (trace) 
-                        cat("\n")
-                    if (too.small) {
-                        warning("iterations terminated because ",
-                                "half-step sizes are very small")
-                        one.more <- FALSE
-                    } else {
-                        if (trace) {
-                       cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
-                    "   linear loop ", iter, ": ", criterion, "= ")
-                            UUUU <- switch(criterion, coefficients =
-                                  format(new.crit, dig = round(2-log10(epsilon))),
-                                  format(round(new.crit, 4)))
-
-                            switch(criterion,
-                            coefficients = {if(length(new.crit) > 2) cat("\n");
-                               cat(UUUU, fill = TRUE, sep = ", ")},
-                            cat(UUUU, fill = TRUE, sep = ", "))
-                        }
-
-                        one.more <- eval(control$convergence)
-                    }
-                } else {
-                    one.more <- eval(control$convergence)
-                }
+              one.more <- eval(control$convergence)
             }
-            flush.console()
-
-            if (one.more) {
-                iter <- iter + 1
-                deriv.mu <- eval(family@deriv)
-                wz <- eval(family@weight)
-                if (control$checkwz)
-                    wz <- checkwz(wz, M = M, trace = trace,
-                                 wzepsilon = control$wzepsilon)
+          } else {
+            one.more <- eval(control$convergence)
+          }
+        flush.console()
 
+        if (one.more) {
+          iter <- iter + 1
+          deriv.mu <- eval(family@deriv)
+          wz <- eval(family@weight)
+          if (control$checkwz)
+            wz <- checkwz(wz, M = M, trace = trace,
+                          wzepsilon = control$wzepsilon)
 
-                wz <- matrix(wz, nrow = n)
-                U <- vchol(wz, M = M, n = n, silent=!trace)
-                tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
-                z <- eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
 
-                rrr.expression <- get(RRR.expression)
-                eval(rrr.expression)
+          wz <- matrix(wz, nrow = n)
+          U <- vchol(wz, M = M, n = n, silent=!trace)
+          tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+          z <- eta + vbacksub(U, tvfor, M, n) - offset  # Contains \bI \bnu
 
-                c.list$z <- z  # contains \bI_{Rank} \bnu
-                c.list$U <- U
-                if (copy_X_vlm) c.list$X_vlm <- X_vlm_save
-            }
+          rrr.expression <- get(RRR.expression)
+          eval(rrr.expression)
 
-            c.list$one.more <- one.more
-            c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
-            old.coeffs <- new.coeffs
+          c.list$z <- z  # contains \bI_{Rank} \bnu
+          c.list$U <- U
+          if (copy.X.vlm) c.list$X.vlm <- X.vlm.save
         }
-        c.list
-    }) # end of new.s.call
+
+        c.list$one.more <- one.more
+        c.list$coeff <- runif(length(new.coeffs))  # 12/3/03; twist needed!
+        old.coeffs <- new.coeffs
+      }
+      c.list
+    })  # end of new.s.call
 
 
 
 
 
-    copy_X_vlm <- FALSE    # May be overwritten in @initialize
+    copy.X.vlm <- FALSE    # May be overwritten in @initialize
     stepsize <- orig.stepsize
     old.coeffs <- coefstart
 
     intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
-    y.names <- predictors.names <- NULL    # May be overwritten in @initialize
+    y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
  
     n.save <- n 
@@ -202,42 +220,42 @@ rrvglm.fit <-
     rrcontrol <- control  #
 
     if (length(slot(family, "initialize")))
-        eval(slot(family, "initialize")) # Initialize mu & M (and optionally w)
+      eval(slot(family, "initialize"))  # Initlz mu & M (and optionally w)
 
 
     eval(rrr.init.expression)  
 
 
     if (length(etastart)) {
-        eta <- etastart
-        mu <- if (length(mustart)) mustart else
-              if (length(body(slot(family, "linkinv"))))
-                slot(family, "linkinv")(eta, extra) else
-                warning("argument 'etastart' assigned a value ",
-                        "but there is no 'linkinv' slot to use it")
+      eta <- etastart
+      mu <- if (length(mustart)) mustart else
+            if (length(body(slot(family, "linkinv"))))
+              slot(family, "linkinv")(eta, extra) else
+              warning("argument 'etastart' assigned a value ",
+                      "but there is no 'linkinv' slot to use it")
     }
 
     if (length(mustart)) {
-        mu <- mustart
-        if (length(body(slot(family, "linkfun")))) {
-          eta <- slot(family, "linkfun")(mu, extra)
-        } else {
-          warning("argument 'mustart' assigned a value ",
-                  "but there is no 'link' slot to use it")
-        }
+      mu <- mustart
+      if (length(body(slot(family, "linkfun")))) {
+        eta <- slot(family, "linkfun")(mu, extra)
+      } else {
+        warning("argument 'mustart' assigned a value ",
+                "but there is no 'link' slot to use it")
+      }
     }
 
 
     M <- if (is.matrix(eta)) ncol(eta) else 1
 
     if (is.character(rrcontrol$Dzero)) {
-        index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]]) 
-        if (any(is.na(index)))
-            stop("Dzero argument didn't fully match y-names")
-        if (length(index) == M)
-            stop("all linear predictors are linear in the ",
-                 "latent variable(s); so set 'Quadratic = FALSE'")
-        rrcontrol$Dzero <- control$Dzero <- index
+      index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]]) 
+      if (any(is.na(index)))
+        stop("Dzero argument didn't fully match y-names")
+      if (length(index) == M)
+        stop("all linear predictors are linear in the ",
+             "latent variable(s); so set 'Quadratic = FALSE'")
+      rrcontrol$Dzero <- control$Dzero <- index
     }
 
 
@@ -245,65 +263,69 @@ rrvglm.fit <-
 
 
    if (length(family@constraints))
-        eval(family@constraints)
+      eval(family@constraints)
 
 
-    special.matrix <- matrix(-34956.125, M, M)    # An unlikely used matrix 
-    just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
+    special.matrix <- matrix(-34956.125, M, M)  # An unlikely used matrix 
+    just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR,
+                            constraints)
 
     findex <- trivial.constraints(just.testing, special.matrix)
-    if (is.null(just.testing)) findex <- NULL # 20100617
+    if (is.null(just.testing)) findex <- NULL  # 20100617
     tc1 <- trivial.constraints(constraints)
 
     if (!is.null(findex) && !control$Quadratic && sum(!tc1)) {
-        for(ii in names(tc1))
-            if (!tc1[ii] && !any(ii == names(findex)[findex == 1]))
-                warning("'", ii, "' is a non-trivial constraint that ",
-                        "will be overwritten by reduced-rank regression")
+      for (ii in names(tc1))
+        if (!tc1[ii] && !any(ii == names(findex)[findex == 1]))
+          warning("'", ii, "' is a non-trivial constraint that ",
+                  "will be overwritten by reduced-rank regression")
     }
 
     if (!is.null(findex) && all(findex == 1))
-        stop("use vglm(), not rrvglm()!")
+      stop("use vglm(), not rrvglm()!")
     colx1.index <- names.colx1.index <- NULL
     dx2 <- dimnames(x)[[2]]
     if (sum(findex)) {
-        asx <- attr(x, "assign")
-        for(ii in names(findex))
-            if (findex[ii]) {
-                names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
-                colx1.index <- c(colx1.index, asx[[ii]])
-        }
-        names(colx1.index) <- names.colx1.index
+      asx <- attr(x, "assign")
+      for (ii in names(findex))
+        if (findex[ii]) {
+          names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
+          colx1.index <- c(colx1.index, asx[[ii]])
+      }
+      names(colx1.index) <- names.colx1.index
     }
     rrcontrol$colx1.index <- control$colx1.index <-
-                            colx1.index # Save it on the object
+                                     colx1.index  # Save it on the object
     colx2.index <- 1:ncol(x)
     names(colx2.index) <- dx2
     if (length(colx1.index)) 
-        colx2.index <- colx2.index[-colx1.index]
+      colx2.index <- colx2.index[-colx1.index]
 
-    p1 <- length(colx1.index); p2 <- length(colx2.index)
-    rrcontrol$colx2.index <- control$colx2.index <-
-                                     colx2.index # Save it on the object
+    p1 <- length(colx1.index)
+    p2 <- length(colx2.index)
+    rrcontrol$colx2.index <-
+      control$colx2.index <- colx2.index  # Save it on the object
     Index.corner <- control$Index.corner
 
 
 
 
     Amat <- if (length(rrcontrol$Ainit)) rrcontrol$Ainit else
-            matrix(rnorm(M * Rank, sd = rrcontrol$SD.Cinit), M, Rank)
+            matrix(rnorm(M * Rank, sd = rrcontrol$sd.Cinit), M, Rank)
     Cmat <- if (length(rrcontrol$Cinit)) rrcontrol$Cinit else {
                 if (!rrcontrol$Use.Init.Poisson.QO) {
-                    matrix(rnorm(p2 * Rank, sd = rrcontrol$SD.Cinit), p2, Rank)
-                } else
-                .Init.Poisson.QO(ymat = as.matrix(y), 
+                  matrix(rnorm(p2 * Rank, sd = rrcontrol$sd.Cinit),
+                         p2, Rank)
+                } else {
+                  .Init.Poisson.QO(ymat = as.matrix(y),
                     X1 = if (length(colx1.index))
                          x[, colx1.index, drop = FALSE] else NULL,
                     X2 = x[, colx2.index, drop = FALSE],
                     Rank = rrcontrol$Rank, trace = rrcontrol$trace,
                     max.ncol.etamat = rrcontrol$Etamat.colmax,
                     Crow1positive = rrcontrol$Crow1positive,
-                    isdlv = rrcontrol$isdlv)
+                    isd.latvar = rrcontrol$isd.latvar)
+                }
             }
 
 
@@ -314,16 +336,18 @@ rrvglm.fit <-
 
 
     if (control$Corner)
-        Amat[control$Index.corner,] <- diag(Rank)
-    if (length(control$szero))
-        Amat[control$szero,] <- 0
+      Amat[control$Index.corner,] <- diag(Rank)
+    if (length(control$str0))
+      Amat[control$str0, ] <- 0
 
-    rrcontrol$Ainit <- control$Ainit <- Amat   # Good for valt()
-    rrcontrol$Cinit <- control$Cinit <- Cmat   # Good for valt()
+    rrcontrol$Ainit <- control$Ainit <- Amat  # Good for valt()
+    rrcontrol$Cinit <- control$Cinit <- Cmat  # Good for valt()
 
-    Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
+    Blist <- process.constraints(constraints, x, M,
+                                 specialCM = specialCM)
 
-    nice31 <- control$Quadratic && (!control$EqualTol || control$ITolerances) &&
+    nice31 <-  control$Quadratic &&
+             (!control$EqualTol || control$ITolerances) &&
               all(trivial.constraints(Blist) == 1)
 
     Blist <- Blist.save <- replace.constraints(Blist, Amat, colx2.index)
@@ -333,55 +357,56 @@ rrvglm.fit <-
     dimB <- sum(ncolBlist)
 
 
-    X_vlm_save <- if (control$Quadratic) {
-        tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
-                       C = Cmat, control = control)
-        xsmall.qrr <- tmp500$new.lv.model.matrix 
-        B.list <- tmp500$constraints # Doesn't change or contain \bI_{Rank} \bnu
-        if (FALSE && modelno == 3) {
-          B.list[[1]] <- (B.list[[1]])[, c(TRUE, FALSE), drop = FALSE] # Amat
-          B.list[[2]] <- (B.list[[2]])[, c(TRUE, FALSE), drop = FALSE] # D
-        }
+    X.vlm.save <- if (control$Quadratic) {
+      tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
+                     C = Cmat, control = control)
+      xsmall.qrr <- tmp500$new.latvar.model.matrix 
+      B.list <- tmp500$constraints
+      if (FALSE && modelno == 3) {
+        B.list[[1]] <- (B.list[[1]])[, c(TRUE, FALSE), drop = FALSE]  # Amat
+        B.list[[2]] <- (B.list[[2]])[, c(TRUE, FALSE), drop = FALSE]  # D
+      }
 
-        lv.mat <- tmp500$lv.mat
-        if (length(tmp500$offset)) {
-            offset <- tmp500$offset 
-        }
-        lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
+      latvar.mat <- tmp500$latvar.mat
+      if (length(tmp500$offset)) {
+        offset <- tmp500$offset 
+      }
+      lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
     } else {
-        lv.mat <- x[,colx2.index,drop = FALSE] %*% Cmat 
-        lm2vlm.model.matrix(x, Blist, xij=control$xij)
+      latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat 
+      lm2vlm.model.matrix(x, Blist, xij = control$xij)
     }
 
 
 
 
     if (length(coefstart)) {
-        eta <- if (ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
-                   offset else X_vlm_save * coefstart + offset
-        eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta) 
+      eta <- if (ncol(X.vlm.save) > 1)
+               X.vlm.save %*% coefstart + offset else
+               X.vlm.save  *  coefstart + offset
+      eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
 
 
-        mu <- family@linkinv(eta, extra)
+      mu <- family@linkinv(eta, extra)
     }
 
     if (criterion != "coefficients") {
-        tfun <- slot(family, criterion)   # family[[criterion]]
+      tfun <- slot(family, criterion)  # family[[criterion]]
     }
 
     iter <- 1
     new.crit <- switch(criterion,
-                      coefficients = 1,
-                      tfun(mu = mu, y = y, w = w, res = FALSE, eta = eta, extra))
-    old.crit <- if (minimize.criterion) 10*new.crit+10 else -10*new.crit-10
-
-
+                       coefficients = 1,
+                       tfun(mu = mu, y = y, w = w, res = FALSE,
+                            eta = eta, extra))
+    old.crit <- ifelse(minimize.criterion,  10 * new.crit + 10,
+                                           -10 * new.crit - 10)
    deriv.mu <- eval(family@deriv)

    wz <- eval(family@weight)
     if (control$checkwz)
       wz <- checkwz(wz, M = M, trace = trace,
-                   wzepsilon = control$wzepsilon)
+                    wzepsilon = control$wzepsilon)
 
     U <- vchol(wz, M = M, n = n, silent = !trace)
     tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
@@ -389,72 +414,74 @@ rrvglm.fit <-
 
     c.list <- list(z = as.double(z), fit = as.double(t(eta)),
                    one.more = TRUE,
-                   coeff = as.double(rep(1,ncol(X_vlm_save))),
+                   coeff = as.double(rep(1,ncol(X.vlm.save))),
                    U = as.double(U),
-                   copy_X_vlm = copy_X_vlm,
-                   X_vlm = if (copy_X_vlm) as.double(X_vlm_save) else
-                   double(3))
+                   copy.X.vlm = copy.X.vlm,
+                   X.vlm = if (copy.X.vlm) as.double(X.vlm.save) else
+                           double(3))
 
 
 
-    dX_vlm <- as.integer(dim(X_vlm_save))
-    nrow_X_vlm <- dX_vlm[[1]]
-    ncol_X_vlm <- dX_vlm[[2]]
+    dX.vlm <- as.integer(dim(X.vlm.save))
+    nrow.X.vlm <- dX.vlm[[1]]
+    ncol.X.vlm <- dX.vlm[[2]]
 
-    if (nrow_X_vlm < ncol_X_vlm)
-        stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm, " observations")
+    if (nrow.X.vlm < ncol.X.vlm)
+      stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
 
-    {
-        bf.call <- expression(vlm.wfit(xmat=X_vlm_save, zedd, 
+    bf.call <- expression(vlm.wfit(xmat=X.vlm.save, zedd, 
             Blist = if (control$Quadratic) B.list else Blist,
             ncolx=ncol(x), U=U,
             Eta.range = control$Eta.range,
             matrix.out = if (control$Quadratic) FALSE else TRUE,
             is.vlmX = TRUE, qr = qr.arg, xij = control$xij))
 
-        while(c.list$one.more) {
-          if (control$Quadratic) {
-            zedd <- as.matrix(z)
-            if (control$Corner)
-              zedd[,Index.corner] <- zedd[,Index.corner] - lv.mat 
-          } else {
-            zedd <- z 
-          }
+    while (c.list$one.more) {
+      if (control$Quadratic) {
+        zedd <- as.matrix(z)
+        if (control$Corner)
+          zedd[, Index.corner] <- zedd[, Index.corner] - latvar.mat
+      } else {
+        zedd <- z 
+      }
 
-            if (!nice31)
-              tfit <- eval(bf.call)   # tfit$fitted.values is n x M
+      if (!nice31)
+        tfit <- eval(bf.call)  # tfit$fitted.values is n x M
 
-            if (!control$Quadratic) {
-                Cmat <- tfit$mat.coef[colx2.index,,drop = FALSE] %*%
-                       Amat %*% solve(t(Amat) %*% Amat)
-                rrcontrol$Ainit <- control$Ainit <- Amat  # Good for valt()
-                rrcontrol$Cinit <- control$Cinit <- Cmat  # Good for valt()
-            }
+      if (!control$Quadratic) {
+        Cmat <- tfit$mat.coef[colx2.index,,drop = FALSE] %*%
+                Amat %*% solve(t(Amat) %*% Amat)
+        rrcontrol$Ainit <- control$Ainit <- Amat  # Good for valt()
+        rrcontrol$Cinit <- control$Cinit <- Cmat  # Good for valt()
+      }
     
-            if (!nice31) c.list$coeff <- tfit$coefficients 
+      if (!nice31) c.list$coeff <- tfit$coefficients 
     
-            if (control$Quadratic) {
-                if (control$Corner)
-                    tfit$fitted.values[,Index.corner] <-
-                        tfit$fitted.values[,Index.corner] + lv.mat 
-            }
+      if (control$Quadratic) {
+        if (control$Corner)
+          tfit$fitted.values[, Index.corner] <-
+          tfit$fitted.values[, Index.corner] + latvar.mat 
+      }
 
-            if (!nice31)
-              tfit$predictors <- tfit$fitted.values # Doesn't contain the offset
-            if (!nice31)
-              c.list$fit <- tfit$fitted.values
-            c.list <- eval(new.s.call)
-            NULL
-        }
+      if (!nice31)
+        tfit$predictors <- tfit$fitted.values  # Does not contain the offset
+      if (!nice31)
+        c.list$fit <- tfit$fitted.values
+      c.list <- eval(new.s.call)
+      NULL
     }
 
-    if (maxit > 1 && iter >= maxit)
-        warning("convergence not obtained in ", maxit, " iterations")
+
+  if (maxit > 1 && iter >= maxit && !control$noWarning)
+    warning("convergence not obtained in ", maxit, " iterations")
+
+
 
 
-    dnrow_X_vlm <- labels(X_vlm_save)
-    xnrow_X_vlm <- dnrow_X_vlm[[2]]
-    ynrow_X_vlm <- dnrow_X_vlm[[1]]
+
+    dnrow.X.vlm <- labels(X.vlm.save)
+    xnrow.X.vlm <- dnrow.X.vlm[[2]]
+    ynrow.X.vlm <- dnrow.X.vlm[[1]]
 
    if (length(family@fini))
        eval(family@fini)
@@ -462,60 +489,61 @@ rrvglm.fit <-
     if (M > 1 && !nice31)
         tfit$predictors <- matrix(tfit$predictors, n, M)
 
-    asgn <- attr(X_vlm_save, "assign")
+    asgn <- attr(X.vlm.save, "assign")
     if (nice31) {
-        coefs <- rep(0, len = length(xnrow_X_vlm))
-        rank <- ncol_X_vlm
+      coefs <- rep(0, len = length(xnrow.X.vlm))
+        rank <- ncol.X.vlm
     } else {
-        coefs <- tfit$coefficients
-        names(coefs) <- xnrow_X_vlm
-        rank <- tfit$rank
+      coefs <- tfit$coefficients
+      names(coefs) <- xnrow.X.vlm
+      rank <- tfit$rank
     }
 
-    cnames <- xnrow_X_vlm
+    cnames <- xnrow.X.vlm
 
-    if (check.rank && rank < ncol_X_vlm)
-        stop("rrvglm only handles full-rank models (currently)")
+    if (check.rank && rank < ncol.X.vlm)
+      stop("rrvglm only handles full-rank models (currently)")
 
     if (nice31) {
-        R <- matrix(as.numeric(NA), 5, 5)
+      R <- matrix(as.numeric(NA), 5, 5)
     } else {
-        R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
-        R[lower.tri(R)] <- 0
-        attributes(R) <- list(dim = c(ncol_X_vlm, ncol_X_vlm),
-                              dimnames = list(cnames, cnames), rank = rank)
+      R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
+      R[lower.tri(R)] <- 0
+      attributes(R) <- list(dim = c(ncol.X.vlm, ncol.X.vlm),
+                            dimnames = list(cnames, cnames), rank = rank)
     }
 
     if (nice31) {
-        effects <- rep(0, len = 77)
+      effects <- rep(0, len = 77)
     } else {
-        effects <- tfit$effects
-        neff <- rep("", nrow_X_vlm)
-        neff[seq(ncol_X_vlm)] <- cnames
-        names(effects) <- neff
+      effects <- tfit$effects
+      neff <- rep("", nrow.X.vlm)
+      neff[seq(ncol.X.vlm)] <- cnames
+      names(effects) <- neff
 
-        dim(tfit$predictors) <- c(n, M)
+      dim(tfit$predictors) <- c(n, M)
     }
     dn <- labels(x)
     yn <- dn[[1]]
     xn <- dn[[2]]
 
     if (nice31) {
-        residuals <- z - fv
-        if (M == 1) {
-            residuals <- as.vector(residuals)
-            names(residuals) <- yn
-        } else {
-            dimnames(residuals) <- list(yn, predictors.names)
-        }
+      residuals <- z - fv
+      if (M == 1) {
+        residuals <- as.vector(residuals)
+        names(residuals) <- yn
+      } else {
+        dimnames(residuals) <- list(yn, predictors.names)
+      }
     } else {
         residuals <- z - tfit$predictors
         if (M == 1) {
-            tfit$predictors <- as.vector(tfit$predictors)
-            residuals <- as.vector(residuals)
-            names(residuals) <- names(tfit$predictors) <- yn
+          tfit$predictors <- as.vector(tfit$predictors)
+          residuals <- as.vector(residuals)
+          names(residuals) <- names(tfit$predictors) <- yn
         } else {
-            dimnames(residuals) <- dimnames(tfit$predictors) <- list(yn, predictors.names)
+          dimnames(residuals) <-
+          dimnames(tfit$predictors) <- list(yn, predictors.names)
         }
     }
 
@@ -536,10 +564,10 @@ rrvglm.fit <-
 
 
 
-    elts.tildeA <- (M - Rank - length(control$szero)) * Rank
+    elts.tildeA <- (M - Rank - length(control$str0)) * Rank
     no.dpar <- 0
-    df.residual <- nrow_X_vlm - rank -
-                   (if(control$Quadratic) Rank*p2 else 0) -
+    df.residual <- nrow.X.vlm - rank -
+                   ifelse(control$Quadratic, Rank*p2, 0) -
                    no.dpar - elts.tildeA
 
 
@@ -554,11 +582,11 @@ rrvglm.fit <-
                 rank = rank,
                 residuals = residuals,
                 R = R,
-                terms = Terms) # terms: This used to be done in vglm() 
+                terms = Terms)  # terms: This used to be done in vglm() 
 
     if (qr.arg && !nice31) {
-        fit$qr <- tfit$qr
-        dimnames(fit$qr$qr) <- dnrow_X_vlm
+      fit$qr <- tfit$qr
+      dimnames(fit$qr$qr) <- dnrow.X.vlm
     }
 
     if (M == 1) {
@@ -569,7 +597,7 @@ rrvglm.fit <-
 
     misc <- list(
         colnames.x = xn,
-        colnames.X_vlm = xnrow_X_vlm,
+        colnames.X.vlm = xnrow.X.vlm,
         criterion = criterion,
         function.name = function.name, 
         intercept.only=intercept.only,
@@ -577,10 +605,10 @@ rrvglm.fit <-
         M = M,
         n = n,
         nonparametric = nonparametric,
-        nrow_X_vlm = nrow_X_vlm,
+        nrow.X.vlm = nrow.X.vlm,
         orig.assign = attr(x, "assign"),
         p = ncol(x),
-        ncol_X_vlm = ncol_X_vlm,
+        ncol.X.vlm = ncol.X.vlm,
         ynames = dimnames(y)[[2]])
 
     if (one.more)
@@ -591,23 +619,23 @@ rrvglm.fit <-
     if (criterion != "coefficients")
         crit.list[[criterion]] <- fit[[criterion]] <- new.crit
 
-    for(ii in names(.min.criterion.VGAM)) {
-        if (ii != criterion &&
-           any(slotNames(family) == ii) &&
-               length(body(slot(family, ii)))) {
-                fit[[ii]] <- crit.list[[ii]] <-
-                (slot(family, ii))(mu = mu, y = y, w = w,
-                                   res = FALSE, eta = eta, extra)
-        }
+    for (ii in names(.min.criterion.VGAM)) {
+      if (ii != criterion &&
+       any(slotNames(family) == ii) &&
+           length(body(slot(family, ii)))) {
+            fit[[ii]] <- crit.list[[ii]] <-
+            (slot(family, ii))(mu = mu, y = y, w = w,
+                               res = FALSE, eta = eta, extra)
+      }
     }
 
 
 
     if (w[1] != 1 || any(w != w[1]))
-        fit$prior.weights <- w
+      fit$prior.weights <- w
 
    if (length(family@last))
-        eval(family@last)
+      eval(family@last)
 
 
     structure(c(fit, list(predictors = if (nice31) matrix(eta, n, M) else
@@ -620,7 +648,7 @@ rrvglm.fit <-
         iter = iter,
         misc = misc,
         post = post,
-        rss = if (nice31) 000 else tfit$rss,
+        res.ss = if (nice31) 000 else tfit$res.ss,
         x = x,
         y = y)),
        vclass = family@vfamily)
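
The block deleted near the top of this rrvglm.fit hunk implemented a standard step-halving safeguard for the IRLS loop: whenever the fitting criterion got worse, the proposed coefficients were pulled back halfway towards the previous ones, repeatedly, until the criterion improved or the step size fell below 0.001, at which point the iterations stopped with a warning. A minimal standalone sketch of that idea follows; it assumes a criterion that is being maximised, and the names (half.step, criterion_fn and so on) are illustrative rather than VGAM internals.

half.step <- function(old_coef, proposed_coef, criterion_fn,
                      min_step = 0.001) {
  # Accept the proposed step only if it improves the criterion;
  # otherwise back off halfway towards the old coefficients.
  old_crit <- criterion_fn(old_coef)
  stepsize <- 1
  new_coef <- proposed_coef
  repeat {
    if (criterion_fn(new_coef) > old_crit)
      return(new_coef)
    stepsize <- stepsize / 2
    if (stepsize < min_step) {
      warning("iterations terminated because half-step sizes are very small")
      return(old_coef)
    }
    new_coef <- (1 - stepsize) * old_coef + stepsize * proposed_coef
  }
}

# Toy usage: the proposal overshoots the maximiser of -sum((b - 3)^2),
# so one halving (stepsize = 0.5) already improves the criterion.
half.step(old_coef = c(0, 0), proposed_coef = c(10, 10),
          criterion_fn = function(b) -sum((b - 3)^2))
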
diff --git a/R/s.vam.q b/R/s.vam.q
index a40613a..34d0f84 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -12,14 +12,14 @@
 
 s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
                   bf.epsilon = 0.001, trace = FALSE, se.fit = TRUE,
-                  X_vlm_save, Blist, ncolBlist, M, qbig, Umat,
+                  X.vlm.save, Blist, ncolBlist, M, qbig, Umat,
                   all.knots = FALSE, nk = NULL,
                   sf.only = FALSE) {
   nwhich <- names(which)
 
 
-  dX_vlm <- as.integer(dim(X_vlm_save))
-  pbig <- dX_vlm[2]
+  dX.vlm <- as.integer(dim(X.vlm.save))
+  pbig <- dX.vlm[2]
 
 
   if (!length(smooth.frame$first)) {
@@ -28,8 +28,8 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
     smooth.frame$first <- TRUE  # Only executed at the first time
 
     dx <- as.integer(dim(x))
-    smooth.frame$n_lm <- dx[1]
-    smooth.frame$p_lm <- dx[2]
+    smooth.frame$n.lm <- dx[1]
+    smooth.frame$p.lm <- dx[2]
     attr(data, "class") <- NULL
 
     osparv <- lapply(data, attr, "spar")  # "o" for original
@@ -47,17 +47,17 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
         warning("only the first ", ncolBlist[ii], " values of ",
                 "'spar' are used for variable '", s.xargument, "'")
       }
-      osparv[[ii]] <- rep(temp, length = ncolBlist[ii])   # recycle
+      osparv[[ii]] <- rep(temp, length = ncolBlist[ii])  # Recycle
     
       temp <- odfvec[[ii]]
       if (!is.numeric(temp) || any(temp < 1)) {
-        stop("df is non-numeric or less than 1")
+        stop("argument 'df' is non-numeric or less than 1")
       }
       if (length(temp) > ncolBlist[ii]) {
         warning("only the first ", ncolBlist[ii], " value(s) of 'df' ",
                 "are used for variable '", s.xargument, "'")
       }
-      odfvec[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
+      odfvec[[ii]] <- rep(temp, length = ncolBlist[ii])  # Recycle
       if (max(temp) > smooth.frame$neffec[kk]-1) {
         stop("'df' value too high for variable '", s.xargument, "'")
       }
@@ -75,25 +75,24 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
     smooth.frame$odfvec <- odfvec  # Original
     
     if (sum(smooth.frame$dfvec[smooth.frame$osparv == 0]) + pbig >
-      smooth.frame$n_lm * sum(ncolBlist[nwhich])) {
+      smooth.frame$n.lm * sum(ncolBlist[nwhich])) {
       stop("too many parameters/dof for data on hand")
     }
     
-    xnrow_X_vlm <- labels(X_vlm_save)[[2]]
-    asgn <- attr(X_vlm_save, "assign")
+    xnrow.X.vlm <- labels(X.vlm.save)[[2]]
+    asgn <- attr(X.vlm.save, "assign")
     aa <- NULL
     for (ii in nwhich) {
-      aa <- c(aa, xnrow_X_vlm[asgn[[ii]]])
+      aa <- c(aa, xnrow.X.vlm[asgn[[ii]]])
     }
     smooth.frame$ndfsparv <- aa                # Stored here
-    smooth.frame$xnrow_X_vlm <- xnrow_X_vlm    # Stored here
+    smooth.frame$xnrow.X.vlm <- xnrow.X.vlm    # Stored here
     smooth.frame$s.xargument <- s.xargument    # Stored here
 
-    smooth.frame$smap <- as.vector(cumsum(
-        c(1, ncolBlist[nwhich]))[1:length(nwhich)])
+    smooth.frame$smap <-
+      as.vector(cumsum(c(1, ncolBlist[nwhich]))[1:length(nwhich)])
 
     smooth.frame$try.sparv <- osparv
-    smooth.frame$lamvector <- double(length(odfvec))
 
 
     smooth.frame$bindex <-
@@ -104,7 +103,6 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
       as.integer(cumsum(c(1, smooth.frame$neffec * ncolBlist[nwhich])))
 
 
-
     smooth.frame$kindex <-
       as.integer(cumsum(c(1, 4 + smooth.frame$nknots)))
   } else {
@@ -112,17 +110,19 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
   }
 
 
+
+
   if (sf.only) {
     return(smooth.frame)
   }
 
 
-  ldk <- 3 * max(ncolBlist[nwhich]) + 1  # 20020711
 
+  ldk <- 3 * max(ncolBlist[nwhich]) + 1  # 20020711
 
   which <- unlist(which)
-  p_lm <- smooth.frame$p_lm
-  n_lm <- smooth.frame$n_lm
+  p.lm <- smooth.frame$p.lm
+  n.lm <- smooth.frame$n.lm
   dim2wz <- if (is.matrix(wz)) ncol(wz) else 1
 
   dim1U <- if (is.matrix(Umat)) nrow(Umat) else 1
@@ -139,9 +139,6 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
   ncolbmax <- max(ncbvec)
 
 
-
-
-
   contr.sp <- list(low   = -1.5,  ## low = 0.      was default till R 1.3.x
                    high  =  1.5,
                    tol   = 1e-4,  ## tol = 0.001   was default till R 1.3.x
@@ -149,39 +146,32 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
                    maxit =  500)
 
 
-
-
   fit <-
-    dotC(name = "Yee_vbfa",  # ---------------------------------
-         npetc = as.integer(c(n_lm, p_lm, length(which), se.fit, 0,
-                              bf.maxit, qrank = 0, M, nbig = n_lm * M, pbig,
+    .C("Yee_vbfa",  # ---------------------------------
+         npetc = as.integer(c(n.lm, p.lm, length(which), se.fit, 0,
+                              bf.maxit, qrank = 0, M, nbig = n.lm * M, pbig,
                               qbig, dim2wz, dim1U, ier = 0,
-                              ldk = ldk, # ldk may be unused
+                              ldk = ldk,  # ldk may be unused
                               contr.sp$maxit, iinfo = 0
                              )),
        doubvec = as.double(c(bf.epsilon, resSS = 0, unlist(contr.sp[1:4]))),
      as.double(x),
          y = as.double(zedd), wz = as.double(wz),
          dfvec  = as.double(smooth.frame$odfvec + 1),  # 20130427; + 1 added
-         lamvec = as.double(smooth.frame$lamvector),
+         lamvec = double(length(smooth.frame$odfvec)),
          sparv  = as.double(smooth.frame$try.sparv),
    as.integer(smooth.frame$matcho), as.integer(smooth.frame$neffec),
          as.integer(which),
-   smomat = as.double(smomat), etamat = double(M * n_lm),
+   smomat = as.double(smomat), etamat = double(M * n.lm),
    beta = double(pbig),
        varmat = if (se.fit) as.double(smomat) else double(1),
-     qr = as.double(X_vlm_save), qraux = double(pbig),
+     qr = as.double(X.vlm.save), qraux = double(pbig),
      qpivot = as.integer(1:pbig),
          as.double(Umat),
          as.double(unlist(Blist)),
      as.integer(ncbvec), as.integer(smooth.frame$smap),
       trivc = as.integer(trivc),
 
-
-
-
-
-
          levmat = double(sum(smooth.frame$neffec * ncbvec)),  # 20130427;
 
 
@@ -190,61 +180,46 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
      bindex = as.integer(smooth.frame$bindex),
      lindex = as.integer(smooth.frame$lindex),
          nknots = as.integer(smooth.frame$nknots),
-         kindex = as.integer(smooth.frame$kindex)) # End of dotC
+         kindex = as.integer(smooth.frame$kindex), PACKAGE = "VGAM")  # End of dotC
+
 
-  if (exists("flush.console")) flush.console()
+  if (exists("flush.console"))
+    flush.console()
  
 
-  if (smooth.frame$first) {
-  }
 
 
-  dim(fit$qr) <- dim(X_vlm_save)
-  dimnames(fit$qr) <- dimnames(X_vlm_save)
+  dim(fit$qr) <- dim(X.vlm.save)
+  dimnames(fit$qr) <- dimnames(X.vlm.save)
   dim(fit$y) <- dim(zedd)
   dimnames(fit$y) <- dimnames(zedd)
   dim(fit$smomat) <- dim(smomat)
-  dimnames(fit$smomat) <- dimnames(smomat)   # Needed for vgam.nlchisq
+  dimnames(fit$smomat) <- dimnames(smomat)  # Needed for vgam.nlchisq
   if (se.fit) {
     dim(fit$varmat) <- dim(smomat)
     dimnames(fit$varmat) <- dimnames(smomat)
   }
 
-
-
-
-
-
   if (fit$npetc[14] != 0 ||
       fit$npetc[17] != 0) {
     stop("something went wrong in the C function 'vbfa'")
   }
 
-  fit$etamat <- if (M > 1) matrix(fit$etamat, n_lm, M, byrow = TRUE) else
-                           c(fit$etamat)  # May no longer be a matrix
+  fit$etamat <- if (M > 1)
+                matrix(fit$etamat, n.lm, M, byrow = TRUE) else
+                c(fit$etamat)  # May no longer be a matrix
   nits <- fit$npetc[5]
   qrank <- fit$npetc[7]
 
 
 
 
-
-
-
-
-
-
-
-
-
   if (smooth.frame$first) {
     smooth.frame$try.sparv <- fit$sparv
   }
 
-
-
-  if ((nits == bf.maxit) & bf.maxit > 1) {
-    warning("'s.vam' convergence not obtained in ", bf.maxit,
+  if ((nits == bf.maxit) && bf.maxit > 1) {
+    warning("'s.vam()' convergence not obtained in ", bf.maxit,
             " iterations")
   }
 
@@ -256,12 +231,12 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
   Bspline <- vector("list", length(nwhich))
   names(Bspline) <- nwhich
   for (ii in 1:length(nwhich)) {
-    b_coefs <- fit$bcoeff[(smooth.frame$bindex[ii]):
-                          (smooth.frame$bindex[ii+1]-1)]
-    b_coefs <- matrix(b_coefs, ncol = ncolBlist[nwhich[ii]])
+    b.coefs <- fit$bcoeff[(smooth.frame$bindex[ii]):
+                          (smooth.frame$bindex[ii + 1] - 1)]
+    b.coefs <- matrix(b.coefs, ncol = ncolBlist[nwhich[ii]])
     Bspline[[ii]] <-
         new("vsmooth.spline.fit",
-            "Bcoefficients" = b_coefs,
+            "Bcoefficients" = b.coefs,
             "xmax"          = smooth.frame$xmax[ii],
             "xmin"          = smooth.frame$xmin[ii],
             "knots"         = as.vector(smooth.frame$knots[[ii]]))
@@ -283,13 +258,12 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
 
 
 
-  nl.df <- fit$dfvec - 1  # Used to be -1; Decrement/increment ?
-
+  nl.df <- fit$dfvec - 1  # Decrement/increment ?
 
   retlist <- list(
     Bspline = Bspline,
     coefficients = fit$beta,
-    df.residual = n_lm * M - qrank - sum(nl.df),  # Decrement/increment ?
+    df.residual = n.lm * M - qrank - sum(nl.df),  # Decrement/increment ?
     fitted.values = fit$etamat,
     Leverages = Leverages,
     nl.df = nl.df,
@@ -298,13 +272,13 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
     R = R, 
     rank = qrank, 
     residuals = fit$y - fit$etamat,
-    rss = fit$doubvec[2],
+    res.ss = fit$doubvec[2],
     smomat = fit$smomat,
     sparv = fit$sparv,
     s.xargument = unlist(smooth.frame$s.xargument))
 
 
-  names(retlist$coefficients) <- smooth.frame$xnrow_X_vlm
+  names(retlist$coefficients) <- smooth.frame$xnrow.X.vlm
   names(retlist$sparv) <-
   names(retlist$nl.df) <- smooth.frame$ndfspar
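
The loop near the end of s.vam() above cuts the flat B-spline coefficient vector returned by the C routine (fit$bcoeff) into one matrix per smooth term, using smooth.frame$bindex, a vector of 1-based cumulative offsets. A small self-contained illustration of that offset-vector indexing pattern; the block sizes and vector contents below are made up for the example.

# Made-up block sizes standing in for the per-term coefficient counts.
ncoef.per.term <- c(4, 6, 3)
bindex <- as.integer(cumsum(c(1, ncoef.per.term)))  # 1 5 11 14
flat <- seq_len(sum(ncoef.per.term))  # stand-in for fit$bcoeff

# Slice block ii as flat[bindex[ii]:(bindex[ii + 1] - 1)], as in the diff.
blocks <- lapply(seq_along(ncoef.per.term), function(ii)
  flat[bindex[ii]:(bindex[ii + 1] - 1)])
sapply(blocks, length)  # 4 6 3, matching ncoef.per.term
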
 
diff --git a/R/smart.R b/R/smart.R
index c33e385..dd802f3 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -26,8 +26,8 @@ smartpredenv <- new.env()
 
 smart.mode.is <- function(mode.arg = NULL) {
   if (!length(mode.arg)) {
-    if (exists(".smart.prediction", envir = VGAM:::smartpredenv)) {
-      get(".smart.prediction.mode", envir = VGAM:::smartpredenv)
+    if (exists(".smart.prediction", envir = smartpredenv)) {
+      get(".smart.prediction.mode", envir = smartpredenv)
     } else {
       "neutral"
     }
@@ -37,8 +37,8 @@ smart.mode.is <- function(mode.arg = NULL) {
         mode.arg != "write")
       stop("argument \"mode.arg\" must be one of",
            " \"neutral\", \"read\" or \"write\"")
-    if (exists(".smart.prediction", envir = VGAM:::smartpredenv)) {
-      get(".smart.prediction.mode", envir = VGAM:::smartpredenv) ==
+    if (exists(".smart.prediction", envir = smartpredenv)) {
+      get(".smart.prediction.mode", envir = smartpredenv) ==
         mode.arg
     } else {
       mode.arg == "neutral"
@@ -58,35 +58,35 @@ setup.smart <- function(mode.arg, smart.prediction = NULL,
   if (length(actual)) {
 
 
-    assign(".smart.prediction", actual, envir = VGAM:::smartpredenv)
-    assign(".smart.prediction.counter", 0, envir = VGAM:::smartpredenv)
-    assign(".smart.prediction.mode", mode.arg, envir = VGAM:::smartpredenv)
-    assign(".max.smart", max.smart, envir = VGAM:::smartpredenv)
-    assign(".smart.prediction", actual, envir = VGAM:::smartpredenv)
+    assign(".smart.prediction", actual, envir = smartpredenv)
+    assign(".smart.prediction.counter", 0, envir = smartpredenv)
+    assign(".smart.prediction.mode", mode.arg, envir = smartpredenv)
+    assign(".max.smart", max.smart, envir = smartpredenv)
+    assign(".smart.prediction", actual, envir = smartpredenv)
   }
 }
 
 wrapup.smart <- function() {
-  if (exists(".smart.prediction", envir = VGAM:::smartpredenv))
-    rm(".smart.prediction", envir = VGAM:::smartpredenv)
-  if (exists(".smart.prediction.counter", envir = VGAM:::smartpredenv))
-    rm(".smart.prediction.counter", envir = VGAM:::smartpredenv)
-  if (exists(".smart.prediction.mode", envir = VGAM:::smartpredenv))
-    rm(".smart.prediction.mode", envir = VGAM:::smartpredenv)
-  if (exists(".max.smart", envir = VGAM:::smartpredenv))
-    rm(".max.smart", envir = VGAM:::smartpredenv)
+  if (exists(".smart.prediction", envir = smartpredenv))
+    rm(".smart.prediction", envir = smartpredenv)
+  if (exists(".smart.prediction.counter", envir = smartpredenv))
+    rm(".smart.prediction.counter", envir = smartpredenv)
+  if (exists(".smart.prediction.mode", envir = smartpredenv))
+    rm(".smart.prediction.mode", envir = smartpredenv)
+  if (exists(".max.smart", envir = smartpredenv))
+    rm(".max.smart", envir = smartpredenv)
 }
 
 
 get.smart.prediction <- function() {
 
   smart.prediction.counter <- get(".smart.prediction.counter",
-                                  envir = VGAM:::smartpredenv)
-  max.smart <- get(".max.smart", envir = VGAM:::smartpredenv)
+                                  envir = smartpredenv)
+  max.smart <- get(".max.smart", envir = smartpredenv)
 
   if (smart.prediction.counter > 0) {
     # Save this on the object for smart prediction later
-    smart.prediction <- get(".smart.prediction", envir = VGAM:::smartpredenv)
+    smart.prediction <- get(".smart.prediction", envir = smartpredenv)
     if (max.smart >= (smart.prediction.counter + 1))
       for(i in max.smart:(smart.prediction.counter + 1))
         smart.prediction[[i]] <- NULL
@@ -100,34 +100,34 @@ put.smart <- function(smart) {
 
 
 
-  max.smart <- get(".max.smart", envir = VGAM:::smartpredenv)
+  max.smart <- get(".max.smart", envir = smartpredenv)
   smart.prediction.counter <- get(".smart.prediction.counter",
-                                  envir = VGAM:::smartpredenv)
-  smart.prediction <- get(".smart.prediction", envir = VGAM:::smartpredenv)
+                                  envir = smartpredenv)
+  smart.prediction <- get(".smart.prediction", envir = smartpredenv)
   smart.prediction.counter <- smart.prediction.counter + 1
 
   if (smart.prediction.counter > max.smart) {
     # if list is too small, make it larger
     max.smart <- max.smart + (inc.smart <- 10) # can change inc.smart
     smart.prediction <- c(smart.prediction, vector("list", inc.smart))
-    assign(".max.smart", max.smart, envir = VGAM:::smartpredenv)
+    assign(".max.smart", max.smart, envir = smartpredenv)
   }
 
   smart.prediction[[smart.prediction.counter]] <- smart
-  assign(".smart.prediction", smart.prediction, envir = VGAM:::smartpredenv)
+  assign(".smart.prediction", smart.prediction, envir = smartpredenv)
   assign(".smart.prediction.counter", smart.prediction.counter,
-         envir = VGAM:::smartpredenv)
+         envir = smartpredenv)
 }
 
 
 get.smart <- function() {
   # Returns one list component of information
-  smart.prediction <- get(".smart.prediction", envir = VGAM:::smartpredenv)
+  smart.prediction <- get(".smart.prediction", envir = smartpredenv)
   smart.prediction.counter <- get(".smart.prediction.counter",
-                                  envir = VGAM:::smartpredenv)
+                                  envir = smartpredenv)
   smart.prediction.counter <- smart.prediction.counter + 1
   assign(".smart.prediction.counter", smart.prediction.counter,
-         envir = VGAM:::smartpredenv)
+         envir = smartpredenv)
   smart <- smart.prediction[[smart.prediction.counter]]
   smart
 }
@@ -136,13 +136,13 @@ smart.expression <- expression({
 
 
   smart  <- get.smart()
-  assign(".smart.prediction.mode", "neutral", envir = VGAM:::smartpredenv)
+  assign(".smart.prediction.mode", "neutral", envir = smartpredenv)
 
   .smart.match.call <- as.character(smart$match.call)
   smart$match.call <- NULL  # Kill it off for the do.call 
 
   ans.smart <- do.call(.smart.match.call[1], c(list(x=x), smart))
-  assign(".smart.prediction.mode", "read", envir = VGAM:::smartpredenv)
+  assign(".smart.prediction.mode", "read", envir = smartpredenv)
 
   ans.smart
 })
@@ -176,14 +176,12 @@ is.smart <- function(object) {
 
 
 
-library(splines) 
 
 
 
 bs <-
 function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE, 
-    Boundary.knots = range(x)) 
-{
+    Boundary.knots = range(x)) {
     x <- x  # Evaluate x
     if (smart.mode.is("read")) {
         return(eval(smart.expression))
@@ -276,8 +274,8 @@ attr(bs, "smart") <- TRUE
 
 
 ns <-
-function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(x)) 
-{
+  function (x, df = NULL, knots = NULL, intercept = FALSE,
+            Boundary.knots = range(x)) {
     x <- x  # Evaluate x
     if (smart.mode.is("read")) {
         return(eval(smart.expression))
@@ -367,8 +365,7 @@ attr(ns, "smart") <- TRUE
 
 
 poly <-
-function (x, ..., degree = 1, coefs = NULL, raw = FALSE) 
-{
+  function (x, ..., degree = 1, coefs = NULL, raw = FALSE) {
     x <- x  # Evaluate x
     if (!raw && smart.mode.is("read")) {
         smart <- get.smart()
@@ -468,8 +465,7 @@ attr(poly, "smart") <- TRUE
 
 
 scale.default <-
-function (x, center = TRUE, scale = TRUE) 
-{
+  function (x, center = TRUE, scale = TRUE) {
     x <- as.matrix(x)
 
     if (smart.mode.is("read")) {
@@ -518,7 +514,7 @@ attr(scale, "smart") <- TRUE
 
 
 
-"my1" <- function(x, minx=min(x)) {
+"my1" <- function(x, minx = min(x)) {
 
     x <- x   # Evaluate x
 
@@ -536,7 +532,7 @@ attr(my1, "smart") <- TRUE
 
 
 
-"my2" <- function(x, minx=min(x)) {
+"my2" <- function(x, minx = min(x)) {
 
     x <- x   # Evaluate x
 
@@ -554,7 +550,7 @@ attr(my2, "smart") <- TRUE
 
 
 
-"stdze1" <- function(x, center=TRUE, scale=TRUE) {
+"stdze1" <- function(x, center = TRUE, scale = TRUE) {
 
     x <- x  # Evaluate x
 
@@ -579,7 +575,9 @@ attr(my2, "smart") <- TRUE
 }
 attr(stdze1, "smart") <- TRUE
 
-"stdze2" <- function(x, center=TRUE, scale=TRUE) {
+
+
+"stdze2" <- function(x, center = TRUE, scale = TRUE) {
 
     x <- x  # Evaluate x
 
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 9a761e4..74f7684 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -7,11 +7,14 @@
 
 
 summaryvgam <- function(object, dispersion = NULL,
-                        digits = options()$digits-2) {
+                        digits = options()$digits-2,
+                        presid = TRUE) {
+
+
 
   if (length(dispersion) && dispersion == 0 &&
-   length(object@family@summary.dispersion) &&
-   !object@family@summary.dispersion) {
+      length(object@family@summary.dispersion) &&
+      !object@family@summary.dispersion) {
     stop("cannot use the general VGLM formula (based on a residual ",
          "sum of squares) for computing the dispersion parameter")
   }
@@ -19,10 +22,10 @@ summaryvgam <- function(object, dispersion = NULL,
   newobject <- object 
   class(newobject) <- "vglm"
   stuff <- summaryvglm(newobject, dispersion = dispersion)
-  rdf <- stuff@df[2] <- object@df.residual # NA 
+  rdf <- stuff@df[2] <- object@df.residual  # NA 
 
  M <- object@misc$M
-  nrow_X_vlm <- object@misc$nrow_X_vlm
+  nrow.X.vlm <- object@misc$nrow.X.vlm
  rank <- if (is.null(object@qr$rank)) length(object@coefficients) else
          object@qr$rank
 
@@ -31,6 +34,7 @@ summaryvgam <- function(object, dispersion = NULL,
 
 
 
+  # Overwrite some of the stuff with the correct stuff
 
 
  useF <- object@misc$useF
@@ -92,9 +96,11 @@ summaryvgam <- function(object, dispersion = NULL,
  if (is.numeric(stuff@dispersion))
    slot(answer, "dispersion") <- stuff@dispersion
 
-  presid <- residuals(object, type = "pearson")
-  if (length(presid))
-    answer@pearson.resid <- as.matrix(presid)
+  if (presid) {
+    Presid <- residuals(object, type = "pearson")
+    if (length(Presid))
+      answer@pearson.resid <- as.matrix(Presid)
+  }
 
   slot(answer, "anova") <- aod 
 
@@ -113,19 +119,20 @@ show.summary.vgam <- function(x, quote = TRUE, prefix = "",
   cat("\nCall:\n")
  dput(x@call)
 
-  presid <- x@pearson.resid
+  Presid <- x@pearson.resid
  rdf <- x@df[2]
   if (FALSE &&
-     !is.null(presid) && all(!is.na(presid))) {
-    cat("\nPearson Residuals:\n")
+     !is.null(Presid) && all(!is.na(Presid))) {
     if (rdf/M > 5) {
-      rq <-  apply(as.matrix(presid), 2, quantile) # 5 x M
+      rq <-  apply(as.matrix(Presid), 2, quantile)  # 5 x M
       dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
                           x@misc$predictors.names)
+      cat("\nPearson residuals:\n")
       print(t(rq), digits = digits)
     } else
     if (rdf > 0) {
-      print(presid, digits = digits)
+      cat("\nPearson residuals:\n")
+      print(Presid, digits = digits)
     }
   }
 
@@ -169,7 +176,7 @@ show.summary.vgam <- function(x, quote = TRUE, prefix = "",
 
  if (length(x@criterion)) {
    ncrit <- names(x@criterion)
-    for(ii in ncrit)
+    for (ii in ncrit)
       if (ii != "loglikelihood" && ii != "deviance")
        cat(paste(ii, ":", sep = ""), format(x@criterion[[ii]]), "\n")
   }
@@ -207,12 +214,12 @@ show.vanova <- function(x, digits = .Options$digits, ...) {
   if (!is.null(heading))
     cat(heading, sep = "\n")
   attr(x, "heading") <- NULL
-  for(i in 1:length(x)) {
-    xx <- x[[i]]
+  for (ii in 1:length(x)) {
+    xx <- x[[ii]]
     xna <- is.na(xx)
     xx <- format(zapsmall(xx, digits))
     xx[xna] <- ""
-    x[[i]] <- xx
+    x[[ii]] <- xx
   }
   print.data.frame(as.data.frame(x, row.names = rrr))
   invisible(x)
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index 2981e04..fc8ce01 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -21,7 +21,10 @@ yformat <- function(x, digits = options()$digits) {
 
 
 summaryvglm <- function(object, correlation = FALSE,
-                        dispersion = NULL, digits = NULL) {
+                        dispersion = NULL, digits = NULL,
+                        presid = TRUE) {
+
+
 
 
 
@@ -31,8 +34,8 @@ summaryvglm <- function(object, correlation = FALSE,
       dispersion == 0 && 
      length(object@family@summary.dispersion) && 
      !object@family@summary.dispersion) {
-      stop("cannot use the general VGLM formula (based on a residual ",
-           "sum of squares) for computing the dispersion parameter")
+    stop("cannot use the general VGLM formula (based on a residual ",
+         "sum of squares) for computing the dispersion parameter")
   }
 
   stuff <- summaryvlm(as(object, "vlm"),
@@ -50,9 +53,11 @@ summaryvglm <- function(object, correlation = FALSE,
      df = stuff@df,
      sigma = stuff@sigma)
 
-  presid <- resid(object, type = "pearson")
-  if (length(presid))
-    answer@pearson.resid <- as.matrix(presid)
+  if (presid) {
+    Presid <- resid(object, type = "pearson")
+    if (length(Presid))
+      answer@pearson.resid <- as.matrix(Presid)
+  }
 
  slot(answer, "misc") <- stuff@misc  # Replace
 
@@ -92,14 +97,16 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
       length(Presid) &&
       all(!is.na(Presid)) &&
       is.finite(rdf)) {
-    cat("\nPearson Residuals:\n")
+
     if (rdf/M > 5) {
-      rq <-  apply(as.matrix(Presid), 2, quantile) # 5 x M
+      rq <- apply(as.matrix(Presid), 2, quantile)  # 5 x M
       dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
                           x@misc$predictors.names)
+      cat("\nPearson residuals:\n")
       print(t(rq), digits = digits)
     } else
     if (rdf > 0) {
+      cat("\nPearson residuals:\n")
       print(Presid, digits = digits)
     }
   }
@@ -126,7 +133,6 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
       x@misc$estimated.dispersion) {
       prose <- "(Estimated) "
     }  else {
-
      if (is.numeric(x@misc$default.dispersion) &&
          x@dispersion == x@misc$default.dispersion)
         prose <- "(Default) "
@@ -160,7 +166,7 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
 
  if (length(x@criterion)) {
    ncrit <- names(x@criterion)
-    for(ii in ncrit)
+    for (ii in ncrit)
       if (ii != "loglikelihood" && ii != "deviance")
        cat(paste(ii, ":", sep = ""), yformat(x@criterion[[ii]], digits),
             "\n")
@@ -170,13 +176,13 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
   cat("\nNumber of iterations:", format(trunc(x at iter)), "\n")
 
   if (!is.null(correl)) {
-    ncol_X_vlm <- dim(correl)[2]
-    if (ncol_X_vlm > 1) {
+    ncol.X.vlm <- dim(correl)[2]
+    if (ncol.X.vlm > 1) {
       cat("\nCorrelation of Coefficients:\n")
       ll <- lower.tri(correl)
       correl[ll] <- format(round(correl[ll], digits))
       correl[!ll] <- ""
-      print(correl[-1,  -ncol_X_vlm, drop = FALSE], quote = FALSE,
+      print(correl[-1,  -ncol.X.vlm, drop = FALSE], quote = FALSE,
             digits = digits)
     }
   }
@@ -205,6 +211,7 @@ setMethod("show", "summary.vglm",
 
 
 
+
 vcovdefault <- function(object, ...) {
  if (is.null(object@vcov))
     stop("no default")
@@ -223,7 +230,8 @@ function(object, dispersion = NULL, untransform = FALSE) {
   so <- summaryvlm(object, correlation = FALSE,
                    dispersion = dispersion)
   d <- if (any(slotNames(so) == "dispersion") && 
-           is.Numeric(so@dispersion)) so@dispersion else 1
+           is.Numeric(so@dispersion))
+       so@dispersion else 1
  answer <- d * so@cov.unscaled
 
  if (is.logical(OKRC <- object@misc$RegCondOK) && !OKRC)
@@ -257,9 +265,9 @@ function(object, dispersion = NULL, untransform = FALSE) {
 
 
   tvector <- numeric(M)
-  etavector <- predict(object)[1, ] # Contains transformed parameters
-  LINK <- object@misc$link # link.names # This should be a character vector.
-  EARG <- object@misc$earg # This could be a NULL
+  etavector <- predict(object)[1, ]  # Contains transformed parameters
+  LINK <- object@misc$link  # link.names # This should be a character vector.
+  EARG <- object@misc$earg  # This could be a NULL
   if (is.null(EARG))
     EARG <- list(theta = NULL)
   if (!is.list(EARG))
@@ -295,7 +303,7 @@ function(object, dispersion = NULL, untransform = FALSE) {
 
   learg <- length(EARG)
   for (ii in 1:M) {
-    TTheta <- etavector[ii] # Transformed theta
+    TTheta <- etavector[ii]  # Transformed theta
 
     use.earg      <-
       if (llink == 1) EARG[[1]] else EARG[[ii]]
@@ -316,7 +324,7 @@ function(object, dispersion = NULL, untransform = FALSE) {
       stop("link functions handled in the new way now")
 
     }
-  } # of for(ii in 1:M)
+  }  # of for (ii in 1:M)
 
   tvector <- abs(tvector)
   answer <- (cbind(tvector) %*% rbind(tvector)) * answer
@@ -344,5 +352,3 @@ setMethod("vcov", "vglm",
 
 
 
-
-
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 9055726..542f459 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -12,7 +12,8 @@
 
 summaryvlm <-
   function(object, correlation = FALSE, dispersion = NULL,
-           Colnames = c("Estimate", "Std. Error", "z value")) {
+           Colnames = c("Estimate", "Std. Error", "z value"),
+           presid = TRUE) {
                          
 
 
@@ -23,12 +24,15 @@ summaryvlm <-
 
  M <- object@misc$M
  n <- object@misc$n
-  nrow_X_vlm <- object@misc$nrow_X_vlm 
-  ncol_X_vlm <- object@misc$ncol_X_vlm   # May be NULL for CQO objects
+  nrow.X.vlm <- object@misc$nrow.X.vlm 
+  ncol.X.vlm <- object@misc$ncol.X.vlm  # May be NULL for CQO objects
 
  coef <- object@coefficients
   cnames <- names(coef)
-  presid <- residualsvlm(object, type = "pearson") # NULL if pooled.weight
+
+  if (presid) {
+    Presid <- residualsvlm(object, type = "pearson")  # NULL if pooled.weight
+  }
 
   if (any(is.na(coef))) {
     warning(paste("Some NAs in the coefficients---no summary",
@@ -38,48 +42,49 @@ summaryvlm <-
  rdf <- object@df.residual   
 
   if (!length(dispersion)) {
-      if (is.numeric(object@misc$dispersion)) {
-          dispersion <- object@misc$dispersion
-          if (all(dispersion == 0))
-              stop("dispersion shouldn't be zero here!")
-      } else {
-          dispersion <- 1
-          object@misc$estimated.dispersion <- FALSE
-      }
+    if (is.numeric(object@misc$dispersion)) {
+      dispersion <- object@misc$dispersion
+      if (all(dispersion == 0))
+        stop("dispersion shouldn't be zero here!")
+    } else {
+      dispersion <- 1
+      object@misc$estimated.dispersion <- FALSE
+    }
   } else if (dispersion == 0) {
-      dispersion <- if (!length(object@rss)) {
-          stop("object@rss is empty")
+      dispersion <-
+        if (!length(object@res.ss)) {
+          stop("object@res.ss is empty")
       } else {
-          object@rss / object@df.residual
+        object@res.ss / object@df.residual
       }
      object@misc$estimated.dispersion <- TRUE
   } else {
-      if (is.numeric(object@misc$dispersion) &&
-         object@misc$dispersion != dispersion)
-          warning("overriding the value of object@misc$dispersion")
-      object@misc$estimated.dispersion <- FALSE
+    if (is.numeric(object@misc$dispersion) &&
+        object@misc$dispersion != dispersion)
+      warning("overriding the value of object@misc$dispersion")
+    object@misc$estimated.dispersion <- FALSE
   }
-  sigma <- dispersion^0.5     # Can be a vector 
+  sigma <- dispersion^0.5  # Can be a vector 
 
-  if (is.Numeric(ncol_X_vlm)) {
+  if (is.Numeric(ncol.X.vlm)) {
    R <- object@R
 
-    if (ncol_X_vlm < max(dim(R)))
+    if (ncol.X.vlm < max(dim(R)))
       stop("R is rank deficient")
 
-    rinv <- diag(ncol_X_vlm)
+    rinv <- diag(ncol.X.vlm)
     rinv <- backsolve(R, rinv)
-    rowlen <- drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
+    rowlen <- drop(((rinv^2) %*% rep(1, ncol.X.vlm))^0.5)
     covun <- rinv %*% t(rinv)
     dimnames(covun) <- list(cnames, cnames)
   }
   coef <- matrix(rep(coef, 3), ncol = 3)
   dimnames(coef) <- list(cnames, Colnames)
-  if (length(sigma) == 1 && is.Numeric(ncol_X_vlm)) {
-    coef[, 2] <- rowlen %o% sigma      # Fails here when sigma is a vector 
+  if (length(sigma) == 1 && is.Numeric(ncol.X.vlm)) {
+    coef[, 2] <- rowlen %o% sigma  # Fails here when sigma is a vector 
     coef[, 3] <- coef[, 1] / coef[, 2]
   } else {
-    coef[,1] <- coef[,2] <- coef[,3] <- NA
+    coef[, 1] <- coef[, 2] <- coef[, 3] <- NA
   }
   if (correlation) {
     correl <- covun * outer(1 / rowlen, 1 / rowlen)
@@ -96,15 +101,15 @@ summaryvlm <-
       object,
       coef3 = coef, 
       correlation = correl,
-      df = c(ncol_X_vlm, rdf),
+      df = c(ncol.X.vlm, rdf),
       sigma = sigma)
 
-  if (is.Numeric(ncol_X_vlm))
+  if (is.Numeric(ncol.X.vlm))
    answer@cov.unscaled <- covun
  answer@dispersion <- dispersion  # Overwrite this 
 
-  if (length(presid))
-    answer@pearson.resid <- as.matrix(presid)
+  if (length(Presid))
+    answer@pearson.resid <- as.matrix(Presid)
 
 
   answer
@@ -131,18 +136,19 @@ show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
   cat("\nCall:\n")
  dput(x@call)
 
-  presid <- x@pearson.resid
+  Presid <- x@pearson.resid
  rdf <- x@df[2]
-  if (length(presid) && all(!is.na(presid))) {
-    cat("\nPearson residuals:\n")
+  if (length(Presid) && all(!is.na(Presid))) {
     if (rdf/M > 5) {
-      rq <-  apply(as.matrix(presid), 2, quantile) # 5 x M
+      rq <-  apply(as.matrix(Presid), 2, quantile)  # 5 x M
       dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
                           x@misc$predictors.names)
+      cat("\nPearson residuals:\n")
       print(t(rq), digits = digits)
     } else
     if (rdf > 0) {
-      print(presid, digits = digits)
+      cat("\nPearson residuals:\n")
+      print(Presid, digits = digits)
     }
   }
 
@@ -166,24 +172,23 @@ show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
   }
 
 
-  if (!is.null(x@rss))
-    cat("\nResidual Sum of Squares:", format(round(x@rss, digits)),
+  if (!is.null(x@res.ss))
+    cat("\nResidual Sum of Squares:", format(round(x@res.ss, digits)),
         "on", round(rdf, digits), "degrees of freedom\n")
 
 
   if (length(correl)) {
-    ncol_X_vlm <- dim(correl)[2]
-    if (ncol_X_vlm > 1) {
+    ncol.X.vlm <- dim(correl)[2]
+    if (ncol.X.vlm > 1) {
       cat("\nCorrelation of Coefficients:\n")
       ll <- lower.tri(correl)
       correl[ll] <- format(round(correl[ll], digits))
       correl[!ll] <- ""
-      print(correl[-1, -ncol_X_vlm, drop = FALSE],
+      print(correl[-1, -ncol.X.vlm, drop = FALSE],
             quote = FALSE, digits = digits)
     }
   }
 
-
   invisible(NULL)
 }
 
diff --git a/R/uqo.R b/R/uqo.R
deleted file mode 100644
index 05e3ea5..0000000
--- a/R/uqo.R
+++ /dev/null
@@ -1,916 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-
-
-
-
-uqo.control = function(Rank = 1,
-          Bestof = if (length(lvstart) && !jitter.sitescores) 1 else 10,
-          CA1 = FALSE,
-          Crow1positive = TRUE,
-          epsilon = 1.0e-07,
-          EqualTolerances = ITolerances,
-          Etamat.colmax = 10,
-          GradientFunction = TRUE,
-          Hstep = 0.001,
-          isdlv = rep(c(2, 1, rep(0.5, len = Rank)), len = Rank),
-          ITolerances = FALSE,
-          lvstart = NULL,
-          jitter.sitescores = FALSE,
-          maxitl = 40,
-          Maxit.optim = 250,
-          MUXfactor = rep(3, length=Rank),
-          optim.maxit = 20,
-          nRmax = 250,
-          SD.sitescores = 1.0,
-          SmallNo = 5.0e-13,
-          trace = TRUE,
-          Use.Init.Poisson.QO = TRUE,
-          ...)
-{
-
-    Kinit = 0.001
-    if (!is.Numeric(MUXfactor, positive = TRUE))
-      stop("bad input for \"MUXfactor\"")
-    if (any(MUXfactor < 1 |
-            MUXfactor > 10))
-      stop("MUXfactor values must lie between 1 and 10")
-    if (!is.Numeric(isdlv, positive = TRUE))
-      stop("bad input for \"isdlv\"")
-    if (any(isdlv < 0.2 | isdlv > 10))
-      stop("isdlv values must lie between 0.2 and 10")
-    if (length(isdlv) > 1 && any(diff(isdlv) > 0))
-      stop("successive isdlv values must not increase")
-
-    if (!is.Numeric(Rank, allowable.length = 1,
-                    integer.valued = TRUE, positive = TRUE))
-      stop("Bad input for \"Rank\"")
-    if (!is.Numeric(Bestof, allowable.length = 1,
-                    integer.valued = TRUE, positive = TRUE))
-      stop("Bad input for \"Bestof\"")
-    if (!is.Numeric(Etamat.colmax, positive = TRUE,
-                    allowable.length = 1) ||
-        Etamat.colmax < Rank)
-      stop("bad input for \"Etamat.colmax\"")
-    if (!is.Numeric(maxitl, allowable.length = 1,
-                    integer.valued = TRUE, positive = TRUE))
-      stop("Bad input for \"maxitl\"")
-    if (!is.Numeric(Maxit.optim, integer.valued = TRUE,
-                    positive = TRUE, allowable.length = 1))
-      stop("Bad input for \"Maxit.optim\"")
-    if (!is.Numeric(optim.maxit, allowable.length = 1,
-                    integer.valued = TRUE, positive = TRUE))
-      stop("Bad input for \"optim.maxit\"")
-    if (!is.Numeric(nRmax, allowable.length = 1,
-                    integer.valued = TRUE, positive = TRUE))
-      stop("Bad input for \"nRmax\"")
-    if (!is.Numeric(Hstep, allowable.length = 1, positive = TRUE))
-      stop("Bad input for \"Hstep\"")
-    if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE))
-      stop("Bad input for \"epsilon\"")
-    if (!is.Numeric(SmallNo, allowable.length = 1, positive = TRUE))
-      stop("Bad input for \"SmallNo\"")
-
-    if ((SmallNo < .Machine$double.eps) || (SmallNo > .0001))
-      stop("SmallNo is out of range") 
-
-    if (Use.Init.Poisson.QO && CA1)
-      stop("cannot have both 'Use.Init.Poisson.QO = TRUE' and 'CA1 = TRUE'")
-
-    ans <- list(
-           Bestof = Bestof,
-           CA1 = CA1,
-           ConstrainedQO = FALSE, # A constant, not a control parameter
-           Corner = FALSE, # Needed for valt.1iter()
-           Crow1positive=as.logical(rep(Crow1positive, len = Rank)),
-           epsilon = epsilon,
-           EqualTolerances = as.logical(EqualTolerances)[1],
-           Etamat.colmax = Etamat.colmax,
-           FastAlgorithm = TRUE, # A constant, not a control parameter
-           GradientFunction = GradientFunction,
-           Hstep = Hstep,
-           isdlv = rep(isdlv, len = Rank),
-           ITolerances = as.logical(ITolerances)[1],
-           lvstart = lvstart,
-           jitter.sitescores = as.logical(jitter.sitescores),
-           Kinit = Kinit,
-           maxitl= maxitl,
-           Maxit.optim = Maxit.optim,
-           MUXfactor = rep(MUXfactor, length=Rank),
-           nRmax = nRmax,
-           optim.maxit = optim.maxit,
-           OptimizeWrtC = FALSE,
-           Quadratic = TRUE,
-           Rank = Rank,
-           SD.sitescores = SD.sitescores,
-           SmallNo = SmallNo,
-           trace = as.logical(trace),
-           Use.Init.Poisson.QO=as.logical(Use.Init.Poisson.QO)[1])
-    ans
-}
-
-
-
-
-uqo  <- function(formula,
-                 family, data = list(), 
-                 weights = NULL, subset = NULL, na.action = na.fail,
-                 etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control = uqo.control(...), 
-                 offset = NULL, 
-                 method = "uqo.fit",
-                 model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                 contrasts = NULL, 
-                 constraints = NULL,
-                 extra = NULL, 
-                 qr.arg = FALSE, ...) {
-    dataname <- as.character(substitute(data))  # "list" if no data=
-    function.name <- "uqo"
-
-    ocall <- match.call()
-
-    mt <- terms(formula, data = data)
-    if (missing(data)) 
-        data <- environment(formula)
-
-    mf <- match.call(expand.dots = FALSE)
-    mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
-        mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
-    mf$coefstart <- mf$etastart <- mf$... <- NULL
-    mf$drop.unused.levels <- TRUE 
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame()) 
-    if (method == "model.frame")
-        return(mf)
-    na.act <- attr(mf, "na.action")
-
-    xvars <- as.character(attr(mt, "variables"))[-1]
-    if ((yvar <- attr(mt, "response")) > 0)
-        xvars <- xvars[-yvar]
-    xlev <- if (length(xvars) > 0) {
-        xlev <- lapply(mf[xvars], levels)
-        xlev[!sapply(xlev, is.null)]
-    }
-
-    y <- model.response(mf, "numeric") # model.extract(mf, "response")
-    x <- model.matrix(mt, mf, contrasts)
-    attr(x, "assign") <- attrassigndefault(x, mt)
-    offset <- model.offset(mf)
-    if (is.null(offset)) 
-        offset <- 0 # yyy ???
-    w <- model.weights(mf)
-    if (!length(w))
-        w <- rep(1, nrow(mf))
-    else if (ncol(as.matrix(w))== 1 && any(w < 0))
-        stop("negative weights not allowed")
-
-    if (is.character(family))
-        family <- get(family)
-    if (is.function(family))
-        family <- family()
-    if (!inherits(family, "vglmff")) {
-        stop("'family = ", family, "' is not a VGAM family function")
-    }
-
-    if (!is.null(family@first))
-        eval(family@first)
-
-    uqo.fitter <- get(method)
-    if (ncol(x) != 1 || dimnames(x)[[2]] != "(Intercept)")
-        stop("uqo()'s formula must have ~ 1 on the RHS") 
-
-    if (control$FastAlgorithm &&
-       length(as.list(family@deviance)) <= 1)
-        stop("The fast algorithm requires the family ",
-             "function to have a deviance slot")
-    deviance.Bestof <- rep(as.numeric(NA), len = control$Bestof)
-    for(tries in 1:control$Bestof) {
-         if (control$trace && (control$Bestof>1))
-         cat(paste("\n========================= Fitting model", tries,
-                     "=========================\n"))
-         it <- uqo.fitter(x = x, y = y, w = w, offset = offset,
-                   etastart = etastart, mustart = mustart, coefstart = coefstart,
-                   family = family, control = control,
-                   constraints = constraints, extra = extra,
-                   qr.arg = qr.arg, Terms = mt, function.name = function.name,
-                   ca1 = control$CA1 && tries == 1, ...)
-        deviance.Bestof[tries] = it$crit.list$deviance
-        if (tries == 1 ||
-            min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
-          fit <- it
-    }
-    fit$misc$deviance.Bestof <- deviance.Bestof
-    fit$misc$criterion <- "deviance"  # Needed for calibrate; 21/1/05
-
-    fit$misc$dataname <- dataname
-
-    answer <-
-    new("uqo",
-      "call"         = ocall,
-      "coefficients" = fit$coefficients,
-      "constraints"  = fit$constraints,
-      "control"      = fit$control,
-      "criterion"    = fit$crit.list,
-      "lv"           = fit$sitescores,
-      "family"       = fit$family,
-      "fitted.values"= as.matrix(fit$fitted.values),
-      "iter"         = fit$iter,
-      "misc"         = fit$misc,
-      "model"        = if (model) mf else data.frame(),
-      "na.action"    = if (length(na.act)) list(na.act) else list(),
-      "predictors"  = as.matrix(fit$predictors))
-
-    answer@control$min.criterion = TRUE # Needed for calibrate; 21/1/05
-
-    if (length(fit$weights))
-        slot(answer, "weights") <- as.matrix(fit$weights)
-    if (x.arg)
-        slot(answer, "x") <- x
-    if (y.arg)
-        slot(answer, "y") <- as.matrix(fit$y)
-    slot(answer, "extra") <- if (length(fit$extra)) {
-        if (is.list(fit$extra)) fit$extra else {
-            warning("'extra' is not a list, therefore ",
-                    "placing 'extra' into a list")
-            list(fit$extra)
-        }
-    } else list() # R-1.5.0
-    if (length(fit$prior.weights))
-        slot(answer, "prior.weights") <- as.matrix(fit$prior.weights)
-
-    answer
-}
-
-
-calluqof <- function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
-                    Control,
-                    n, M, maxMr5, othint, othdbl, bnumat,
-                    Hstep = NA, alldump) {
-    control <- Control
-    Rank <- control$Rank
-    itol <- othint[14]
-    inited <- if (is.R()) {
-        as.numeric(existsinVGAMenv("etamat", prefix = ".VGAM.UQO."))
-    } else 0
-    othint[5] <- inited  # Replacement
-    usethiseta <- if (inited == 1)
-        getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
-    usethisbeta <- double(othint[13])
-    pstar <- othint[3]
-    nstar <- if (nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
-    NOS <- ifelse(modelno == 3 || modelno==5, M/2, M)
-
-    sitescores <- matrix(sitescores, ncol=Rank)
-    sitescores <- scale(sitescores, center = TRUE, scale = FALSE)
-    if (itol) {
-        numat <- matrix(sitescores, ncol=Rank)
-        if (Rank > 1) {
-            evnu <- eigen(var(numat))
-            numat <- numat %*% evnu$vector
-        }
-
-
-
-        sdnumat <- apply(numat, 2, sd)
-        for(lookat in 1:Rank)
-            if (sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
-                muxer = control$isdlv[lookat] *
-                        control$MUXfactor[lookat] / sdnumat[lookat]
-                numat[,lookat] <- numat[,lookat] * muxer
-                if (control$trace) {
-                }
-            }
-    } else {
-        numat <- matrix(sitescores, ncol=Rank)
-        evnu <- eigen(var(numat))
-        temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
-                evnu$vector %*% evnu$value^(-0.5)
-        numat <- numat %*% temp7
-    }
-
-    ans1 <- 
-    dotFortran(name = if (nice31) "cqo1f" else "cqo2f",
-       numat=as.double(numat), as.double(ymat), 
-             as.double(xmat),
-       as.double(wvec), etamat=as.double(usethiseta),
-           moff=double(if(itol) n else 1),
-           fv=double(NOS*n), z=double(n*M), wz=double(n*M),
-           U=double(M*n), bnumat=as.double(bnumat),
-       qr=double(nstar*pstar), qraux=double(pstar), qpivot=integer(pstar),
-       as.integer(n), as.integer(M), NOS=as.integer(NOS),
-       as.integer(nstar), dimu=as.integer(M),
-           errcode=integer(1), othint=as.integer(othint),
-           rowind=integer(maxMr5), colind=integer(maxMr5),
-       deviance=double(1), beta=as.double(usethisbeta),
-       twk=double(if(nice31) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
-           othdbl=as.double(othdbl))
-
-       if (ans1$errcode == 0) {
-            assign2VGAMenv(c("etamat","numat"), ans1, prefix = ".VGAM.UQO.")
-            if (alldump) {
-                ans1$fv = matrix(ans1$fv,n,M,byrow = TRUE,dimnames=dimnames(ymat))
-                assign2VGAMenv(c("beta","fv"), ans1, prefix = ".VGAM.UQO.")
-                assign2VGAMenv(c("z","U"), ans1, prefix = ".VGAM.UQO.")
-            }
-       } else {
-           cat("warning in calluqof: error code  = ", ans1$errcode, "\n")
-           rmfromVGAMenv(c("etamat"), prefix = ".VGAM.UQO.")
-       }
-    ans1$deviance
-}
-
-callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
-                     Control,
-                     n, M, maxMr5, othint, othdbl, bnumat, Hstep, alldump) {
-    control = Control
-    itol = othint[14]
-    inited = if (is.R()) {
-        if (exists(".VGAM.UQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
-    } else 0 # 0 means fortran initializes the etamat
-    othint[5] = inited  # Replacement
-    usethiseta = if (inited == 1)
-        getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
-    usethisbeta = double(othint[13])
-    pstar = othint[3]
-    nstar = if (nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
-    NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
-    Rank = othint[1]
-
-    sitescores = matrix(sitescores, ncol=Rank)
-    sitescores = scale(sitescores, center = TRUE, scale = FALSE)
-    if (itol) {
-        numat = matrix(sitescores, ncol=Rank)
-        if (Rank > 1) {
-            evnu = eigen(var(numat))
-            numat = numat %*% evnu$vector
-        }
-
-        sdnumat = apply(numat, 2, sd)
-        for(lookat in 1:Rank)
-          if (sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
-                muxer = control$isdlv[lookat] *
-                        control$MUXfactor[lookat] / sdnumat[lookat]
-                numat[,lookat] = numat[,lookat] * muxer
-                if (control$trace) {
-                }
-          }
-    } else {
-        numat = matrix(sitescores, ncol=Rank)
-        evnu = eigen(var(numat))
-        temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
-                evnu$vector %*% evnu$value^(-0.5)
-        numat = numat %*% temp7
-    }
-
-
-
-    ans1 <- 
-    dotFortran(name = "duqof", numat=as.double(numat), as.double(ymat),
-           as.double(xmat),
-           as.double(wvec), etamat=as.double(usethiseta),
-           moff=double(if(itol) n else 1),
-           fv=double(NOS*n), z=double(n*M), wz=double(n*M),
-           U=double(M*n), bnumat=as.double(bnumat),
-       qr=double(nstar*pstar), qraux=double(pstar), qpivot=integer(pstar),
-       as.integer(n), as.integer(M), NOS=as.integer(M), 
-       as.integer(nstar), dimu=as.integer(M),
-           errcode=integer(1), othint=as.integer(othint),
-           rowind=integer(maxMr5), colind=integer(maxMr5),
-       deviance=double(1), beta=as.double(usethisbeta),
-       twk=double(if(nice31) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
-           othdbl=as.double(othdbl),
-       onumat=as.double(numat),
-       deriv=double(n*Rank), hstep=as.double(Hstep),
-       betasave=usethisbeta)
-
-       if (ans1$errcode == 0) {
-           assign2VGAMenv(c("etamat"), ans1, prefix = ".VGAM.UQO.")
-       } else {
-           cat("warning in callduqof: error code  = ", ans1$errcode, "\n")
-           rmfromVGAMenv(c("etamat"), prefix = ".VGAM.UQO.")
-       }
-    ans1$deriv
-}
-
-
-
-
-uqo.fit <- function(x, y, w = rep(1, len = nrow(x)),
-    etastart = NULL, mustart = NULL, coefstart = NULL,
-    offset = 0, family, control = uqo.control(...),
-    qr.arg = FALSE, constraints = NULL, extra = NULL,
-    Terms=Terms, function.name = "uqo", ca1 = TRUE, ...)
-{
-    if (!all(offset == 0)) stop("cqo.fit() cannot handle offsets")
-    nonparametric <- FALSE
-    epsilon <- control$epsilon
-    optim.maxit <- control$optim.maxit
-    save.weight <- control$save.weight
-    trace <- control$trace
-    orig.stepsize <- control$stepsize
-
-
-    n <- dim(x)[1]
-
-
-    copy_X_vlm <- FALSE    # May be overwritten in @initialize
-    stepsize <- orig.stepsize
-    old.coeffs <- coefstart
-
-    intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
-    y.names <- predictors.names <- NULL    # May be overwritten in @initialize
-
-    n.save <- n 
-
-
-    Rank <- control$Rank
-    rrcontrol <- control  #
-
-    if (length(family@initialize))
-        eval(family@initialize)       # Initialize mu and M (and optionally w)
-    n <- n.save 
-
-
-    eval(rrr.init.expression)
-
-    if (length(etastart)) {
-        eta <- etastart
-        mu <- if (length(mustart)) mustart else family@linkinv(eta, extra)
-    } else {
-        if (length(mustart))
-            mu <- mustart
-        eta <- family@link(mu, extra)
-    }
-
-    M <- if (is.matrix(eta)) ncol(eta) else 1
-
-    if (is.character(rrcontrol$Dzero)) {
-        index = match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]]) 
-        if (any(is.na(index)))
-            stop("Dzero argument didn't fully match y-names")
-        if (length(index) == M)
-            stop("all linear predictors are linear in the latent variable(s)")
-        rrcontrol$Dzero = control$Dzero = index
-    }
-
-
-
-    if (length(family@constraints))
-        eval(family@constraints)
-
-
-    colx1.index = 1:ncol(x)
-    names(colx1.index) = names.colx1.index = dimnames(x)[[2]]
-
-    rrcontrol$colx1.index=control$colx1.index=colx1.index #Save it on the object
-    colx2.index = NULL
-    p1 = length(colx1.index); p2 = 0
-    rrcontrol$colx2.index=control$colx2.index=colx2.index #Save it on the object
-    rrcontrol$Quadratic = control$Quadratic = TRUE
-
-
-
-    sitescores <- if (length(rrcontrol$lvstart)) {
-        matrix(rrcontrol$lvstart, n, Rank)
-    } else {
-        if (rrcontrol$Use.Init.Poisson) {
-            .Init.Poisson.QO(ymat=as.matrix(y),
-                             X1=x, X2 = NULL,
-                             Rank=rrcontrol$Rank, trace=rrcontrol$trace,
-                             max.ncol.etamat = rrcontrol$Etamat.colmax,
-                             Crow1positive=rrcontrol$Crow1positive,
-                             isdlv=rrcontrol$isdlv,
-               constwt= any(family@vfamily[1] ==
-                            c("negbinomial","gamma2","gaussianff")),
-               takelog= any(family@vfamily[1] != c("gaussianff")))
-        } else if (ca1) {
-            if (Rank == 1) .VGAM.UQO.CA(y)[,1:Rank]  else {
-                temp = .VGAM.UQO.CA(y)[,1:Rank]
-                temp %*% solve(chol(var(temp)))
-            }
-        } else {
-            matrix((runif(n*Rank)-0.5)*rrcontrol$SD.sitescores,n,Rank)
-        }
-    }
-    if (rrcontrol$jitter.sitescores)
-        sitescores <- jitteruqo(sitescores)
-
-
-    Blist <- process.constraints(constraints, x, M)
-    ncolBlist <- unlist(lapply(Blist, ncol))
-    dimB <- sum(ncolBlist)
-
-
-    modelno = switch(family@vfamily[1], "poissonff"=2,
-              "binomialff" = 1, "quasipoissonff" = 0, "quasibinomialff" = 0,
-              "negbinomial" = 0,
-              "gamma2"=5,
-              0)  # stop("can't fit this model using fast algorithm")
-    if (!modelno)
-      stop("the family function does not work with uqo()")
-    if (modelno == 1)
-      modelno <- get("modelno", envir = VGAM:::VGAMenv)
-    rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.UQO.")
-
-
-    cqofastok <-
-      exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv) &&
-         get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
-
-
-    if (!cqofastok)
-      stop("can't fit this model using fast algorithm")
-
-    nice31 <- (!control$EqualTol ||
-                control$ITolerances) &&
-                control$Quadratic &&
-              all(trivial.constraints(Blist))
-
-
-    X_vlm_1save <- if (nice31) {
-        NULL
-    } else {
-        lm2vlm.model.matrix(x, Blist, xij=control$xij)
-    }
-
-    NOS = ifelse(modelno==3 || modelno==5, M/2, M)
-    p1star = if (nice31) p1*ifelse(modelno==3 || modelno==5,2,1) else ncol(X_vlm_1save)
-    p2star = if (nice31)
-      ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
-      (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
-
-    pstar = p1star + p2star
-    nstar = if (nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
-    maxMr = max(M, Rank)
-    maxMr5 = maxMr*(maxMr+1)/2
-    lenbeta = pstar * ifelse(nice31, NOS, 1)
-
-    othint = c(Rank, control$EqualTol, pstar, dimw = 1, inited=290, # other ints
-               modelno, maxitl=control$maxitl, actnits = 0, twice = 0, p1star,
-               p2star, nice31, lenbeta, control$ITolerances, control$trace,
-               p1, p2, control$imethod)
-    othdbl = c(small=control$SmallNo, fseps=control$epsilon,
-               .Machine$double.eps,
-               kinit=rep(control$Kinit, len = NOS),
-               shapeinit=rep(control$shapeinit, len = NOS))
-    bnumat = if (nice31) matrix(0,nstar,pstar) else
-             cbind(matrix(0,nstar,p2star), X_vlm_1save)
-
-    rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance", "fv",
-                         "cmatrix", "ocmatrix"), prefix = ".VGAM.UQO.")
-
-
-    for(iter in 1:optim.maxit) {
-        if (control$trace)
-            cat("\nIteration", iter, "\n")
-        conjgrad <- optim(par=sitescores, fn=calluqof, 
-                     gr = if (control$GradientFunction) callduqof else NULL,
-                     method = if (n*Rank>control$nRmax) "CG" else "BFGS",
-                     control=list(fnscale = 1, trace=as.integer(control$trace),
-                                  maxit=control$Maxit.optim),
-                     etamat=eta, ymat=y, wvec=w, modelno=modelno,
-                     Control=rrcontrol,
-                     nice31=nice31, xmat = x,
-                     n=n, M=M, maxMr5=maxMr5, othint=othint, othdbl=othdbl,
-                     bnumat=bnumat, Hstep=control$Hstep, alldump = FALSE)
-
-        sitescores = getfromVGAMenv("numat", prefix = ".VGAM.UQO.")
-        dim(sitescores) = c(n, Rank)
-        sitescores = scale(sitescores, center = TRUE, scale = FALSE)
-        sitescores = crow1C(sitescores, rrcontrol$Crow1positive)
-        dimnames(sitescores) = list(dimnames(y)[[1]],
-                                    if (Rank == 1) "lv" else
-                                    paste("lv", 1:Rank, sep = ""))
-
-        if (converged <- (conjgrad$convergence == 0)) break
-    }
-
-    if (!converged && optim.maxit>1)
-        warning("convergence not obtained")
-
-
-    temp9 = 
-    calluqof(sitescores, etamat=eta, ymat=y, wvec=w, modelno=modelno,
-             nice31=nice31, xmat = x,
-             Control=rrcontrol,
-             n=n, M=M, maxMr5=maxMr5, othint=othint, othdbl=othdbl,
-             bnumat=bnumat, Hstep=NA, alldump = TRUE)
-
-    coefs = getfromVGAMenv("beta", prefix = ".VGAM.UQO.")
-    VGAM.fv = getfromVGAMenv("fv", prefix = ".VGAM.UQO.")
-    etamat = getfromVGAMenv("etamat", prefix = ".VGAM.UQO.")
-    dim(etamat) = c(M,n)
-    etamat = t(etamat)
-    wresids = getfromVGAMenv("z", prefix = ".VGAM.UQO.") - etamat
-    dim(wresids) = c(n,M)
-
-
-    if (!intercept.only)
-        stop("can only handle intercept.only == TRUE currently")
-    if (nice31) {
-        coefs = c(t(matrix(coefs, ncol=M))) # Get into right order
-        coefs = matrix(coefs, nrow=M)
-        Amat = coefs[,1:Rank,drop = FALSE]
-        if (rrcontrol$IToleran) {
-            B1 = coefs[,-(1:Rank),drop = FALSE]
-            Dmat = matrix(0, M, Rank*(Rank+1)/2)
-            Dmat[,1:Rank] = -0.5
-        } else {
-            Dmat = coefs[,(Rank+1):(Rank + Rank*(Rank+1)/2),drop = FALSE]
-            B1 = coefs[,(1+(Rank + Rank*(Rank+1)/2)):ncol(coefs),drop = FALSE]
-        }
-    } else {
-        Amat = t(matrix(coefs[1:(Rank*M)], Rank, M))
-        cptr1 = (Rank*M)
-        Dmat = coefs[(cptr1+1):(cptr1+Rank*(Rank+1)/2)]
-        Dmat = matrix(Dmat, M, Rank*(Rank+1)/2, byrow = TRUE)
-        cptr1 = (Rank*M) + Rank*(Rank+1)/2
-        B1 = coefs[(cptr1+1):length(coefs)]
-    }
-
-    lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "") 
-    lp.names = predictors.names
-    if (!length(lp.names)) lp.names = NULL
-    extra$Amat = matrix(Amat, M, Rank, dimnames = list(lp.names, lv.names))
-    extra$B1   = matrix(B1, ncol=M, dimnames = 
-                        list(names.colx1.index, predictors.names))
-    extra$Dmat = matrix(Dmat, M, Rank*(Rank+1)/2)
-    extra$Cmat = NULL  # This is UQO!!
-
-    VGAM.etamat = getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") 
-    VGAM.etamat = matrix(VGAM.etamat, n, M, byrow = TRUE,
-                         dimnames = list(dimnames(y)[[1]], predictors.names))
-
-    coefficients = c(coefs) # Make a vector because of class "numeric"
-
-    rmfromVGAMenv(c("etamat", "beta", "fv"), prefix = ".VGAM.UQO.")
-
-    if (length(family@fini))
-        eval(family@fini)
-
-    misc <- list(function.name = function.name, 
-        intercept.only=intercept.only,
-        predictors.names = predictors.names,
-        modelno = modelno,
-        M = M, n = n,
-        nstar = nstar, nice31 = nice31,
-        p = ncol(x),
-        pstar = pstar, p1star = p1star, p2star = p2star,
-        ynames = dimnames(y)[[2]])
-
-    crit.list <- list(deviance=conjgrad$value)
-
-
-    structure(c(list(
-        coefficients = coefficients,
-        constraints = Blist,
-        sitescores = sitescores,
-        crit.list = crit.list,
-        control=control,
-        extra=extra,
-        family=family,
-        fitted.values=VGAM.fv,
-        iter=iter,
-        misc=misc,
-        predictors=VGAM.etamat,
-        prior.weights = w,
-        x=x,
-        y=y)),
-        vclass=family@vfamily)
-}
-
-
-
-
-show.uqo <- function(object)
-{
-  if (!is.null(cl <- object@call)) {
-    cat("Call:\n")
-    dput(cl)
-  }
-
-  cat("\n")
-  cat(object@misc$n, "sites and", object@misc$M, "responses/species\n")
-  cat("Rank", object@control$Rank)
-  cat(",", ifelse(object@control$EqualToler, "equal-tolerances", 
-      "unequal-tolerances"), "\n")
-
-  if (length(deviance(object)))
-      cat("\nResidual deviance:", format(deviance(object)), "\n")
-
-  invisible(object)
-  NULL
-}
-
-
-
-
-
-
-    setMethod("show",  "uqo", function(object)  show.uqo(object))
-
-
-
-
-
-
-deviance.uqo <- function(object, ...)
-    object@criterion$deviance
-
-setMethod("deviance", "uqo", function(object, ...)
-           deviance.uqo(object, ...))
-
-
-setMethod("coefficients", "uqo", function(object, ...)
-          Coef.qrrvglm(object, ...))
-setMethod("coef", "uqo", function(object, ...)
-          Coef.qrrvglm(object, ...))
-setMethod("Coef", "uqo", function(object, ...)
-          Coef.qrrvglm(object, ...))
-
-
-
-
-
-
-
-setMethod("show", "Coef.uqo",
-          function(object)
-            show.Coef.qrrvglm(object, C = FALSE))
-
-
-
-
-residualsuqo  <- function(object,
-              type = c("deviance", "pearson", "working", "response"),
-              matrix.arg= TRUE) {
-
-    if (mode(type) != "character" && mode(type) != "name")
-        type = as.character(substitute(type))
-    type = match.arg(type,
-                     c("deviance", "pearson", "working", "response"))[1]
-
-    switch(type,
-        response = object@y - fitted(object),
-        stop("this type of residual hasn't been implemented yet")
-    )
-}
-
-
-setMethod("resid", "uqo", function(object, ...) 
-          residualsuqo(object, ...))
-setMethod("residuals", "uqo", function(object, ...)
-          residualsuqo(object, ...))
-
-
-fitted.values.uqo  <- function(object, ...)
-    object@fitted.values
-
-
-setMethod("fitted", "uqo", function(object, ...) 
-          fitted.values.uqo(object, ...))
-setMethod("fitted.values", "uqo", function(object, ...)
-          fitted.values.uqo(object, ...))
-
-
-
-predict.uqo  <- function(object, newdata = NULL, ...) {
-    if (length(newdata) > 0)
-        stop("can't handle newdata argument yet")
-    object@predictors
-}
-
-setMethod("predict", "uqo", function(object, ...) 
-          predict.uqo(object, ...))
-
-
-setMethod("persp", "uqo", function(x, ...) 
-          perspqrrvglm(x, ...))
-
-setMethod("trplot", "uqo", function(object, ...) 
-          trplot.qrrvglm(object, check.ok = FALSE, ...))
-
-
-setMethod("plot", "uqo", function(x, y, ...) 
-         invisible(plotqrrvglm(object=x, ...)))
-
-
-setMethod("lvplot", "uqo", function(object, ...) 
-         invisible(lvplot.qrrvglm(object, C = FALSE, check.ok = FALSE, ...)))
-
-
-
-.VGAM.UQO.CA = function(Y) {
-    Y = as.matrix(Y) / sum(Y)
-    rowsum = c(Y %*% rep(1, len = ncol(Y)))
-    colsum = c(t(Y) %*% rep(1, len = nrow(Y)))
-    rc = outer(rowsum, colsum)
-    Ybar = (Y - rc) / sqrt(rc)
-    Q = qr(Ybar)
-    if (Q$rank > 0) {
-        temp = svd(Ybar)
-        colnames(temp$u) = paste("CA", 1:length(temp$d), sep = "")
-        rownames(temp$u) = dimnames(Y)[[1]]
-        sweep(as.matrix(temp$u[,1:Q$rank, drop = FALSE]),
-            1, 1/sqrt(rowsum), "*")
-    } else stop("Null rank")
-}
-
-
-
-if (FALSE) {
-    scores.uqo <- function (x, type = c("sites", "species"), ...) {
-        if (mode(type) != "character" && mode(type) != "name")
-            type = as.character(substitute(type))
-        type = match.arg(type, c("sites", "species"))[1]
-    
-        switch(type,
-            sites = if (any(slotNames(x) == "lv")) x@lv else Coef(x)@lv,
-            species = if (any(slotNames(x) == "Optimum")) x@Optimum else
-                      Coef(x)@Optimum
-        )
-    }
-
-    setMethod("scores", "uqo", function(x, ...) scores.uqo(x, ...))
-}
-
-
-jitteruqo = function(mat) {
-    mat * ifelse(runif(length(mat)) < 0.5, -1, 1)
-}
-
-
-setMethod("Opt", "uqo", function(object, ...) Opt.qrrvglm(object, ...))
-setMethod("Max", "uqo", function(object, ...) Max.qrrvglm(object, ...))
-setMethod("lv",  "uqo", function(object, ...) latvar.qrrvglm(object, ...))
-
-
-
-if (!isGeneric("calibrate"))
-    setGeneric("calibrate", function(object, ...) standardGeneric("calibrate"))
-setMethod("calibrate", "uqo", function(object, ...)
-          calibrate.qrrvglm(object, ...))
-
-
-
-summary.uqo = function(object, ...) {
-    answer = Coef(object, ...)
-    class(answer) = "summary.uqo"
-    answer@call = object@call
-    answer@misc = object@misc
-    answer
-}
-
-show.summary.uqo = function(x, ...) {
-
-    cat("\nCall:\n")
-    dput(x@call)
-
-    show.Coef.qrrvglm(x, ...)
-
-    cat("\nNumber of responses/species: ", x at NOS, "\n")
-
-    if (length(x at misc$dispersion) == 1) 
-    cat("\nDispersion parameter(s): ", x at misc$dispersion, "\n")
-    invisible(x)
-}
-
-setClass("summary.uqo", representation("Coef.uqo",
-         "misc" = "list",
-         "call" = "call"))
-
-setMethod("summary", "uqo", function(object, ...)
-    summary.uqo(object, ...))
-
-
-
-
-
-setMethod("show", "summary.uqo",
-          function(object)
-          show.summary.uqo(object))
-
-
-
-
-Tol.uqo = function(object, ...) {
-    Coef(object, ...)@Tolerance
-}
-
-Tol.Coef.uqo = function(object, ...) {
-    if (length(list(...))) warning("Too late! Ignoring the extra arguments")
-    Coef(object, ...)@Tolerance
-}
-
-if (!isGeneric("Tol"))
-    setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
-setMethod("Tol", "uqo", function(object, ...) Tol.uqo(object, ...))
-setMethod("Tol", "Coef.uqo", function(object, ...) Tol.Coef.uqo(object, ...))
-
-
-
-
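
Editor's note: the removed file seeded the unconstrained ordination with correspondence-analysis site scores (.VGAM.UQO.CA above): the count matrix is converted to proportions, standardized residuals from the independence model are decomposed by SVD, and the left singular vectors are rescaled by the row masses. A standalone sketch of that computation on a made-up count matrix (assumption: no zero row or column sums):

    set.seed(123)
    Y  <- matrix(rpois(30, lambda = 5), nrow = 6, ncol = 5)  # sites x species counts
    P  <- Y / sum(Y)                      # proportions
    r  <- rowSums(P); k <- colSums(P)     # row and column masses
    E  <- outer(r, k)                     # expected proportions under independence
    S  <- (P - E) / sqrt(E)               # standardized residuals
    sv <- svd(S)
    site.scores <- sweep(sv$u, 1, 1 / sqrt(r), "*")  # CA row (site) scores
    round(site.scores[, 1:2], 3)          # first two CA axes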
-
diff --git a/R/vgam.R b/R/vgam.R
index debd2b0..de66464 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -9,21 +9,18 @@
 
 
 vgam <- function(formula, 
-                 family, 
-                 data = list(), 
-                 weights = NULL,
-                 subset = NULL,
-                 na.action=na.fail,
+                 family, data = list(), 
+                 weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control=vgam.control(...),
+                 control = vgam.control(...),
                  offset = NULL, 
                  method = "vgam.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
                  contrasts = NULL,
                  constraints = NULL,
-                 extra=list(),
-                 qr.arg = FALSE, smart = TRUE,
-                 ...) {
+                 extra = list(),
+                 form2 = NULL,  # Added 20130730
+                 qr.arg = FALSE, smart = TRUE, ...) {
   dataname <- as.character(substitute(data))  # "list" if no data= 
   function.name <- "vgam"
 
@@ -44,38 +41,82 @@ vgam <- function(formula,
   mf$drop.unused.levels <- TRUE
   mf[[1]] <- as.name("model.frame")
   mf <- eval(mf, parent.frame())
-  switch(method, model.frame = return(mf), vgam.fit = 1,
+  switch(method,
+         model.frame = return(mf),
+         vgam.fit = 1,
          stop("invalid 'method': ", method))
   mt <- attr(mf, "terms")
 
   xlev <- .getXlevels(mt, mf)
-  y <- model.response(mf, "any") # model.extract(mf, "response")
-  x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
-       matrix(, NROW(y), 0)
+  y <- model.response(mf, "any")  # model.extract(mf, "response")
+  x <- if (!is.empty.model(mt))
+         model.matrix(mt, mf, contrasts) else
+         matrix(, NROW(y), 0)
   attr(x, "assign") <- attrassigndefault(x, mt)
 
+
+
+
+
+
+  if (!is.null(form2)) {
+    if (!is.null(subset))
+      stop("argument 'subset' cannot be used when ",
+            "argument 'form2' is used")
+    retlist <- shadowvgam(formula =
+                 form2,
+                 family = family, data = data,
+                 na.action = na.action,
+                 control = vgam.control(...),
+                 method = method,
+                 model = model, x.arg = x.arg, y.arg = y.arg,
+                 contrasts = contrasts,
+                 constraints = constraints,
+                 extra = extra,
+                 qr.arg = qr.arg)
+    Ym2 <- retlist$Ym2
+    Xm2 <- retlist$Xm2
+
+    if (length(Ym2)) {
+      if (nrow(as.matrix(Ym2)) != nrow(as.matrix(y)))
+        stop("number of rows of 'y' and 'Ym2' are unequal")
+    }
+    if (length(Xm2)) {
+      if (nrow(as.matrix(Xm2)) != nrow(as.matrix(x)))
+        stop("number of rows of 'x' and 'Xm2' are unequal")
+    }
+  } else {
+    Xm2 <- Ym2 <- NULL
+  }
+
+
+
+
+
+
+
   offset <- model.offset(mf)
   if (is.null(offset))
-    offset <- 0 # yyy ???
+    offset <- 0  # yyy ???
 
 
 
 
   mf2 <- mf
   if (!missing(subset)) {
-      mf2$subset <- NULL 
-      mf2 <- eval(mf2, parent.frame())   # mf2 is the full data frame. 
-      spars2 <-  lapply(mf2, attr, "spar") 
-      dfs2 <-  lapply(mf2, attr, "df") 
-      sx2 <-  lapply(mf2, attr, "s.xargument") 
-      for (ii in 1:length(mf)) {
-        if (length(sx2[[ii]])) {
-          attr(mf[[ii]], "spar") <- spars2[[ii]]
-          attr(mf[[ii]], "dfs2") <- dfs2[[ii]]
-          attr(mf[[ii]], "s.xargument") <- sx2[[ii]]
-        }
+    mf2$subset <- NULL 
+    mf2 <- eval(mf2, parent.frame())  # mf2 is the full data frame. 
+    spars2 <-  lapply(mf2, attr, "spar") 
+    dfs2   <-  lapply(mf2, attr, "df") 
+    sx2 <-  lapply(mf2, attr, "s.xargument") 
+    for (ii in 1:length(mf)) {
+      if (length(sx2[[ii]])) {
+        attr(mf[[ii]], "spar") <- spars2[[ii]]
+        attr(mf[[ii]], "dfs2") <- dfs2[[ii]]
+        attr(mf[[ii]], "s.xargument") <- sx2[[ii]]
       }
-      rm(mf2) 
+    }
+    rm(mf2) 
   }
 
 
@@ -102,11 +143,12 @@ vgam <- function(formula,
   n <- dim(x)[1]
 
   if (FALSE && is.R()) {
-      family@linkinv <- eval(family@linkinv)
-      family@link <- eval(family@link)
+    family@linkinv <- eval(family@linkinv)
+    family@link <- eval(family@link)
 
-      for (ii in names(.min.criterion.VGAM)) 
-          if (length(family[[ii]])) family[[ii]] <- eval(family[[ii]])
+    for (ii in names(.min.criterion.VGAM)) 
+      if (length(family[[ii]]))
+        family[[ii]] <- eval(family[[ii]])
   }
 
   if (length(slot(family, "first")))
@@ -126,16 +168,18 @@ vgam <- function(formula,
   if (nonparametric) {
 
       ff <- apply(aa$factors[smoothers[["s"]],,drop = FALSE], 2, any)
-      smoothers[["s"]] <- if (any(ff))
-          seq(along = ff)[aa$order == 1 & ff] else NULL
+      smoothers[["s"]] <-
+        if (any(ff)) seq(along = ff)[aa$order == 1 & ff] else NULL
 
     smooth.labels <- aa$term.labels[unlist(smoothers)]
-  } else 
-    function.name <- "vglm"       # This is effectively so 
+  } else {
+    function.name <- "vglm"  # This is effectively so 
+  }
 
 
 
   fit <- vgam.fit(x = x, y = y, w = w, mf = mf,
+                  Xm2 = Xm2, Ym2 = Ym2,  # Added 20130730
       etastart = etastart, mustart = mustart, coefstart = coefstart,
       offset = offset, family = family, control = control,
       criterion = control$criterion,
@@ -149,7 +193,6 @@ vgam <- function(formula,
     fit$nl.df[fit$nl.df < 0] <- 0
   }
 
-    # --------------------------------------------------------------
 
   if (!is.null(fit[["smooth.frame"]])) {
     fit <- fit[-1]       # Strip off smooth.frame
@@ -178,7 +221,7 @@ vgam <- function(formula,
 
 
   answer <-
-  new("vgam", 
+  new("vgam",
     "assign"       = attr(x, "assign"),
     "call"         = fit$call,
     "coefficients" = fit$coefficients,
@@ -192,9 +235,9 @@ vgam <- function(formula,
     "R"            = fit$R,
     "rank"         = fit$rank,
     "residuals"    = as.matrix(fit$residuals),
-    "rss"          = fit$rss,
+    "res.ss"       = fit$res.ss,
     "smart.prediction" = as.list(fit$smart.prediction),
-    "terms"        = list(terms=fit$terms))
+    "terms"        = list(terms = fit$terms))
 
   if (!smart)
     answer@smart.prediction <- list(smart.arg = FALSE)
@@ -213,22 +256,41 @@ vgam <- function(formula,
     slot(answer, "offset") <- as.matrix(offset)
   if (length(fit$weights))
     slot(answer, "weights") <- as.matrix(fit$weights)
+
+
   if (x.arg)
-    slot(answer, "x") <- x # The 'small' design matrix
+    slot(answer, "x") <- x  # The 'small' design matrix
+
+
+
+  if (x.arg && length(Xm2))
+    slot(answer, "Xm2") <- Xm2  # The second (lm) design matrix
+  if (y.arg && length(Ym2))
+    slot(answer, "Ym2") <- as.matrix(Ym2)  # The second response
+  if (!is.null(form2))
+    slot(answer, "callXm2") <- retlist$call
+  answer@misc$formula <- formula
+  answer@misc$form2   <- form2
+
+
+
   if (length(xlev))
     slot(answer, "xlevels") <- xlev
   if (y.arg)
     slot(answer, "y") <- as.matrix(fit$y)
-    answer@misc$formula <- formula
+  answer@misc$formula <- formula
+
 
 
-    slot(answer, "control") <- fit$control
+
+
+  slot(answer, "control") <- fit$control
 
   if (length(fit$extra)) {
     slot(answer, "extra") <- fit$extra
   }
-  slot(answer, "iter") <- fit$iter
-  slot(answer, "post") <- fit$post
+  slot(answer, "iter")   <- fit$iter
+  slot(answer, "post")   <- fit$post
 
   fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix
   dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
@@ -252,8 +314,6 @@ vgam <- function(formula,
 
 
 
-
-
   }
   if (length(fit$effects))
     slot(answer, "effects") <- fit$effects
@@ -270,3 +330,53 @@ attr(vgam, "smart") <- TRUE
 
 
 
+
+shadowvgam <-
+        function(formula,
+                 family, data = list(), 
+                 weights = NULL, subset = NULL, na.action = na.fail,
+                 etastart = NULL, mustart = NULL, coefstart = NULL,
+                 control = vgam.control(...), 
+                 offset = NULL, 
+                 method = "vgam.fit",
+                 model = FALSE, x.arg = TRUE, y.arg = TRUE,
+                 contrasts = NULL, 
+                 constraints = NULL,
+                 extra = list(), 
+                 qr.arg = FALSE, ...) {
+    dataname <- as.character(substitute(data))  # "list" if no data=
+    function.name <- "shadowvgam"
+
+    ocall <- match.call()
+
+    if (missing(data)) 
+        data <- environment(formula)
+
+    mf <- match.call(expand.dots = FALSE)
+    m <- match(c("formula", "data", "subset", "weights", "na.action",
+        "etastart", "mustart", "offset"), names(mf), 0)
+    mf <- mf[c(1, m)]
+    mf$drop.unused.levels <- TRUE
+    mf[[1]] <- as.name("model.frame")
+    mf <- eval(mf, parent.frame())
+    switch(method, model.frame = return(mf), vgam.fit = 1,
+           stop("invalid 'method': ", method))
+    mt <- attr(mf, "terms")
+
+    x <- y <- NULL 
+
+    xlev <- .getXlevels(mt, mf)
+    y <- model.response(mf, "any")  # model.extract(mf, "response")
+    x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
+         matrix(, NROW(y), 0)
+    attr(x, "assign") <- attrassigndefault(x, mt)
+
+    list(Xm2 = x, Ym2 = y, call = ocall)
+}
+
+
+
+
+
+
+
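
Editor's note: the new 'form2' argument above is handled by shadowvgam(), which simply builds a second model frame and hands back its design matrix (Xm2) and response (Ym2) for vgam.fit(), after checking that the row counts agree with the primary formula. A sketch of that pattern on a hypothetical data frame 'dat' (not a call into VGAM itself):

    dat   <- data.frame(y = rpois(20, 3), x = runif(20), z = rnorm(20))
    form2 <- ~ z                           # one-sided second formula
    mf2 <- model.frame(form2, data = dat, drop.unused.levels = TRUE)
    mt2 <- attr(mf2, "terms")
    Ym2 <- model.response(mf2, "any")      # NULL here: no second response
    Xm2 <- if (!is.empty.model(mt2)) model.matrix(mt2, mf2) else
           matrix(, NROW(Ym2), 0)
    stopifnot(nrow(Xm2) == nrow(dat))      # same row-count check as in vgam()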
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 20286ec..bed56a7 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -4,6 +4,9 @@
 
 
 
+
+
+
 vgam.control <- function(all.knots = FALSE,
                          bf.epsilon = 1e-7,
                          bf.maxit = 30, 
@@ -23,101 +26,108 @@ vgam.control <- function(all.knots = FALSE,
 
 
 
-    if (mode(criterion) != "character" && mode(criterion) != "name")
-        criterion <- as.character(substitute(criterion))
-    criterion <- pmatch(criterion[1], names(.min.criterion.VGAM), nomatch = 1)
-    criterion <- names(.min.criterion.VGAM)[criterion]
-
-    if (!is.logical(checkwz) || length(checkwz) != 1)
-      stop("bad input for argument 'checkwz'")
-    if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
-      stop("bad input for argument 'wzepsilon'")
+  if (mode(criterion) != "character" && mode(criterion) != "name")
+      criterion <- as.character(substitute(criterion))
+  criterion <- pmatch(criterion[1], names(.min.criterion.VGAM), nomatch = 1)
+  criterion <- names(.min.criterion.VGAM)[criterion]
+
+  if (!is.logical(checkwz) || length(checkwz) != 1)
+    stop("bad input for argument 'checkwz'")
+  if (!is.Numeric(wzepsilon, length.arg = 1, positive = TRUE))
+    stop("bad input for argument 'wzepsilon'")
+
+  if (length(all.knots) > 1)
+    warning("all.knots should be of length 1; using first value only")
+  if (!is.Numeric(bf.epsilon, length.arg = 1, positive = TRUE)) {
+    warning("bad input for argument 'bf.epsilon'; using 0.00001 instead")
+    bf.epsilon <- 0.00001
+  }
+  if (!is.Numeric(bf.maxit, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE)) {
+    warning("bad input for argument 'bf.maxit'; using 30 instead")
+    bf.maxit <- 30
+  }
+  if (!is.Numeric(epsilon, length.arg = 1, positive = TRUE)) {
+    warning("bad input for argument 'epsilon'; using 0.0001 instead")
+    epsilon <- 0.0001
+  }
+  if (!is.Numeric(maxit, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE)) {
+    warning("bad input for argument 'maxit'; using 30 instead")
+    maxit <- 30
+  }
+
+  convergence <- expression({
+    switch(criterion,
+           coefficients =
+             if (iter == 1) iter < maxit else
+               (iter < maxit &&
+                max(abs(new.coeffs - old.coeffs) / (
+                    abs(old.coeffs) + epsilon)) > epsilon),
+           sqrt(sqrt(eff.n)) *
+           abs(old.crit - new.crit) / (
+           abs(old.crit) + epsilon) > epsilon &&
+           iter < maxit)
+  })
+
+
+  list(all.knots = as.logical(all.knots)[1],
+       bf.epsilon = bf.epsilon, 
+       bf.maxit = bf.maxit, 
+       checkwz = checkwz,
+       convergence = convergence,
+       criterion = criterion,
+       epsilon = epsilon, 
+       maxit = maxit, 
+       nk=nk,
+       min.criterion = .min.criterion.VGAM,
+       save.weight = as.logical(save.weight)[1],
+       se.fit = as.logical(se.fit)[1],
+       trace = as.logical(trace)[1],
+       wzepsilon = wzepsilon)
+}
 
-    if (length(all.knots) > 1)
-      warning("all.knots should be of length 1; using first value only")
-    if (!is.Numeric(bf.epsilon, allowable.length = 1, positive = TRUE)) {
-      warning("bad input for argument 'bf.epsilon'; using 0.00001 instead")
-      bf.epsilon <- 0.00001
-    }
-    if (!is.Numeric(bf.maxit, allowable.length = 1,
-                    positive = TRUE, integer.valued = TRUE)) {
-      warning("bad input for argument 'bf.maxit'; using 20 instead")
-      bf.maxit <- 20
-    }
-    if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE)) {
-      warning("bad input for argument 'epsilon'; using 0.0001 instead")
-      epsilon <- 0.0001
-    }
-    if (!is.Numeric(maxit, allowable.length = 1,
-                    positive = TRUE, integer.valued = TRUE)) {
-      warning("bad input for argument 'maxit'; using 30 instead")
-      maxit <- 30
-    }
 
-    convergence <- expression({
-        switch(criterion,
-        coefficients = if (iter == 1) iter < maxit else (iter < maxit &&
-        max(abs(new.coeffs - old.coeffs)/(abs(old.coeffs)+epsilon)) > epsilon),
-        abs(old.crit-new.crit)/(abs(old.crit)+epsilon) > epsilon && iter<maxit)
-    })
-
-    list(all.knots = as.logical(all.knots)[1],
-         bf.epsilon = bf.epsilon, 
-         bf.maxit = bf.maxit, 
-         checkwz = checkwz,
-         convergence = convergence,
-         criterion = criterion,
-         epsilon = epsilon, 
-         maxit = maxit, 
-         nk=nk,
-         min.criterion = .min.criterion.VGAM,
-         save.weight = as.logical(save.weight)[1],
-         se.fit = as.logical(se.fit)[1],
-         trace = as.logical(trace)[1],
-         wzepsilon = wzepsilon)
-}
 
 
 vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
                          assign, M, n, constraints) {
-        attr(qr, "class") <- "qr" 
-        class(qr) <- "qr"
-
-    if (!is.matrix(smomat)) smomat <- as.matrix(smomat)
-    if (!is.matrix(wz)) wz <- as.matrix(wz)
-    if (!is.matrix(deriv)) deriv <- as.matrix(deriv)
-    if (!is.matrix(resid)) resid <- as.matrix(resid)
-
-    trivc <- trivial.constraints(constraints)
+  attr(qr, "class") <- "qr" 
+  class(qr) <- "qr"
 
-    ans <- rep(as.numeric(NA), length = ncol(smomat))
-    Uderiv <- vbacksub(U, t(deriv), M = M, n = n)    # \bU_i^{-1} \biu_i
-    ptr <- 0
-    for(ii in 1:length(smooth.labels)) {
-        cmat <- constraints[[ smooth.labels[ii] ]]
-        index <- (ptr+1):(ptr+ncol(cmat))
+  if (!is.matrix(smomat)) smomat <- as.matrix(smomat)
+  if (!is.matrix(wz)) wz <- as.matrix(wz)
+  if (!is.matrix(deriv)) deriv <- as.matrix(deriv)
+  if (!is.matrix(resid)) resid <- as.matrix(resid)
 
-        for(jay in index) {
-            yy <- t(cmat[,jay-ptr,drop = FALSE])
-            yy <- kronecker(smomat[,jay,drop = FALSE], yy)  # n x M
-            Us <- mux22(U, yy, M = M, upper = TRUE,
-                        as.matrix = TRUE)  # n * M
+  trivc <- trivial.constraints(constraints)
 
-            Uss <- matrix(c(t(Us)), nrow=n*M, ncol = 1)
+  ans <- rep(as.numeric(NA), length = ncol(smomat))
+  Uderiv <- vbacksub(U, t(deriv), M = M, n = n)  # \bU_i^{-1} \biu_i
+  ptr <- 0
+  for (ii in 1:length(smooth.labels)) {
+    cmat <- constraints[[ smooth.labels[ii] ]]
+    index <- (ptr + 1):(ptr + ncol(cmat))
 
-            Rsw <- qr.resid(qr, Uss)
+    for (jay in index) {
+      yy <- t(cmat[, jay-ptr, drop = FALSE])
+      yy <- kronecker(smomat[, jay, drop = FALSE], yy)  # n x M
+      Us <- mux22(U, yy, M = M, upper = TRUE,
+                  as.matrix = TRUE)  # n * M
 
-            vRsw <- matrix(Rsw, nrow=n, ncol=M, byrow = TRUE)
-            newans <- vbacksub(U, t(vRsw), M = M, n=n)
+      Uss <- matrix(c(t(Us)), nrow = n * M, ncol = 1)
+      Rsw <- qr.resid(qr, Uss)
 
-            ans[jay] <- sum(vRsw^2 + 2 * newans * deriv)
+      vRsw <- matrix(Rsw, nrow = n, ncol = M, byrow = TRUE)
+      newans <- vbacksub(U, t(vRsw), M = M, n = n)
 
-        }
-        ptr <- ptr + ncol(cmat)
+      ans[jay] <- sum(vRsw^2 + 2 * newans * deriv)
     }
+    ptr <- ptr + ncol(cmat)
+  }
 
-    names(ans) <- dimnames(smomat)[[2]]
-    ans
+  names(ans) <- dimnames(smomat)[[2]]
+  ans
 }
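
Editor's note: the rewritten convergence expression in vgam.control() above now scales the relative change in the fit criterion by sqrt(sqrt(eff.n)), so larger data sets must achieve a proportionally smaller relative change before iteration stops. A worked toy example of that test (made-up numbers, outside any fitting loop):

    eff.n    <- 200        # effective sample size, nrow(x) in vgam.fit()
    epsilon  <- 1e-7
    maxit    <- 30
    iter     <- 5
    old.crit <- -123.4567
    new.crit <- -123.4566
    one.more <- sqrt(sqrt(eff.n)) *
                abs(old.crit - new.crit) / (abs(old.crit) + epsilon) > epsilon &&
                iter < maxit
    one.more               # TRUE: another backfitting iteration would run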
     
 
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index c24dfd1..3faafa0 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -4,18 +4,24 @@
 
 
 
-vgam.fit <- function(x, y, w, mf,
-        etastart, mustart, coefstart,
-        offset, family, control, criterion = "coefficients",
-        constraints = NULL, extra, qr.arg,
-        Terms,
-        nonparametric, smooth.labels,
-        function.name = "vgam", ...) {
 
 
+vgam.fit <-
+  function(x, y, w, mf,
+           Xm2 = NULL, Ym2 = NULL,  # Added 20130730
+           etastart, mustart, coefstart,
+           offset, family, control, criterion = "coefficients",
+           constraints = NULL, extra, qr.arg,
+           Terms,
+           nonparametric, smooth.labels,
+           function.name = "vgam", ...) {
+
+
+  eff.n <- nrow(x)  # + sum(abs(w[1:nrow(x)]))
+
   specialCM <- NULL
   post <- list()
-  check.Rank <- TRUE # Set this to false for family functions vppr() etc.
+  check.Rank <- TRUE  # Set this to false for family functions vppr() etc.
   epsilon <- control$epsilon
   maxit <- control$maxit
   save.weight <- control$save.weight
@@ -42,7 +48,7 @@ vgam.fit <- function(x, y, w, mf,
       new.coeffs <- c.list$coeff
 
       if (length(family@middle))
-          eval(family@middle)
+        eval(family@middle)
 
       eta <- fv + offset
       mu <- family@linkinv(eta, extra)
@@ -59,22 +65,27 @@ vgam.fit <- function(x, y, w, mf,
       if (trace) {
         cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
 
-        UUUU <- switch(criterion, coefficients =
-                       format(new.crit, dig = round(2-log10(epsilon))),
-                       format(round(new.crit, 4)))
+        UUUU <- switch(criterion,
+                       coefficients =
+                         format(new.crit,
+                                dig = round(1 - log10(epsilon))),
+                         format(new.crit, 
+                                dig = max(4, 
+                                          round(-0 - log10(epsilon) +
+                                          log10(sqrt(eff.n))))))
 
         switch(criterion,
-               coefficients = {if(length(new.crit) > 2) cat("\n");
+               coefficients = {if (length(new.crit) > 2) cat("\n");
                cat(UUUU, fill = TRUE, sep = ", ")},
                cat(UUUU, fill = TRUE, sep = ", "))
       }
 
-                one.more <- eval(control$convergence)
+      one.more <- eval(control$convergence)
 
       flush.console()
 
-      if (!is.finite(one.more) ||
-        !is.logical(one.more)) one.more <- FALSE
+      if (!is.finite(one.more) || !is.logical(one.more))
+        one.more <- FALSE
       if (one.more) {
         iter <- iter + 1
         deriv.mu <- eval(family@deriv)
@@ -93,7 +104,7 @@ vgam.fit <- function(x, y, w, mf,
       }
 
       c.list$one.more <- one.more
-      c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+      c.list$coeff <- runif(length(new.coeffs))  # 20030312; twist needed!
       old.coeffs <- new.coeffs
     }
     c.list
@@ -106,11 +117,11 @@ vgam.fit <- function(x, y, w, mf,
   old.coeffs <- coefstart
 
   intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
-  y.names <- predictors.names <- NULL # May be overwritten in @initialize
+  y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
   n.save <- n
   if (length(slot(family, "initialize")))
-    eval(slot(family, "initialize")) # Initialize mu & M (& optionally w)
+    eval(slot(family, "initialize"))  # Initialize mu & M (& optionally w)
 
   if (length(etastart)) {
     eta <- etastart
@@ -145,7 +156,6 @@ vgam.fit <- function(x, y, w, mf,
     if (nonparametric) {
 
 
-
       smooth.frame <- mf
       assignx <- attr(x, "assign")
       which <- assignx[smooth.labels]
@@ -153,7 +163,7 @@ vgam.fit <- function(x, y, w, mf,
       bf <- "s.vam"
       bf.call <- parse(text = paste(
               "s.vam(x, z, wz, tfit$smomat, which, tfit$smooth.frame,",
-              "bf.maxit, bf.epsilon, trace, se = se.fit, X_vlm_save, ",
+              "bf.maxit, bf.epsilon, trace, se = se.fit, X.vlm.save, ",
               "Blist, ncolBlist, M = M, qbig = qbig, Umat = U, ",
               "all.knots = control$all.knots, nk = control$nk)",
               sep = ""))[[1]]
@@ -162,26 +172,26 @@ vgam.fit <- function(x, y, w, mf,
       smomat <- matrix(0, n, qbig)
       dy <- if (is.matrix(y)) dimnames(y)[[1]] else names(y)
       d2 <- if (is.null(predictors.names))
-          paste("(Additive predictor ",1:M,")", sep = "") else
-          predictors.names
+              paste("(Additive predictor ",1:M,")", sep = "") else
+              predictors.names
       dimnames(smomat) <- list(dy, vlabel(smooth.labels,
-            ncolBlist[smooth.labels], M))
+                                          ncolBlist[smooth.labels], M))
 
       tfit <- list(smomat = smomat, smooth.frame = smooth.frame)
     } else {
-      bf.call <- expression(vlm.wfit(xmat = X_vlm_save, z,
+      bf.call <- expression(vlm.wfit(xmat = X.vlm.save, z,
                                      Blist = NULL, U = U,
                                      matrix.out = FALSE, is.vlmX = TRUE,
                                      qr = qr.arg, xij = NULL))
       bf <- "vlm.wfit"
     }
 
-    X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
+    X.vlm.save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
 
 
     if (length(coefstart)) {
-      eta <- if (ncol(X_vlm_save) > 1) X_vlm_save %*% coefstart +
-               offset else X_vlm_save * coefstart + offset
+      eta <- if (ncol(X.vlm.save) > 1) X.vlm.save %*% coefstart +
+               offset else X.vlm.save * coefstart + offset
       eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
       mu <- family@linkinv(eta, extra)
     }
@@ -196,7 +206,8 @@ vgam.fit <- function(x, y, w, mf,
                        coefficients = 1,
                        tfun(mu = mu, y = y, w = w, res = FALSE,
                             eta = eta, extra))
-    old.crit <- if (minimize.criterion) 10*new.crit+10 else -10*new.crit-10
+    old.crit <- ifelse(minimize.criterion,  10 * new.crit + 10,
+                                           -10 * new.crit - 10)
 
     deriv.mu <- eval(family@deriv)
     wz <- eval(family@weight)
@@ -211,15 +222,14 @@ vgam.fit <- function(x, y, w, mf,
     c.list <- list(wz = as.double(wz), z = as.double(z),
                    fit = as.double(t(eta)),
                    one.more = TRUE, U = as.double(U),
-                   coeff = as.double(rep(1, ncol(X_vlm_save))))
+                   coeff = as.double(rep(1, ncol(X.vlm.save))))
 
 
-    dX_vlm <- as.integer(dim(X_vlm_save))
-    nrow_X_vlm <- dX_vlm[[1]]
-    ncol_X_vlm <- dX_vlm[[2]]
-    if (nrow_X_vlm < ncol_X_vlm)
-      stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm,
-           " observations")
+    dX.vlm <- as.integer(dim(X.vlm.save))
+    nrow.X.vlm <- dX.vlm[[1]]
+    ncol.X.vlm <- dX.vlm[[2]]
+    if (nrow.X.vlm < ncol.X.vlm)
+      stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
 
     while (c.list$one.more) {
       tfit <- eval(bf.call)  # fit$smooth.frame is new
@@ -237,18 +247,18 @@ vgam.fit <- function(x, y, w, mf,
       warning("convergence not obtained in ", maxit, " iterations")
 
 
-    dnrow_X_vlm <- labels(X_vlm_save)
-    xnrow_X_vlm <- dnrow_X_vlm[[2]]
-    ynrow_X_vlm <- dnrow_X_vlm[[1]]
+    dnrow.X.vlm <- labels(X.vlm.save)
+    xnrow.X.vlm <- dnrow.X.vlm[[2]]
+    ynrow.X.vlm <- dnrow.X.vlm[[1]]
 
     if (length(family@fini))
       eval(family@fini)
 
     coefs <- tfit$coefficients
-    asgn <- attr(X_vlm_save, "assign")    # 29/11/01 was x 
+    asgn <- attr(X.vlm.save, "assign")    # 20011129 was x 
 
-    names(coefs) <- xnrow_X_vlm
-    cnames <- xnrow_X_vlm
+    names(coefs) <- xnrow.X.vlm
+    cnames <- xnrow.X.vlm
 
     if (!is.null(tfit$rank)) {
       rank <- tfit$rank
@@ -256,9 +266,9 @@ vgam.fit <- function(x, y, w, mf,
         stop("rank < ncol(x) is bad")
     } else rank <- ncol(x)
 
-    R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+    R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
     R[lower.tri(R)] <- 0
-    attributes(R) <- list(dim = c(ncol_X_vlm, ncol_X_vlm),
+    attributes(R) <- list(dim = c(ncol.X.vlm, ncol.X.vlm),
                           dimnames = list(cnames, cnames), rank = rank)
 
 
@@ -280,9 +290,9 @@ vgam.fit <- function(x, y, w, mf,
       names(mu) <- names(fv)
     }
 
-    tfit$fitted.values <- NULL      # Have to kill it off  3/12/01
-    fit <- structure(c(tfit, list(
-                assign = asgn,
+    tfit$fitted.values <- NULL  # Have to kill it off  3/12/01
+    fit <- structure(c(tfit,
+           list(assign = asgn,
                 constraints = Blist,
                 control = control,
                 fitted.values = mu,
@@ -293,7 +303,7 @@ vgam.fit <- function(x, y, w, mf,
                 R = R,
                 terms = Terms)))
 
-    df.residual <- nrow_X_vlm - rank 
+    df.residual <- nrow.X.vlm - rank 
 
     if (!se.fit) {
       fit$varmat <- NULL
@@ -310,16 +320,17 @@ vgam.fit <- function(x, y, w, mf,
       fit$predictors <- as.vector(fit$predictors)
       fit$residuals <- as.vector(fit$residuals)
       names(fit$residuals) <- names(fit$predictors) <- yn
-    } else
-      dimnames(fit$residuals) <-
+    } else {
+      dimnames(fit$residuals)  <-
       dimnames(fit$predictors) <- list(yn, predictors.names)
+    }
 
     NewBlist <- process.constraints(constraints, x, M,
                                     specialCM = specialCM, by.col = FALSE)
 
     misc <- list(
         colnames.x = xn,
-        colnames.X_vlm = xnrow_X_vlm,
+        colnames.X.vlm = xnrow.X.vlm,
         criterion = criterion,
         function.name = function.name,
         intercept.only=intercept.only,
@@ -328,10 +339,10 @@ vgam.fit <- function(x, y, w, mf,
         n = n,
         new.assign = new.assign(x, NewBlist),
         nonparametric = nonparametric,
-        nrow_X_vlm = nrow_X_vlm,
+        nrow.X.vlm = nrow.X.vlm,
         orig.assign = attr(x, "assign"),
         p = ncol(x),
-        ncol_X_vlm = ncol_X_vlm,
+        ncol.X.vlm = ncol.X.vlm,
         ynames = dimnames(y)[[2]])
 
 
@@ -358,10 +369,10 @@ vgam.fit <- function(x, y, w, mf,
       if (ii != criterion &&
           any(slotNames(family) == ii) &&
           length(body(slot(family, ii)))) {
-            fit[[ii]] <-
-            crit.list[[ii]] <-
-               (slot(family, ii))(mu = mu, y = y, w = w, res = FALSE,
-                                  eta = eta, extra)
+        fit[[ii]] <-
+        crit.list[[ii]] <-
+          (slot(family, ii))(mu = mu, y = y, w = w, res = FALSE,
+                             eta = eta, extra)
         }
     }
 
@@ -370,11 +381,12 @@ vgam.fit <- function(x, y, w, mf,
 
     if (M == 1) {
       fit$predictors <- as.vector(fit$predictors)
-      fit$residuals <- as.vector(fit$residuals)
-      names(fit$residuals) <- names(fit$predictors) <- yn
+      fit$residuals  <- as.vector(fit$residuals)
+      names(fit$residuals)  <-
+      names(fit$predictors) <- yn
     } else {
-      dimnames(fit$residuals) <- dimnames(fit$predictors) <-
-          list(yn, predictors.names)
+      dimnames(fit$residuals)  <-
+      dimnames(fit$predictors) <- list(yn, predictors.names)
     }
 
 
@@ -403,7 +415,7 @@ vgam.fit <- function(x, y, w, mf,
 
 
 
-    fit$misc <- NULL # 8/6/02; It's necessary to kill it as it exists in vgam
+    fit$misc <- NULL  # 20020608; It is necessary to kill it as it
     structure(c(fit, list(
         contrasts = attr(x, "contrasts"),
         control = control,
@@ -433,7 +445,7 @@ new.assign <- function(X, Blist) {
   lasgn <- unlist(lapply(asgn, length))
 
   ncolBlist <- unlist(lapply(Blist, ncol))
-  names(ncolBlist) <- NULL    # This is necessary for below to work 
+  names(ncolBlist) <- NULL  # This is necessary for below to work 
 
   temp2 <- vlabel(nasgn, ncolBlist, M)
   L <- length(temp2)
@@ -442,10 +454,10 @@ new.assign <- function(X, Blist) {
   kk <- 0
   low <- 1
   for (ii in 1:length(asgn)) {
-    len <- low:(low + ncolBlist[ii] * lasgn[ii] -1)
-    temp <- matrix(len, ncolBlist[ii], lasgn[ii])
+    len  <- low:(low  + ncolBlist[ii] * lasgn[ii] -1)
+    temp <- matrix(len, ncolBlist[ii],  lasgn[ii])
     for (mm in 1:ncolBlist[ii])
-      newasgn[[kk+mm]] <- temp[mm,]
+      newasgn[[kk + mm]] <- temp[mm, ]
     low <- low + ncolBlist[ii] * lasgn[ii]
     kk <- kk + ncolBlist[ii]
   }
@@ -454,3 +466,6 @@ new.assign <- function(X, Blist) {
   newasgn
 }
 
+
+
+
diff --git a/R/vgam.match.q b/R/vgam.match.q
index 20bcd6a..5ebe1f0 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -42,7 +42,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
   if (!is.null(attributes(x)$NAs) || any(is.na(x)))
     stop("cannot smooth on variables with NAs") 
 
-  sx <- unique(sort(as.vector(x))) # "as.vector()" strips off attributes
+  sx <- unique(sort(as.vector(x)))  # "as.vector()" strips off attributes
   ooo <- match(x, sx)  # as.integer(match(x, sx))      # sx[o]==x
   neffec <- length(sx)  # as.integer(length(sx))
 
@@ -69,9 +69,10 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
       stop("bad value for 'nk'")
     if (!chosen)
       nk <- 0
-    knot.list <- dotC(name = "vknootl2", as.double(xbar),
-                      as.integer(neffec), knot = double(neffec+6),
-                      k = as.integer(nk+4), chosen = as.integer(chosen))
+    knot.list <- .C("vknootl2",
+                    as.double(xbar),
+                    as.integer(neffec), knot = double(neffec+6),
+                    k = as.integer(nk+4), chosen = as.integer(chosen), PACKAGE = "VGAM")
     if (noround) {
       knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
       knot.list$k <- length(knot)
diff --git a/R/vglm.R b/R/vglm.R
index 745e3e3..e4f85ab 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -9,7 +9,7 @@ vglm <- function(formula,
                  family, data = list(), 
                  weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control=vglm.control(...), 
+                 control = vglm.control(...), 
                  offset = NULL, 
                  method = "vglm.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
@@ -17,8 +17,7 @@ vglm <- function(formula,
                  constraints = NULL,
                  extra = list(), 
                  form2 = NULL, 
-                 qr.arg = TRUE, smart = TRUE, ...)
-{
+                 qr.arg = TRUE, smart = TRUE, ...) {
   dataname <- as.character(substitute(data))  # "list" if no data=
   function.name <- "vglm"
 
@@ -43,7 +42,7 @@ vglm <- function(formula,
   mt <- attr(mf, "terms")
 
   xlev <- .getXlevels(mt, mf)
-  y <- model.response(mf, "any") # model.extract(mf, "response")
+  y <- model.response(mf, "any")  # model.extract(mf, "response")
   x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
        matrix(, NROW(y), 0)
   attr(x, "assign") <- attrassigndefault(x, mt)
@@ -77,7 +76,7 @@ vglm <- function(formula,
     }
     if (length(Xm2)) {
       if (nrow(as.matrix(Xm2)) != nrow(as.matrix(x)))
-        stop("number of rows of 'y' and 'Ym2' are unequal")
+        stop("number of rows of 'x' and 'Xm2' are unequal")
     }
   } else {
     Xm2 <- Ym2 <- NULL
@@ -111,15 +110,15 @@ vglm <- function(formula,
   vglm.fitter <- get(method)
 
   fit <- vglm.fitter(x = x, y = y, w = w, offset = offset,
-              Xm2 = Xm2, Ym2 = Ym2,
-              etastart = etastart, mustart = mustart, coefstart = coefstart,
-              family = family, 
-              control = control,
-              constraints = constraints,
-              criterion = control$criterion,
-              extra = extra,
-              qr.arg = qr.arg,
-              Terms = mt, function.name = function.name, ...)
+           Xm2 = Xm2, Ym2 = Ym2,
+           etastart = etastart, mustart = mustart, coefstart = coefstart,
+           family = family, 
+           control = control,
+           constraints = constraints,
+           criterion = control$criterion,
+           extra = extra,
+           qr.arg = qr.arg,
+           Terms = mt, function.name = function.name, ...)
 
   fit$misc$dataname <- dataname
 
@@ -145,7 +144,7 @@ vglm <- function(formula,
     "R"            = fit$R,
     "rank"         = fit$rank,
     "residuals"    = as.matrix(fit$residuals),
-    "rss"          = fit$rss,
+    "res.ss"       = fit$res.ss,
     "smart.prediction" = as.list(fit$smart.prediction),
     "terms"        = list(terms = mt))
 
@@ -168,11 +167,11 @@ vglm <- function(formula,
       slot(answer, "weights") <- as.matrix(fit$weights)
 
   if (x.arg)
-    slot(answer, "x") <- fit$x # The 'small' (lm) design matrix
+    slot(answer, "x") <- fit$x  # The 'small' (lm) design matrix
   if (x.arg && length(Xm2))
-    slot(answer, "Xm2") <- Xm2 # The second (lm) design matrix
+    slot(answer, "Xm2") <- Xm2  # The second (lm) design matrix
   if (y.arg && length(Ym2))
-    slot(answer, "Ym2") <- as.matrix(Ym2) # The second response
+    slot(answer, "Ym2") <- as.matrix(Ym2)  # The second response
   if (!is.null(form2))
     slot(answer, "callXm2") <- retlist$call
   answer@misc$formula <- formula
@@ -191,7 +190,7 @@ vglm <- function(formula,
               "'extra' into a list")
       list(fit$extra)
     }
-  } else list() # R-1.5.0
+  } else list()  # R-1.5.0
   slot(answer, "iter") <- fit$iter
   slot(answer, "post") <- fit$post
 
@@ -218,17 +217,16 @@ attr(vglm, "smart") <- TRUE
 shadowvglm <-
         function(formula,
                  family, data = list(), 
-                 weights = NULL, subset = NULL, na.action=na.fail,
+                 weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control=vglm.control(...), 
+                 control = vglm.control(...), 
                  offset = NULL, 
                  method = "vglm.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
                  contrasts = NULL, 
                  constraints = NULL,
                  extra = list(), 
-                 qr.arg = FALSE, ...)
-{
+                 qr.arg = FALSE, ...) {
     dataname <- as.character(substitute(data))  # "list" if no data=
     function.name <- "shadowvglm"
 
@@ -251,12 +249,12 @@ shadowvglm <-
     x <- y <- NULL 
 
     xlev <- .getXlevels(mt, mf)
-    y <- model.response(mf, "any") # model.extract(mf, "response")
+    y <- model.response(mf, "any")  # model.extract(mf, "response")
     x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
          matrix(, NROW(y), 0)
     attr(x, "assign") <- attrassigndefault(x, mt)
 
-    list(Xm2=x, Ym2=y, call=ocall)
+    list(Xm2 = x, Ym2 = y, call = ocall)
 }
 
 
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 6f15ba2..4564dcd 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -4,15 +4,18 @@
 
 
 
+
+
 .min.criterion.VGAM <-
   c("deviance" = TRUE,
     "loglikelihood" = FALSE,
     "AIC" = TRUE, 
     "Likelihood" = FALSE,
-    "rss" = TRUE,
+    "res.ss" = TRUE,
     "coefficients" = TRUE)
 
 
+ 
 
 vlm.control <- function(save.weight = TRUE,
                         tol = 1e-7,
@@ -21,12 +24,12 @@ vlm.control <- function(save.weight = TRUE,
                         wzepsilon = .Machine$double.eps^0.75,
                         ...) {
   if (tol <= 0) {
-    warning("tol not positive; using 1e-7 instead")
+    warning("argument 'tol' not positive; using 1e-7 instead")
     tol <- 1e-7
   }
   if (!is.logical(checkwz) || length(checkwz) != 1)
     stop("bad input for argument 'checkwz'")
-  if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
+  if (!is.Numeric(wzepsilon, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'wzepsilon'")
 
   list(save.weight = save.weight,
@@ -37,6 +40,8 @@ vlm.control <- function(save.weight = TRUE,
 }
 
 
+
+
 vglm.control <- function(checkwz = TRUE,
                          Check.rank = TRUE,
                          criterion = names(.min.criterion.VGAM), 
@@ -53,17 +58,19 @@ vglm.control <- function(checkwz = TRUE,
 
 
 
+
     if (mode(criterion) != "character" && mode(criterion) != "name")
       criterion <- as.character(substitute(criterion))
-    criterion <- pmatch(criterion[1], names(.min.criterion.VGAM), nomatch = 1)
+    criterion <- pmatch(criterion[1], names(.min.criterion.VGAM),
+                        nomatch = 1)
     criterion <- names(.min.criterion.VGAM)[criterion]
 
 
 
     if (!is.logical(checkwz) || length(checkwz) != 1)
-        stop("bad input for argument 'checkwz'")
-    if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
-        stop("bad input for argument 'wzepsilon'")
+      stop("bad input for argument 'checkwz'")
+    if (!is.Numeric(wzepsilon, length.arg = 1, positive = TRUE))
+      stop("bad input for argument 'wzepsilon'")
 
     convergence <- expression({
 
@@ -71,21 +78,24 @@ vglm.control <- function(checkwz = TRUE,
       switch(criterion,
              coefficients = if (iter == 1) iter < maxit else
                             (iter < maxit &&
-      max(abs(new.crit - old.crit) / (abs(old.crit) + epsilon)) > epsilon),
-                             iter < maxit &&
-          abs(old.crit - new.crit) / (abs(old.crit) + epsilon)  > epsilon)
+                            max(abs(new.crit - old.crit) / (
+                                abs(old.crit) + epsilon)) > epsilon),
+             iter < maxit &&
+             sqrt(eff.n) *
+             abs(old.crit - new.crit) / (
+             abs(old.crit) + epsilon)  > epsilon)
     })
 
-    if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE)) {
+    if (!is.Numeric(epsilon, length.arg = 1, positive = TRUE)) {
       warning("bad input for argument 'epsilon'; using 0.00001 instead")
       epsilon <- 0.00001
     }
-    if (!is.Numeric(maxit, allowable.length = 1,
+    if (!is.Numeric(maxit, length.arg = 1,
                     positive = TRUE, integer.valued = TRUE)) {
       warning("bad input for argument 'maxit'; using 30 instead")
       maxit <- 30
     }
-    if (!is.Numeric(stepsize, allowable.length = 1, positive = TRUE)) {
+    if (!is.Numeric(stepsize, length.arg = 1, positive = TRUE)) {
       warning("bad input for argument 'stepsize'; using 1 instead")
       stepsize <- 1
     }
@@ -113,53 +123,57 @@ vcontrol.expression <- expression({
 
   control <- control   # First one, e.g., vgam.control(...)
   mylist <- family@vfamily
-  for(i in length(mylist):1) {
-      for(ii in 1:2) {
-          temp <- paste(if(ii == 1) "" else paste(function.name, ".", sep=""),
-                        mylist[i], ".control", sep="")
-          tempexists <- if (is.R()) exists(temp, envir = VGAM:::VGAMenv) else 
-                       exists(temp, inherit = TRUE)
-          if (tempexists) {
-            temp <- get(temp)
-            temp <- temp(...)
-            for(k in names(temp))
-              control[[k]] <- temp[[k]]
-          }
+  for (i in length(mylist):1) {
+    for (ii in 1:2) {
+      temp <- paste(if (ii == 1) "" else
+                    paste(function.name, ".", sep = ""),
+                    mylist[i], ".control", sep = "")
+      if (exists(temp, envir = VGAMenv)) {
+        temp <- get(temp)
+        temp <- temp(...)
+        for (k in names(temp))
+          control[[k]] <- temp[[k]]
       }
-}
+    }
+  }
 
 
-    orig.criterion <- control$criterion
-    if (control$criterion != "coefficients") {
-        try.crit <- c(names(.min.criterion.VGAM), "coefficients")
-        for(i in try.crit) {
-            if (any(slotNames(family) == i) &&
-            (( is.R() && length(body(slot(family, i)))) ||
-            ((!is.R() && length(slot(family, i)) > 1)))) {
-                control$criterion <- i
-                break
-            } else
-                control$criterion <- "coefficients"
-        }
+  orig.criterion <- control$criterion
+  if (control$criterion != "coefficients") {
+    try.crit <- c(names(.min.criterion.VGAM), "coefficients")
+    for (i in try.crit) {
+      if (any(slotNames(family) == i) &&
+          length(body(slot(family, i)))) {
+        control$criterion <- i
+        break
+      } else {
+        control$criterion <- "coefficients"
+      }
     }
-    control$min.criterion <- control$min.criterion[control$criterion]
+  }
 
+  control$min.criterion <- control$min.criterion[control$criterion]
 
 
 
 
-        for(ii in 1:2) {
-            temp <- paste(if(ii == 1) "" else paste(function.name, ".", sep=""),
-                          family@vfamily[1], 
-                          ".", control$criterion, ".control", sep="")
-            if (exists(temp, inherit=T)) {
-                temp <- get(temp)
-                temp <- temp(...)
-                for(k in names(temp))
-                    control[[k]] <- temp[[k]]
-            }
-        }
 
+  for (ii in 1:2) {
+    temp <- paste(if (ii == 1) "" else
+                  paste(function.name, ".", sep = ""),
+                  family@vfamily[1], 
+                  ".", control$criterion, ".control", sep = "")
+    if (exists(temp, inherit = TRUE)) {
+      temp <- get(temp)
+      temp <- temp(...)
+      for (k in names(temp))
+        control[[k]] <- temp[[k]]
+    }
+  }
 })
 
 
+
+
+
+
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index fa61764..fefd4b3 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -8,7 +8,7 @@
 
 
 vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
-    X_vlm_arg = NULL,
+    X.vlm.arg = NULL,
     Xm2 = NULL, Ym2 = NULL,
     etastart = NULL, mustart = NULL, coefstart = NULL,
     offset = 0, family,
@@ -19,9 +19,11 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
     extra = NULL,
     Terms = Terms, function.name = "vglm", ...) {
 
+  eff.n <- nrow(x)  # + sum(abs(w[1:nrow(x)]))
+
   specialCM <- NULL
   post <- list()
-  check.rank <- TRUE # Set this to false for family functions vppr() etc.
+  check.rank <- TRUE  # Set this to false for family functions vppr() etc.
   check.rank <- control$Check.rank
   nonparametric <- FALSE
   epsilon <- control$epsilon
@@ -64,9 +66,13 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
       if (trace && orig.stepsize == 1) {
         cat("VGLM    linear loop ", iter, ": ", criterion, "= ")
         UUUU <- switch(criterion,
-                       coefficients = format(new.crit,
-                                      dig = round(2 - log10(epsilon))),
-                       format(round(new.crit, 4)))
+                       coefficients =
+                         format(new.crit,
+                                dig = round(1 - log10(epsilon))),
+                         format(new.crit,
+                                dig = max(4,
+                                          round(-0 - log10(epsilon) +
+                                                log10(sqrt(eff.n))))))
 
         switch(criterion,
                coefficients = {if (length(new.crit) > 2) cat("\n");
@@ -75,87 +81,89 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
       }
 
 
-      {
-          take.half.step =
-            (control$half.stepsizing &&
-             length(old.coeffs)) &&
-             ((orig.stepsize != 1) ||
-             (criterion != "coefficients" &&
-             (if (minimize.criterion) new.crit > old.crit else
-             new.crit < old.crit)))
-          if (!is.logical(take.half.step))
-            take.half.step <- TRUE
-          if (take.half.step) {
-              stepsize <- 2 * min(orig.stepsize, 2*stepsize)
-              new.coeffs.save <- new.coeffs
-              if (trace) 
-                cat("Taking a modified step")
-              repeat {
-                  if (trace) {
-                    cat(".")
-                    flush.console()
-                  }
-                  stepsize <- stepsize / 2
-                  if (too.small <- stepsize < 0.001)
-                    break
-                  new.coeffs <- (1-stepsize)*old.coeffs +
-                                 stepsize*new.coeffs.save
-
-                  if (length(slot(family, "middle")))
-                    eval(slot(family, "middle"))
-
-                  fv <- X_vlm_save %*% new.coeffs
-                  if (M > 1)
-                    fv <- matrix(fv, n, M, byrow = TRUE)
-
-                  eta <- fv + offset
-                  mu <- slot(family, "linkinv")(eta, extra)
-
-                  if (length(slot(family, "middle2")))
-                    eval(slot(family, "middle2"))
-
-
-                  new.crit <- 
-                      switch(criterion,
-                          coefficients = new.coeffs,
-                          tfun(mu = mu, y = y, w = w,
-                               res = FALSE, eta = eta, extra))
-
-                  if ((criterion == "coefficients") || 
-                     ( minimize.criterion && new.crit < old.crit) ||
-                     (!minimize.criterion && new.crit > old.crit))
-                      break
-              } # of repeat
-
-              if (trace) 
-                cat("\n")
-              if (too.small) {
-                warning("iterations terminated because ",
-                        "half-step sizes are very small")
-                one.more <- FALSE
-              } else {
-                if (trace) {
-                    cat("VGLM    linear loop ",
-                        iter, ": ", criterion, "= ")
-
-                    UUUU <- switch(criterion,
-                                   coefficients =
-                                     format(new.crit,
-                                            dig = round(2-log10(epsilon))),
-                                   format(round(new.crit, 4)))
-
-                    switch(criterion,
-                           coefficients = {
-                           if (length(new.crit) > 2) cat("\n");
-                           cat(UUUU, fill = TRUE, sep = ", ")},
-                           cat(UUUU, fill = TRUE, sep = ", "))
-                }
 
-                one.more <- eval(control$convergence)
-              }
-          } else {
-            one.more <- eval(control$convergence)
+
+      take.half.step <- (control$half.stepsizing &&
+                         length(old.coeffs)) &&
+                         ((orig.stepsize != 1) ||
+                          (criterion != "coefficients" &&
+                          (if (minimize.criterion) new.crit > old.crit else
+                                                   new.crit < old.crit)))
+      if (!is.logical(take.half.step))
+        take.half.step <- TRUE
+      if (take.half.step) {
+        stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+        new.coeffs.save <- new.coeffs
+        if (trace) 
+          cat("Taking a modified step")
+        repeat {
+            if (trace) {
+              cat(".")
+              flush.console()
+            }
+            stepsize <- stepsize / 2
+            if (too.small <- stepsize < 0.001)
+              break
+            new.coeffs <- (1-stepsize) * old.coeffs +
+                             stepsize  * new.coeffs.save
+
+            if (length(slot(family, "middle")))
+              eval(slot(family, "middle"))
+
+            fv <- X.vlm.save %*% new.coeffs
+            if (M > 1)
+              fv <- matrix(fv, n, M, byrow = TRUE)
+
+            eta <- fv + offset
+            mu <- slot(family, "linkinv")(eta, extra)
+
+            if (length(slot(family, "middle2")))
+              eval(slot(family, "middle2"))
+
+
+            new.crit <- 
+              switch(criterion,
+                     coefficients = new.coeffs,
+                     tfun(mu = mu, y = y, w = w,
+                          res = FALSE, eta = eta, extra))
+
+            if ((criterion == "coefficients") || 
+               ( minimize.criterion && new.crit < old.crit) ||
+               (!minimize.criterion && new.crit > old.crit))
+              break
+        }  # of repeat
+
+        if (trace) 
+          cat("\n")
+        if (too.small) {
+          warning("iterations terminated because ",
+                  "half-step sizes are very small")
+          one.more <- FALSE
+        } else {
+          if (trace) {
+            cat("VGLM    linear loop ",
+                iter, ": ", criterion, "= ")
+
+            UUUU <- switch(criterion,
+                           coefficients =
+                             format(new.crit,
+                                    dig = round(1 - log10(epsilon))),
+                             format(new.crit, 
+                                    dig = max(4,
+                                              round(-0 - log10(epsilon) +
+                                                    log10(sqrt(eff.n))))))
+
+            switch(criterion,
+                   coefficients = {
+                   if (length(new.crit) > 2) cat("\n");
+                   cat(UUUU, fill = TRUE, sep = ", ")},
+                   cat(UUUU, fill = TRUE, sep = ", "))
           }
+
+          one.more <- eval(control$convergence)
+        }
+      } else {
+        one.more <- eval(control$convergence)
       }
       flush.console()
 
@@ -175,12 +183,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
         c.list$z <- z
         c.list$U <- U
-        if (copy_X_vlm)
-          c.list$X_vlm <- X_vlm_save
+        if (copy.X.vlm)
+          c.list$X.vlm <- X.vlm.save
       }
 
       c.list$one.more <- one.more
-      c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+      c.list$coeff <- runif(length(new.coeffs))  # 20030312; twist needed!
       old.coeffs <- new.coeffs
     }
     c.list
@@ -190,19 +198,19 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
 
 
-  copy_X_vlm <- FALSE    # May be overwritten in @initialize
+  copy.X.vlm <- FALSE    # May be overwritten in @initialize
   stepsize <- orig.stepsize
   old.coeffs <- coefstart
 
   intercept.only <- ncol(x) == 1 &&
                     dimnames(x)[[2]] == "(Intercept)"
-  y.names <- predictors.names <- NULL # May be overwritten in @initialize
+  y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
   n.save <- n 
 
 
   if (length(slot(family, "initialize")))
-    eval(slot(family, "initialize")) # Initialize mu & M (& optionally w)
+    eval(slot(family, "initialize"))  # Initialize mu & M (& optionally w)
 
 
   if (length(etastart)) {
@@ -244,24 +252,28 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
 
 
-  X_vlm_save <- if (length(X_vlm_arg)) X_vlm_arg else
-    lm2vlm.model.matrix(x, Blist, xij = control$xij,
-                                  Xm2 = Xm2)
-
-
+  X.vlm.save <- if (length(X.vlm.arg)) {
+                  X.vlm.arg
+                } else {
+                  lm2vlm.model.matrix(x, Blist, xij = control$xij,
+                                                Xm2 = Xm2)
+                }
 
 
 
   if (length(coefstart)) {
-    eta <- if (ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
-             offset else X_vlm_save * coefstart + offset
+    eta <- if (ncol(X.vlm.save) > 1) {
+             X.vlm.save %*% coefstart + offset
+           } else {
+             X.vlm.save  *  coefstart + offset
+           }
     eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta) 
     mu <- slot(family, "linkinv")(eta, extra)
   }
 
 
   if (criterion != "coefficients") {
-    tfun <- slot(family, criterion)   # family[[criterion]]
+    tfun <- slot(family, criterion)  # family[[criterion]]
   }
 
   iter <- 1
@@ -269,9 +281,8 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
                      coefficients = 1,
                      tfun(mu = mu, y = y, w = w,
                           res = FALSE, eta = eta, extra))
-  old.crit <- if (minimize.criterion)
-              10*new.crit+10 else
-             -10*new.crit-10
+  old.crit <- ifelse(minimize.criterion,  10 * new.crit + 10,
+                                         -10 * new.crit - 10)
 
   deriv.mu <- eval(slot(family, "deriv"))
   wz <- eval(slot(family, "weight"))
@@ -283,34 +294,35 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
   tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
   z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
 
-  c.list <- list(z = as.double(z), fit = as.double(t(eta)),
+  c.list <- list(z = as.double(z),
+                 fit = as.double(t(eta)),
                  one.more = TRUE,
-                 coeff = as.double(rep(1, ncol(X_vlm_save))),
+                 coeff = as.double(rep(1, ncol(X.vlm.save))),
                  U = as.double(U),
-                 copy_X_vlm = copy_X_vlm,
-                 X_vlm = if (copy_X_vlm) as.double(X_vlm_save) else
+                 copy.X.vlm = copy.X.vlm,
+                 X.vlm = if (copy.X.vlm) as.double(X.vlm.save) else
                          double(3))
 
 
-  dX_vlm <- as.integer(dim(X_vlm_save))
-  nrow_X_vlm <- dX_vlm[[1]]
-  ncol_X_vlm <- dX_vlm[[2]]
+  dX.vlm <- as.integer(dim(X.vlm.save))
+  nrow.X.vlm <- dX.vlm[[1]]
+  ncol.X.vlm <- dX.vlm[[2]]
 
-  if (nrow_X_vlm < ncol_X_vlm)
-    stop(ncol_X_vlm, "parameters but only ", nrow_X_vlm,
-         " observations")
+  if (nrow.X.vlm < ncol.X.vlm)
+    stop(ncol.X.vlm, "parameters but only ", nrow.X.vlm, " observations")
 
 
 
-  bf.call <- expression(vlm.wfit(xmat = X_vlm_save, z,
+  bf.call <- expression(vlm.wfit(xmat = X.vlm.save, z,
                                  Blist = NULL, U = U,
                                  matrix.out = FALSE,
                                  is.vlmX = TRUE,
                                  qr = qr.arg, xij = NULL))
 
 
-  while(c.list$one.more) {
-      tfit <- eval(bf.call)   # fit$smooth.frame is new
+
+  while (c.list$one.more) {
+      tfit <- eval(bf.call)  # fit$smooth.frame is new
     
       c.list$coeff <- tfit$coefficients 
     
@@ -327,9 +339,9 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
 
 
-  dnrow_X_vlm <- labels(X_vlm_save)
-  xnrow_X_vlm <- dnrow_X_vlm[[2]]
-  ynrow_X_vlm <- dnrow_X_vlm[[1]]
+  dnrow.X.vlm <- labels(X.vlm.save)
+  xnrow.X.vlm <- dnrow.X.vlm[[2]]
+  ynrow.X.vlm <- dnrow.X.vlm[[1]]
 
   if (length(slot(family, "fini")))
     eval(slot(family, "fini"))
@@ -338,24 +350,24 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
     tfit$predictors <- matrix(tfit$predictors, n, M)
 
   coefs <- tfit$coefficients
-  asgn <- attr(X_vlm_save, "assign")
+  asgn <- attr(X.vlm.save, "assign")
 
-  names(coefs) <- xnrow_X_vlm
+  names(coefs) <- xnrow.X.vlm
 
   rank <- tfit$rank
-  cnames <- xnrow_X_vlm
+  cnames <- xnrow.X.vlm
 
-  if (check.rank && rank < ncol_X_vlm)
+  if (check.rank && rank < ncol.X.vlm)
     stop("vglm only handles full-rank models (currently)")
 
-  R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+  R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
   R[lower.tri(R)] <- 0
-  attributes(R) <- list(dim = c(ncol_X_vlm, ncol_X_vlm),
-                        dimnames = list(cnames, cnames), rank=rank)
+  attributes(R) <- list(dim = c(ncol.X.vlm, ncol.X.vlm),
+                        dimnames = list(cnames, cnames), rank = rank)
 
   effects <- tfit$effects
-  neff <- rep("", nrow_X_vlm)
-  neff[seq(ncol_X_vlm)] <- cnames
+  neff <- rep("", nrow.X.vlm)
+  neff[seq(ncol.X.vlm)] <- cnames
   names(effects) <- neff
 
   dim(tfit$predictors) <- c(n, M)
@@ -387,23 +399,23 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
   }
 
 
-  df.residual <- nrow_X_vlm - rank
+  df.residual <- nrow.X.vlm - rank
   fit <- list(assign = asgn,
               coefficients = coefs,
               constraints = Blist, 
               df.residual = df.residual,
-              df.total = n*M,
+              df.total = n * M,
               effects = effects, 
               fitted.values = mu,
               offset = offset, 
               rank = rank,
               residuals = residuals,
               R = R,
-              terms = Terms) # terms: This used to be done in vglm() 
+              terms = Terms)  # terms: This used to be done in vglm() 
 
   if (qr.arg) {
     fit$qr <- tfit$qr
-    dimnames(fit$qr$qr) <- dnrow_X_vlm
+    dimnames(fit$qr$qr) <- dnrow.X.vlm
   }
 
   if (M == 1) {
@@ -414,7 +426,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
 
   misc <- list(
       colnames.x = xn,
-      colnames.X_vlm = xnrow_X_vlm,
+      colnames.X.vlm = xnrow.X.vlm,
       criterion = criterion,
       function.name = function.name, 
       intercept.only = intercept.only,
@@ -422,10 +434,10 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
       M = M,
       n = n,
       nonparametric = nonparametric,
-      nrow_X_vlm = nrow_X_vlm,
+      nrow.X.vlm = nrow.X.vlm,
       orig.assign = attr(x, "assign"),
       p = ncol(x),
-      ncol_X_vlm = ncol_X_vlm,
+      ncol.X.vlm = ncol.X.vlm,
       ynames = dimnames(y)[[2]])
 
 
@@ -433,14 +445,13 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
   if (criterion != "coefficients")
     crit.list[[criterion]] <- fit[[criterion]] <- new.crit
 
-  for(ii in names(.min.criterion.VGAM)) {
+  for (ii in names(.min.criterion.VGAM)) {
     if (ii != criterion &&
         any(slotNames(family) == ii) &&
         length(body(slot(family, ii)))) {
-          fit[[ii]] <-
-          crit.list[[ii]] <-
-          (slot(family, ii))(mu = mu, y = y, w = w,
-                             res = FALSE, eta = eta, extra)
+      fit[[ii]] <-
+      crit.list[[ii]] <- (slot(family, ii))(mu = mu, y = y, w = w,
+                                            res = FALSE, eta = eta, extra)
     }
   }
 
@@ -462,9 +473,10 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
         iter = iter,
         misc = misc,
         post = post,
-        rss = tfit$rss,
+        res.ss = tfit$res.ss,
         x = x,
         y = y)),
         vclass = slot(family, "vfamily"))
 }
 
+
diff --git a/R/vlm.R b/R/vlm.R
index 29a8e34..5adb172 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -15,9 +15,8 @@ vlm <- function(formula,
                 contrasts = NULL, 
                 constraints = NULL,
                 extra = NULL, offset = NULL,  
-                smart = TRUE, ...)
-{
-  dataname <- as.character(substitute(data)) # "list" if no data=
+                smart = TRUE, ...) {
+  dataname <- as.character(substitute(data))  # "list" if no data=
   function.name <- "vlm"
 
   ocall <- match.call()
@@ -51,7 +50,7 @@ vlm <- function(formula,
 
 
   xlev <- .getXlevels(mt, mf)
-  y <- model.response(mf, "any") # model.extract(mf, "response")
+  y <- model.response(mf, "any")  # model.extract(mf, "response")
   x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
        matrix(, NROW(y), 0)
   attr(x, "assign") <- attrassigndefault(x, mt)
@@ -97,11 +96,11 @@ vlm <- function(formula,
 
   fit <- vlm.wfit(xmat = x, zmat = y, Blist = Blist, wz = wz, U = NULL,
                  matrix.out = FALSE, is.vlmX = FALSE,
-                 rss = TRUE, qr = qr.arg,
+                 res.ss = TRUE, qr = qr.arg,
                  x.ret = TRUE, offset = offset)
 
-  ncol_X_vlm <- fit$rank
-  fit$R <- fit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+  ncol.X.vlm <- fit$rank
+  fit$R <- fit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
   fit$R[lower.tri(fit$R)] <- 0
 
 
@@ -109,26 +108,26 @@ vlm <- function(formula,
 
     fit$constraints <- Blist
 
-    dnrow_X_vlm <- labels(fit$X_vlm)
-    xnrow_X_vlm <- dnrow_X_vlm[[2]]
+    dnrow.X.vlm <- labels(fit$X.vlm)
+    xnrow.X.vlm <- dnrow.X.vlm[[2]]
     dn <- labels(x)
     xn <- dn[[2]]
-    dX_vlm <- as.integer(dim(fit$X_vlm))
-    nrow_X_vlm <- dX_vlm[[1]]
-    ncol_X_vlm <- dX_vlm[[2]]
+    dX.vlm <- as.integer(dim(fit$X.vlm))
+    nrow.X.vlm <- dX.vlm[[1]]
+    ncol.X.vlm <- dX.vlm[[2]]
 
     misc <- list(
         colnames.x = xn,
-        colnames.X_vlm = xnrow_X_vlm,
+        colnames.X.vlm = xnrow.X.vlm,
         function.name = function.name,
         intercept.only=intercept.only,
         predictors.names = predictors.names,
         M = M,
         n = nrow(x),
-        nrow_X_vlm = nrow_X_vlm,
+        nrow.X.vlm = nrow.X.vlm,
         orig.assign = attr(x, "assign"),
         p = ncol(x),
-        ncol_X_vlm = ncol_X_vlm,
+        ncol.X.vlm = ncol.X.vlm,
         ynames = dimnames(y)[[2]])
     
     fit$misc <- misc
@@ -149,7 +148,7 @@ vlm <- function(formula,
       "coefficients" = fit$coefficients,
       "constraints"  = fit$constraints,
       "control"      = control, 
-      "criterion"    = list(deviance = fit$rss),
+      "criterion"    = list(deviance = fit$res.ss),
       "dispersion"   = 1,
       "df.residual"  = fit$df.residual,
       "df.total"     = n*M,
@@ -160,7 +159,7 @@ vlm <- function(formula,
       "R"            = fit$R,
       "rank"         = fit$rank,
       "residuals"    = as.matrix(fit$residuals),
-      "rss"          = fit$rss,
+      "res.ss"       = fit$res.ss,
       "smart.prediction" = as.list(fit$smart.prediction),
       "terms"        = list(terms = mt))
 
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 4cf263a..e852438 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -12,10 +12,10 @@
 
 vlm.wfit <-
   function(xmat, zmat, Blist, wz = NULL, U = NULL, 
-           matrix.out = FALSE, is.vlmX = FALSE, rss = TRUE, qr = FALSE,
+           matrix.out = FALSE, is.vlmX = FALSE, res.ss = TRUE, qr = FALSE,
            x.ret = FALSE,
            offset = NULL,
-           omit.these = NULL, only.rss = FALSE,
+           omit.these = NULL, only.res.ss = FALSE,
            ncolx = if (matrix.out && is.vlmX) {
                      stop("need argument 'ncolx'") 
                    } else {
@@ -29,7 +29,7 @@ vlm.wfit <-
   zmat <- as.matrix(zmat)
   n <- nrow(zmat)
   M <- ncol(zmat)
-  if (!only.rss) {
+  if (!only.res.ss) {
     contrast.save <- attr(xmat, "contrasts")
     znames <- dimnames(zmat)[[2]]
   }
@@ -45,25 +45,25 @@ vlm.wfit <-
     stop("input unconformable")
   }
 
-  X_vlm_save <- if (is.vlmX) {
-          xmat 
+  X.vlm.save <- if (is.vlmX) {
+        xmat 
       } else {
-          if (missing.Blist || !length(Blist)) {
-              Blist <- replace.constraints(vector("list", ncol(xmat)),
-                                          diag(M), 1:ncol(xmat)) # NULL
-          }
-          lm2vlm.model.matrix(x = xmat, Blist = Blist, M = M,
-                              assign.attributes = FALSE,
-                              xij = xij,
-                              Xm2 = Xm2)
+        if (missing.Blist || !length(Blist)) {
+          Blist <- replace.constraints(vector("list", ncol(xmat)),
+                                       diag(M), 1:ncol(xmat))  # NULL
         }
-  X_vlm <- mux111(U, X_vlm_save, M = M)
-  z_vlm <- mux22(U, zmat, M = M, upper = TRUE, as.matrix = FALSE)
+        lm2vlm.model.matrix(x = xmat, Blist = Blist, M = M,
+                            assign.attributes = FALSE,
+                            xij = xij,
+                            Xm2 = Xm2)
+      }
+  X.vlm <- mux111(U, X.vlm.save, M = M)
+  z.vlm <- mux22(U, zmat, M = M, upper = TRUE, as.matrix = FALSE)
 
 
   if (length(omit.these)) {
-      X_vlm <- X_vlm[!omit.these,,drop = FALSE] 
-      z_vlm <- z_vlm[!omit.these]
+    X.vlm <- X.vlm[!omit.these, , drop = FALSE] 
+    z.vlm <- z.vlm[!omit.these]
   }
 
 
@@ -71,12 +71,12 @@ vlm.wfit <-
 
 
 
-  ans <- lm.fit(X_vlm, y = z_vlm, ...)
+  ans <- lm.fit(X.vlm, y = z.vlm, ...)
 
-  if (rss) {
-    ans$rss <- sum(ans$resid^2)
-    if (only.rss)
-      return(list(rss = ans$rss))
+  if (res.ss) {
+    ans$res.ss <- sum(ans$resid^2)
+    if (only.res.ss)
+      return(list(res.ss = ans$res.ss))
   }
 
   if (length(omit.these) && any(omit.these)) {
@@ -86,7 +86,7 @@ vlm.wfit <-
 
   fv <- ans$fitted.values
   dim(fv) <- c(M, n)
-  fv <- vbacksub(U, fv, M = M, n = n) # Have to premultiply fv by U
+  fv <- vbacksub(U, fv, M = M, n = n)  # Have to premultiply fv by U
 
 
   if (length(Eta.range)) {
@@ -111,7 +111,7 @@ vlm.wfit <-
   ans$constraints <- Blist
   ans$contrasts <- contrast.save
   if (x.ret) {
-    ans$X_vlm <- X_vlm_save
+    ans$X.vlm <- X.vlm.save
   }
 
   if (!is.null(offset)) {
@@ -134,10 +134,10 @@ vlm.wfit <-
   }
   ncolBlist <- unlist(lapply(Blist, ncol)) 
   temp <- c(0, cumsum(ncolBlist))
-  for(ii in 1:ncolx) {
+  for (ii in 1:ncolx) {
     index <- (temp[ii]+1):temp[ii+1]
     cm <- Blist[[ii]]
-    B[,ii] <- cm %*% ans$coef[index]
+    B[, ii] <- cm %*% ans$coef[index]
   }
   ans$mat.coefficients <- t(B)
   ans
@@ -169,8 +169,8 @@ print.vlm.wfit <- function(x, ...) {
   }
   cat("\nDegrees of Freedom:", n*M, "Total;", rdf, "Residual\n")
 
-  if (!is.null(x$rss)) {
-    cat("Residual Sum of Squares:", format(x$rss), "\n")
+  if (!is.null(x$res.ss)) {
+    cat("Residual Sum of Squares:", format(x$res.ss), "\n")
   }
 
   invisible(x)
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index 99d7adc..e16de2a 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -19,7 +19,7 @@ setClass("vsmooth.spline", representation(
          "call"         = "call",
          "constraints"  = "list",
          "df"           = "numeric",
-         "nlfit"        = "vsmooth.spline.fit", # is the nonlinear component
+         "nlfit"        = "vsmooth.spline.fit",  # is the nonlinear component
          "lev"          = "matrix",
          "lfit" = "vlm",  # 20020606 was "vlm.wfit"; is the linear component
          "spar"         = "numeric",
@@ -107,7 +107,7 @@ setMethod("depvar",  "vsmooth.spline", function(object, ...)
 
 vsmooth.spline <-
   function(x, y, w = NULL, df = rep(5, M),
-           spar = NULL, #rep(0,M),
+           spar = NULL,  #rep(0,M),
            all.knots = FALSE, 
            iconstraint = diag(M),
            xconstraint = diag(M),
@@ -117,6 +117,8 @@ vsmooth.spline <-
            nk = NULL,
            control.spar = list()) {
 
+ 
+    
   if (var.arg) {
     warning("@var will be returned, but no use will be made of it") 
   }
@@ -194,6 +196,7 @@ vsmooth.spline <-
   }
   dim2wz <- ncol(wzmat)
 
+ 
   if (missing.constraints) {
     constraints <- list("(Intercepts)" = eval(iconstraint),
                         "x"            = eval(xconstraint))
@@ -216,24 +219,25 @@ vsmooth.spline <-
 
 
     usortx <- unique(sort(as.vector(xvector)))
-    ooo <- match(xvector, usortx)             # usortx[ooo] == x
+    ooo <- match(xvector, usortx)  # usortx[ooo] == x
     neff <- length(usortx)
     if (neff < 7) {
       stop("not enough unique 'x' values (need 7 or more)")
     }
 
-    dim1U <- dim2wz # 10/1/00; was M * (M+1) / 2
+    dim1U <- dim2wz  # 10/1/00; was M * (M+1) / 2
 
-    collaps <- dotC(name = "vsuff9",
+    collaps <- .C("vsuff9",
       as.integer(n_lm), as.integer(neff), as.integer(ooo),
       as.double(xvector), as.double(ymat), as.double(wzmat),
+                    
       xbar = double(neff), ybar = double(neff * M),
           wzbar = double(neff * dim2wz),
       uwzbar = double(1), wzybar = double(neff * M), okint = as.integer(0),
       as.integer(M), dim2wz = as.integer(dim2wz), dim1U = as.integer(dim1U),
       blist = as.double(diag(M)), ncolb = as.integer(M),
       trivc = as.integer(1), wuwzbar = as.integer(0),
-      dim1Uwzbar = as.integer(dim1U), dim2wzbar = as.integer(dim2wz))
+      dim1Uwzbar = as.integer(dim1U), dim2wzbar = as.integer(dim2wz), PACKAGE = "VGAM")
 
     if (collaps$okint != 1) {
       stop("some non-positive-definite weight matrices ",
@@ -244,10 +248,10 @@ vsmooth.spline <-
 
     if (FALSE) {
     } else {
-      yinyin <- collaps$ybar   # Includes both linear and nonlinear parts
+      yinyin <- collaps$ybar  # Includes both linear and nonlinear parts
       x <- collaps$xbar  # Could call this xxx for location finder
 
-      lfit <- vlm(yinyin ~ 1 + x,    # xxx
+      lfit <- vlm(yinyin ~ 1 + x,  # xxx
                  constraints = constraints,
                  save.weight = FALSE,
                  qr.arg = FALSE, x.arg = FALSE, y.arg = FALSE,
@@ -255,7 +259,7 @@ vsmooth.spline <-
                  weights = matrix(collaps$wzbar, neff, dim2wz))
     }
 
-    ncb0  <- ncol(constraints[[2]])   # Of xxx and not of the intercept
+    ncb0  <- ncol(constraints[[2]])  # Of xxx and not of the intercept
     spar  <- rep(if (length(spar)) spar else 0, length = ncb0)
     dfvec <- rep(df, length = ncb0)
 
@@ -281,7 +285,7 @@ vsmooth.spline <-
                        "Bcoefficients" = matrix(as.numeric(NA), 1, 1),
                        "knots"         = numeric(0),
                        "xmin"          = numeric(0),
-                       "xmax"          = numeric(0)) # 8/11/03
+                       "xmax"          = numeric(0))  # 8/11/03
 
       dratio <- as.numeric(NA)
 
@@ -317,7 +321,7 @@ vsmooth.spline <-
     if (length(nknots)) {
       warning("overriding 'nk' by 'all.knots = TRUE'")
     }
-    nknots <- length(knot) - 4     # No longer neff + 2
+    nknots <- length(knot) - 4  # No longer neff + 2
   } else {
     chosen <- length(nknots)
     if (chosen && (nknots > neff+2 || nknots <= 5)) {
@@ -326,10 +330,10 @@ vsmooth.spline <-
     if (!chosen) {
       nknots <- 0
     }
-      knot.list <- dotC(name = "vknootl2", as.double(xbar),
-                        as.integer(neff), knot = double(neff+6),
-                        k = as.integer(nknots+4),
-                        chosen = as.integer(chosen))
+    knot.list <- .C("vknootl2", as.double(xbar),
+                      as.integer(neff), knot = double(neff+6),
+                      k = as.integer(nknots+4),
+                      chosen = as.integer(chosen), PACKAGE = "VGAM")
     if (noround) {
       knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
       knot.list$k <- length(knot)
@@ -342,6 +346,7 @@ vsmooth.spline <-
     stop("not enough distinct knots found")
   }
 
+ 
   conmat <- (constraints[[2]])[, nonlin, drop = FALSE]
   ncb <- sum(nonlin)
   trivc <- trivial.constraints(conmat)
@@ -354,16 +359,18 @@ vsmooth.spline <-
    ooo <- 1:neff # Already sorted
 
 
-  collaps <- dotC(name = "vsuff9",
+
+  collaps <- .C("vsuff9",
       as.integer(neff), as.integer(neff), as.integer(ooo),
       as.double(collaps$xbar), as.double(resmat), as.double(collaps$wzbar),
+                  
       xbar = double(neff), ybar = double(neff * ncb),
           wzbar = double(neff * dim2wzbar),
       uwzbar = double(1), wzybar = double(neff * ncb), okint = as.integer(0),
       as.integer(M), as.integer(dim2wz), as.integer(dim1U),
       blist = as.double(conmat), ncolb = as.integer(ncb),
       as.integer(trivc), wuwzbar = as.integer(0),
-      as.integer(dim1Uwzbar), as.integer(dim2wzbar))
+      as.integer(dim1Uwzbar), as.integer(dim2wzbar), PACKAGE = "VGAM")
 
   if (collaps$okint != 1) {
    stop("some non-positive-definite weight matrices ",
@@ -374,10 +381,45 @@ vsmooth.spline <-
   dim(collaps$wzbar) <- c(neff, dim2wzbar)
 
 
-  ldk <- 3 * ncb + 1     # 10/7/02; Previously 4 * ncb
+ 
+
+ 
+
+
+  wzyb.c <-
+  zedd.c <- matrix(0, neff, ncb)
+  Wmat.c <- array(0, c(ncb, ncb, neff))
+ if (FALSE)
+  for (ii in 1:neff) {
+    Wi.indiv <- m2adefault(wzmat[ii, , drop = FALSE], M = ncb)
+    Wi.indiv <- Wi.indiv[,, 1]  # Drop the 3rd dimension
+    Wmat.c[,, ii] <- t(conmat) %*% Wi.indiv %*% conmat
+    one.Wmat.c <- matrix(Wmat.c[,, ii], ncb, ncb)
+    zedd.c[ii, ] <- solve(Wmat.c[,, ii],
+                          t(conmat) %*% Wi.indiv %*% cbind(resmat[ii, ]))
+    wzyb.c[ii, ] <- one.Wmat.c %*% zedd.c[ii, ]
+  }
+
+ 
+
+
+
+
+
+
+ 
+
+  ldk <- 3 * ncb + 1  # 20020710; Previously 4 * ncb
   varmat <- if (var.arg) matrix(0, neff, ncb) else double(1)
-  vsplin <- dotC(name = "Yee_spline",
-     xs = as.double(xbar),  as.double(collaps$wzybar),
+
+
+
+
+
+  vsplin <- .C("Yee_spline",
+     xs = as.double(xbar),
+     yyy = as.double(collaps$wzybar),  # zz
+                 
          as.double(collaps$wzbar), xknot = as.double(knot),
      n = as.integer(neff), nknots = as.integer(nknots), as.integer(ldk),
          M = as.integer(ncb), dim2wz = as.integer(dim2wzbar),
@@ -395,8 +437,14 @@ vsmooth.spline <-
      double(1), as.integer(0),
 
      icontrsp = as.integer(contr.sp$maxit),
-      contrsp = as.double(unlist(contr.sp[1:4])))
+      contrsp = as.double(unlist(contr.sp[1:4])), PACKAGE = "VGAM")
 
+
+
+
+
+
+ 
   if (vsplin$ierror != 0) {
     stop("vsplin$ierror == ", vsplin$ierror,
          ". Something gone wrong in 'vsplin'")
@@ -416,6 +464,8 @@ vsmooth.spline <-
   dofr.nl <- colSums(vsplin$levmat)  # Actual EDF used 
 
 
+
+ 
   fv <- lfit@fitted.values + vsplin$fv %*% t(conmat)
   if (M > 1) {
     dimnames(fv) <- list(NULL, ny2)
@@ -547,7 +597,7 @@ predictvsmooth.spline <- function(object, x, deriv = 0, se.fit = FALSE) {
 
   mat.coef <- coefvlm(lfit, matrix.out = TRUE)
   coeflfit <- t(mat.coef)   # M x p now
-  M <- nrow(coeflfit) # if (is.matrix(object at y)) ncol(object at y) else 1
+  M <- nrow(coeflfit)  # if (is.matrix(object at y)) ncol(object at y) else 1
 
   pred <- if (deriv == 0)
            predict(lfit, data.frame(x = x)) else
@@ -562,7 +612,7 @@ predictvsmooth.spline <- function(object, x, deriv = 0, se.fit = FALSE) {
 
   conmat <- if (!length(lfit@constraints)) diag(M) else
               lfit@constraints[[2]]
-  conmat <- conmat[, nonlin, drop = FALSE] # Of nonlinear functions
+  conmat <- conmat[, nonlin, drop = FALSE]  # Of nonlinear functions
 
   list(x = x, y = pred + predict(nlfit, x, deriv)$y %*% t(conmat))
 }
@@ -585,10 +635,10 @@ predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
   ncb <- ncol(object@Bcoefficients)
   y <- matrix(as.numeric(NA), length(xs), ncb)
   if (ngood <- sum(good)) {
-    junk <- dotC(name = "Yee_vbvs", as.integer(ngood),
+    junk <- .C("Yee_vbvs", as.integer(ngood),
          as.double(object@knots), as.double(object@Bcoefficients),
           as.double(xs[good]), smomat = double(ngood * ncb),
-          as.integer(nknots), as.integer(deriv), as.integer(ncb))
+          as.integer(nknots), as.integer(deriv), as.integer(ncb), PACKAGE = "VGAM")
     y[good,] <- junk$smomat
 
     if (TRUE && deriv > 1) {
@@ -628,9 +678,9 @@ predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
 
 valid.vknotl2 <- function(knot, tol = 1/1024) {
 
-  junk <- dotC(name = "Yee_pknootl2", knot = as.double(knot),
+  junk <- .C("Yee_pknootl2", knot = as.double(knot),
                as.integer(length(knot)),
-               keep = integer(length(knot)), as.double(tol))
+               keep = integer(length(knot)), as.double(tol), PACKAGE = "VGAM")
   keep <- as.logical(junk$keep)
   knot <- junk$knot[keep]
   if (length(knot) <= 11) {
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..a2acbd1
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index 04a4c45..c8cafc5 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
new file mode 100644
index 0000000..3c2ea93
Binary files /dev/null and b/data/Huggins89table1.rda differ
diff --git a/data/Perom.rda b/data/Perom.rda
deleted file mode 100644
index a5627f2..0000000
Binary files a/data/Perom.rda and /dev/null differ
diff --git a/data/V1.txt.gz b/data/V1.txt.gz
new file mode 100644
index 0000000..b49b154
Binary files /dev/null and b/data/V1.txt.gz differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index 139f886..507a0f2 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index 97e6be8..1699e04 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index 5c937fa..4b199ad 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index db0e160..7e0578c 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/beggs.rda b/data/beggs.rda
new file mode 100644
index 0000000..9fd70c6
Binary files /dev/null and b/data/beggs.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index e0e3019..074a63c 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/corbet.rda b/data/corbet.rda
new file mode 100644
index 0000000..b0f880f
Binary files /dev/null and b/data/corbet.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index dcbcbcd..572f176 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 402a25e..45f2359 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index 860ee59..d8eba09 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 5a59896..654fee5 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index fc07dbd..70a81a3 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 41be541..1f9f6ed 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/deermice.rda b/data/deermice.rda
new file mode 100644
index 0000000..ff35c27
Binary files /dev/null and b/data/deermice.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 722f3db..ce60866 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/hspider.rda b/data/hspider.rda
index c490aa1..0111a78 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index 8e81a1f..8b0d11b 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/machinists.txt.gz b/data/machinists.txt.gz
new file mode 100644
index 0000000..cb93c6e
Binary files /dev/null and b/data/machinists.txt.gz differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index a1ac6bb..c9f4568 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/mmt.rda b/data/mmt.rda
index 18acd65..a9782a6 100644
Binary files a/data/mmt.rda and b/data/mmt.rda differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index 89426e6..0a3cd82 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/prats.txt.gz b/data/prats.txt.gz
new file mode 100644
index 0000000..45d60be
Binary files /dev/null and b/data/prats.txt.gz differ
diff --git a/data/prinia.rda b/data/prinia.rda
new file mode 100644
index 0000000..d219194
Binary files /dev/null and b/data/prinia.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index fe71487..46cecfe 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index 84da16c..f860477 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index c66480c..a5cbb07 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index ba91a39..8cabb8a 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wffc.indiv.rda b/data/wffc.indiv.rda
deleted file mode 100644
index c126c35..0000000
Binary files a/data/wffc.indiv.rda and /dev/null differ
diff --git a/data/wffc.nc.rda b/data/wffc.nc.rda
deleted file mode 100644
index 93d820f..0000000
Binary files a/data/wffc.nc.rda and /dev/null differ
diff --git a/data/wffc.rda b/data/wffc.rda
deleted file mode 100644
index dcccb00..0000000
Binary files a/data/wffc.rda and /dev/null differ
diff --git a/data/wffc.teams.rda b/data/wffc.teams.rda
deleted file mode 100644
index aa6966e..0000000
Binary files a/data/wffc.teams.rda and /dev/null differ
diff --git a/inst/doc/categoricalVGAM.R b/inst/doc/categoricalVGAM.R
index cabf345..43a0c11 100644
--- a/inst/doc/categoricalVGAM.R
+++ b/inst/doc/categoricalVGAM.R
@@ -1,9 +1,10 @@
 ### R code from vignette source 'categoricalVGAM.Rnw'
 
 ###################################################
-### code chunk number 1: categoricalVGAM.Rnw:84-89
+### code chunk number 1: categoricalVGAM.Rnw:84-90
 ###################################################
 library("VGAM")
+library("VGAMdata")
 ps.options(pointsize = 12)
 options(width = 72, digits = 4)
 options(SweaveHooks = list(fig = function() par(las = 1)))
@@ -11,7 +12,7 @@ options(prompt = "R> ", continue = "+")
 
 
 ###################################################
-### code chunk number 2: categoricalVGAM.Rnw:613-616
+### code chunk number 2: pneumocat
 ###################################################
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
@@ -19,7 +20,7 @@ fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
 
 
 ###################################################
-### code chunk number 3: categoricalVGAM.Rnw:899-903
+### code chunk number 3: categoricalVGAM.Rnw:900-904
 ###################################################
 journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
 squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
@@ -28,156 +29,37 @@ dimnames(squaremat) <- list(winner = journal, loser = journal)
 
 
 ###################################################
-### code chunk number 4: categoricalVGAM.Rnw:1004-1008
+### code chunk number 4: categoricalVGAM.Rnw:1005-1009
 ###################################################
 abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
 fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
 coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
+Coef(fit)  # Estimated pA and pB
 
 
 ###################################################
-### code chunk number 5: categoricalVGAM.Rnw:1314-1315
-###################################################
-head(wffc.nc, 5)
-
-
-###################################################
-### code chunk number 6: categoricalVGAM.Rnw:1324-1336
-###################################################
-fnc <- transform(wffc.nc,
-                 finame = factor(iname),
-                 fsector = factor(sector),
-                 fday = factor(ceiling(session / 2)),
-                 mornaft = 1 - (session %% 2),
-                 fbeatboat = factor(beatboat))
-
-fnc <- fnc[with(fnc, !is.element(comid, c(99,72,80,93,45,71,97,78))),] 
-fnc <- transform(fnc,
-                ordnum = ifelse(numbers <= 02, "few",
-                         ifelse(numbers <= 10, "more", "most")))
-fnc$ordnum <- ordered(fnc$ordnum, levels = c("few", "more", "most"))
-
-
-###################################################
-### code chunk number 7: categoricalVGAM.Rnw:1341-1342
-###################################################
-with(fnc, table(ordnum))
-
-
-###################################################
-### code chunk number 8: categoricalVGAM.Rnw:1349-1356
-###################################################
-fit.pom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          family = cumulative(parallel = TRUE, reverse = TRUE),
-          data = fnc)
-
-
-###################################################
-### code chunk number 9: categoricalVGAM.Rnw:1368-1370
-###################################################
-head(fit.pom@y, 3)
-colSums(fit.pom@y)
-
-
-###################################################
-### code chunk number 10: categoricalVGAM.Rnw:1381-1383
-###################################################
-head(coef(fit.pom, matrix = TRUE), 10)
-#head(summary(fit.pom)@coef3, 10) # Old now since 0.7-10 is nicer
-
-
-###################################################
-### code chunk number 11: categoricalVGAM.Rnw:1387-1388
-###################################################
-head(coef(summary(fit.pom)), 10)
-
-
-###################################################
-### code chunk number 12: categoricalVGAM.Rnw:1434-1442
-###################################################
-fit.ppom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          cumulative(parallel = FALSE ~ 1 + mornaft, reverse = TRUE),
-          data = fnc)
-head(coef(fit.ppom, matrix = TRUE),  8)
-
-
-###################################################
-### code chunk number 13: categoricalVGAM.Rnw:1447-1449
-###################################################
-pchisq(deviance(fit.pom) - deviance(fit.ppom),
-       df = df.residual(fit.pom) - df.residual(fit.ppom), lower.tail=FALSE)
-
-
-###################################################
-### code chunk number 14: categoricalVGAM.Rnw:1456-1464
-###################################################
-fit2.ppom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          family = cumulative(parallel = FALSE ~ 1 + fday, reverse = TRUE),
-          data = fnc)
-head(coef(fit2.ppom, matrix = TRUE), 8)
-
-
-###################################################
-### code chunk number 15: categoricalVGAM.Rnw:1469-1470
-###################################################
-head(fitted(fit2.ppom), 3)
-
-
-###################################################
-### code chunk number 16: categoricalVGAM.Rnw:1475-1476
-###################################################
-head(predict(fit2.ppom), 3)
-
-
-###################################################
-### code chunk number 17: categoricalVGAM.Rnw:1480-1482
-###################################################
-dim(model.matrix(fit2.ppom, type = "lm"))
-dim(model.matrix(fit2.ppom, type = "vlm"))
-
-
-###################################################
-### code chunk number 18: categoricalVGAM.Rnw:1486-1487
-###################################################
-constraints(fit2.ppom)[c(1, 2, 5, 6)]
-
-
-###################################################
-### code chunk number 19: categoricalVGAM.Rnw:1524-1526
+### code chunk number 5: categoricalVGAM.Rnw:1287-1289
 ###################################################
 head(marital.nz, 4)
 summary(marital.nz)
 
 
 ###################################################
-### code chunk number 20: categoricalVGAM.Rnw:1529-1531
+### code chunk number 6: categoricalVGAM.Rnw:1292-1294
 ###################################################
 fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
                data = marital.nz)
 
 
 ###################################################
-### code chunk number 21: categoricalVGAM.Rnw:1535-1537
+### code chunk number 7: categoricalVGAM.Rnw:1298-1300
 ###################################################
 head(fit.ms@y, 4)
 colSums(fit.ms@y)
 
 
 ###################################################
-### code chunk number 22: categoricalVGAM.Rnw:1546-1558
+### code chunk number 8: categoricalVGAM.Rnw:1309-1321
 ###################################################
 # Plot output
 mycol <- c("red","darkgreen","blue")
@@ -194,7 +76,7 @@ plot(fit.ms, se=TRUE, scale=12,
 
 
 ###################################################
-### code chunk number 23: categoricalVGAM.Rnw:1601-1614
+### code chunk number 9: categoricalVGAM.Rnw:1364-1377
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -213,13 +95,13 @@ plot(fit.ms, se=TRUE, scale=12,
 
 
 ###################################################
-### code chunk number 24: categoricalVGAM.Rnw:1634-1635
+### code chunk number 10: categoricalVGAM.Rnw:1397-1398
 ###################################################
 plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
 
 
 ###################################################
-### code chunk number 25: categoricalVGAM.Rnw:1644-1648
+### code chunk number 11: categoricalVGAM.Rnw:1407-1411
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -229,7 +111,7 @@ plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
 
 
 ###################################################
-### code chunk number 26: categoricalVGAM.Rnw:1671-1683
+### code chunk number 12: categoricalVGAM.Rnw:1434-1446
 ###################################################
 foo <- function(x, elbow=50)
     poly(pmin(x, elbow), 2)
@@ -246,13 +128,13 @@ fit2.ms <-
 
 
 ###################################################
-### code chunk number 27: categoricalVGAM.Rnw:1686-1687
+### code chunk number 13: categoricalVGAM.Rnw:1449-1450
 ###################################################
 coef(fit2.ms, matrix = TRUE)
 
 
 ###################################################
-### code chunk number 28: categoricalVGAM.Rnw:1691-1698
+### code chunk number 14: categoricalVGAM.Rnw:1454-1461
 ###################################################
 par(mfrow=c(2,2))
 plotvgam(fit2.ms, se = TRUE, scale = 12,
@@ -264,7 +146,7 @@ plotvgam(fit2.ms, se = TRUE, scale = 12,
 
 
 ###################################################
-### code chunk number 29: categoricalVGAM.Rnw:1709-1718
+### code chunk number 15: categoricalVGAM.Rnw:1472-1481
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -279,33 +161,33 @@ plotvgam(fit2.ms, se = TRUE, scale = 12,
 
 
 ###################################################
-### code chunk number 30: categoricalVGAM.Rnw:1736-1737
+### code chunk number 16: categoricalVGAM.Rnw:1499-1500
 ###################################################
 deviance(fit.ms) - deviance(fit2.ms)
 
 
 ###################################################
-### code chunk number 31: categoricalVGAM.Rnw:1743-1744
+### code chunk number 17: categoricalVGAM.Rnw:1506-1507
 ###################################################
 (dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
 
 
 ###################################################
-### code chunk number 32: categoricalVGAM.Rnw:1747-1748
+### code chunk number 18: categoricalVGAM.Rnw:1510-1511
 ###################################################
 1-pchisq(deviance(fit.ms) - deviance(fit2.ms), df=dfdiff)
 
 
 ###################################################
-### code chunk number 33: categoricalVGAM.Rnw:1761-1772
+### code chunk number 19: categoricalVGAM.Rnw:1524-1535
 ###################################################
 ooo <- with(marital.nz, order(age))
 with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
      type="l", las=1, lwd=2, ylim=0:1,
      ylab="Fitted probabilities",
-     xlab="Age", # main="Marital status amongst NZ Male Europeans",
+     xlab="Age",  # main="Marital status amongst NZ Male Europeans",
      col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62, # x="topright",
+legend(x=52.5, y=0.62,  # x="topright",
        col=c(mycol[1], "black", mycol[-1]),
        lty=1:4,
        legend=colnames(fit.ms@y), lwd=2)
@@ -313,7 +195,7 @@ abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
 
 
 ###################################################
-### code chunk number 34: categoricalVGAM.Rnw:1787-1800
+### code chunk number 20: categoricalVGAM.Rnw:1550-1563
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
  par(mfrow=c(1,1))
@@ -332,7 +214,7 @@ abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
 
 
 ###################################################
-### code chunk number 35: categoricalVGAM.Rnw:1834-1838
+### code chunk number 21: categoricalVGAM.Rnw:1597-1601
 ###################################################
 # Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
 head(backPain, 4)
@@ -341,41 +223,41 @@ backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale
 
 
 ###################################################
-### code chunk number 36: categoricalVGAM.Rnw:1842-1843
+### code chunk number 22: categoricalVGAM.Rnw:1605-1606
 ###################################################
 bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain)
 
 
 ###################################################
-### code chunk number 37: categoricalVGAM.Rnw:1846-1847
+### code chunk number 23: categoricalVGAM.Rnw:1609-1610
 ###################################################
 Coef(bp.rrmlm1)
 
 
 ###################################################
-### code chunk number 38: categoricalVGAM.Rnw:1875-1876
+### code chunk number 24: categoricalVGAM.Rnw:1638-1639
 ###################################################
 set.seed(123)
 
 
 ###################################################
-### code chunk number 39: categoricalVGAM.Rnw:1879-1881
+### code chunk number 25: categoricalVGAM.Rnw:1642-1644
 ###################################################
 bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank = 2,
                    Corner = FALSE, Uncor = TRUE)
 
 
 ###################################################
-### code chunk number 40: categoricalVGAM.Rnw:1889-1893
+### code chunk number 26: categoricalVGAM.Rnw:1652-1656
 ###################################################
 biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-#      xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
-       xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
+#      xlim=c(-1,6), ylim=c(-1.2,4),  # Use this if not scaled
+       xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2),  # Use this if scaled
        chull=TRUE, clty=2, ccol="blue")
 
 
 ###################################################
-### code chunk number 41: categoricalVGAM.Rnw:1925-1933
+### code chunk number 27: categoricalVGAM.Rnw:1688-1696
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -389,7 +271,7 @@ biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
 
 
 ###################################################
-### code chunk number 42: categoricalVGAM.Rnw:2047-2048
+### code chunk number 28: categoricalVGAM.Rnw:1810-1811
 ###################################################
 iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
 
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
index b5841f5..c4f98e0 100644
--- a/inst/doc/categoricalVGAM.Rnw
+++ b/inst/doc/categoricalVGAM.Rnw
@@ -83,6 +83,7 @@
 
 <<echo=FALSE, results=hide>>=
 library("VGAM")
+library("VGAMdata")
 ps.options(pointsize = 12)
 options(width = 72, digits = 4)
 options(SweaveHooks = list(fig = function() par(las = 1)))
@@ -610,7 +611,7 @@ to~\texttt{gam()} \citep{gam:pack:2009}, e.g.,
 to fit a nonparametric proportional odds model
 \citep[cf.~p.179 of][]{mccu:neld:1989}
 to the pneumoconiosis data one could try
-<<eval=T>>=
+<<label = pneumocat, eval=T>>=
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
             cumulative(reverse = TRUE, parallel = TRUE), pneumo)
@@ -1005,7 +1006,7 @@ A toy example where $p=p_A$ and $q=p_B$ is
 abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
 fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
 coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
+Coef(fit)  # Estimated pA and pB
 @
 The function \texttt{Coef()}, which applies only to intercept-only models,
 applies to $g_{j}(\theta_{j})=\eta_{j}$
@@ -1246,251 +1247,13 @@ data sets in order to give a flavour of what is available in the package.
 
 
 
-\subsection{2008 World Fly Fishing Championships}
-\label{sec:jsscat.eg.WFFC}
 
-The World Fly Fishing Championships (WFFC)
-is a prestigious catch-and-release competition held annually.
-In 2008 it was held in New~Zealand during the month of March.
-The data was released and appears in~\VGAM{} as the data frames
-\texttt{wffc},
-\texttt{wffc.nc},
-\texttt{wffc.indiv} and
-\texttt{wffc.teams}.
-Details about the competition are found
-in the online help, as well as~\cite{yee:2010v}.
-
-
-Briefly, we will model the abundance of fish caught during each
-three-hour session amongst the 90 or so competitors (from about
-19~countries) who fished all their sessions. There were five~sectors
-(locations) labelled I--V for the Whanganui~River, Lake~Otamangakau,
-Lake~Rotoaira, Waihou~River and Waimakariri~River, respectively. The
-sessions were sequentially labelled 1--6 where odd and even
-numbers denote morning and afternoon respectively. There were
-three consecutive days of fishing during which each sector experienced
-a rest session.
-
-
-
-\cite{yee:2010v} fitted Poisson and negative
-binomial regressions to the numbers caught at each competitor-session
-combination.
-The negative
-binomial regression had an intercept-only for its
-index parameter~$k$ and
-$\Var(Y) = \mu(1+\mu / k)$.
-Both models had the log-linear relationship
-\begin{eqnarray}
-\label{eq:wffc.use.loglinear}
-\log \, \mu_{adsc} &=&
-\eta ~=~
-\beta_{(1)1} +
-\alpha_{s} +
-\beta_{a} +
-\gamma_{d} +
-\delta_{c}.
-\end{eqnarray}
-where $\mu = E(Y)$ is the mean number caught,
-$\beta_{(1)1}$~is the intercept,
-$\alpha_{s}$~are the sector effects for $s=1,\ldots,5$ sectors,
-$\delta_{c}$~are the ``competitor effects'' for $c=1,\ldots,91$ competitors
-(8~competitors who did not fish all~5 sessions were excluded),
-$\beta_{a}$~are the morning ($a=1$) and afternoon ($a=2$) effects,
-$\gamma_{d}$~are the day effects for
-day $d=1,2,3$.
-Recall for factors that the first level is baseline, e.g.,
-$\alpha_1=\beta_1=0$ etc.
-Not used here is $b=1,\ldots,19$ for which beat/boat was
-fished/used (e.g., fixed locations on the river).
-We will fit a proportional odds model with essentially the RHS
-of~(\ref{eq:wffc.use.loglinear}) as the linear predictor.
-
-
-
-Here is a peek at the data frame used.
-Each row of~\texttt{wffc.nc} is the number of captures
-by each sector-session-beat combination.
-<<>>=
-head(wffc.nc, 5)
-@ 
-
-
-We first process the data a little: create the regressor
-variables and restrict the analysis to anglers who fished all their
-sessions.
-Here, ``\texttt{nc}'' stands for numbers caught, and
-``\texttt{f}'' stands for factor.
-<<>>=
-fnc <- transform(wffc.nc,
-                 finame = factor(iname),
-                 fsector = factor(sector),
-                 fday = factor(ceiling(session / 2)),
-                 mornaft = 1 - (session %% 2),
-                 fbeatboat = factor(beatboat))
-
-fnc <- fnc[with(fnc, !is.element(comid, c(99,72,80,93,45,71,97,78))),] 
-fnc <- transform(fnc,
-                ordnum = ifelse(numbers <= 02, "few",
-                         ifelse(numbers <= 10, "more", "most")))
-fnc$ordnum <- ordered(fnc$ordnum, levels = c("few", "more", "most"))
-@
-The variable \texttt{ordnum} is ordinal with 3~levels.
-The cut-points chosen here were decided upon by manual
-inspection; they gave approximately the same numbers in each level:
-<<>>=
-with(fnc, table(ordnum))
-@
-
-
-
-Now we are in a position to fit a proportional odds model
-to mimic~(\ref{eq:wffc.use.loglinear}).
-<<>>=
-fit.pom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          family = cumulative(parallel = TRUE, reverse = TRUE),
-          data = fnc)
-@
-Here, we set \texttt{reverse = TRUE} so that the coefficients
-have the same direction as a logistic regression.
-It means that if a regression coefficient is positive then 
-an increasing value of an explanatory variable is associated
-with an increasing value of the response.
-One could have used \texttt{family = propodds} instead.
-
-
-
-Before interpreting some output let's check that the input was alright.
-<<>>=
-head(fit.pom@y, 3)
-colSums(fit.pom@y)
-@
-The checking indicates no problems with the input.
-
-
-
-Now let's look at some output. Note that the Whanganui~River,
-Mornings and Day~1 are the baseline levels of the factors. Also, the
-variable \texttt{mornaft} is 0~for morning and 1~for afternoons.
-Likewise, the factor \texttt{fday} has values \texttt{1}, \texttt{2}
-and \texttt{3}.
-<<>>=
-head(coef(fit.pom, matrix = TRUE), 10)
-#head(summary(fit.pom)@coef3, 10) # Old now since 0.7-10 is nicer
-@
-verifies the parallelism assumption.
-Standard errors and Wald statistics may be obtained by
-<<>>=
-head(coef(summary(fit.pom)), 10)
-@
-Not surprisingly, these results agree with the Poisson
-and negative binomial regressions
-\citep[reported in][]{yee:2010v}.
-The most glaring qualitative results are as follows.
-We use the rough rule of thumb that if
-the absolute value of the $t$~{statistic} is greater than~$2$
-then it is `statistically significant'.
-\begin{itemize}
-
-\item
-The two lakes were clearly less productive than the rivers.
-However, neither of the other two rivers were significantly different
-from the Whanganui~River.
-
-
-
-\item
-There is a noticeable day effect: the second day is not significantly
-different from the opening day but it is for the third day.
-The decreasing values of the fitted coefficients show there is an
-increasing catch-reduction (fish depletion if it were catch-and-keep)
-as the competition progressed. Replacing \texttt{fday} by a
-variable~\texttt{day} and entering that linearly gave a $t$~statistic
-of~$-4.0$: there is a significant decrease in catch over time.
-
-
-
-\item
-Mornings were more productive than afternoons. The $p$~value for this
-would be close to~5\%. This result is in line with the day effect: fishing
-often results in a `hammering' effect over time on fish populations,
-especially in small streams. Since the morning and afternoon sessions
-were fixed at 9.00am--12.00pm and 2.30--5.30pm daily, there was only
-$2\frac12$~hours for the fish to recover until the next angler arrived.
-
-
-\end{itemize}
-
-
-
-
-
-Let us check the proportional odds assumption with respect
-to the variable~\texttt{mornaft}.
-<<>>=
-fit.ppom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          cumulative(parallel = FALSE ~ 1 + mornaft, reverse = TRUE),
-          data = fnc)
-head(coef(fit.ppom, matrix = TRUE),  8)
-@
-As expected, all rows but~\texttt{(Intercept)} and~\texttt{mornaft}
-are identical due to the parallelism.
-Then
-<<>>=
-pchisq(deviance(fit.pom) - deviance(fit.ppom),
-       df = df.residual(fit.pom) - df.residual(fit.ppom), lower.tail=FALSE)
-@
-gives a likelihood ratio test $p$~value which is non-significant.
-Repeating the testing for each variable separately indicates
-that the parallelism assumption seems reasonable here except
-with~\texttt{fday} ($p$~value $\approx 0.012$).
-For this model
-<<>>=
-fit2.ppom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          family = cumulative(parallel = FALSE ~ 1 + fday, reverse = TRUE),
-          data = fnc)
-head(coef(fit2.ppom, matrix = TRUE), 8)
-@
-
-
-Some miscellaneous output is as follows.
-<<>>=
-head(fitted(fit2.ppom), 3)
-@
-are the fitted probabilities $\widehat{P}(Y={j})$ which sum to unity for
-each row.
-The $i$th~row of
-<<>>=
-head(predict(fit2.ppom), 3)
-@
-is $\widehat{\boldeta}(\bix_i)^{\top}$.
-The dimensions of the LM and VLM design matrices are
-<<>>=
-dim(model.matrix(fit2.ppom, type = "lm"))
-dim(model.matrix(fit2.ppom, type = "vlm"))
-@
-which shows the VLM matrix grows quickly with respect to~$M$.
-Lastly,
-<<>>=
-constraints(fit2.ppom)[c(1, 2, 5, 6)]
-@
-shows some of the constraint matrices,
-$\bH_1=\bI_2$
-and
-$\bH_2=\bH_5=\bH_6=\bone_2$
-(see Equations~\ref{eqn:constraints.VGAM}--\ref{eqn:lin.coefs4}).
+%20130919
+%Note: 
+%\subsection{2008 World Fly Fishing Championships}
+%\label{sec:jsscat.eg.WFFC}
+%are deleted since there are problems with accessing the \texttt{wffc.nc}
+%data etc., which are now in \pkg{VGAMdata}.
 
 
 
@@ -1763,9 +1526,9 @@ ooo <- with(marital.nz, order(age))
 with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
      type="l", las=1, lwd=2, ylim=0:1,
      ylab="Fitted probabilities",
-     xlab="Age", # main="Marital status amongst NZ Male Europeans",
+     xlab="Age",  # main="Marital status amongst NZ Male Europeans",
      col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62, # x="topright",
+legend(x=52.5, y=0.62,  # x="topright",
        col=c(mycol[1], "black", mycol[-1]),
        lty=1:4,
        legend=colnames(fit.ms@y), lwd=2)
@@ -1888,8 +1651,8 @@ The fit was biplotted
  rows of $\widehat{\bA}$ plotted as labels) using
 <<figure=F>>=
 biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-#      xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
-       xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
+#      xlim=c(-1,6), ylim=c(-1.2,4),  # Use this if not scaled
+       xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2),  # Use this if scaled
        chull=TRUE, clty=2, ccol="blue")
 @
 to give Figure~\ref{fig:jsscat.eg.rrmlm2.backPain}.
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index 3703669..3883457 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index 9f7f384..bfd1f11 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -66,7 +66,7 @@ Sunderland, MA: Sinauer Associates, Inc.
 y <- cbind(53, 95, 38)
 fit <- vglm(y ~ 1, AA.Aa.aa(link = "probit"), trace = TRUE)
 rbind(y, sum(y) * fitted(fit))
-Coef(fit) # Estimated pA
+Coef(fit)  # Estimated pA
 summary(fit)
 }
 \keyword{models}
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index f56d701..0aa625e 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -64,11 +64,11 @@ Lange, K. (2002)
 }
 
 \examples{
-ymat <- cbind(AB=1997, Ab=906, aB=904, ab=32) # Data from Fisher (1925)
+ymat <- cbind(AB=1997, Ab=906, aB=904, ab=32)  # Data from Fisher (1925)
 fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identity", init.p = 0.9), trace = TRUE)
 fit <- vglm(ymat ~ 1, AB.Ab.aB.ab, trace = TRUE)
 rbind(ymat, sum(ymat)*fitted(fit))
-Coef(fit) # Estimated p
+Coef(fit)  # Estimated p
 p <- sqrt(4*(fitted(fit)[, 4]))
 p*p
 summary(fit)
diff --git a/man/AB.Ab.aB.ab2.Rd b/man/AB.Ab.aB.ab2.Rd
index ce722b6..a324f23 100644
--- a/man/AB.Ab.aB.ab2.Rd
+++ b/man/AB.Ab.aB.ab2.Rd
@@ -66,11 +66,11 @@ AB.Ab.aB.ab2(link = "logit", init.p = NULL)
 }
 
 \examples{
-ymat <- cbind(68, 11, 13, 21) # See Elandt-Johnson, pp.430,427
+ymat <- cbind(68, 11, 13, 21)  # See Elandt-Johnson, pp.430,427
 fit <- vglm(ymat ~ 1, AB.Ab.aB.ab2(link = cloglog), trace = TRUE, crit = "coef")
-Coef(fit) # Estimated p
+Coef(fit)  # Estimated p
 rbind(ymat, sum(ymat) * fitted(fit))
-sqrt(diag(vcov(fit))) # Estimated variance is approx 0.0021
+sqrt(diag(vcov(fit)))  # Estimated variance is approx 0.0021
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/ABO.Rd b/man/ABO.Rd
index ce20f83..3a25744 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -75,10 +75,10 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
 
 }
 \examples{
-ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073) # Order matters, not the name
+ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073)  # Order matters, not the name
 fit <- vglm(ymat ~ 1, ABO(link = identity), trace = TRUE, cri = "coef")
 coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
+Coef(fit)  # Estimated pA and pB
 rbind(ymat, sum(ymat) * fitted(fit))
 sqrt(diag(vcov(fit)))
 }
diff --git a/man/AICvlm.Rd b/man/AICvlm.Rd
index 9545cd0..849c5a6 100644
--- a/man/AICvlm.Rd
+++ b/man/AICvlm.Rd
@@ -4,6 +4,7 @@
 \alias{AICvgam}
 \alias{AICrrvglm}
 \alias{AICqrrvglm}
+\alias{AICcao}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Akaike's Information Criterion }
 \description{
@@ -12,7 +13,11 @@
 
 }
 \usage{
-AICvlm(object, \dots, k = 2)
+    AICvlm(object, \dots, corrected = FALSE, k = 2)
+   AICvgam(object, \dots, k = 2)
+ AICrrvglm(object, \dots, k = 2)
+AICqrrvglm(object, \dots, k = 2)
+    AICcao(object, \dots, k = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,16 +25,24 @@ AICvlm(object, \dots, k = 2)
   Some \pkg{VGAM} object, for example, having
   class \code{\link{vglmff-class}}.
 
+
   }
   \item{\dots}{
   Other possible arguments fed into
   \code{logLik} in order to compute the log-likelihood.
 
+
+  }
+  \item{corrected}{
+  Logical, perform the finite sample correction?
+
+
   }
   \item{k}{
   Numeric, the penalty per parameter to be used;
   the default is the classical AIC.
 
+
   }
 }
 \details{
@@ -39,6 +52,7 @@ AICvlm(object, \dots, k = 2)
   in the fitted model, and \eqn{k = 2} for the usual AIC.
   One could assign \eqn{k = \log(n)} (\eqn{n} the number of observations)
   for the so-called BIC or SBC (Schwarz's Bayesian criterion).
+  This is the function \code{AICvlm()}.
 
 
   This code relies on the log-likelihood being defined, and computed,
@@ -52,8 +66,17 @@ AICvlm(object, \dots, k = 2)
   parameter.
 
 
-  For VGAMs the nonlinear effective degrees of freedom for each
+  For VGAMs and CAO the nonlinear effective degrees of freedom for each
   smoothed component is used. This formula is heuristic.
+  These are the functions \code{AICvgam()} and \code{AICcao()}.
+
+
+  The finite sample correction is usually recommended when the
+  sample size is small or when the number of parameters is large.
+  When the sample size is large their difference tends to be negligible.
+  The correction is described in Hurvich and Tsai (1989), and is based
+  on a (univariate) linear model with normally distributed errors.
+
 
 
 }
@@ -61,19 +84,42 @@ AICvlm(object, \dots, k = 2)
   Returns a numeric value with the corresponding AIC (or BIC, or \dots,
   depending on \code{k}).
 
+
 }
 \author{T. W. Yee. }
 \note{
   AIC has not been defined for QRR-VGLMs yet.
 
 
+  Using AIC to compare \code{\link{posbinomial}} models
+  with, e.g., \code{\link{posbernoulli.tb}} models,
+  requires \code{posbinomial(omit.constant = TRUE)}.
+  See \code{\link{posbinomial}} for an example.
+  A warning is given if it suspects a wrong \code{omit.constant} value
+  was used.
+
+
+
+  Where defined,
+  \code{AICc(...)} is the same as \code{AIC(..., corrected = TRUE)}.
+
+
+
 }
 
-%\references{
+\references{
+
+Hurvich, C. M. and Tsai, C.-L. (1989)
+Regression and time series model selection in small samples,
+\emph{Biometrika},
+\bold{76}, 297--307.
+
+
+
 %  Sakamoto, Y., Ishiguro, M., and Kitagawa G. (1986).
 %  \emph{Akaike Information Criterion Statistics}.
 %  D. Reidel Publishing Company.
-%}
+}
 
 \section{Warning }{
   This code has not been double-checked.
@@ -89,7 +135,8 @@ AICvlm(object, \dots, k = 2)
   VGLMs are described in \code{\link{vglm-class}};
   VGAMs are described in \code{\link{vgam-class}};
   RR-VGLMs are described in \code{\link{rrvglm-class}};
-  \code{\link[stats]{AIC}}.
+  \code{\link[stats]{AIC}},
+  \code{\link{BICvlm}}.
 
 
 }
@@ -99,10 +146,14 @@ pneumo <- transform(pneumo, let = log(exposure.time))
               cumulative(parallel = TRUE, reverse = TRUE), pneumo))
 coef(fit1, matrix = TRUE)
 AIC(fit1)
+AICc(fit1)  # Quick way
+AIC(fit1, corrected = TRUE)  # Slow way
 (fit2 <- vglm(cbind(normal, mild, severe) ~ let,
               cumulative(parallel = FALSE, reverse = TRUE), pneumo))
 coef(fit2, matrix = TRUE)
 AIC(fit2)
+AICc(fit2)
+AIC(fit2, corrected = TRUE)
 }
 \keyword{models}
 \keyword{regression}
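
(A minimal R sketch, not taken from the package sources, of the 'k' penalty documented above: supplying k = log(n) to the AIC call reproduces the BIC/SBC. It reuses the pneumo example and assumes VGAM is attached and that nobs() works on the fit, as the BICvlm.Rd usage below states.)

library(VGAM)
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let,
            cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)
AIC(fit)                      # k = 2: the classical AIC
AIC(fit, k = log(nobs(fit)))  # k = log(n): the BIC/SBC
BIC(fit)                      # should agree with the previous line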
diff --git a/man/BICvlm.Rd b/man/BICvlm.Rd
new file mode 100644
index 0000000..4a05a43
--- /dev/null
+++ b/man/BICvlm.Rd
@@ -0,0 +1,127 @@
+\name{BICvlm}
+\alias{BICvlm}
+%\alias{BICvglm}
+\alias{BICvgam}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bayesian Information Criterion }
+\description{
+  Calculates the Bayesian information criterion (BIC) for
+  a fitted model object for which a log-likelihood value
+  has been obtained.
+
+
+}
+\usage{
+BICvlm(object, \dots, k = log(nobs(object)))
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object, \dots}{
+  Same as \code{\link{AICvlm}}.
+
+
+  }
+  \item{k}{
+  Numeric, the penalty per parameter to be used;
+  the default is \code{log(n)} where
+  \code{n} is the number of observations.
+
+
+  }
+}
+\details{
+  The so-called BIC or SBC (Schwarz's Bayesian criterion)
+  can be computed by calling \code{\link{AICvlm}} with a
+  different \code{k} argument.
+  See \code{\link{AICvlm}} for information and caveats.
+
+
+}
+\value{
+  Returns a numeric value with the corresponding BIC, or \dots,
+  depending on \code{k}.
+
+
+}
+\author{T. W. Yee. }
+\note{
+  BIC, AIC and other ICs can have many additive
+  constants added to them. The important thing is the
+  differences between models, since the minimum value corresponds to the best model.
+  Preliminary testing shows absolute differences for
+  some \pkg{VGAM} family functions such as
+  \code{\link{gaussianff}};
+  however, they should agree with non-normal families.
+
+
+  BIC has not been defined for QRR-VGLMs yet.
+
+
+}
+
+%\references{
+%  Sakamoto, Y., Ishiguro, M., and Kitagawa G. (1986).
+%  \emph{Akaike Information Criterion Statistics}.
+%  D. Reidel Publishing Company.
+%}
+
+
+\section{Warning }{
+  Like \code{\link{AICvlm}}, this code has not been double-checked.
+  The general applicability of \code{BIC} for the VGLM/VGAM classes
+  has not been developed fully. 
+  In particular, \code{BIC} should not be run on some \pkg{VGAM} family
+  functions because of violation of certain regularity conditions, etc.
+
+
+  Many \pkg{VGAM} family functions such as
+  \code{\link{cumulative}} can have the number of
+  observations absorbed into the prior weights argument
+  (e.g., \code{weights} in \code{\link{vglm}}), either
+  before or after fitting.  Almost all \pkg{VGAM} family
+  functions can have the number of observations defined by
+  the \code{weights} argument, e.g., as an observed frequency.
+  \code{BIC} simply uses the number of rows of the model matrix, say,
+  as defining \code{n}, hence the user must be very careful
+  of this possible error.
+  Use at your own risk!!
+
+
+}
+
+\seealso{
+  \code{\link{AICvlm}},
+  VGLMs are described in \code{\link{vglm-class}};
+  VGAMs are described in \code{\link{vgam-class}};
+  RR-VGLMs are described in \code{\link{rrvglm-class}};
+  \code{\link[stats]{BIC}},
+  \code{\link[stats]{AIC}}.
+
+
+}
+\examples{
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit1 <- vglm(cbind(normal, mild, severe) ~ let,
+              cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+coef(fit1, matrix = TRUE)
+BIC(fit1)
+(fit2 <- vglm(cbind(normal, mild, severe) ~ let,
+              cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+coef(fit2, matrix = TRUE)
+BIC(fit2)
+
+# These do not agree in absolute terms:
+gdata <- data.frame(x2 = sort(runif(n <- 40)))
+gdata <- transform(gdata, y1 = 1 + 2*x2 + rnorm(n, sd = 0.1))
+fit.v <- vglm(y1 ~ x2, gaussianff, data = gdata)
+fit.g <-  glm(y1 ~ x2, gaussian  , data = gdata)
+fit.l <-   lm(y1 ~ x2, data = gdata)
+c(BIC(fit.l), BIC(fit.g), BIC(fit.v))
+c(AIC(fit.l), AIC(fit.g), AIC(fit.v))
+c(AIC(fit.l) - AIC(fit.v),
+  AIC(fit.g) - AIC(fit.v))
+c(logLik(fit.l), logLik(fit.g), logLik(fit.v))
+}
+\keyword{models}
+\keyword{regression}
+
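
(A hedged sketch, not from the package sources, of the warning above about what defines n for the BIC penalty: the same Bernoulli information supplied as 100 separate rows versus two weighted rows yields the same log-likelihood but a different nobs(), hence a different BIC. The data and object names here are invented for illustration.)

library(VGAM)
set.seed(1)
y01 <- rbinom(100, size = 1, prob = 0.3)
ldata <- data.frame(y = y01)                                       # 100 Bernoulli rows
wdata <- data.frame(y = c(1, 0), w = c(sum(y01), 100 - sum(y01)))  # 2 weighted rows
fit.long <- vglm(y ~ 1, binomialff, data = ldata)
fit.wtd  <- vglm(y ~ 1, binomialff, weights = w, data = wdata)
c(logLik(fit.long), logLik(fit.wtd))  # identical log-likelihoods
c(nobs(fit.long), nobs(fit.wtd))      # 100 versus 2
c(BIC(fit.long), BIC(fit.wtd))        # the log(n) penalties differ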
diff --git a/man/Coef.qrrvglm-class.Rd b/man/Coef.qrrvglm-class.Rd
index 51165dd..01e959b 100644
--- a/man/Coef.qrrvglm-class.Rd
+++ b/man/Coef.qrrvglm-class.Rd
@@ -35,12 +35,12 @@ linear predictors and \eqn{n} is the number of observations.
     }
     \item{\code{Rank}:}{The rank (dimension, number of latent variables) 
     of the RR-VGLM. Called \eqn{R}. }
-    \item{\code{lv}:}{\eqn{n} by \eqn{R} matrix
+    \item{\code{latvar}:}{\eqn{n} by \eqn{R} matrix
           of latent variable values. }
-    \item{\code{lvOrder}:}{Of class \code{"matrix"}, the permutation
+    \item{\code{latvar.order}:}{Of class \code{"matrix"}, the permutation
           returned when the function 
-          \code{\link{order}} is applied to each column of \code{lv}.
-          This enables each column of \code{lv} to be easily sorted. 
+          \code{\link{order}} is applied to each column of \code{latvar}.
+          This enables each column of \code{latvar} to be easily sorted. 
           }
     \item{\code{Maximum}:}{Of class \code{"numeric"}, the 
           \eqn{M} maximum fitted values. That is, the fitted values 
@@ -51,7 +51,7 @@ linear predictors and \eqn{n} is the number of observations.
           of the latent variables where the optima are. 
           If the curves are not bell-shaped, then the value will
           be \code{NA} or \code{NaN}.}
-    \item{\code{OptimumOrder}:}{Of class \code{"matrix"}, the permutation
+    \item{\code{Optimum.order}:}{Of class \code{"matrix"}, the permutation
           returned when the function 
           \code{\link{order}} is applied to each column of \code{Optimum}.
           This enables each row of \code{Optimum} to be easily sorted. 
@@ -112,18 +112,18 @@ canonical Gaussian ordination.
 x2 <- rnorm(n <- 100)
 x3 <- rnorm(n)
 x4 <- rnorm(n)
-lv1 <- 0 + x3 - 2*x4
-lambda1 <- exp(3 - 0.5 * (lv1-0)^2)
-lambda2 <- exp(2 - 0.5 * (lv1-1)^2)
-lambda3 <- exp(2 - 0.5 * ((lv1+4)/2)^2)
+latvar1 <- 0 + x3 - 2*x4
+lambda1 <- exp(3 - 0.5 * ( latvar1-0)^2)
+lambda2 <- exp(2 - 0.5 * ( latvar1-1)^2)
+lambda3 <- exp(2 - 0.5 * ((latvar1+4)/2)^2)
 y1 <- rpois(n, lambda1)
 y2 <- rpois(n, lambda2)
 y3 <- rpois(n, lambda3)
 yy <- cbind(y1, y2, y3)
-# vvv p1 <- cqo(yy ~ x2 + x3 + x4, fam=poissonff, trace=FALSE)
+# vvv p1 <- cqo(yy ~ x2 + x3 + x4, fam = poissonff, trace = FALSE)
 \dontrun{
 lvplot(p1, y = TRUE, lcol = 1:3, pch = 1:3, pcol = 1:3)
 }
-# vvv print(Coef(p1), digits=3)
+# vvv print(Coef(p1), digits = 3)
 }
 \keyword{classes}
diff --git a/man/Coef.qrrvglm.Rd b/man/Coef.qrrvglm.Rd
index aa6e6a7..d4d8230 100644
--- a/man/Coef.qrrvglm.Rd
+++ b/man/Coef.qrrvglm.Rd
@@ -5,14 +5,22 @@
 \description{
   This methods function returns important matrices etc. of a 
   QO object.
+
+
 }
 \usage{
-Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
+Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{object}{ A CQO or UQO object. The former has class \code{"qrrvglm"}. }
-  \item{varlvI}{
+  \item{object}{
+% A CQO or UQO object.
+  A CQO object.
+  It has class \code{"qrrvglm"}.
+  }
+
+
+  \item{varI.latvar}{
   Logical indicating whether to scale the site scores (latent variables)
   to have variance-covariance matrix equal to the rank-\eqn{R} identity
   matrix. All models have uncorrelated site scores (latent variables),
@@ -43,7 +51,7 @@ Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
   tolerances are unity by transformation or by definition, and the spread
   of the site scores can be compared to them. Vice versa, if one wishes
  to compare the tolerances with the site score variability then setting
-  \code{varlvI=TRUE} is more appropriate.
+  \code{varI.latvar=TRUE} is more appropriate.
 
 
   For rank-2 QRR-VGLMs, one of the species can be chosen so that the
@@ -63,7 +71,8 @@ Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
 }
 \value{
   The \bold{A}, \bold{B1}, \bold{C},  \bold{T},  \bold{D} matrices/arrays
-  are returned, along with other slots.  For UQO, \bold{C} is undefined.
+  are returned, along with other slots.
+% For UQO, \bold{C} is undefined.
   The returned object has class \code{"Coef.qrrvglm"}
   (see \code{\link{Coef.qrrvglm-class}}).
 
@@ -118,10 +127,10 @@ set.seed(123)
 x2 <- rnorm(n <- 100)
 x3 <- rnorm(n)
 x4 <- rnorm(n)
-lv1 <- 0 + x3 - 2*x4
-lambda1 <- exp(3 - 0.5 * (lv1-0)^2)
-lambda2 <- exp(2 - 0.5 * (lv1-1)^2)
-lambda3 <- exp(2 - 0.5 * ((lv1+4)/2)^2) # Unequal tolerances
+latvar1 <- 0 + x3 - 2*x4
+lambda1 <- exp(3 - 0.5 * ( latvar1-0)^2)
+lambda2 <- exp(2 - 0.5 * ( latvar1-1)^2)
+lambda3 <- exp(2 - 0.5 * ((latvar1+4)/2)^2)  # Unequal tolerances
 y1 <- rpois(n, lambda1)
 y2 <- rpois(n, lambda2)
 y3 <- rpois(n, lambda3)
diff --git a/man/Coef.vlm.Rd b/man/Coef.vlm.Rd
index 416eba9..baa2a6f 100644
--- a/man/Coef.vlm.Rd
+++ b/man/Coef.vlm.Rd
@@ -65,9 +65,9 @@ Reduced-rank vector generalized linear models.
 \examples{
 set.seed(123); nn <- 1000
 bdata <- data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3))
-fit <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE) # intercept-only model
-coef(fit, matrix = TRUE) # log scale
-Coef(fit) # On the original scale
+fit <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE)  # intercept-only model
+coef(fit, matrix = TRUE)  # log scale
+Coef(fit)  # On the original scale
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index c05bbe2..98aaeac 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -16,11 +16,21 @@
 \usage{
 TypicalVGAMfamilyFunction(lsigma = "loge",
                           isigma = NULL,
+                          link.list = list("(Default)" = "identity",
+                                           x2          = "loge",
+                                           x3          = "logoff",
+                                           x4          = "mlogit",
+                                           x5          = "mlogit"),
+                          earg.list = list("(Default)" = list(),
+                                           x2          = list(),
+                                           x3          = list(offset = -1),
+                                           x4          = list(),
+                                           x5          = list()),
                           gsigma = exp(-5:5),
                           parallel = TRUE,
-                          apply.parint = FALSE,
                           shrinkage.init = 0.95,
                           nointercept = NULL, imethod = 1,
+                          type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
                           probs.x = c(0.15, 0.85),
                           probs.y = c(0.25, 0.50, 0.75),
                           mv = FALSE, earg.link = FALSE,
@@ -28,6 +38,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
                           oim = FALSE, nsimEIM = 100, zero = NULL)
 }
 \arguments{
+%                         apply.parint = FALSE,
 
   \item{lsigma}{
   Character.
@@ -48,6 +59,27 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
 % }
 
 
+  \item{link.list, earg.list}{
+  Some \pkg{VGAM} family functions
+  (such as \code{\link{normal.vcm}})
+  implement models with
+  potentially lots of parameter link functions.
+  These two arguments allow many such links and extra arguments
+  to be inputted more easily.
+  One has something like
+  \code{link.list = list("(Default)" = "identity", x2 = "loge", x3 = "logoff")}
+  and 
+  \code{earg.list = list("(Default)" = list(), x2 = list(), x3 = list(offset = -1))}.
+  Then any unnamed terms will have the default link with its
+  corresponding extra argument.
+  Note: the \code{\link{mlogit}} link is also possible, and if so,
+  at least two instances of it are necessary.
+  Then the last term is the baseline/reference group.
+
+
+  }
+
+
   \item{isigma}{
   Optional initial values can often be inputted using an argument
   beginning with \code{"i"}.
@@ -86,6 +118,25 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
   not be as convenient.
 
 
+  Here are some examples.
+1. \code{parallel = TRUE ~ x2 + x5} means the parallelism assumption
+is only applied to \eqn{X_2}, \eqn{X_5} and the intercept.
+2.  \code{parallel = TRUE ~ -1}
+and \code{parallel = TRUE ~ 0}
+mean the parallelism assumption
+is applied to \emph{no} variables at all.
+Similarly,
+\code{parallel = FALSE ~ -1} and
+\code{parallel = FALSE ~ 0}
+mean the parallelism assumption
+is applied to \emph{all} the variables including the intercept.
+3.  \code{parallel = FALSE ~ x2 - 1}
+and \code{parallel = FALSE ~ x2 + 0}
+apply the
+parallelism constraint to all terms (including the intercept)
+except for \eqn{X_2}.
+
+
   This argument is common in \pkg{VGAM} family functions for categorical
   responses, e.g., \code{\link{cumulative}},  \code{\link{acat}}, 
   \code{\link{cratio}}, \code{\link{sratio}}.
@@ -97,15 +148,21 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
 
 
   }
-  \item{apply.parint}{
-  Logical. It refers to whether the parallelism constraint is
-  applied to the intercept too.
-  By default, in some models it does, in other models it does not.
-  Used only if \code{parallel = TRUE} (fully or partially with
-  respect to all the explanatory variables).
 
 
-  }
+% \item{apply.parint}{
+% \emph{This variable will be depreciated shortly}.
+% Logical.
+% It refers to whether the parallelism constraint is
+% applied to the intercept too.
+% By default, in some models it does, in other models it does not.
+% Used only if \code{parallel = TRUE} (fully or partially with
+% respect to all the explanatory variables).
+
+
+% }
+
+
   \item{nsimEIM}{
   Some \pkg{VGAM} family functions use simulation to obtain an approximate
   expected information matrix (EIM).
@@ -146,6 +203,16 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
 
 
   }
+  \item{type.fitted}{
+  Character.
+  Type of fitted value returned by the \code{fitted()} methods function.
+  The first choice is always the default.
+  The available choices depend on what kind of family function it is.
+  Using the first few letters of the chosen value is okay.
+  See \code{\link{fittedvlm}} for more details.
+
+
+  }
   \item{probs.x, probs.y}{
   Numeric, with values in (0, 1).
  The probabilities that define quantiles with respect to some vector,
@@ -177,6 +244,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
   The latter uses the EIM, and is usually recommended.
   If \code{oim = TRUE} then \code{nsimEIM} is ignored.
 
+
   }
   \item{zero}{
   An integer specifying which linear/additive predictor is modelled
@@ -344,7 +412,9 @@ Bias reduction in exponential family nonlinear models.
 
 \seealso{
   \code{\link{Links}},
-  \code{\link{vglmff-class}}.
+  \code{\link{vglmff-class}},
+  \code{\link{normal.vcm}},
+  \code{\link{mlogit}}.
 
 
 }
@@ -375,7 +445,7 @@ coef(fit, mat = TRUE)
 \dontrun{
 ndata <- data.frame(x = runif(nn <- 500))
 ndata <- transform(ndata,
-           y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)), # k is size
+           y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)),  # k is size
            y2 = rnbinom(nn, mu = exp(2-x), size = exp(0)))
 fit <- vglm(cbind(y1, y2) ~ x, negbinomial(zero = -2), ndata)
 coef(fit, matrix = TRUE)
@@ -394,16 +464,16 @@ gdata <- data.frame(x2 = rnorm(nn <- 200))
 gdata <- transform(gdata,
            y1 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1 + 0.2*x2)),
            y2 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1)))
-args(normal1)
-fit1 <- vglm(y1 ~ x2, normal1, gdata) # This is ok
-fit2 <- vglm(y2 ~ x2, normal1(zero = 2), gdata) # This is ok
+args(uninormal)
+fit1 <- vglm(y1 ~ x2, uninormal, gdata)            # This is okay
+fit2 <- vglm(y2 ~ x2, uninormal(zero = 2), gdata)  # This is okay
 
 # This creates potential conflict
 clist <- list("(Intercept)" = diag(2), "x2" = diag(2))
-fit3 <- vglm(y2 ~ x2, normal1(zero = 2), gdata,
-             constraints = clist) # Conflict!
-coef(fit3, matrix = TRUE) # Shows that clist[["x2"]] was overwritten,
-constraints(fit3) # i.e., 'zero' seems to override the 'constraints' arg
+fit3 <- vglm(y2 ~ x2, uninormal(zero = 2), gdata,
+             constraints = clist)  # Conflict!
+coef(fit3, matrix = TRUE)  # Shows that clist[["x2"]] was overwritten,
+constraints(fit3)  # i.e., 'zero' seems to override the 'constraints' arg
 
 # Example 6 ('whitespace' argument)
 pneumo <- transform(pneumo, let = log(exposure.time))
@@ -411,8 +481,8 @@ fit1 <- vglm(cbind(normal, mild, severe) ~ let,
              sratio(whitespace = FALSE, parallel = TRUE), pneumo)
 fit2 <- vglm(cbind(normal, mild, severe) ~ let,
              sratio(whitespace = TRUE,  parallel = TRUE), pneumo)
-head(predict(fit1), 2) # No white spaces
-head(predict(fit2), 2) # Uses white spaces
+head(predict(fit1), 2)  # No white spaces
+head(predict(fit2), 2)  # Uses white spaces
 }
 
 \keyword{models}
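
(A minimal sketch, not from the package sources, of the parallel-formula variants listed above, reusing the pneumo example; the constraint matrices show which terms the parallelism constraint was applied to.)

library(VGAM)
pneumo <- transform(pneumo, let = log(exposure.time))
# Parallelism applied to the 'let' term (cf. example 1 above):
fit.par <- vglm(cbind(normal, mild, severe) ~ let,
                cumulative(parallel = TRUE ~ let, reverse = TRUE), data = pneumo)
# Parallelism applied to no variables at all (cf. example 2 above):
fit.npar <- vglm(cbind(normal, mild, severe) ~ let,
                 cumulative(parallel = TRUE ~ -1, reverse = TRUE), data = pneumo)
constraints(fit.par)[["let"]]   # one column: a common 'let' coefficient
constraints(fit.npar)[["let"]]  # M-1 = 2 columns: separate coefficients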
diff --git a/man/DeLury.Rd b/man/DeLury.Rd
deleted file mode 100644
index 381c136..0000000
--- a/man/DeLury.Rd
+++ /dev/null
@@ -1,198 +0,0 @@
-\name{DeLury}
-\alias{DeLury}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ DeLury's Method for Population Size Estimation }
-\description{
-  Computes DeLury's method or Leslie's method for estimating a
-  biological population size.
-
-}
-\usage{
-DeLury(catch, effort, type = c("DeLury","Leslie"), ricker = FALSE)
-       
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{catch, effort}{
-  Catch and effort. These should be numeric vectors of equal length.
-
-  }
-  \item{type}{
-  Character specifying which of the DeLury or Leslie models is to be fitted.
-  The default is the first value.
-
-  }
-  \item{ricker}{
-  Logical. If \code{TRUE} then the Ricker (1975) modification is computed.
-
-  }
-}
-\details{
-  This simple function implements the methods of DeLury (1947).
-  These are called the DeLury and Leslie models.
-  Note that there are many assumptions.
-  These include: (i) Catch and effort records are available for a series
-  of consecutive time intervals. The catch for a given time
-  interval, specified by \eqn{t}, is \eqn{c(t)}, and the
-  corresponding effort by \eqn{e(t)}.
-  The \emph{catch per unit effort} (CPUE) for the time interval \eqn{t} is
-  \eqn{C(t) = c(t)/e(t)}.
-  Let \eqn{d(t)} represent the proportion of the population
-  captured during the time interval \eqn{t}.
-  Then \eqn{d(t) = k(t) e(t)} so that \eqn{k(t)} is the 
-  proportion of the population captured during interval \eqn{t} by one unit
-  of effort. Then \eqn{k(t)} is called the \emph{catchability},
-  and the \emph{intensity} of effort is \eqn{e(t)}.
-  Let \eqn{E(t)} and \eqn{K(t)} be the total effort and total catch
-  up to interval \eqn{t}, and \eqn{N(t)} be the number of individuals
-  in the population at time \eqn{t}.
-  It is good idea to plot
-  \eqn{\log(C(t))} against \eqn{E(t)} for \code{type = "DeLury"} and
-  \eqn{C(t)} versus \eqn{K(t)} for \code{type = "Leslie"}.
-
-The other assumptions are as follows.
-%
-   (ii) The population is closed---the population must be closed to sources
-   of animals such as recruitment and immigration and losses of animals
-   due to natural mortality and emigration.
-%
-   (iii) Catchability is constant over the period of removals.
-%
-   (iv) The units of effort are independent, i.e., the individual units
-   of the method of capture (i.e., nets, traps, etc) do not compete with
-   each other.
-%
-   (v) All fish are equally vulnerable to the method of capture---source
-   of error may include gear saturation and trap-happy or trap-shy
-   individuals.
-%
-   (vi) Enough fish must be removed to substantially reduce the CPUE.
-%
-   (vii) The catches may remove less than 2\% of the population.
-%
-   Also, the usual assumptions of simple regression such as
-%
-   (viii) random sampling,
-%
-   (ix) the independent variable(s) are measured without error---both
-   catches and effort should be known, not estimated,
-%
-   (x) a line describes the data,
-%
-  (xi) the errors are independent and normally distributed.
-
-
-
-}
-\value{
-  A list with the following components.
-
-
-  \item{catch, effort }{
-  Catch and effort. Same as the original vectors.
-  These correspond to \eqn{c(t)} and \eqn{e(t)} respectively.
-
-
-  }
-  \item{type, ricker}{Same as input.
-
-
-  }
-  \item{N0}{an estimate of the population size at time 0.
-  Only valid if the assumptions are satisfied.
-
-
-  }
-  \item{CPUE}{Catch Per Unit Effort \eqn{=C(t)}. }
-  \item{K, E}{\eqn{K(t)}, \eqn{E(t)}. Only one is computed
-  depending on \code{type}. }
-  \item{lmfit}{
-  the \code{\link[stats:lm]{lm}} object from the
-  fit of \code{log(CPUE)} on \code{K} (when \code{type = "Leslie"}).
-  Note that the \code{x} component of the object is the model matrix.
-
-
-  }
-
-}
-\references{
-
-
-DeLury, D. B. (1947)
-On the estimation of biological populations.
-\emph{Biometrics},
-\bold{3}, 145--167.
-
-
-Ricker, W. E. (1975)
-Computation and interpretation of biological
-statistics of fish populations.
-\emph{Bull. Fish. Res. Bd. Can.},
-\bold{191}, 382--
-
-
-Yee, T. W. (2010)
-VGLMs and VGAMs: an overview for applications in fisheries research.
-\emph{Fisheries Research},
-\bold{101}, 116--126.
-
-
-
-
-}
-\author{ T. W. Yee. }
-\note{
-The data in the example below comes from DeLury (1947), and
-some plots of his are reproduced.
-Note that he used log to base 10 whereas natural logs are used here.
-His plots had some observations obscured by the y-axis!
-
-
-The DeLury method is not applicable to the data frame
-\code{\link{wffc.nc}} since the 2008 World Fly Fishing Competition was
-strictly catch-and-release.
-
-
-}
-\seealso{ \code{\link{wffc.nc}}. }
-\examples{
-pounds <- c(  147, 2796, 6888, 7723, 5330, 8839, 6324, 3569, 8120, 8084,
-            8252, 8411, 6757, 1152, 1500, 11945, 6995, 5851, 3221, 6345,
-            3035, 6271, 5567, 3017, 4559, 4721, 3613,  473,  928, 2784,
-            2375, 2640, 3569)
-traps  <- c(  200, 3780, 7174, 8850, 5793, 9504, 6655, 3685, 8202, 8585,
-            9105, 9069, 7920, 1215, 1471, 11597, 8470, 7770, 3430, 7970,
-            4740, 8144, 7965, 5198, 7115, 8585, 6935, 1060, 2070, 5725,
-            5235, 5480, 8300)
-table1 <- DeLury(pounds/1000, traps/1000)
-
-\dontrun{
-with(table1, plot(1+log(CPUE) ~ E, las = 1, pch = 19, main = "DeLury method",
-     xlab = "E(t)", ylab = "1 + log(C(t))", col = "blue"))
-}
-omitIndices <- -(1:16)
-table1b <- DeLury(pounds[omitIndices]/1000, traps[omitIndices]/1000)
-\dontrun{
-with(table1b, plot(1+log(CPUE) ~ E, las = 1, pch = 19, main = "DeLury method",
-     xlab = "E(t)", ylab = "1 + log(C(t))", col = "blue"))
-mylmfit <- with(table1b, lmfit)
-lines(mylmfit$x[, 2], 1 + predict.lm(mylmfit), col = "red", lty = "dashed")
-}
-
-
-omitIndices <- -(1:16)
-table2 <- DeLury(pounds[omitIndices]/1000, traps[omitIndices]/1000, type = "L")
-\dontrun{
-with(table2, plot(CPUE ~ K, las = 1, pch = 19,
-     main = "Leslie method; Fig. III",
-     xlab = "K(t)", ylab = "C(t)", col = "blue"))
-mylmfit <- with(table2, lmfit)
-abline(a = coef(mylmfit)[1], b = coef(mylmfit)[2],
-       col = "orange", lty = "dashed")
-}
-}
-% Add one or more standard keywords, see file 'KEYWORDS' in the
-% R documentation directory.
-\keyword{ models }
-
-
diff --git a/man/G1G2G3.Rd b/man/G1G2G3.Rd
index a5e0765..a4ece90 100644
--- a/man/G1G2G3.Rd
+++ b/man/G1G2G3.Rd
@@ -76,7 +76,7 @@ fit <- vglm(ymat ~ 1, G1G2G3(link = probit), trace = TRUE, crit = "coef")
 fit <- vglm(ymat ~ 1, G1G2G3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
            trace = TRUE, crit = "coef")
 fit <- vglm(ymat ~ 1, G1G2G3(link = "identity"), trace = TRUE)
-Coef(fit) # Estimated p1, p2 and f
+Coef(fit)  # Estimated p1, p2 and f
 rbind(ymat, sum(ymat)*fitted(fit))
 sqrt(diag(vcov(fit)))
 }
diff --git a/man/Huggins89.t1.Rd b/man/Huggins89.t1.Rd
index 50f0c3a..8e5248b 100644
--- a/man/Huggins89.t1.Rd
+++ b/man/Huggins89.t1.Rd
@@ -1,5 +1,6 @@
 \name{Huggins89.t1}
 \alias{Huggins89.t1}
+\alias{Huggins89table1}
 \docType{data}
 \title{
   Table 1 of Huggins (1989)
@@ -12,7 +13,10 @@
 
 %%  ~~ A concise (1-5 lines) description of the dataset. ~~
 }
-\usage{data(Huggins89.t1)}
+\usage{
+data(Huggins89table1)
+data(Huggins89.t1)
+}
 \format{
   The format is a data frame.
 
@@ -23,12 +27,19 @@
 }
 \details{
   Table 1 of Huggins (1989) gives this toy data set.
-  Note that variables \code{z1},\ldots,\code{z10} are
+  Note that variables \code{t1},\ldots,\code{t10} are
   occasion-specific variables. They correspond to the
   response variables \code{y1},\ldots,\code{y10} which
   have values 1 for capture and 0 for not captured.
 
 
+
+  The data frames \code{Huggins89table1} and \code{Huggins89.t1} are identical.
+  The latter uses variables beginning with \code{z},
+  not \code{t}, and may be withdrawn very soon.
+
+
+
 %%  ~~ If necessary, more details than the __description__ above ~~
 }
 %\source{
@@ -45,66 +56,98 @@ On the statistical analysis of capture experiments.
 %%  ~~ possibly secondary sources and usages ~~
 }
 \examples{
-\dontrun{
-small.Huggins89.t1 <- transform(Huggins89.t1, Zedd = z1, Z2 = z2, Z3 = z3)
-small.Huggins89.t1 <- subset(small.Huggins89.t1, y1 + y2 + y3 > 0)
-# fit1 is the bottom equation on p.133, but this is only for the 1st 3 responses.
-# Currently posbernoulli.tb() cannot handle more than 3 Bernoulli variates.
-# The fit is not very good.
-fit1 <-
-  vglm(cbind(y1, y2, y3) ~  x2 + Zedd,
-       xij = list(Zedd ~ z1 + z2 + z3 + Z2 + Z3 - 1),
-       posbernoulli.tb(parallel.t = TRUE), maxit = 155,
-       data = small.Huggins89.t1, trace = TRUE,
-       form2 = ~ x2 + Zedd + z1 + z2 + z3 + Z2 + Z3)
-coef(fit1)
-coef(fit1, matrix = TRUE)  # M_t model
-constraints(fit1)
-summary(fit1)
-fit1@extra$N.hat     # Estimate of the population size N
-fit1@extra$SE.N.hat  # Its standard error
-
-
-fit.t <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
-              posbernoulli.t, data = Huggins89.t1, trace = TRUE)
-coef(fit.t)
-coef(fit.t, matrix = TRUE)  # M_t model
-summary(fit.t)
-fit.t@extra$N.hat     # Estimate of the population size N
-fit.t@extra$SE.N.hat  # Its standard error
-
-
-fit.b <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
-              posbernoulli.b, data = Huggins89.t1, trace = TRUE)
-coef(fit.b)
-coef(fit.b, matrix = TRUE)  # M_b model
-summary(fit.b)
-fit.b@extra$N.hat
-fit.b@extra$SE.N.hat
-
-
-fit.0 <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
-             posbernoulli.b(parallel.b = TRUE), data = Huggins89.t1,
-             trace = TRUE)
-coef(fit.0, matrix = TRUE)  # M_0 model (version 1)
-coef(fit.0)
-summary(fit.0)
-fit.0@extra$N.hat
-fit.0@extra$SE.N.hat
-
-
-Fit.0 <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
-              posbernoulli.t(parallel.t = TRUE), data = Huggins89.t1,
-              trace = TRUE)
-coef(Fit.0)
-coef(Fit.0, matrix = TRUE)  # M_0 model (version 2)
-summary(Fit.0)
-Fit.0@extra$N.hat
-Fit.0@extra$SE.N.hat
-}
+Huggins89table1 <- transform(Huggins89table1, x3.tij = t1,
+                             T2 = t2, T3 = t3, T4 = t4,  T5 =  t5, T6 = t6,
+                             T7 = t7, T8 = t8, T9 = t9, T10 = t10)
+small.table1 <- subset(Huggins89table1,
+                       y1 + y2 + y3 + y4 + y5 + y6 + y7 + y8 + y9 + y10 > 0)
+# fit.tbh is the bottom equation on p.133.
+# It is a M_tbh model.
+fit.tbh <-
+  vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~  x2 + x3.tij,
+       xij = list(x3.tij ~ t1 + t2 + t3 + t4 + t5 + t6 + t7 + t8 + t9 + t10 +
+                           T2 + T3 + T4 + T5 + T6 + T7 + T8 + T9 + T10 - 1),
+       posbernoulli.tb(parallel.t = TRUE ~ x2 + x3.tij),
+       data = small.table1, trace = TRUE,
+       form2 = ~  x2 + x3.tij +
+                  t1 + t2 + t3 + t4 + t5 + t6 + t7 + t8 + t9 + t10 +
+                       T2 + T3 + T4 + T5 + T6 + T7 + T8 + T9 + T10)
+
+# These results differ a bit from Huggins (1989), probably because
+# two animals had to be removed here (they were never caught):
+coef(fit.tbh)  # First element is the behavioural effect
+sqrt(diag(vcov(fit.tbh)))  # SEs
+constraints(fit.tbh, matrix = TRUE)
+summary(fit.tbh, presid = FALSE)
+fit.tbh at extra$N.hat     # Estimate of the population size N; cf. 20.86
+fit.tbh at extra$SE.N.hat  # Its standard error; cf. 1.87 or 4.51
+
+fit.th <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+               posbernoulli.t, data = small.table1, trace = TRUE)
+coef(fit.th)
+constraints(fit.th)
+coef(fit.th, matrix = TRUE)  # M_th model
+summary(fit.th, presid = FALSE)
+fit.th at extra$N.hat     # Estimate of the population size N
+fit.th at extra$SE.N.hat  # Its standard error
+
+fit.bh <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+               posbernoulli.b(I2 = FALSE), data = small.table1, trace = TRUE)
+coef(fit.bh)
+constraints(fit.bh)
+coef(fit.bh, matrix = TRUE)  # M_bh model
+summary(fit.bh, presid = FALSE)
+fit.bh at extra$N.hat
+fit.bh at extra$SE.N.hat
+
+fit.h <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+             posbernoulli.b, data = small.table1, trace = TRUE)
+coef(fit.h, matrix = TRUE)  # M_h model (version 1)
+coef(fit.h)
+summary(fit.h, presid = FALSE)
+fit.h at extra$N.hat
+fit.h at extra$SE.N.hat
+
+Fit.h <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+              posbernoulli.t(parallel.t = TRUE ~ x2),
+              data = small.table1, trace = TRUE)
+coef(Fit.h)
+coef(Fit.h, matrix = TRUE)  # M_h model (version 2)
+summary(Fit.h, presid = FALSE)
+Fit.h at extra$N.hat
+Fit.h at extra$SE.N.hat
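+
+# A hedged side-by-side summary (not from the original source):
+# collect the population size estimates from the fits above.
+c(M_tbh = fit.tbh at extra$N.hat, M_th = fit.th at extra$N.hat,
+  M_bh  = fit.bh at extra$N.hat,  M_h  = fit.h at extra$N.hat)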
 }
 \keyword{datasets}
-% data(Huggins89.t1)
-%## maybe str(Huggins89.t1) ; plot(Huggins89.t1) ...
+
+
+%\dontrun{
+%}
+% data(Huggins89table1)
+%## maybe str(Huggins89table1) ; plot(Huggins89table1) ...
+
+
+% coef(fit1, matrix = TRUE)  # M_t model
+% Huggins89.t1 <- transform(Huggins89.t1, xx2 = c(matrix(x2, 2, 10, byrow = TRUE)))
+
+
+
+
+%This code below is equivalent to the above fit.tbh (same name).
+%But this version uses manual construction of the constraint matrices:
+%tau <- 10
+%Hlist <-list("(Intercept)" = cbind(bhvr.effect = c(rep(0, len = tau),
+%                                                   rep(1, len = tau-1)),
+%                                   overall.intercept = 1),
+%             x2            = cbind(rep(1, len = 2*tau-1)),
+%             Zedd          = cbind(rep(1, len = 2*tau-1)))
+%fit.tbh <-
+%  vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~  x2 + Zedd,
+%       xij = list(Zedd ~ z1 + z2 + z3 + z4 + z5 + z6 + z7 + z8 + z9 + z10 +
+%                              Z2 + Z3 + Z4 + Z5 + Z6 + Z7 + Z8 + Z9 + Z10 - 1),
+%       posbernoulli.tb, data = small.t1, trace = TRUE,
+%       constraints = Hlist,
+%       form2 = ~  x2 + Zedd +
+%                 z1 + z2 + z3 + z4 + z5 + z6 + z7 + z8 + z9 + z10 +
+%                      Z2 + Z3 + Z4 + Z5 + Z6 + Z7 + Z8 + Z9 + Z10)
 
 
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index 10f630f..e974937 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -69,7 +69,7 @@ New York: Wiley.
 }
 \seealso{
   \code{\link{inv.gaussianff}},
-  \code{\link{wald}}.
+  \code{\link{waldff}}.
 
 
 }
diff --git a/man/Links.Rd b/man/Links.Rd
index af8ca80..9f12a46 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -157,8 +157,8 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
   For positive parameters (i.e., greater than 0):
   \code{\link{loge}},
-  \code{\link{nloge}},
-  \code{\link{powl}}.
+  \code{\link{negloge}},
+  \code{\link{powerlink}}.
 
 
   For parameters greater than 1:
@@ -177,9 +177,9 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
   For unrestricted parameters (i.e., any value):
   \code{\link{identity}},
-  \code{\link{nidentity}},
+  \code{\link{negidentity}},
   \code{\link{reciprocal}},
-  \code{\link{nreciprocal}}.
+  \code{\link{negreciprocal}}.
 
 
 % Other links:
@@ -188,6 +188,8 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 \references{
     McCullagh, P. and Nelder, J. A. (1989)
     \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
 }
 
 \seealso{
@@ -196,8 +198,8 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
   \code{\link{vgam}},
   \code{\link{rrvglm}}.
   \code{\link{cqo}},
-  \code{\link{cao}},
-  \code{\link{uqo}}.
+  \code{\link{cao}}.
+% \code{\link{uqo}}.
 
 
 }
@@ -247,32 +249,26 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
 
 
-
-
-
-
-
-
 }
 \examples{
 logit("a")
 logit("a", short = FALSE)
 logit("a", short = FALSE, tag = TRUE)
 
-logoff(1:5, offset = 1) # Same as log(1:5 + 1)
-powl(1:5, power = 2) # Same as (1:5)^2
+logoff(1:5, offset = 1)  # Same as log(1:5 + 1)
+powerlink(1:5, power = 2)  # Same as (1:5)^2
 
 \dontrun{ # This is old and no longer works:
 logoff(1:5, earg = list(offset = 1))
-powl(1:5, earg = list(power = 2))
+powerlink(1:5, earg = list(power = 2))
 }
 
-fit1 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua) # ok
-fit2 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua) # ok
+fit1 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua)  # okay
+fit2 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua)  # okay
 
 \dontrun{
 # This no longer works since "clog" is not a valid VGAM link function:
-fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua) # not ok
+fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua)  # not okay
 
 
 # No matter what the link, the estimated var-cov matrix is the same
@@ -283,10 +279,10 @@ fit2 <- vglm(y ~ 1, beta.ab(lshape1 = logoff(offset = 1.1),
                             lshape2 = logoff(offset = 1.1)),
             trace = TRUE, crit = "coef")
 vcov(fit1, untransform = TRUE)
-vcov(fit1, untransform = TRUE) - vcov(fit2, untransform = TRUE) # Should be all 0s
+vcov(fit1, untransform = TRUE) - vcov(fit2, untransform = TRUE)  # Should be all 0s
 \dontrun{ # This is old:
-fit1 at misc$earg # Some 'special' parameters
-fit2 at misc$earg # Some 'special' parameters are here
+fit1 at misc$earg  # Some 'special' parameters
+fit2 at misc$earg  # Some 'special' parameters are here
 }
 
 
@@ -295,8 +291,8 @@ p <- seq(0.01, 0.99, len = 200)
 x <- seq(-4, 4, len = 200)
 plot(p, logit(p), type = "l", col = "blue")
 plot(x, logit(x, inverse = TRUE), type = "l", col = "blue")
-plot(p, logit(p, deriv = 1), type = "l", col = "blue") # reciprocal!
-plot(p, logit(p, deriv = 2), type = "l", col = "blue") # reciprocal!
+plot(p, logit(p, deriv = 1), type = "l", col = "blue")  # reciprocal!
+plot(p, logit(p, deriv = 2), type = "l", col = "blue")  # reciprocal!
 }
 }
 \keyword{models}
diff --git a/man/Max.Rd b/man/Max.Rd
index e39a927..2041f36 100644
--- a/man/Max.Rd
+++ b/man/Max.Rd
@@ -28,7 +28,7 @@ Max(object, ...)
 
 
   Maxima occur in quadratic and additive ordination,
-  e.g., CQO or UQO or CAO.
+  e.g., CQO or CAO.
   For these models the maximum is the fitted value at the
   optimum. For quadratic ordination models there is a formula
   for the optimum but for additive ordination models the
@@ -37,6 +37,10 @@ Max(object, ...)
   a valid optimum, the fitted value at the optimum is the maximum.
 
 
+
+% e.g., CQO or UQO or CAO.
+
+
 }
 \value{
   The value returned depends specifically on the methods
@@ -76,8 +80,8 @@ Constrained additive ordination.
 
 \examples{
 \dontrun{
-set.seed(111) # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(111)  # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                 Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
                 Trocterr, Zoraspin) ~
diff --git a/man/Opt.Rd b/man/Opt.Rd
index 535996c..792b2d9 100644
--- a/man/Opt.Rd
+++ b/man/Opt.Rd
@@ -28,7 +28,7 @@ Opt(object, ...)
 
 
   Optima occur in quadratic and additive ordination,
-  e.g., CQO or UQO or CAO.
+  e.g., CQO or CAO.
   For these models the optimum is the value of the latent
   variable where the maximum occurs, i.e., where the fitted value
   achieves its highest value.
@@ -83,7 +83,7 @@ called the \emph{species score}.
 
 \examples{
 set.seed(111)  # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 # vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
 # vvv                Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
 # vvv                Trocterr, Zoraspin) ~
diff --git a/man/Pareto.Rd b/man/Pareto.Rd
index 1aa2d44..b950ff2 100644
--- a/man/Pareto.Rd
+++ b/man/Pareto.Rd
@@ -34,26 +34,35 @@ rpareto(n, location, shape)
   \code{ppareto} gives the distribution function,
   \code{qpareto} gives the quantile function, and
   \code{rpareto} generates random deviates.
+
+
 }
 \references{
-Evans, M., Hastings, N. and Peacock, B. (2000)
+
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
+
+
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{pareto1}}, the \pkg{VGAM} family function
+  See \code{\link{paretoff}}, the \pkg{VGAM} family function
   for estimating the parameter \eqn{k} by maximum likelihood estimation,
   for the formula of the probability density function and the
   range restrictions imposed on the parameters.
+
+
 }
 %%\note{
 %%  The Pareto distribution is 
 %%}
+
 \seealso{
-  \code{\link{pareto1}},
+  \code{\link{paretoff}},
   \code{\link{ParetoIV}}.
 
+
 }
 \examples{
 alpha <- 3; k <- exp(1); x <- seq(2.8, 8, len = 300)
@@ -61,13 +70,15 @@ alpha <- 3; k <- exp(1); x <- seq(2.8, 8, len = 300)
 plot(x, dpareto(x, location = alpha, shape = k), type = "l",
      main = "Pareto density split into 10 equal areas")
 abline(h = 0, col = "blue", lty = 2)
-qq <- qpareto(seq(0.1,0.9,by = 0.1),location = alpha,shape = k)
-lines(qq, dpareto(qq, loc = alpha, shape = k), col = "purple", lty = 3, type = "h")
+qvec <- qpareto(seq(0.1, 0.9, by = 0.1), location = alpha, shape = k)
+lines(qvec, dpareto(qvec, loc = alpha, shape = k),
+      col = "purple", lty = 3, type = "h")
 }
-pp <- seq(0.1,0.9,by = 0.1)
-qq <- qpareto(pp, location = alpha, shape = k)
-ppareto(qq, location = alpha, shape = k)
-qpareto(ppareto(qq,loc = alpha,shape = k),loc = alpha,shape = k) - qq # Should be 0
+pvec <- seq(0.1, 0.9, by = 0.1)
+qvec <- qpareto(pvec, location = alpha, shape = k)
+ppareto(qvec, location = alpha, shape = k)
+qpareto(ppareto(qvec, loc = alpha, shape = k),
+        loc = alpha, shape = k) - qvec  # Should be 0
 }
 \keyword{distribution}
 
diff --git a/man/Qvar.Rd b/man/QvarUC.Rd
similarity index 87%
rename from man/Qvar.Rd
rename to man/QvarUC.Rd
index f3cae04..b1112a0 100644
--- a/man/Qvar.Rd
+++ b/man/QvarUC.Rd
@@ -9,33 +9,31 @@ Quasi-variances Preprocessing Function
 \description{
   Takes a \code{\link{vglm}} fit or a variance-covariance matrix,
   and preprocesses it for \code{\link{rcim}} and
-  \code{\link{normal1}} so that quasi-variances can be computed.
+  \code{\link{uninormal}} so that quasi-variances can be computed.
 
 
 %%  ~~ A concise (1-5 lines) description of what the function does. ~~
 }
 \usage{
-Qvar(object, factorname = NULL, which.eta = 1,
+Qvar(object, factorname = NULL, which.linpred = 1,
      coef.indices = NULL, labels = NULL,
      dispersion = NULL, reference.name = "(reference)", estimates = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{object}{
-    A \code{"\link[=vglmff-class]{vglm}"} object or a variance-covariance
-    matrix, e.g., \code{vcov(vglm.object)}.
-    The former is preferred since it contains all the information
-    needed.
+  A \code{"\link[=vglmff-class]{vglm}"} object or a variance-covariance
+  matrix, e.g., \code{vcov(vglm.object)}.
+  The former is preferred since it contains all the information needed.
   If a matrix then \code{factorname} and/or \code{coef.indices}
   should be specified to identify the factor.
 
 
-%%     ~~Describe \code{object} here~~
 }
-\item{which.eta}{
+\item{which.linpred}{
   A single integer from the set \code{1:M}.
   Specifies which linear predictor to use.
-  Let the value of \code{which.eta} be called \eqn{j}.
+  Let the value of \code{which.linpred} be called \eqn{j}.
   Then the factor should appear in that linear predictor, hence
   the \eqn{j}th row of the constraint matrix corresponding
   to the factor should have at least one nonzero value.
@@ -54,25 +52,23 @@ Qvar(object, factorname = NULL, which.eta = 1,
   this argument should also be specified.
 
 
-%%     ~~Describe \code{factor.name} here~~
 }
 \item{labels}{
   Character.
   Optional, for labelling the variance-covariance matrix.
 
-%%     ~~Describe \code{level1.name} here~~
 }
 \item{dispersion}{
   Numeric.
   Optional, passed into \code{vcov()} with the same argument name.
 
-%%     ~~Describe \code{level1.name} here~~
+
 }
 \item{reference.name}{
   Character.
   Label for the reference level.
 
-%%     ~~Describe \code{level1.name} here~~
+
 }
 \item{coef.indices}{
   Optional numeric vector of length at least 3 specifying
@@ -85,6 +81,7 @@ Qvar(object, factorname = NULL, which.eta = 1,
   an optional vector of estimated coefficients
   (redundant if \code{object} is a model).
 
+
 }
 }
 \details{
@@ -119,8 +116,6 @@ Qvar(object, factorname = NULL, which.eta = 1,
   This implementation draws heavily from that.
 
 
-
-%%  ~~ If necessary, more details than the description above ~~
 }
 \value{
   A \eqn{L} by \eqn{L} matrix whose \eqn{i}-\eqn{j} element
@@ -131,7 +126,7 @@ Qvar(object, factorname = NULL, which.eta = 1,
 
   
   The matrix has an attribute that corresponds to the prior
-  weight matrix; it is accessed by \code{\link{normal1}}
+  weight matrix; it is accessed by \code{\link{uninormal}}
+  and replaces the usual \code{weights} argument
   of \code{\link{vglm}}. This weight matrix has ones on
   the off-diagonals and some small positive number on
@@ -199,7 +194,8 @@ Qvar(object, factorname = NULL, which.eta = 1,
 \seealso{
   \code{\link{rcim}},
   \code{\link{vglm}},
-  \code{\link{normal1}},
+  \code{\link{qvar}},
+  \code{\link{uninormal}},
   \code{\link{explink}},
   \code{qvcalc()} in \pkg{qvcalc},
   \code{\link[MASS]{ships}}.
@@ -217,21 +213,24 @@ Shipmodel <- vglm(incidents ~ type + year + period,
                   data = ships, subset = (service > 0))
 
 # Easiest form of input
-fit1 <- rcim(Qvar(Shipmodel, "type"), normal1("explink"), maxit = 99)
+fit1 <- rcim(Qvar(Shipmodel, "type"), uninormal("explink"), maxit = 99)
+qvar(fit1)              # Easy method to get the quasi-variances
+qvar(fit1, se = TRUE)   # Easy method to get the quasi-standard errors
+
 (quasiVar <- exp(diag(fitted(fit1))) / 2)                 # Version 1
 (quasiVar <- diag(predict(fit1)[, c(TRUE, FALSE)]) / 2)   # Version 2
 (quasiSE  <- sqrt(quasiVar))
 
 # Another form of input
 fit2 <- rcim(Qvar(Shipmodel, coef.ind = c(0,2:5), reference.name = "typeA"),
-             normal1("explink"), maxit = 99)
+             uninormal("explink"), maxit = 99)
 \dontrun{ plotqvar(fit2, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
 
 # The variance-covariance matrix is another form of input (not recommended)
 fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
                   labels = c("typeA", "typeB", "typeC", "typeD", "typeE"),
                   estimates = c(typeA = 0, coef(Shipmodel)[2:5])),
-             normal1("explink"), maxit = 99)
+             uninormal("explink"), maxit = 99)
 (QuasiVar <- exp(diag(fitted(fit3))) / 2)                 # Version 1
 (QuasiVar <- diag(predict(fit3)[, c(TRUE, FALSE)]) / 2)   # Version 2
 (QuasiSE  <- sqrt(QuasiVar))
@@ -253,10 +252,10 @@ clist <- list("bs(age, df = 4)" = rbind(1, 0),
 fit1 <- vglm(babies ~ bs(age, df = 4) + bs(age, df = 3) + ethnic,
             zipoissonff(zero = NULL), xs.nz.f,
             constraints = clist, trace = TRUE)
-Fit1 <- rcim(Qvar(fit1, "ethnic", which.eta = 1),
-             normal1("explink", imethod = 1), maxit = 99, trace = TRUE)
-Fit2 <- rcim(Qvar(fit1, "ethnic", which.eta = 2),
-             normal1("explink", imethod = 1), maxit = 99, trace = TRUE)
+Fit1 <- rcim(Qvar(fit1, "ethnic", which.linpred = 1),
+             uninormal("explink", imethod = 1), maxit = 99, trace = TRUE)
+Fit2 <- rcim(Qvar(fit1, "ethnic", which.linpred = 2),
+             uninormal("explink", imethod = 1), maxit = 99, trace = TRUE)
 }
 \dontrun{ par(mfrow = c(1, 2))
 plotqvar(Fit1, scol = "blue", pch = 16,
diff --git a/man/Rcam.Rd b/man/Rcim.Rd
similarity index 98%
rename from man/Rcam.Rd
rename to man/Rcim.Rd
index d31ffb6..aabcd59 100644
--- a/man/Rcam.Rd
+++ b/man/Rcim.Rd
@@ -23,6 +23,7 @@
   Matrix, of dimension \eqn{r} by \eqn{c}.
   It is best that it is labelled with row and column names.
 
+
 }
 \item{rbaseline, cbaseline}{
   Numeric (row number of the matrix \code{mat}) or
@@ -30,6 +31,7 @@
   wants as the row baseline or reference level. 
   Similarly \code{cbaseline} for the column.
 
+
 }
 }
 \details{
@@ -39,6 +41,7 @@
   response with respect to the row and columns---these become
   the new first row and column.
 
+
 }
 
 \value{
@@ -47,6 +50,7 @@
   first rows and columns.
   The default is no change in \code{mat}.
 
+
 }
 \author{
 Alfian F. Hadi and T. W. Yee.
@@ -67,6 +71,7 @@ Alfian F. Hadi and T. W. Yee.
   in \code{\link{moffset}}
   by 1 (when elements of the matrix agree).
 
+
 }
 
 
@@ -81,5 +86,5 @@ Alfian F. Hadi and T. W. Yee.
 (alcoff.e <- moffset(alcoff, roffset = "6", postfix = "*"))
 (aa <- Rcim(alcoff,    rbaseline = "11", cbaseline = "Sun"))
 (bb <- moffset(alcoff,             "11",             "Sun", postfix = "*"))
-aa - bb # Notice the difference!
+aa - bb  # Note the difference!
 }
diff --git a/man/SUR.Rd b/man/SUR.Rd
index 5c0ffc7..5344656 100644
--- a/man/SUR.Rd
+++ b/man/SUR.Rd
@@ -11,10 +11,10 @@ Fits a system of seemingly unrelated regressions.
 \usage{
 SUR(mle.normal = FALSE,
     divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
-    parallel = FALSE, apply.parint = TRUE,
-    Varcov = NULL, matrix.arg = FALSE)
+    parallel = FALSE, Varcov = NULL, matrix.arg = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
+%apply.parint = TRUE,
 \arguments{
 %  \item{estimator}{
 %Character.
@@ -44,9 +44,11 @@ a warning or an error will result.
 
 
 }
-  \item{parallel, apply.parint}{
+  \item{parallel}{
   See
   \code{\link{CommonVGAMffArguments}}.
+  If \code{parallel = TRUE} then the constraint applies to
+  the intercept too.
 
 
 }
@@ -134,7 +136,7 @@ a warning or an error will result.
 
 }
 \note{
-  The fitted object has slot \code{@extra$ncols_X_lm} which is
+  The fitted object has slot \code{@extra$ncols.X.lm} which is
   a \eqn{M} vector with the number of parameters for each LM.
   Also, \code{@misc$values.divisor} is the \eqn{M}-vector of
   \code{divisor} values.
@@ -155,7 +157,7 @@ a warning or an error will result.
 %% ~Make other sections like Warning with \section{Warning }{....} ~
 
 \seealso{
-  \code{\link{normal1}},
+  \code{\link{uninormal}},
   \code{\link{gew}}.
 
 
@@ -172,19 +174,19 @@ zef1 <- vglm(cbind(invest.g, invest.w) ~
              SUR(divisor = "sqrt"), maxit = 1,
              data = gew, trace = TRUE, constraints = clist)
 
-round(coef(zef1, matrix = TRUE), dig = 4) # ZEF
-zef1 at extra$ncols_X_lm
+round(coef(zef1, matrix = TRUE), digits = 4)  # ZEF
+zef1 at extra$ncols.X.lm
 zef1 at misc$divisor
 zef1 at misc$values.divisor
-round(sqrt(diag(vcov(zef1))),    dig = 4) # SEs
+round(sqrt(diag(vcov(zef1))),    digits = 4)  # SEs
 
 mle1 <- vglm(cbind(invest.g, invest.w) ~
              capital.g + value.g + capital.w + value.w,
              SUR(mle.normal = TRUE, divisor = "n-max"),
              epsilon = 1e-11,
              data = gew, trace = TRUE, constraints = clist)
-round(coef(mle1, matrix = TRUE), dig = 4) # MLE
-round(sqrt(diag(vcov(mle1))),    dig = 4) # SEs
+round(coef(mle1, matrix = TRUE), digits = 4)  # MLE
+round(sqrt(diag(vcov(mle1))),    digits = 4)  # SEs
 }
 % Add one or more standard keywords, see file 'KEYWORDS' in the
 % R documentation directory.
diff --git a/man/Tol.Rd b/man/Tol.Rd
index c48f901..573af56 100644
--- a/man/Tol.Rd
+++ b/man/Tol.Rd
@@ -27,7 +27,7 @@ Tol(object, ...)
   Many models have no such notion or definition.
 
 
-  Tolerances occur in quadratic ordination, i.e., CQO or UQO.
+  Tolerances occur in quadratic ordination, i.e., CQO. % or UQO.
   They have ecological meaning because a high tolerance
   for a species means the species can survive over a large
   environmental range (stenoecous species), whereas a
@@ -97,8 +97,8 @@ Constrained additive ordination.
 }
 
 \examples{
-set.seed(111) # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(111)  # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
                Trocterr, Zoraspin) ~
diff --git a/man/V1.Rd b/man/V1.Rd
new file mode 100644
index 0000000..5df158b
--- /dev/null
+++ b/man/V1.Rd
@@ -0,0 +1,103 @@
+\name{V1}
+\alias{V1}
+\docType{data}
+\title{ V1 Flying-Bombs Hits in London }
+
+\description{
+  A small count data set.
+  During WWII V1 flying-bombs were fired from sites in France
+  (Pas-de-Calais) and the Dutch coast towards London.
+  The number of hits per square grid around London was recorded.
+
+  
+
+}
+\usage{
+data(V1)
+}
+\format{
+  A data frame with the following variables.
+
+  \describe{
+   
+    \item{hits}{
+      Values between 0 and 4, and 7.
+      Actually, the 7 is imputed from the paper
+      (the last category was recorded as "5 and over").
+
+    }
+    \item{ofreq}{
+      Observed frequency, i.e., the number of grids
+      with that many hits.
+
+    }
+
+  }
+}
+\details{
+  The data concerns 576 square grids, each of 0.25 square km,
+  about south London.
+  The area selected comprised 144 square km over which
+  the basic probability function of the distribution was very
+  nearly constant.
+  V1s, which were one type of flying-bomb,
+  were a ``Vergeltungswaffe'' or vengeance weapon fired
+  during the summer of 1944 at London.
+  The V1s were informally called Buzz Bombs or Doodlebugs,
+  and they were pulse-jet-powered with a warhead of 850 kg of explosives.
+  Over 9500 were launched at London, and many were shot down by
+  artillery and the RAF.
+  Over the period considered the total number of bombs within the area
+  was 537.
+
+
+  It was asserted that the bombs tended to be grouped in clusters.
+  However, a basic Poisson analysis shows this is not the case.
+  Because their guidance system was rather primitive, the data
+  is consistent with a Poisson distribution (i.e., random hits).
+
+
+
+}
+\source{
+ 
+  Clarke, R. D. (1946).
+  An application of the Poisson distribution.
+  \emph{Journal of the Institute of Actuaries},
+  \bold{72}(3), 481.
+
+
+}
+\references{
+
+Feller, W. (1970).
+\emph{An Introduction to Probability Theory and Its Applications},
+Vol. 1, Third Edition.
+John Wiley and Sons: New York, USA.
+
+
+% p.160--1
+
+
+}
+\seealso{
+  \code{\link[VGAM]{poissonff}}.
+
+
+}
+\examples{
+V1
+mean(with(V1, rep(hits, times = ofreq)))
+ var(with(V1, rep(hits, times = ofreq)))
+ sum(with(V1, rep(hits, times = ofreq)))
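+# A minimal hedged sketch (not from the original source): compare the
+# observed frequencies with those expected under a Poisson distribution
+# having the same mean; close agreement suggests the hits were random.
+# (The last category is really "5 and over", so its expected count is
+# only approximate here.)
+lambda.hat <- mean(with(V1, rep(hits, times = ofreq)))
+cbind(hits     = with(V1, hits),
+      observed = with(V1, ofreq),
+      expected = round(sum(with(V1, ofreq)) *
+                       dpois(with(V1, hits), lambda = lambda.hat), 1))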
+\dontrun{ barplot(with(V1, ofreq),
+          names.arg = as.character(with(V1, hits)),
+          main = "London V1 buzz bomb hits",
+          col = "lightblue", las = 1,
+          ylab = "Frequency", xlab = "Hits") }
+}
+\keyword{datasets}
+
+
+%
+%
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index 2ca157d..bbb9f85 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -157,7 +157,7 @@ The \pkg{VGAM} package for categorical data analysis.
 \url{http://www.jstatsoft.org/v32/i10/}.
 
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -193,10 +193,10 @@ contains some further information and examples.
 # Example 1; proportional odds model
 pneumo <- transform(pneumo, let = log(exposure.time))
 (fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-depvar(fit1) # Better than using fit1 at y; dependent variable (response)
-weights(fit1, type = "prior") # Number of observations
-coef(fit1, matrix = TRUE)     # p.179, in McCullagh and Nelder (1989)
-constraints(fit1)             # Constraint matrices
+depvar(fit1)  # Better than using fit1 at y; dependent variable (response)
+weights(fit1, type = "prior")  # Number of observations
+coef(fit1, matrix = TRUE)      # p.179, in McCullagh and Nelder (1989)
+constraints(fit1)              # Constraint matrices
 summary(fit1)
 
 
@@ -234,10 +234,10 @@ head(cdf(fit4))
 
 \dontrun{ par(mfrow = c(1, 1), bty = "l", mar = c(5,4,4,3)+0.1, xpd = TRUE)
 qtplot(fit4, percentiles = c(5,50,90,99), main = "Quantiles", las = 1,
-       xlim = c(15, 90), ylab = "BMI", lwd = 2, lcol = 4) # Quantile plot
+       xlim = c(15, 90), ylab = "BMI", lwd = 2, lcol = 4)  # Quantile plot
 
 ygrid <- seq(15, 43, len = 100)  # BMI ranges
-par(mfrow = c(1, 1), lwd = 2) # Density plot
+par(mfrow = c(1, 1), lwd = 2)  # Density plot
 aa <- deplot(fit4, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
     main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
 aa
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 9ce1175..367b7b3 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -9,6 +9,7 @@
    the 1, 2 and 3-parameter asymmetric Laplace distributions (ALDs).
    The 1-parameter ALD may be used for quantile regression.
 
+
 }
 \usage{
 alaplace1(tau = NULL, llocation = "identity",
@@ -303,7 +304,7 @@ fitp <- vgam(y ~ s(x, df = mydof), data = adata, trace = TRUE,
              alaplace1(tau = mytau, llocation = "loge", parallelLoc = TRUE))
  
 par(las = 1); mylwd = 1.5
-with(adata, plot(x, jitter(y, factor = 0.5), col = "red",
+with(adata, plot(x, jitter(y, factor = 0.5), col = "orange",
                  main = "Example 1; green: parallelLoc = TRUE",
                  ylab = "y", pch = "o", cex = 0.75))
 with(adata, matlines(x, fitted(fit ), col = "blue",
@@ -311,10 +312,10 @@ with(adata, matlines(x, fitted(fit ), col = "blue",
 with(adata, matlines(x, fitted(fitp), col = "green",
                      lty = "solid", lwd = mylwd))
 finexgrid <- seq(0, 1, len = 1001)
-for(ii in 1:length(mytau))
+for (ii in 1:length(mytau))
     lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)),
           col = "blue", lwd = mylwd)
-fit at extra # Contains useful information
+fit at extra  # Contains useful information
 
 
 # Example 2: regression quantile at a new tau value from an existing fit
@@ -326,9 +327,9 @@ fitp2 <- vglm(y ~ bs(x, df = mydof),
 
 newtau <- 0.5  # Want to refit the model with this tau value
 fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[,1]),
-            family = alaplace1(tau = newtau, llocation = "loge"),
-             adata)
-with(adata, plot(x, jitter(y, factor = 0.5), col = "red",
+              family = alaplace1(tau = newtau, llocation = "loge"),
+              adata)
+with(adata, plot(x, jitter(y, factor = 0.5), col = "orange",
                pch = "o", cex = 0.75, ylab = "y",
                main = "Example 2; parallelLoc = TRUE"))
 with(adata, matlines(x, fitted(fitp2), col = "blue", 
@@ -343,13 +344,13 @@ with(adata, matlines(x, fitted(fitp3), col = "black",
 # link to ensure an increasing quantiles at any value of x.
 
 mytau <- seq(0.2, 0.9, by = 0.1)
-answer <- matrix(0, nrow(adata), length(mytau)) # Stores the quantiles
+answer <- matrix(0, nrow(adata), length(mytau))  # Stores the quantiles
 adata <- transform(adata, offsety = y*0)
 usetau <- mytau
-for(ii in 1:length(mytau)) {
+for (ii in 1:length(mytau)) {
 #   cat("\n\nii  = ", ii, "\n")
   adata <- transform(adata, usey = y-offsety)
-  iloc <- ifelse(ii == 1, with(adata, median(y)), 1.0) # Well-chosen!
+  iloc <- ifelse(ii == 1, with(adata, median(y)), 1.0)  # Well-chosen!
   mydf <- ifelse(ii == 1, 5, 3)  # Maybe less smoothing will help
   lloc <- ifelse(ii == 1, "identity", "loge")  # 2nd value must be "loge"
   fit3 <- vglm(usey ~ ns(x, df = mydf), data = adata, trace = TRUE,
diff --git a/man/alaplaceUC.Rd b/man/alaplaceUC.Rd
index 7b25b66..0fed45a 100644
--- a/man/alaplaceUC.Rd
+++ b/man/alaplaceUC.Rd
@@ -11,6 +11,7 @@
   parameter \code{location}, scale parameter \code{scale},
   and asymmetry parameter \code{kappa}.
 
+
 }
 \usage{
 dalap(x, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)),
@@ -27,15 +28,18 @@ ralap(n, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
   number of observations.
   If \code{length(n) > 1} then the length is taken to be the number required.
 
+
   }
   \item{location}{
     the location parameter \eqn{\xi}{xi}.
 
+
   }
   \item{scale}{
   the scale parameter \eqn{\sigma}{sigma}.
   Must consist of positive values.
 
+
   }
   \item{tau}{
   the quantile parameter \eqn{\tau}{tau}.
@@ -43,15 +47,18 @@ ralap(n, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
   This argument is used to specify \code{kappa} and is ignored
   if \code{kappa} is assigned.
 
+
   }
   \item{kappa}{
   the asymmetry parameter \eqn{\kappa}{kappa}.
   Must consist of positive values.
 
+
   }
   \item{log}{
   if \code{TRUE}, probabilities \code{p} are given as \code{log(p)}.
 
+
   }
 
 }
@@ -99,19 +106,19 @@ Boston: Birkhauser.
 x <- seq(-5, 5, by = 0.01)
 loc <- 0; sigma <- 1.5; kappa <- 2
 \dontrun{ plot(x, dalap(x, loc, sigma, kappa = kappa), type = "l", col = "blue",
-     main = "Blue is density, red is cumulative distribution function",
+     main = "Blue is density, orange is cumulative distribution function",
      ylim = c(0, 1), sub = "Purple are 5, 10, ..., 95 percentiles",
      las = 1, ylab = "", cex.main = 0.5)
 abline(h = 0, col = "blue", lty = 2)
 lines(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
       dalap(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
             loc, sigma, kappa = kappa), col = "purple", lty = 3, type = "h")
-lines(x, palap(x, loc, sigma, kappa = kappa), type = "l", col = "red")
+lines(x, palap(x, loc, sigma, kappa = kappa), type = "l", col = "orange")
 abline(h = 0, lty = 2) }
 
-pp <- seq(0.05, 0.95, by = 0.05) # Test two functions
+pp <- seq(0.05, 0.95, by = 0.05)  # Test two functions
 max(abs(palap(qalap(pp, loc, sigma, kappa = kappa),
-              loc, sigma, kappa = kappa) - pp)) # Should be 0
+              loc, sigma, kappa = kappa) - pp))  # Should be 0
 }
 \keyword{distribution}
 
diff --git a/man/amh.Rd b/man/amh.Rd
index 7abc587..f8fd4a3 100644
--- a/man/amh.Rd
+++ b/man/amh.Rd
@@ -1,7 +1,7 @@
 \name{amh}
 \alias{amh}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Ali-Mikhail-Haq Distribution Distribution Family Function }
+\title{ Ali-Mikhail-Haq Distribution Family Function }
 \description{
   Estimate the association parameter of
   Ali-Mikhail-Haq's bivariate
@@ -19,6 +19,7 @@ amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
   and \eqn{-1 < \alpha < 1}{-1 < alpha < 1}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{ialpha}{
   Numeric. Optional initial value for \eqn{\alpha}{alpha}.
@@ -26,16 +27,19 @@ amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
   If a convergence failure occurs try assigning a different value.
   Assigning a value will override the argument \code{imethod}.
 
+
   }
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
   specifies the initialization method. If failure to converge occurs
   try the other value, or else specify a value for \code{ialpha}.
 
+
   }
   \item{nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -50,16 +54,20 @@ amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
   When \eqn{\alpha = 0}{alpha = 0} the random variables are
   independent.
 
+
 % A variant of Newton-Raphson is used, which only seems to work for an
 % intercept model.
 % It is a very good idea to set \code{trace = TRUE}.
 % This \pkg{VGAM} family function is prone to numerical difficulties.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
+
+
 }
 
 \references{
@@ -88,7 +96,8 @@ New York: Springer.
 \seealso{
   \code{\link{ramh}},
   \code{\link{fgm}},
-  \code{\link{gumbelIbiv}}.
+  \code{\link{bigumbelI}}.
+
 
 }
 \examples{
diff --git a/man/amhUC.Rd b/man/amhUC.Rd
index 167eee5..2b67d89 100644
--- a/man/amhUC.Rd
+++ b/man/amhUC.Rd
@@ -3,12 +3,13 @@
 \alias{damh}
 \alias{pamh}
 \alias{ramh}
-\title{Ali-Mikhail-Haq Distribution's Bivariate Distribution}
+\title{Ali-Mikhail-Haq Bivariate Distribution}
 \description{
   Density, distribution function, and random
   generation for the (one parameter) bivariate
   Ali-Mikhail-Haq distribution.
 
+
 }
 \usage{
 damh(x1, x2, alpha, log = FALSE)
@@ -18,18 +19,24 @@ ramh(n, alpha)
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
   \item{n}{number of observations.
-    Must be a positive integer of length 1.}
+  Same as \code{\link[stats]{runif}}.
+
+
+  }
   \item{alpha}{the association parameter.}
   \item{log}{
   Logical.
   If \code{TRUE} then the logarithm is returned.
 
+
   }
 }
 \value{
   \code{damh} gives the density,
   \code{pamh} gives the distribution function, and
   \code{ramh} generates random deviates (a two-column matrix).
+
+
 }
 %\references{
 %
@@ -41,27 +48,27 @@ ramh(n, alpha)
   parameter by maximum likelihood estimation, for the formula of the
   cumulative distribution function and other details.
 
+
 }
 %\note{
 %}
 \seealso{
   \code{\link{amh}}.
 
+
 }
-\examples{
-x = seq(0, 1, len=(N <- 101))
-alpha = 0.7
-ox = expand.grid(x, x)
-z = damh(ox[,1], ox[,2], alpha=alpha)
+\examples{ x <- seq(0, 1, len = (N <- 101)); alpha <- 0.7
+ox <- expand.grid(x, x)
+zedd <- damh(ox[, 1], ox[, 2], alpha = alpha)
 \dontrun{
-contour(x, x, matrix(z, N, N), col="blue")
-z = pamh(ox[,1], ox[,2], alpha=alpha)
-contour(x, x, matrix(z, N, N), col="blue")
-
-plot(r <- ramh(n=1000, alpha=alpha), col="blue")
-par(mfrow=c(1,2))
-hist(r[,1]) # Should be uniform
-hist(r[,2]) # Should be uniform
+contour(x, x, matrix(zedd, N, N), col = "blue")
+zedd <- pamh(ox[, 1], ox[, 2], alpha = alpha)
+contour(x, x, matrix(zedd, N, N), col = "blue")
+
+plot(r <- ramh(n = 1000, alpha = alpha), col = "blue")
+par(mfrow = c(1, 2))
+hist(r[, 1])  # Should be uniform
+hist(r[, 2])  # Should be uniform
 }
 }
 \keyword{distribution}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index 7bf7d38..04552a2 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -126,18 +126,18 @@ fit at extra
 par(mfrow = c(1,2))
 # Quantile plot
 with(mydat, plot(x, jitter(y), col = "blue", las = 1, main =
-     paste(paste(round(fit at extra$percentile, dig = 1), collapse = ", "),
+     paste(paste(round(fit at extra$percentile, digits = 1), collapse = ", "),
            "percentile-expectile curves")))
 with(mydat, matlines(x, 100 * fitted(fit), lwd = 2, col = "blue", lty = 1))
 
 
 # Compare the fitted expectiles with the quantiles
 with(mydat, plot(x, jitter(y), col = "blue", las = 1, main = 
-     paste(paste(round(fit at extra$percentile, dig = 1), collapse = ", "),
+     paste(paste(round(fit at extra$percentile, digits = 1), collapse = ", "),
            "percentile curves are red")))
 with(mydat, matlines(x, 100 * fitted(fit), lwd = 2, col = "blue", lty = 1))
 
-for(ii in fit at extra$percentile)
+for (ii in fit at extra$percentile)
     with(mydat, matlines(x, 100 *
          qbinom(p = ii/100, size = sizevec, prob = prob) / sizevec,
                   col = "red", lwd = 2, lty = 1))
diff --git a/man/amlexponential.Rd b/man/amlexponential.Rd
index 002736a..f91f02a 100644
--- a/man/amlexponential.Rd
+++ b/man/amlexponential.Rd
@@ -144,17 +144,17 @@ fit at extra
 par(mfrow = c(1,2))
 # Quantile plot
 with(mydat, plot(x, sqrt(y), col = "blue", las = 1, main =
-     paste(paste(round(fit at extra$percentile, dig = 1), collapse = ", "),
+     paste(paste(round(fit at extra$percentile, digits = 1), collapse = ", "),
            "percentile-expectile curves")))
 with(mydat, matlines(x, sqrt(fitted(fit)), lwd = 2, col = "blue", lty = 1))
 
 # Compare the fitted expectiles with the quantiles
 with(mydat, plot(x, sqrt(y), col = "blue", las = 1, main =
-     paste(paste(round(fit at extra$percentile, dig = 1), collapse = ", "),
+     paste(paste(round(fit at extra$percentile, digits = 1), collapse = ", "),
            "percentile curves are orange")))
 with(mydat, matlines(x, sqrt(fitted(fit)), lwd = 2, col = "blue", lty = 1))
 
-for(ii in fit at extra$percentile)
+for (ii in fit at extra$percentile)
   with(mydat, matlines(x, sqrt(qexp(p = ii/100, rate = 1/mu)), col = "orange")) }
 }
 \keyword{models}
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index 12e6529..51d2431 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -148,7 +148,7 @@ coef(fit, matrix = TRUE)
 
 # Quantile plot
 with(bmi.nz, plot(age, BMI, col = "blue", main =
-     paste(round(fit at extra$percentile, dig = 1),
+     paste(round(fit at extra$percentile, digits = 1),
            "expectile-percentile curve")))
 with(bmi.nz, lines(age, c(fitted(fit)), col = "black"))
 
@@ -161,7 +161,7 @@ findw <- function(w, percentile = 50) {
 # Quantile plot
 with(bmi.nz, plot(age, BMI, col = "blue", las = 1, main =
      "25, 50 and 75 expectile-percentile curves"))
-for(myp in c(25, 50, 75)) {
+for (myp in c(25, 50, 75)) {
 # Note: uniroot() can only find one root at a time
   bestw <- uniroot(f = findw, interval = c(1/10^4, 10^4), percentile = myp)
   fit2 <- vglm(BMI ~ bs(age), fam = amlnormal(w = bestw$root), data = bmi.nz)
@@ -171,7 +171,7 @@ for(myp in c(25, 50, 75)) {
 # Example 3; this is Example 1 but with smoothing splines and
 # a vector w and a parallelism assumption.
 ooo <- with(bmi.nz, order(age))
-bmi.nz <- bmi.nz[ooo,] # Sort by age
+bmi.nz <- bmi.nz[ooo,]  # Sort by age
 fit3 <- vgam(BMI ~ s(age, df = 4), bmi.nz, trace = TRUE,
              fam = amlnormal(w = c(0.1, 1, 10), parallel = TRUE))
 fit3 at extra # The w values, percentiles and weighted deviances
@@ -181,10 +181,10 @@ coef(fit3, matrix = TRUE)
 
 # Quantile plot
 with(bmi.nz, plot(age, BMI, col="blue", main =
-     paste(paste(round(fit3 at extra$percentile, dig = 1), collapse = ", "),
+     paste(paste(round(fit3 at extra$percentile, digits = 1), collapse = ", "),
            "expectile-percentile curves")))
 with(bmi.nz, matlines(age, fitted(fit3), col = 1:fit3 at extra$M, lwd = 2))
-with(bmi.nz, lines(age, c(fitted(fit )), col = "black")) # For comparison
+with(bmi.nz, lines(age, c(fitted(fit )), col = "black"))  # For comparison
 }
 }
 \keyword{models}
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index 9a79416..036a30c 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -150,7 +150,7 @@ fit at extra
 \dontrun{
 # Quantile plot
 with(mydat, plot(x, jitter(y), col = "blue", las = 1, main =
-     paste(paste(round(fit at extra$percentile, dig = 1), collapse = ", "),
+     paste(paste(round(fit at extra$percentile, digits = 1), collapse = ", "),
            "percentile-expectile curves")))
 with(mydat, matlines(x, fitted(fit), lwd = 2)) }
 }
diff --git a/man/auxposbernoulli.t.Rd b/man/auxposbernoulli.t.Rd
new file mode 100644
index 0000000..c86a9a7
--- /dev/null
+++ b/man/auxposbernoulli.t.Rd
@@ -0,0 +1,115 @@
+\name{aux.posbernoulli.t}
+\alias{aux.posbernoulli.t}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Auxiliary Function for the
+  Positive Bernoulli Family Function with Time Effects }
+\description{
+  Returns behavioural effects indicator variables from a
+  capture history matrix.
+
+
+}
+\usage{
+aux.posbernoulli.t(y, check.y = FALSE, rename = TRUE, name = "bei")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{y}{
+  Capture history matrix.
+  Rows are animals, columns are sampling occasions, and
+  values should be 0s and 1s only.
+
+  }
+  \item{check.y}{
+  Logical, if \code{TRUE} then some basic checking is performed.
+
+  }
+  \item{rename, name}{
+  If \code{rename = TRUE} then the behavioural effects indicator
+  variables are named using the value of \code{name} as the prefix.
+  If \code{FALSE} then the same column names as \code{y} are used.
+
+  }
+}
+\details{
+  This function can help fit certain capture--recapture models,
+  commonly known in the literature as
+  \eqn{M_{tb}} or \eqn{M_{tbh}}
+  (the absence of the \eqn{h} means it is an intercept-only model).
+  See \code{\link{posbernoulli.t}} for details.
+
+
+}
+\value{
+  A list with the following components.
+
+  \describe{
+    \item{cap.hist1}{
+      A matrix the same dimension as \code{y}.
+      In any particular row there are 0s up to
+      the first capture. Then there are 1s thereafter.
+
+      
+    }
+    \item{cap1}{
+      A vector specifying the sampling occasion on which the animal
+      was first captured.
+      
+
+    }
+    \item{y0i}{
+      Number of noncaptures before the first capture.
+
+    }
+    \item{yr0i}{
+      Number of noncaptures after the first capture.
+      
+
+    }
+    \item{yr1i}{
+      Number of recaptures after the first capture.
+      
+
+    }
+  }
+}
+
+% \author{ Thomas W. Yee. }
+
+%\note{
+%  Models \eqn{M_{tbh}}{M_tbh} can be fitted using the
+%  \code{xij} argument (see \code{\link{vglm.control}})
+%  to input the behavioural effect indicator variables.
+%  Rather than manually setting these up, they may be more conveniently
+%  obtained by \code{\link{aux.posbernoulli.t}}. See
+%  the example below.
+%
+%
+%}
+
+%\section{Warning }{
+%
+%  See \code{\link{posbernoulli.tb}}.
+%
+%
+%}
+
+\seealso{ 
+  \code{\link{posbernoulli.t}},
+  \code{\link{deermice}}.
+
+}
+
+\examples{
+# Set up the variables for fitting an M_tbh model to the deermice data:
+(pdata <- aux.posbernoulli.t(with(deermice, cbind(y1, y2, y3, y4, y5, y6))))
+
+deermice <- data.frame(deermice,
+                    bei = 0,  # Add this
+                    pdata$cap.hist1)  # Incorporate these
+head(deermice)  # Augmented with behavioural effect indicator variables
+tail(deermice)
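+
+# A small hedged check (not from the original source) of the components
+# returned above: noncaptures before the first capture, plus the first
+# capture itself, plus noncaptures and recaptures afterwards, should sum
+# to the number of sampling occasions (6 here).
+with(pdata, table(y0i + 1 + yr0i + yr1i))  # Should be all 6s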
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/beggs.Rd b/man/beggs.Rd
new file mode 100644
index 0000000..d7e75da
--- /dev/null
+++ b/man/beggs.Rd
@@ -0,0 +1,88 @@
+\name{beggs}
+\alias{beggs}
+\docType{data}
+\title{Bacon and Eggs Data}
+\description{
+  Purchasing of bacon and eggs.
+
+}
+\usage{
+data(beggs)
+}
+\format{
+  Data frame of a two way table.
+
+  \describe{
+   
+    \item{b0, b1, b2, b3, b4}{
+    The \code{b} refers to bacon.
+    The number of times bacon was purchased was 0, 1, 2, 3, or 4.
+
+
+    }
+    \item{e0, e1, e2, e3, e4}{
+    The \code{e} refers to eggs.
+    The number of times eggs were purchased was 0, 1, 2, 3, or 4.
+
+
+    }
+
+  }
+}
+\details{
+
+The data is from Information Resources, Inc., a consumer panel
+based in a large US city [see Bell and Lattin (1998) for further
+details]. Starting in June 1991, the purchases in the bacon and
+fresh eggs product categories for a sample of 548 households over
+four consecutive store trips were tracked.  Only those grocery
+shopping trips with a total basket value of at least five dollars
+were considered.  For each household, the total number of bacon
+purchases in their four eligible shopping trips and the total
+number of egg purchases (usually a package of eggs) for the same
+trips were counted.
+
+
+%    Data from Bell and Latin (1998).
+%    Also see Danaher and Hardie (2005).
+
+     
+}
+\source{
+ 
+  Bell, D. R. and Lattin, J. M. (1998)
+  Shopping Behavior and Consumer Preference
+  for Store Price Format: Why `Large Basket' Shoppers Prefer EDLP.
+  \emph{Marketing Science},
+  \bold{17}, 66--88.
+
+ 
+}
+\references{
+
+  Danaher, P. J. and Hardie, B. G. S. (2005)
+  Bacon with Your Eggs?
+  Applications of a New Bivariate Beta-Binomial Distribution.
+  \emph{American Statistician},
+  \bold{59}(4), 282--286.
+
+
+
+}
+\seealso{
+  \code{\link[VGAM]{rrvglm}},
+  \code{\link[VGAM]{rcim}},
+  \code{\link[VGAM]{grc}}.
+
+
+}
+\examples{
+beggs
+colSums(beggs)
+rowSums(beggs)
+}
+\keyword{datasets}
+
+
+%
+%
diff --git a/man/benfUC.Rd b/man/benfUC.Rd
index 1f55710..3b9823e 100644
--- a/man/benfUC.Rd
+++ b/man/benfUC.Rd
@@ -24,23 +24,28 @@ rbenf(n, ndigits = 1)
    Vector of quantiles.
    See \code{ndigits}.
 
+
   }
   \item{p}{vector of probabilities.}
   \item{n}{number of observations. A single positive integer.
   Else if \code{length(n) > 1} then the length is
           taken to be the number required.
+
+
   }
   \item{ndigits}{
   Number of leading digits, either 1 or 2.
   If 1 then the support of the distribution is \{1,\ldots,9\}, else
   \{10,\ldots,99\}.
 
+
   }
   \item{log, log.p}{
   Logical.
   If \code{log.p = TRUE} then all probabilities \code{p} are
   given as \code{log(p)}.
 
+
   }
 
 }
diff --git a/man/benini.Rd b/man/benini.Rd
index 8980f99..79d02df 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -97,7 +97,7 @@ fit <- vglm(y ~ 1, benini(y0 = y0), bdata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
 fit at extra$y0
-c(head(fitted(fit), 1), with(bdata, median(y))) # Should be equal
+c(head(fitted(fit), 1), with(bdata, median(y)))  # Should be equal
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
index 87bbeed..93e2f45 100644
--- a/man/beniniUC.Rd
+++ b/man/beniniUC.Rd
@@ -9,6 +9,8 @@
   Density, distribution function, quantile function and random
   generation for the Benini distribution with parameter
   \code{shape}.
+
+
 }
 \usage{
 dbenini(x, shape, y0, log = FALSE)
@@ -20,13 +22,23 @@ rbenini(n, shape, y0)
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-    Must be a positive integer of length 1.}
-  \item{shape}{the shape parameter \eqn{b}.}
-  \item{y0}{the scale parameter \eqn{y_0}{y0}.}
+    Same as \code{\link[stats]{runif}}.
+
+
+  }
+  \item{shape}{the shape parameter \eqn{b}.
+
+
+  }
+  \item{y0}{the scale parameter \eqn{y_0}{y0}.
+
+
+  }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -36,13 +48,16 @@ rbenini(n, shape, y0)
   \code{qbenini} gives the quantile function, and
   \code{rbenini} generates random deviates.
 
+
 }
 \references{
+
 Kleiber, C. and Kotz, S. (2003)
 \emph{Statistical Size Distributions in Economics and
              Actuarial Sciences},
 Hoboken, NJ, USA: Wiley-Interscience.
 
+
 }
 \author{ T. W. Yee }
 \details{
@@ -50,6 +65,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
   for estimating the parameter \eqn{b} by maximum likelihood estimation,
   for the formula of the probability density function and other details.
 
+
 }
 %\note{
 %  
@@ -57,22 +73,23 @@ Hoboken, NJ, USA: Wiley-Interscience.
 \seealso{
   \code{\link{benini}}.
 
+
 }
 \examples{
 \dontrun{
-y0 = 1; shape = exp(1)
-xx = seq(0.0, 4, len = 101)
+y0 <- 1; shape <- exp(1)
+xx <- seq(0.0, 4, len = 101)
 plot(xx, dbenini(xx, y0 = y0,shape = shape), type = "l", col = "blue",
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1,
      las = 1, ylab = "", xlab = "x")
 abline(h = 0, col = "blue", lty = 2)
 lines(xx, pbenini(xx, y0 = y0, shape = shape), col = "orange")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qbenini(probs, y0 = y0, shape = shape)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qbenini(probs, y0 = y0, shape = shape)
 lines(Q, dbenini(Q, y0 = y0, shape = shape),
       col = "purple", lty = 3, type = "h")
-pbenini(Q, y0 = y0, shape = shape) - probs    # Should be all zero
+pbenini(Q, y0 = y0, shape = shape) - probs  # Should be all zero
 }
 }
 \keyword{distribution}
diff --git a/man/beta.ab.Rd b/man/beta.ab.Rd
index 7082876..5d70814 100644
--- a/man/beta.ab.Rd
+++ b/man/beta.ab.Rd
@@ -98,9 +98,9 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
   New York: Marcel Dekker.
 
 
-%Evans, M., Hastings, N. and Peacock, B. (2000)
+%Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 %\emph{Statistical Distributions},
-%New York: Wiley-Interscience, Third edition.
+%Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
   Documentation accompanying the \pkg{VGAM} package at
diff --git a/man/betaII.Rd b/man/betaII.Rd
index 8b036c3..d0fe269 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -17,10 +17,12 @@ betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
   (positive) parameters \code{scale}, \code{p} and \code{q}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iscale, ishape2.p, ishape3.q}{
   Optional initial values for \code{scale}, \code{p} and \code{q}.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
@@ -28,6 +30,7 @@ betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
   Here, the values must be from the set \{1,2,3\} which correspond to
   \code{scale}, \code{p}, \code{q}, respectively.
 
+
   }
 }
 \details{
@@ -60,10 +63,12 @@ provided \eqn{q > 1}; these are returned as the fitted values.
 
 }
 \references{
+
 Kleiber, C. and Kotz, S. (2003)
 \emph{Statistical Size Distributions in Economics and Actuarial Sciences},
 Hoboken, NJ, USA: Wiley-Interscience.
 
+
 }
 
 \author{ T. W. Yee }
@@ -84,10 +89,11 @@ Hoboken, NJ, USA: Wiley-Interscience.
     \code{\link{paralogistic}},
     \code{\link{invparalogistic}}.
 
+
 }
 
 \examples{
-bdata <- data.frame(y = rsinmad(2000, shape1.a = 1, exp(2), exp(1))) # Not genuine data!
+bdata <- data.frame(y = rsinmad(2000, shape1.a = 1, exp(2), exp(1)))  # Not genuine data!
 fit <- vglm(y ~ 1, betaII, bdata, trace = TRUE)
 fit <- vglm(y ~ 1, betaII(ishape2.p = 0.7, ishape3.q = 0.7),
             bdata, trace = TRUE)
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index 78d0e33..62c3f64 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -13,6 +13,7 @@
   Density, distribution function, and random
   generation for the beta-binomial distribution.
 
+
 }
 \usage{
 dbetabinom(x, size, prob, rho = 0, log = FALSE)
@@ -27,11 +28,15 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
 % \item{p}{vector of probabilities.}
   \item{size}{number of trials.}
   \item{n}{number of observations.
-  Must be a positive integer of length 1.}
+  Same as \code{\link[stats]{runif}}.
+
+
+  }
   \item{prob}{
   the probability of success \eqn{\mu}{mu}.
   Must be in the unit closed interval \eqn{[0,1]}.
 
+
   }
   \item{rho}{
   the correlation parameter \eqn{\rho}{rho}.
@@ -39,22 +44,26 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
   however, the value 0 is sometimes supported
   (if so then it corresponds to the usual binomial distribution).
 
+
   }
   \item{shape1, shape2}{
   the two (positive) shape parameters of the standard
   beta distribution. They are called \code{a} and \code{b} in
   \code{\link[base:Special]{beta}} respectively.
 
+
   }
   \item{log, log.p}{
   Logical.
   If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
 
+
   }
 
   \item{.dontuse.prob}{
   An argument that should be ignored and unused.
 
+
   }
 
 
@@ -104,7 +113,7 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
 }
 \examples{
 set.seed(1); rbetabinom(10, 100, prob = 0.5)
-set.seed(1);     rbinom(10, 100, prob = 0.5) # The same since rho = 0
+set.seed(1);     rbinom(10, 100, prob = 0.5)  # The same since rho = 0
 
 \dontrun{ N <- 9; xx <- 0:N; s1 <- 2; s2 <- 3
 dy <- dbetabinom.ab(xx, size = N, shape1 = s1, shape2 = s2)
@@ -114,18 +123,18 @@ barplot(rbind(dy, dbinom(xx, size = N, prob = s1 / (s1+s2))),
                    ", shape2=", s2, ") (blue) vs\n",
         " Binomial(size=", N, ", prob=", s1/(s1+s2), ") (green)", sep = ""),
         names.arg = as.character(xx), cex.main = 0.8)
-sum(dy * xx) # Check expected values are equal
-sum(dbinom(xx, size = N, prob = s1 / (s1+s2))*xx)
-cumsum(dy) - pbetabinom.ab(xx, N, shape1 = s1, shape2 = s2)
+sum(dy * xx)  # Check expected values are equal
+sum(dbinom(xx, size = N, prob = s1 / (s1+s2)) * xx)
+cumsum(dy) - pbetabinom.ab(xx, N, shape1 = s1, shape2 = s2)  # Should be all 0
 
 y <- rbetabinom.ab(n = 10000, size = N, shape1 = s1, shape2 = s2)
 ty <- table(y)
 barplot(rbind(dy, ty / sum(ty)),
-        beside = TRUE, col = c("blue","red"), las = 1,
-        main = paste("Beta-binomial (size=",N,", shape1=",s1,
-                   ", shape2=",s2,") (blue) vs\n",
+        beside = TRUE, col = c("blue", "orange"), las = 1,
+        main = paste("Beta-binomial (size=", N, ", shape1=", s1,
+                     ", shape2=", s2, ") (blue) vs\n",
         " Random generated beta-binomial(size=", N, ", prob=", s1/(s1+s2),
-        ") (red)", sep = ""), cex.main = 0.8,
+        ") (orange)", sep = ""), cex.main = 0.8,
         names.arg = as.character(xx)) }
 }
 \keyword{distribution}
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index 080fb26..bb5074d 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -6,6 +6,7 @@
   Fits a beta-binomial distribution by maximum likelihood estimation.
   The two parameters here are the mean and correlation coefficient.
 
+
 }
 \usage{
 betabinomial(lmu = "logit", lrho = "logit",
@@ -127,6 +128,7 @@ betabinomial(lmu = "logit", lrho = "logit",
 
 }
 \references{
+
   Moore, D. F. and Tsiatis, A. (1991)
   Robust estimation of the variance in moment methods for
   extra-binomial and extra-Poisson variation.
@@ -193,7 +195,7 @@ betabinomial(lmu = "logit", lrho = "logit",
 # Example 1
 bdata <- data.frame(N = 10, mu = 0.5, rho = 0.8)
 bdata <- transform(bdata,
-                   y = rbetabinom(n=100, size = N, prob = mu, rho = rho))
+                   y = rbetabinom(n = 100, size = N, prob = mu, rho = rho))
 fit <- vglm(cbind(y, N-y) ~ 1, betabinomial, bdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
@@ -219,16 +221,17 @@ coef(fit2, matrix = TRUE)
 \dontrun{ with(lirat, plot(hb[N > 1], fit2 at misc$rho,
                  xlab = "Hemoglobin", ylab = "Estimated rho",
                  pch = as.character(grp[N > 1]), col = grp[N > 1])) }
-\dontrun{ # cf. Figure 3 of Moore and Tsiatis (1991)
+\dontrun{  # cf. Figure 3 of Moore and Tsiatis (1991)
 with(lirat, plot(hb, R / N, pch = as.character(grp), col = grp, las = 1,
                  xlab = "Hemoglobin level", ylab = "Proportion Dead",
                  main = "Fitted values (lines)"))
 smalldf <- with(lirat, lirat[N > 1, ])
-for(gp in 1:4) {
+for (gp in 1:4) {
   xx <- with(smalldf, hb[grp == gp])
   yy <- with(smalldf, fitted(fit2)[grp == gp])
   ooo <- order(xx)
-  lines(xx[ooo], yy[ooo], col = gp) } }
+  lines(xx[ooo], yy[ooo], col = gp)
+} }
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/betabinomial.ab.Rd b/man/betabinomial.ab.Rd
index 83664c1..1bce1e8 100644
--- a/man/betabinomial.ab.Rd
+++ b/man/betabinomial.ab.Rd
@@ -7,6 +7,7 @@
   The two parameters here are the shape parameters of the underlying
   beta distribution.
 
+
 }
 \usage{
 betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
@@ -20,6 +21,7 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
   of the beta distribution.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{i1, i2}{ 
   Initial value for the shape parameters.
@@ -28,14 +30,16 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
   If a failure to converge occurs, try assigning a different value
   to \code{i1} and/or using \code{i2}.
 
+
   }
   \item{zero}{ 
   An integer specifying which linear/additive predictor is to be modelled
-  as an intercept only.  If assigned, the single value should be either
-  \code{1} or \code{2}.  The default is to model both shape parameters
-  as functions of the covariates.  If a failure to converge occurs,
+  as an intercept only. If assigned, the single value should be either
+  \code{1} or \code{2}. The default is to model both shape parameters
+  as functions of the covariates. If a failure to converge occurs,
   try \code{zero = 2}.
 
+
   }
   \item{shrinkage.init, nsimEIM, imethod}{
   See \code{\link{CommonVGAMffArguments}} for more information.
@@ -43,6 +47,7 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
   Using the argument \code{nsimEIM} may offer large advantages for large
   values of \eqn{N} and/or large data sets.
 
+
   }
 }
 \details{
@@ -112,6 +117,7 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
 
 }
 \references{
+
   Moore, D. F. and Tsiatis, A. (1991)
   Robust estimation of the variance in moment methods for
   extra-binomial and extra-Poisson variation.
@@ -186,7 +192,7 @@ y <- rbetabinom.ab(n = 100, size = N, shape1 = s1, shape2 = s2)
 fit <- vglm(cbind(y, N-y) ~ 1, betabinomial.ab, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
-head(fit@misc$rho) # The correlation parameter
+head(fit@misc$rho)  # The correlation parameter
 head(cbind(depvar(fit), weights(fit, type = "prior")))
 
 
@@ -195,7 +201,7 @@ fit <- vglm(cbind(R, N-R) ~ 1, betabinomial.ab, data = lirat,
             trace = TRUE, subset = N > 1)
 coef(fit, matrix = TRUE)
 Coef(fit)
-fit@misc$rho      # The correlation parameter
+fit@misc$rho  # The correlation parameter
 t(fitted(fit))
 t(depvar(fit))
 t(weights(fit, type = "prior"))
@@ -207,26 +213,27 @@ all.equal(c(fitted(fit)),
 
 # Example 3, which is more complicated
 lirat <- transform(lirat, fgrp = factor(grp))
-summary(lirat)   # Only 5 litters in group 3
+summary(lirat)  # Only 5 litters in group 3
 fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial.ab(zero = 2),
            data = lirat, trace = TRUE, subset = N > 1)
 coef(fit2, matrix = TRUE)
 coef(fit2, matrix = TRUE)[, 1] -
-coef(fit2, matrix = TRUE)[, 2] # logit(p)
+coef(fit2, matrix = TRUE)[, 2]  # logit(p)
 \dontrun{ with(lirat, plot(hb[N > 1], fit2@misc$rho,
                  xlab = "Hemoglobin", ylab = "Estimated rho",
                  pch = as.character(grp[N > 1]), col = grp[N > 1])) }
-\dontrun{ # cf. Figure 3 of Moore and Tsiatis (1991)
+\dontrun{  # cf. Figure 3 of Moore and Tsiatis (1991)
 with(lirat, plot(hb, R / N, pch = as.character(grp), col = grp, las = 1,
             xlab = "Hemoglobin level", ylab = "Proportion Dead",
             main = "Fitted values (lines)"))
 
 smalldf <- with(lirat, lirat[N > 1, ])
-for(gp in 1:4) {
-    xx <- with(smalldf, hb[grp == gp])
-    yy <- with(smalldf, fitted(fit2)[grp == gp])
-    ooo <- order(xx)
-    lines(xx[ooo], yy[ooo], col = gp) } }
+for (gp in 1:4) {
+  xx <- with(smalldf, hb[grp == gp])
+  yy <- with(smalldf, fitted(fit2)[grp == gp])
+  ooo <- order(xx)
+  lines(xx[ooo], yy[ooo], col = gp)
+} }
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 5fe5dbb..58ce807 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -135,17 +135,17 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
 bdata <- data.frame(y = rbeta(nn <- 1000, shape1 = exp(0), shape2 = exp(1)))
 fit1 <- vglm(y ~ 1, betaff, bdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
-Coef(fit1) # Useful for intercept-only models
+Coef(fit1)  # Useful for intercept-only models
 
 # General A and B, and with a covariate
 bdata <- transform(bdata, x2 = runif(nn))
 bdata <- transform(bdata, mu   = logit(0.5 - x2, inverse = TRUE),
-                          prec =   exp(3.0 + x2)) # prec == phi
+                          prec =   exp(3.0 + x2))  # prec == phi
 bdata <- transform(bdata, shape2 = prec * (1 - mu),
                          shape1 = mu * prec)
 bdata <- transform(bdata,
                    y = rbeta(nn, shape1 = shape1, shape2 = shape2))
-bdata <- transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
+bdata <- transform(bdata, Y = 5 + 8 * y)  # From 5 to 13, not 0 to 1
 fit <- vglm(Y ~ x2, data = bdata, trace = TRUE,
             betaff(A = 5, B = 13, lmu = elogit(min = 5, max = 13)))
 coef(fit, matrix = TRUE)
diff --git a/man/betageomUC.Rd b/man/betageomUC.Rd
index 7095511..3672833 100644
--- a/man/betageomUC.Rd
+++ b/man/betageomUC.Rd
@@ -9,6 +9,7 @@
   Density, distribution function, and random
   generation for the beta-geometric distribution.
 
+
 }
 \usage{
 dbetageom(x, shape1, shape2, log = FALSE)
@@ -16,20 +17,28 @@ pbetageom(q, shape1, shape2, log.p = FALSE)
 rbetageom(n, shape1, shape2)
 }
 \arguments{
-  \item{x, q}{vector of quantiles.}
+  \item{x, q}{vector of quantiles.
+
+
+  }
 % \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-  Must be a positive integer of length 1.}
+  Same as \code{\link[stats]{runif}}.
+
+
+  }
   \item{shape1, shape2}{
   the two (positive) shape parameters of the standard
   beta distribution. They are called \code{a} and \code{b} in
   \code{\link[base:Special]{beta}} respectively.
 
+
   }
   \item{log, log.p}{
   Logical.
   If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
 
+
   }
 }
 \value{
@@ -37,6 +46,8 @@ rbetageom(n, shape1, shape2)
   \code{pbetageom} gives the distribution function, and
 % \code{qbetageom} gives the quantile function, and
   \code{rbetageom} generates random deviates.
+
+
 }
 \author{ T. W. Yee }
 \details{
@@ -56,19 +67,21 @@ rbetageom(n, shape1, shape2)
 \note{
   \code{pbetageom} can be particularly slow.
 
+
 }
 \seealso{
   \code{\link{geometric}},
   \code{\link{betaff}},
   \code{\link[stats:Beta]{Beta}}.
 
+
 }
 \examples{
 \dontrun{
-shape1 = 1; shape2 = 2; y = 0:30
-proby = dbetageom(y, shape1, shape2, log = FALSE)
+shape1 <- 1; shape2 <- 2; y <- 0:30
+proby <- dbetageom(y, shape1, shape2, log = FALSE)
 plot(y, proby, type = "h", col = "blue", ylab = "P[Y=y]", main = paste(
-     "Y ~ Beta-geometric(shape1=", shape1,", shape2=", shape2,")", sep=""))
+     "Y ~ Beta-geometric(shape1=", shape1,", shape2=", shape2, ")", sep = ""))
 sum(proby)
 }
 }
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
index a9de4df..6e49f63 100644
--- a/man/betageometric.Rd
+++ b/man/betageometric.Rd
@@ -9,7 +9,7 @@
 \usage{
 betageometric(lprob = "logit", lshape = "loge",
               iprob = NULL,    ishape = 0.1,
-              moreSummation=c(2,100), tolerance=1.0e-10, zero=NULL)
+              moreSummation = c(2, 100), tolerance = 1.0e-10, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,12 +20,14 @@ betageometric(lprob = "logit", lshape = "loge",
   The former lies in the unit interval and the latter is positive.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iprob, ishape}{ 
   Numeric. 
   Initial values for the two parameters.
   A \code{NULL} means a value is computed internally.
 
+
   }
   \item{moreSummation}{ 
   Integer, of length 2. 
@@ -34,18 +36,21 @@ betageometric(lprob = "logit", lshape = "loge",
   upper limit is an approximation to infinity.
   Here, \code{y} is the response.
 
+
   }
   \item{tolerance}{ 
   Positive numeric. 
   When all terms are less than this then the series is deemed to have
   converged.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
   linear/additive predictors are modelled as intercepts only.
   If used, the value must be from the set \{1,2\}.
 
+
   }
 }
 \details{
@@ -64,18 +69,21 @@ betageometric(lprob = "logit", lshape = "loge",
   \eqn{E(Y) = shape2 / (shape1-1) = (1-p) / (p-\phi)}{E(Y) = 
        shape2 / (shape1-1) = (1-prob) / (prob-phi)}.
 
+
   The geometric distribution is a special case of the beta-geometric
   distribution with \eqn{\phi=0}{phi=0} (see \code{\link{geometric}}).
   However, fitting data from a geometric distribution may result in
   numerical problems because the estimate of \eqn{\log(\phi)}{log(phi)}
   will 'converge' to \code{-Inf}.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}},
   and \code{\link{vgam}}.
 
+
 }
 \references{
   Paul, S. R. (2005)
@@ -83,6 +91,7 @@ betageometric(lprob = "logit", lshape = "loge",
   an application to human fecundability data.
   \emph{Journal of Modern Applied Statistical Methods}, \bold{4}, 425--433.
 
+
 }
 
 \author{ T. W. Yee }
@@ -90,14 +99,16 @@ betageometric(lprob = "logit", lshape = "loge",
   The first iteration may be very slow;
   if practical, it is best for the \code{weights} argument of
   \code{\link{vglm}} etc. to be used rather than inputting a very
-  long vector as the response, i.e., \code{vglm(y ~ 1, ..., weights=wts)}
+  long vector as the response, i.e., \code{vglm(y ~ 1, ..., weights = wts)}
   is to be preferred over \code{vglm(rep(y, wts) ~ 1, ...)}.
   If convergence problems occur try inputting some values of argument
   \code{ishape}.
 
+
   If an intercept-only model is fitted then the \code{misc} slot of the
   fitted object has list components \code{shape1} and \code{shape2}.
 
+
 }
 
 \seealso{ 
@@ -109,15 +120,15 @@ betageometric(lprob = "logit", lshape = "loge",
 }
 \examples{
 bdata <- data.frame(y = 0:11, wts = c(227,123,72,42,21,31,11,14,6,4,7,28))
-fit  <- vglm(y ~ 1, betageometric, bdata, weight = wts, trace = TRUE)
+fitb <- vglm(y ~ 1, betageometric, bdata, weight = wts, trace = TRUE)
 fitg <- vglm(y ~ 1,     geometric, bdata, weight = wts, trace = TRUE)
-coef(fit, matrix = TRUE)
-Coef(fit)
-sqrt(diag(vcov(fit, untransform = TRUE)))
-fit@misc$shape1
-fit@misc$shape2
+coef(fitb, matrix = TRUE)
+Coef(fitb)
+sqrt(diag(vcov(fitb, untransform = TRUE)))
+fitb@misc$shape1
+fitb@misc$shape2
 # Very strong evidence of a beta-geometric:
-pchisq(2*(logLik(fit) - logLik(fitg)), df = 1, lower.tail = FALSE)
+pchisq(2 * (logLik(fitb) - logLik(fitg)), df = 1, lower.tail = FALSE)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/betanormUC.Rd b/man/betanormUC.Rd
index bccce6d..2585c44 100644
--- a/man/betanormUC.Rd
+++ b/man/betanormUC.Rd
@@ -21,28 +21,35 @@ rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-    Must be a positive integer of length 1.}
+  Same as \code{\link[stats]{runif}}.
+
+
+  }
   \item{shape1, shape2}{
   the two (positive) shape parameters of the standard beta distribution.
   They are called \code{a} and \code{b} respectively in
   \code{\link[base:Special]{beta}}.
 
+
   }
   \item{mean, sd}{
   the mean and standard deviation of the univariate
   normal distribution
   (\code{\link[stats:Normal]{Normal}}).
 
+
   }
   \item{log, log.p}{
   Logical.
   If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
 
+
   }
   \item{lower.tail}{
   Logical. If \code{TRUE} then the upper tail is returned, i.e.,
   one minus the usual answer.
 
+
   }
 
 }
@@ -52,6 +59,7 @@ rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
   \code{qbetanorm} gives the quantile function, and
   \code{rbetanorm} generates random deviates.
 
+
 }
 \references{
 
@@ -65,33 +73,34 @@ rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
 }
 \author{ T. W. Yee }
 \details{
-  The function \code{betanormal1}, the \pkg{VGAM} family function
+  The function \code{betauninormal}, the \pkg{VGAM} family function
   for estimating the parameters, 
   has not yet been written.
 
+
 % for the formula of the probability density function and other details.
 }
 %\note{
 %}
 %\seealso{
-%  zz code{link{betanormal1}}.
+%  zz code{link{betauninormal}}.
 %}
 \examples{
 \dontrun{
-shape1 = 0.1; shape2 = 4; m = 1
-x = seq(-10, 2, len=501)
-plot(x, dbetanorm(x, shape1, shape2, m=m), type="l", ylim=0:1, las=1,
-     ylab=paste("betanorm(",shape1,", ",shape2,", m=",m, ", sd=1)", sep=""),
-     main="Blue is density, red is cumulative distribution function",
-     sub="Purple lines are the 10,20,...,90 percentiles", col="blue")
-lines(x, pbetanorm(x, shape1, shape2, m=m), col="red")
-abline(h=0)
-probs = seq(0.1, 0.9, by=0.1)
-Q = qbetanorm(probs, shape1, shape2, m=m)
-lines(Q, dbetanorm(Q, shape1, shape2, m=m), col="purple", lty=3, type="h")
-lines(Q, pbetanorm(Q, shape1, shape2, m=m), col="purple", lty=3, type="h")
-abline(h=probs, col="purple", lty=3)
-pbetanorm(Q, shape1, shape2, m=m) - probs # Should be all 0
+shape1 <- 0.1; shape2 <- 4; m <- 1
+x <- seq(-10, 2, len = 501)
+plot(x, dbetanorm(x, shape1, shape2, m = m), type = "l", ylim = 0:1, las = 1,
+     ylab = paste("betanorm(",shape1,", ",shape2,", m=",m, ", sd=1)", sep = ""),
+     main = "Blue is density, orange is cumulative distribution function",
+     sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue")
+lines(x, pbetanorm(x, shape1, shape2, m = m), col = "orange")
+abline(h = 0)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qbetanorm(probs, shape1, shape2, m = m)
+lines(Q, dbetanorm(Q, shape1, shape2, m = m), col = "purple", lty = 3, type = "h")
+lines(Q, pbetanorm(Q, shape1, shape2, m = m), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+pbetanorm(Q, shape1, shape2, m = m) - probs  # Should be all 0
 }
 }
 \keyword{distribution}
diff --git a/man/betaprime.Rd b/man/betaprime.Rd
index b804404..c95874f 100644
--- a/man/betaprime.Rd
+++ b/man/betaprime.Rd
@@ -16,12 +16,14 @@ betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
   Parameter link function applied to the two (positive) shape parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{i1, i2}{
   Initial values for the first and second shape parameters.
   A \code{NULL} value means it is obtained in the \code{initialize} slot.
   Note that \code{i2} is obtained using \code{i1}.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which linear/additive predictors
@@ -30,6 +32,7 @@ betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
   respectively.  If \code{zero=NULL} then both parameters are modelled
   with the explanatory variables.
 
+
   }
 }
 
@@ -37,14 +40,13 @@ betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
 
 \details{
   The beta-prime distribution is given by
-  \deqn{f(y) = y^{shape1-1} (1+y)^{-shape1-shape2} /
-  B(shape1,shape2)}{%
-  f(y) = y^(shape1-1) * (1+y)^(-shape1-shape2) /
-  B(shape1,shape2) }
+  \deqn{f(y) = y^{shape1-1}   (1+y)^{-shape1-shape2} / B(shape1,shape2)}{%
+        f(y) = y^(shape1-1) * (1+y)^(-shape1-shape2) / B(shape1,shape2) }
   for \eqn{y > 0}.
   The shape parameters are positive, and
   here, \eqn{B} is the beta function.
-  The mean of \eqn{Y} is \eqn{shape1 / (shape2-1)} provided \eqn{shape2>1}.
+  The mean of \eqn{Y} is \eqn{shape1 / (shape2-1)} provided \eqn{shape2>1};
+  these are returned as the fitted values.
 
 
   If \eqn{Y} has a \eqn{Beta(shape1,shape2)} distribution then
@@ -96,7 +98,8 @@ contains further information and examples.
 }
 
 \seealso{ 
-  \code{\link{betaff}}.
+  \code{\link{betaff}},
+  \code{\link[stats]{Beta}}.
 
 
 }
@@ -104,22 +107,23 @@ contains further information and examples.
 nn <- 1000
 bdata <- data.frame(shape1 = exp(1), shape2 = exp(3))
 bdata <- transform(bdata, yb = rbeta(nn, shape1, shape2))
-bdata <- transform(bdata, y1 = (1-yb)/yb, y2 = yb/(1-yb),
-                             y3 = rgamma(nn, exp(3)) / rgamma(nn, exp(2)))
+bdata <- transform(bdata, y1 = (1-yb) /    yb,
+                          y2 =    yb  / (1-yb),
+                          y3 = rgamma(nn, exp(3)) / rgamma(nn, exp(2)))
 
-fit1 <- vglm(y1 ~ 1, betaprime, bdata, trace = TRUE)
+fit1 <- vglm(y1 ~ 1, betaprime, data = bdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 
-fit2 <- vglm(y2 ~ 1, betaprime, bdata, trace = TRUE)
+fit2 <- vglm(y2 ~ 1, betaprime, data = bdata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 
-fit3 <- vglm(y3 ~ 1, betaprime, bdata, trace = TRUE)
+fit3 <- vglm(y3 ~ 1, betaprime, data = bdata, trace = TRUE)
 coef(fit3, matrix = TRUE)
 
 # Compare the fitted values
 with(bdata, mean(y3))
 head(fitted(fit3))
-Coef(fit3) # Useful for intercept-only models
+Coef(fit3)  # Useful for intercept-only models
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/biclaytoncop.Rd b/man/biclaytoncop.Rd
new file mode 100644
index 0000000..dbe98b7
--- /dev/null
+++ b/man/biclaytoncop.Rd
@@ -0,0 +1,132 @@
+\name{biclaytoncop}
+\alias{biclaytoncop}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Clayton Copula (Bivariate) Family Function }
+\description{
+  Estimate the correlation parameter of 
+  the (bivariate) Clayton copula
+  distribution by maximum likelihood estimation.
+
+}
+\usage{
+biclaytoncop(lalpha = "loge", ialpha = NULL, imethod = 1,
+             parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lalpha, ialpha, imethod}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+  See \code{\link{Links}} for more link function choices.
+
+
+  }
+  \item{parallel, zero}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+  If \code{parallel = TRUE} then the constraint is also applied
+  to the intercept.
+
+
+  }
+}
+\details{
+
+  The cumulative distribution function is
+  \deqn{P(u_1, u_2;\alpha) = (u_1^{-\alpha} + u_2^{-\alpha}-1)^{-1/\alpha}}{%
+        P(u1,u2,alpha) = (u1^(-alpha) + u2^(-alpha)-1)^(-1/alpha)}
+  for \eqn{0 \leq \alpha }{0 <= alpha}.
+  The support of the function is the interior of the unit square;
+  however, values of 0 and/or 1 are not allowed (currently).
+  The marginal distributions are the standard uniform distributions.
+  When \eqn{\alpha = 0}{alpha=0} the random variables are independent.
+
+
+  This \pkg{VGAM} family function can handle multiple responses,
+  for example, a six-column matrix where the first 2 columns
+  is the first out of three responses,
+  the next 2 columns being the next response, etc.
+
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}}
+  and \code{\link{vgam}}.
+
+
+}
+
+\references{
+
+%A Model for Association in Bivariate Survival Data.
+
+
+Clayton, D. (1982)
+A model for association in bivariate survival data.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{44}, 414--422.
+
+
+Stober, J. and Schepsmeier, U. (2013)
+Derivatives and Fisher information of bivariate copulas.
+\emph{Statistical Papers}.
+
+
+}
+\author{ R. Feyter and T. W. Yee }
+\note{
+  The response matrix must have a multiple of two columns.
+  Currently, the fitted value is a matrix with the same number of
+  columns as the response, with all values equal to 0.5.
+  This is because each marginal distribution corresponds to a standard
+  uniform distribution.
+
+
+  This \pkg{VGAM} family function is fragile;
+  each response must be in the interior of the unit square.
+%  Setting \code{crit = "coef"} is sometimes a good idea because
+%  inaccuracies in \code{\link{pbinorm}} might mean
+%  unnecessary half-stepping will occur near the solution.
+
+
+}
+
+\seealso{
+  \code{\link{rbiclaytoncop}},
+  \code{\link{dbiclaytoncop}},
+  \code{\link{kendall.tau}}.
+
+
+}
+\examples{
+ymat <- rbiclaytoncop(n = (nn <- 1000), alpha = exp(2))
+bdata <- data.frame(y1 = ymat[, 1],
+                    y2 = ymat[, 2],
+                    y3 = ymat[, 1],
+                    y4 = ymat[, 2],
+                    x2 = runif(nn))
+
+summary(bdata)
+\dontrun{ plot(ymat, col = "blue") }
+fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1,  # 2 responses, e.g., (y1,y2) is the first
+             biclaytoncop, data = bdata,
+             trace = TRUE, crit = "coef")  # Sometimes a good idea
+
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+head(fitted(fit1))
+summary(fit1)
+
+# Another example; alpha is a function of x2
+bdata <- transform(bdata, alpha = exp(-0.5 + x2))
+ymat <- rbiclaytoncop(n = nn, alpha = with(bdata, alpha))
+bdata <- transform(bdata, y5 = ymat[, 1],
+                          y6 = ymat[, 2])
+fit2 <- vgam(cbind(y5, y6) ~ s(x2), data = bdata,
+             biclaytoncop(lalpha = "loge"), trace = TRUE)
+\dontrun{ plot(fit2, lcol = "blue", scol = "orange", se = TRUE, las = 1) }
+}
+\keyword{models}
+\keyword{regression}
+
+% for real \eqn{\alpha}{alpha} in (-1,1).
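
As a quick, hand-rolled check of the Clayton copula CDF documented above, the
formula can be evaluated directly in base R (a sketch only; the helper name
clayton.cdf is purely illustrative and is not part of VGAM):

clayton.cdf <- function(u1, u2, alpha)
  (u1^(-alpha) + u2^(-alpha) - 1)^(-1/alpha)
clayton.cdf(0.3, 0.7, alpha = 2)     # Positive dependence
clayton.cdf(0.3, 0.7, alpha = 1e-8)  # Near independence: approx 0.3 * 0.7
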
diff --git a/man/biclaytoncopUC.Rd b/man/biclaytoncopUC.Rd
new file mode 100644
index 0000000..f35fa83
--- /dev/null
+++ b/man/biclaytoncopUC.Rd
@@ -0,0 +1,106 @@
+\name{Biclaytoncop}
+\alias{dbiclaytoncop}
+%\alias{pbiclaytoncop}
+\alias{rbiclaytoncop}
+\title{Clayton Copula (Bivariate) Distribution}
+\description{
+  Density and random generation
+  for the (one parameter) bivariate 
+  Clayton copula distribution.
+
+
+}
+\usage{
+dbiclaytoncop(x1, x2, alpha = 0, log = FALSE)
+rbiclaytoncop(n, alpha = 0)
+}
+%pbiclaytoncop(q1, q2, rho = 0)
+\arguments{
+  \item{x1, x2}{vector of quantiles.
+  The \code{x1} and \code{x2} should both be in the interval \eqn{(0,1)}.
+
+
+  }
+  \item{n}{number of observations.
+    Same as \code{\link[stats]{rnorm}}.
+
+  }
+  \item{alpha}{the association parameter.
+  Should be in the interval \eqn{[0, \infty)}{[0, Inf)}.
+  The default corresponds to independence.
+
+
+  }
+  \item{log}{
+  Logical.
+  If \code{TRUE} then the logarithm is returned.
+%   Same as \code{\link[stats]{rnorm}}.
+
+
+  }
+}
+\value{
+  \code{dbiclaytoncop} gives the density at the point (\code{x1}, \code{x2}), and
+  \code{rbiclaytoncop} generates random deviates (a two-column matrix).
+
+% \code{pbiclaytoncop} gives the distribution function, and
+
+
+}
+\references{
+
+% A Model for Association in Bivariate Survival Data
+
+
+Clayton, D. (1982)
+A model for association in bivariate survival data.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{44}, 414--422.
+
+
+}
+
+\author{ R. Feyter and T. W. Yee }
+\details{
+  See \code{\link{biclaytoncop}}, the \pkg{VGAM}
+  family functions for estimating the
+  parameter by maximum likelihood estimation,
+  for the formula of the
+  cumulative distribution function and other details.
+
+
+}
+\note{
+  \code{dbiclaytoncop()} does not yet handle
+  \code{x1 = 0} and/or \code{x2 = 0}.
+
+
+%Yettodo: allow \code{x1} and/or \code{x2} to have values 1,
+%and to allow any values for \code{x1} and/or \code{x2} to be
+%outside the unit square.
+
+
+}
+\seealso{
+  \code{\link{binormalcop}},
+  \code{\link{binormal}}.
+
+
+}
+\examples{
+\dontrun{ edge <- 0.01  # A small positive value
+N <- 101; x <- seq(edge, 1.0 - edge, len = N); Rho <- 0.7
+ox <- expand.grid(x, x)
+zedd <- dbiclaytoncop(ox[, 1], ox[, 2], alpha = Rho, log = TRUE)
+par(mfrow = c(1, 2))
+contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5, las = 1)
+plot(rbiclaytoncop(1000, 2), col = "blue", las = 1)
+}
+}
+\keyword{distribution}
+
+
+%plot(r <- rbiclaytoncop(n = 3000, alpha = exp(2)), col = "blue")
+%par(mfrow = c(1, 2))
+%hist(r[, 1])  # Should be uniform
+%hist(r[, 2])  # Should be uniform
diff --git a/man/gumbelIbiv.Rd b/man/bigumbelI.Rd
similarity index 93%
rename from man/gumbelIbiv.Rd
rename to man/bigumbelI.Rd
index 2a402e8..32e5c4b 100644
--- a/man/gumbelIbiv.Rd
+++ b/man/bigumbelI.Rd
@@ -1,14 +1,15 @@
-\name{gumbelIbiv}
-\alias{gumbelIbiv}
+\name{bigumbelI}
+\alias{bigumbelI}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Gumbel's Type I Bivariate Distribution Family Function }
 \description{
   Estimate the association parameter of Gumbel's Type I bivariate
   distribution by maximum likelihood estimation.
 
+
 }
 \usage{
-gumbelIbiv(lapar = "identity", iapar = NULL, imethod = 1)
+bigumbelI(lapar = "identity", iapar = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -17,6 +18,7 @@ gumbelIbiv(lapar = "identity", iapar = NULL, imethod = 1)
   \eqn{\alpha}{alpha}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iapar}{
   Numeric. Optional initial value for \eqn{\alpha}{alpha}.
@@ -24,12 +26,14 @@ gumbelIbiv(lapar = "identity", iapar = NULL, imethod = 1)
   If a convergence failure occurs try assigning a different value.
   Assigning a value will override the argument \code{imethod}.
 
+
   }
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
   specifies the initialization method. If failure to converge occurs
   try the other value, or else specify a value for \code{ia}.
 
+
   }
 }
 \details{
@@ -89,7 +93,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 nn <- 1000
 gdata <- data.frame(y1 = rexp(nn), y2 = rexp(nn))
 \dontrun{ with(gdata, plot(cbind(y1, y2))) }
-fit <- vglm(cbind(y1, y2) ~ 1, fam = gumbelIbiv, gdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, fam = bigumbelI, gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
diff --git a/man/bilogis4UC.Rd b/man/bilogis4UC.Rd
index 4c62d31..42b92b8 100644
--- a/man/bilogis4UC.Rd
+++ b/man/bilogis4UC.Rd
@@ -8,6 +8,7 @@
   Density, distribution function, quantile function and random generation
   for the 4-parameter bivariate logistic distribution.
 
+
 }
 \usage{
 dbilogis4(x1, x2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1, log = FALSE)
@@ -17,13 +18,17 @@ rbilogis4(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
   \item{n}{number of observations.
-    Same as \code{\link[stats]{rlogis}}. }
+    Same as \code{\link[stats]{rlogis}}.
+
+
+  }
   \item{loc1, loc2}{the location parameters \eqn{l_1}{l1} and \eqn{l_2}{l2}.}
   \item{scale1, scale2}{the scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.}
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -60,8 +65,7 @@ Bivariate logistic distributions.
 }
 \examples{
 \dontrun{ par(mfrow = c(1, 3))
-n <- 2000
-ymat <- rbilogis4(n, loc1 = 5, loc2 = 7, scale2 = exp(1))
+ymat <- rbilogis4(n = 2000, loc1 = 5, loc2 = 7, scale2 = exp(1))
 myxlim <- c(-2, 15); myylim <- c(-10, 30)
 plot(ymat, xlim = myxlim, ylim = myylim)
 
diff --git a/man/bilogistic4.Rd b/man/bilogistic4.Rd
index b21bd0f..a9661b9 100644
--- a/man/bilogistic4.Rd
+++ b/man/bilogistic4.Rd
@@ -19,28 +19,38 @@ bilogistic4(llocation = "identity", lscale = "loge",
   \eqn{l_1}{l1} and \eqn{l_2}{l2}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{lscale}{
   Parameter link function applied to both
   (positive) scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.
   See \code{\link{Links}} for more choices. 
 
+
   }
   \item{iloc1, iloc2}{ Initial values for the location parameters.
     By default, initial values are chosen internally using
     \code{imethod}. Assigning values here will override
-    the argument \code{imethod}. }
+    the argument \code{imethod}.
+
+  }
   \item{iscale1, iscale2}{ Initial values for the scale parameters.
     By default, initial values are chosen internally using
     \code{imethod}. Assigning values here will override
-    the argument \code{imethod}. }
+    the argument \code{imethod}.
+
+  }
   \item{imethod}{ An integer with value \code{1} or \code{2} which
     specifies the initialization method. If failure to converge occurs
-    try the other value. }
+    try the other value.
+
+  }
   \item{zero}{ An integer-valued vector specifying which
   linear/additive predictors are modelled as intercepts only.
   The default is none of them.
   If used, choose values from the set \{1,2,3,4\}.
+
+
   }
 }
 \details{
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 3bc519a..510b0a7 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -14,7 +14,7 @@
 \usage{
 binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
           imu1 = NULL, imu2 = NULL, ioratio = NULL, zero = 3,
-          exchangeable = FALSE, tol = 0.001, morerobust = FALSE)
+          exchangeable = FALSE, tol = 0.001, more.robust = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -23,16 +23,19 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
   See \code{\link{Links}} for more choices.
   See the note below.
 
+
   }
   \item{lmu1, lmu2}{
   Link function applied to the first and second of the two marginal
   probabilities.
 
+
   }
   \item{loratio}{
   Link function applied to the odds ratio.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imu1, imu2, ioratio}{
   Optional initial values for the marginal probabilities and odds ratio.
@@ -40,28 +43,35 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
   In general good initial values are often required so use these
   arguments if convergence failure occurs.
 
+
   }
   \item{zero}{
-  Which linear/additive predictor is modelled as an intercept only? A
-  \code{NULL} means none.
+  Which linear/additive predictor is modelled as an intercept only?
+  A \code{NULL} means none.
+
 
   }
   \item{exchangeable}{
-  Logical. If \code{TRUE}, the two marginal probabilities are constrained
-  to be equal.
+  Logical.
+  If \code{TRUE}, the two marginal probabilities are constrained to be equal.
+
+
+
 
   }
   \item{tol}{
   Tolerance for testing independence. Should be some
   small positive numerical value.
 
+
   }
-  \item{morerobust}{
+  \item{more.robust}{
   Logical. If \code{TRUE} then some measures are taken to compute the
   derivatives and working weights more robustly, i.e., in an attempt
   to avoid numerical problems. Currently this feature has not been debugged
   when set to \code{TRUE}.
 
+
   }
 }
 \details{
@@ -202,7 +212,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
 \examples{
 # Fit the model in Table 6.7 in McCullagh and Nelder (1989)
 coalminers <- transform(coalminers, Age = (age - 42) / 5)
-fit <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero = NULL), coalminers)
+fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ Age, binom2.or(zero = NULL), coalminers)
 fitted(fit)
 summary(fit)
 coef(fit, matrix = TRUE)
@@ -212,10 +222,10 @@ c(weights(fit, type = "prior")) * fitted(fit)  # Table 6.8
                          xlab = "(age - 42) / 5", lwd = 2))
 with(coalminers, matpoints(Age, depvar(fit), col=1:4))
 legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
-       legend=c("1 = (Breathlessness=0, Wheeze=0)",
-                "2 = (Breathlessness=0, Wheeze=1)",
-                "3 = (Breathlessness=1, Wheeze=0)",
-                "4 = (Breathlessness=1, Wheeze=1)")) }
+       legend = c("1 = (Breathlessness=0, Wheeze=0)",
+                  "2 = (Breathlessness=0, Wheeze=1)",
+                  "3 = (Breathlessness=1, Wheeze=0)",
+                  "4 = (Breathlessness=1, Wheeze=1)")) }
 
 
 # Another model: pet ownership
@@ -224,10 +234,10 @@ legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
 petdata <- subset(xs.nz, ethnic == "0" & age < 70 & sex == "M")
 petdata <- na.omit(petdata[, c("cat", "dog", "age")])
 summary(petdata)
-with(petdata, table(cat, dog)) # Can compute the odds ratio
+with(petdata, table(cat, dog))  # Can compute the odds ratio
 
-fit <- vgam(cbind((1-cat)*(1-dog), (1-cat)*dog,
-                     cat *(1-dog),    cat *dog) ~ s(age, df = 5),
+fit <- vgam(cbind((1-cat) * (1-dog), (1-cat) * dog,
+                     cat  * (1-dog),    cat  * dog) ~ s(age, df = 5),
             binom2.or(zero =    3), data = petdata, trace = TRUE)
 colSums(depvar(fit))
 coef(fit, matrix = TRUE)
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index fe79ba9..3cdf4b2 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -10,11 +10,12 @@
 }
 \usage{
 binomialff(link = "logit", dispersion = 1, mv = FALSE,
-           onedpar = !mv, parallel = FALSE, apply.parint = FALSE,
+           onedpar = !mv, parallel = FALSE,
            zero = NULL, bred = FALSE, earg.link = FALSE)
 
 }
 %- maybe also 'usage' for other objects documented here.
+% apply.parint = FALSE,
 \arguments{
 
   \item{link}{
@@ -58,6 +59,8 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
   argument allows for the parallelism assumption whereby the regression
   coefficients for a variable are constrained to be equal over the \eqn{M}
   linear/additive predictors.
+  If \code{parallel = TRUE} then the constraint is not applied to the
+  intercepts.
 
 
   }
@@ -70,7 +73,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
 
 
   }
-  \item{apply.parint, earg.link}{ 
+  \item{earg.link}{ 
   Details at \code{\link{CommonVGAMffArguments}}.
 
 
@@ -197,8 +200,8 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
     \code{\link{betabinomial}},
     \code{\link{posbinomial}},
     \code{\link{zibinomial}},
-    \code{\link{dexpbinomial}},
-    \code{\link{mbinomial}},
+    \code{\link{double.expbinomial}},
+    \code{\link{matched.binomial}},
     \code{\link{seq2binomial}},
     \code{\link{amlbinomial}},
     \code{\link{simplex}},
@@ -265,7 +268,7 @@ bdata <- data.frame(x2 = sort(rnorm(nn <- 100)))
 bdata <- transform(bdata, y1 = ifelse(x2 < threshold, 0, 1))
 fit <- vglm(y1 ~ x2, binomialff(bred = TRUE),
             data = bdata, criter = "coef", trace = TRUE)
-coef(fit, matrix = TRUE) # Finite!!
+coef(fit, matrix = TRUE)  # Finite!!
 summary(fit)
 \dontrun{ plot(depvar(fit) ~ x2, data = bdata, col = "blue", las = 1)
 lines(fitted(fit) ~ x2, data = bdata, col = "orange")
diff --git a/man/binormal.Rd b/man/binormal.Rd
index e00ef66..d0945c8 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -28,16 +28,19 @@ binormal(lmean1 = "identity", lmean2 = "identity",
   Being positive quantities, a log link is the default for the
   standard deviations.
 
+
   }
   \item{imean1, imean2, isd1, isd2, irho, imethod, zero}{ 
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
   \item{eq.mean, eq.sd}{ 
   Logical or formula.
   Constrains the means or the standard deviations to be equal.
   Only one of these arguments may be assigned a value.
 
+
   }
 
 }
@@ -88,17 +91,18 @@ binormal(lmean1 = "identity", lmean2 = "identity",
 }
 
 \seealso{
-    \code{\link{normal1}},
-    \code{\link{gaussianff}},
-    \code{\link{pnorm2}}.
+  \code{\link{uninormal}},
+  \code{\link{gaussianff}},
+  \code{\link{pnorm2}},
+  \code{\link{bistudentt}}.
 
 
 }
 \examples{
 set.seed(123); nn <- 1000
 bdata <- data.frame(x2 = runif(nn), x3 = runif(nn))
-bdata <- transform(bdata, y1 = rnorm(nn, 1 + 2*x2),
-                          y2 = rnorm(nn, 3 + 4*x2))
+bdata <- transform(bdata, y1 = rnorm(nn, 1 + 2 * x2),
+                          y2 = rnorm(nn, 3 + 4 * x2))
 fit1 <- vglm(cbind(y1, y2) ~ x2,
             binormal(eq.sd = TRUE), data = bdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
diff --git a/man/pnorm2UC.Rd b/man/binormalUC.Rd
similarity index 51%
rename from man/pnorm2UC.Rd
rename to man/binormalUC.Rd
index 0dac580..2035675 100644
--- a/man/pnorm2UC.Rd
+++ b/man/binormalUC.Rd
@@ -1,18 +1,26 @@
-\name{pnorm2}
+\name{Binorm}
+\alias{Binorm}
 \alias{pnorm2}
+\alias{dbinorm}
+\alias{pbinorm}
+\alias{rbinorm}
 \title{Bivariate normal distribution cumulative distribution function}
 \description{
-% Density,
-  Cumulative distribution function
-% quantile function
-% and
-% random generation
+  Density,
+  cumulative distribution function
+  and
+  random generation
   for the bivariate normal distribution.
 
 }
+% quantile function
 \usage{
-pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
+dbinorm(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0, log = FALSE)
+pbinorm(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
+rbinorm(n,      mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
+ pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 }
+% dbinorm(x1, x2, mean1 = 0, mean2 = 0,  sd1 = 1,  sd2 = 1,   rho = 0, log = FALSE)
 \arguments{
   \item{x1, x2}{vector of quantiles.}
   \item{mean1, mean2, var1, var2, cov12}{
@@ -21,11 +29,22 @@ pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 % standard deviations and correlation parameter.
 
   }
-% \item{n}{number of observations. }
-% \item{log}{
-% Logical.
-% If \code{log = TRUE} then the logarithm of the density is returned.
+% \item{sd1, sd2, rho}{
+% vector of standard deviations and correlation parameter.
+
 % }
+  \item{n}{number of observations.
+  Same as \code{\link[stats]{rnorm}}.
+
+
+   }
+
+  \item{log}{
+  Logical.
+  If \code{log = TRUE} then the logarithm of the density is returned.
+
+
+  }
 
 
 % \item{rho}{
@@ -36,10 +55,10 @@ pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 
 }
 \value{
-% \code{dmakeham} gives the density,
-  \code{pnorm2} gives the cumulative distribution function.
-% \code{qmakeham} gives the quantile function, and
-% \code{rmakeham} generates random deviates.
+  \code{dbinorm} gives the density,
+  \code{pbinorm} gives the cumulative distribution function,
+% \code{qnorm2} gives the quantile function, and
+  \code{rbinorm} generates random deviates (\eqn{n} by 2 matrix).
 
 
 }
@@ -48,8 +67,8 @@ pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 
   The default arguments correspond to the standard bivariate normal
   distribution with correlation parameter \eqn{\rho = 0}{rho = 0}.
-  That is, two independent standard normal distibutions.
-  Let \code{sd1} be \code{sqrt(var1)} and
+  That is, two independent standard normal distributions.
+  Let \code{sd1} (say) be \code{sqrt(var1)} and
   written \eqn{\sigma_1}{sigma_1}, etc.
   Then the general formula for the correlation coefficient is
   \eqn{\rho = cov / (\sigma_1 \sigma_2)}{rho = cov / (sigma_1 * sigma_2)}
@@ -67,7 +86,8 @@ pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 }
 \references{
 
-  Based on Donnelly (1973),
+  \code{pbinorm()} is
+  based on Donnelly (1973),
   the code was translated from FORTRAN to ratfor using struct, and
   then from ratfor to C manually.
   The function was originally called \code{bivnor}, and TWY only
@@ -90,33 +110,49 @@ pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 
 
 \section{Warning}{
-  Being based on an approximation, the results may be negative!
-  Also, this function may be renamed to \code{pnormal2()}, or
-  something similar, at a later date.
+  Being based on an approximation, the results of \code{pbinorm()}
+  may be negative!
+  Also,
+  \code{pnorm2()} should be withdrawn soon;
+  use \code{pbinorm()} instead because it is identical.
+
+
+% this function used to be called \code{pnorm2()}.
+
 
+% \code{dbinorm()}'s arguments might change!
+% Currently they differ from \code{pbinorm()} 
+% and \code{rbinorm()}, so use the full argument name
+% to future-proof possible changes!
 
+ 
 }
 
 
 
 
-%\note{
-%
-%}
+\note{
+  For \code{rbinorm()},
+  if the \eqn{i}th variance-covariance matrix is not
+  positive-definite then the \eqn{i}th row is all \code{NA}s.
+
+
+
+}
 \seealso{
   \code{\link[stats]{pnorm}},
   \code{\link{binormal}},
-  \code{\link{normal1}}.
+  \code{\link{uninormal}}.
 
 
 }
 \examples{
 yvec <- c(-5, -1.96, 0, 1.96, 5)
 ymat <- expand.grid(yvec, yvec)
-cbind(ymat, pnorm2(ymat[, 1], ymat[, 2]))
+cbind(ymat, pbinorm(ymat[, 1], ymat[, 2]))
 
 \dontrun{ rhovec <- seq(-0.95, 0.95, by = 0.01)
-plot(rhovec, pnorm2(0, 0, cov12 = rhovec), type = "l", col = "blue", las = 1)
+plot(rhovec, pbinorm(0, 0, cov12 = rhovec), type = "l", col = "blue", las = 1)
 abline(v = 0, h = 0.25, col = "gray", lty = "dashed") }
 }
 \keyword{distribution}
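
A minimal sketch of the independence case described above, assuming VGAM
(0.9-3 or later) is attached; with unit variances, cov12 plays the role of
the correlation rho, and at cov12 = 0 the joint CDF factorises:

library(VGAM)
pbinorm(1.96, 0, cov12 = 0)  # Should be close to pnorm(1.96) * pnorm(0)
pnorm(1.96) * pnorm(0)
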
diff --git a/man/binormalcop.Rd b/man/binormalcop.Rd
new file mode 100644
index 0000000..366bf66
--- /dev/null
+++ b/man/binormalcop.Rd
@@ -0,0 +1,135 @@
+\name{binormalcop}
+\alias{binormalcop}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gaussian Copula (Bivariate) Family Function }
+\description{
+  Estimate the correlation parameter of 
+  the (bivariate) Gaussian copula
+  distribution by maximum likelihood estimation.
+
+}
+\usage{
+binormalcop(lrho = "rhobit", irho = NULL, imethod = 1,
+            parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+%  apply.parint = TRUE,
+\arguments{
+  \item{lrho, irho, imethod}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+  See \code{\link{Links}} for more link function choices.
+
+
+  }
+  \item{parallel, zero}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+  If \code{parallel = TRUE} then the constraint is applied to the
+  intercept too.
+
+
+  }
+}
+\details{
+  The cumulative distribution function is
+  \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = \Phi_2
+             ( \Phi^{-1}(y_1), \Phi^{-1}(y_2); \rho ) }{%
+        P(Y1 <= y1, Y2 <= y2) = 
+        Phi_2(\Phi^(-1)(y_1), \Phi^(-1)(y_2); \rho)}
+  for \eqn{-1 < \rho < 1}{-1 < rho < 1},
+  \eqn{\Phi_2}{Phi_2} is the cumulative distribution function
+  of a standard bivariate normal
+  (see \code{\link{pbinorm}}),
+  and \eqn{\Phi}{Phi} is the cumulative distribution function
+  of a standard univariate normal
+  (see \code{\link[stats]{pnorm}}).
+
+
+  The support of the function is the interior of the unit square;
+  however, values of 0 and/or 1 are not allowed.
+  The marginal distributions are the standard uniform distributions.
+  When \eqn{\rho = 0}{rho=0} the random variables are
+  independent.
+
+
+  This \pkg{VGAM} family function can handle multiple responses,
+  for example, a six-column matrix where the first 2 columns
+  are the first of three responses,
+  the next 2 columns being the next response, etc.
+
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}}
+  and \code{\link{vgam}}.
+
+
+}
+
+\references{
+
+Schepsmeier, U. and Stober, J. (2013)
+Derivatives and Fisher information of bivariate copulas.
+\emph{Statistical Papers}.
+
+
+}
+\author{ T. W. Yee }
+\note{
+  The response matrix must have a multiple of two columns.
+  Currently, the fitted value is a matrix with the same number of
+  columns as the response, with all values equal to 0.5.
+  This is because each marginal distribution corresponds to a standard
+  uniform distribution.
+
+
+  This \pkg{VGAM} family function is fragile;
+  each response must be in the interior of the unit square.
+  Setting \code{crit = "coef"} is sometimes a good idea because
+  inaccuracies in \code{\link{pbinorm}} might mean
+  unnecessary half-stepping will occur near the solution.
+
+}
+
+\seealso{
+  \code{\link{rbinormcop}},
+  \code{\link[stats]{pnorm}},
+  \code{\link{kendall.tau}}.
+
+
+}
+\examples{
+nn <- 1000
+ymat <- rbinormcop(n = nn, rho = rhobit(-0.9, inverse = TRUE))
+bdata <- data.frame(y1 = ymat[, 1],
+                    y2 = ymat[, 2],
+                    y3 = ymat[, 1],
+                    y4 = ymat[, 2],
+                    x2 = runif(nn))
+
+summary(bdata)
+\dontrun{ plot(ymat, col = "blue") }
+fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1,  # 2 responses, e.g., (y1,y2) is the first
+             fam = binormalcop,
+             crit = "coef",  # Sometimes a good idea
+             data = bdata, trace = TRUE)
+
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+head(fitted(fit1))
+summary(fit1)
+
+# Another example; rho is a linear function of x2
+bdata <- transform(bdata, rho = -0.5 + x2)
+ymat <- rbinormcop(n = nn, rho = with(bdata, rho))
+bdata <- transform(bdata, y5 = ymat[, 1],
+                          y6 = ymat[, 2])
+fit2 <- vgam(cbind(y5, y6) ~ s(x2), data = bdata,
+             binormalcop(lrho = "identity"), trace = TRUE)
+\dontrun{ plot(fit2, lcol = "blue", scol = "orange", se = TRUE, las = 1) }
+}
+\keyword{models}
+\keyword{regression}
+
+% for real \eqn{\rho}{rho} in (-1,1).
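
The CDF displayed in binormalcop.Rd above can also be sketched directly via
pbinorm() and qnorm(); the helper name gausscop.cdf is illustrative only and
assumes VGAM (0.9-3 or later) is attached:

library(VGAM)
gausscop.cdf <- function(y1, y2, rho)
  pbinorm(qnorm(y1), qnorm(y2), cov12 = rho)
gausscop.cdf(0.3, 0.7, rho = 0.5)
pbinormcop(0.3, 0.7, rho = 0.5)  # Should agree, up to the approximation in pbinorm()
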
diff --git a/man/binormcopUC.Rd b/man/binormcopUC.Rd
new file mode 100644
index 0000000..e057634
--- /dev/null
+++ b/man/binormcopUC.Rd
@@ -0,0 +1,94 @@
+\name{Binormcop}
+\alias{Binormcop}
+\alias{dbinormcop}
+\alias{pbinormcop}
+\alias{rbinormcop}
+\title{Gaussian Copula (Bivariate) Distribution}
+\description{
+  Density, distribution function,
+  and random generation
+  for the (one parameter) bivariate 
+  Gaussian copula distribution.
+
+
+}
+\usage{
+dbinormcop(x1, x2, rho = 0, log = FALSE)
+pbinormcop(q1, q2, rho = 0)
+rbinormcop(n, rho = 0)
+}
+\arguments{
+  \item{x1, x2, q1, q2}{vector of quantiles.
+  The \code{x1} and \code{x2} should be in the interval \eqn{(0,1)}.
+  Ditto for \code{q1} and \code{q2}.
+
+
+  }
+  \item{n}{number of observations.
+    Same as \code{\link[stats]{rnorm}}.
+
+  }
+  \item{rho}{the correlation parameter.
+  Should be in the interval \eqn{(-1,1)}.
+
+
+  }
+  \item{log}{
+  Logical.
+  If \code{TRUE} then the logarithm is returned.
+%   Same as \code{\link[stats]{rnorm}}.
+
+
+  }
+}
+\value{
+  \code{dbinormcop} gives the density,
+  \code{pbinormcop} gives the distribution function, and
+  \code{rbinormcop} generates random deviates (a two-column matrix).
+
+
+}
+%\references{
+%
+%}
+
+\author{ T. W. Yee }
+\details{
+  See \code{\link{binormalcop}}, the \pkg{VGAM}
+  family functions for estimating the
+  parameter by maximum likelihood estimation,
+  for the formula of the
+  cumulative distribution function and other details.
+
+
+}
+\note{
+  Yettodo: allow \code{x1} and/or \code{x2} to have values 1,
+  and to allow any values for \code{x1} and/or \code{x2} to be
+  outside the unit square.
+
+
+}
+\seealso{
+  \code{\link{binormalcop}},
+  \code{\link{binormal}}.
+
+
+}
+\examples{
+\dontrun{ edge <- 0.01  # A small positive value
+N <- 101; x <- seq(edge, 1.0 - edge, len = N); Rho <- 0.7
+ox <- expand.grid(x, x)
+zedd <- dbinormcop(ox[, 1], ox[, 2], rho = Rho, log = TRUE)
+contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5)
+zedd <- pbinormcop(ox[, 1], ox[, 2], rho = Rho)
+contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5)
+}
+}
+\keyword{distribution}
+
+
+%plot(r <- rbinormcop(n = 3000, rho = Rho), col = "blue")
+%par(mfrow = c(1, 2))
+%hist(r[, 1])  # Should be uniform
+%hist(r[, 2])  # Should be uniform
diff --git a/man/bisa.Rd b/man/bisa.Rd
index 3dd990c..aa1973c 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -123,24 +123,25 @@ New York: Wiley.
   \code{\link{pbisa}},
   \code{\link{inv.gaussianff}}.
 
+
 }
 \examples{
-bdat1 <- data.frame(x2 = runif(nn <- 1000))
-bdat1 <- transform(bdat1, shape = exp(-0.5 + x2), scale = exp(1.5))
-bdat1 <- transform(bdat1, y = rbisa(nn, shape, scale))
-fit1 <- vglm(y ~ x2, bisa(zero = 2), bdat1, trace = TRUE)
+bdata1 <- data.frame(x2 = runif(nn <- 1000))
+bdata1 <- transform(bdata1, shape = exp(-0.5 + x2), scale = exp(1.5))
+bdata1 <- transform(bdata1, y = rbisa(nn, shape, scale))
+fit1 <- vglm(y ~ x2, bisa(zero = 2), bdata1, trace = TRUE)
 coef(fit1, matrix = TRUE)
 
 \dontrun{
-bdat2 <- data.frame(shape = exp(-0.5), scale = exp(0.5))
-bdat2 <- transform(bdat2, y = rbisa(nn, shape, scale))
-fit <- vglm(y ~ 1, bisa, bdat2, trace = TRUE)
-with(bdat2, hist(y, prob = TRUE, ylim = c(0, 0.5), col = "lightblue"))
+bdata2 <- data.frame(shape = exp(-0.5), scale = exp(0.5))
+bdata2 <- transform(bdata2, y = rbisa(nn, shape, scale))
+fit <- vglm(y ~ 1, bisa, bdata2, trace = TRUE)
+with(bdata2, hist(y, prob = TRUE, ylim = c(0, 0.5), col = "lightblue"))
 coef(fit, matrix = TRUE)
-with(bdat2, mean(y))
+with(bdata2, mean(y))
 head(fitted(fit))
-x <- with(bdat2, seq(0, max(y), len = 200))
-lines(dbisa(x, Coef(fit)[1], Coef(fit)[2]) ~ x, bdat2, col = "orange", lwd = 2) }
+x <- with(bdata2, seq(0, max(y), len = 200))
+lines(dbisa(x, Coef(fit)[1], Coef(fit)[2]) ~ x, bdata2, col = "orange", lwd = 2) }
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index 98f08dc..5c0eb15 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -27,11 +27,13 @@ rbisa(n, shape, scale = 1)
   \item{shape, scale}{
   the (positive) shape and scale parameters.
 
+
   }
   \item{log}{
   Logical.
   If \code{TRUE} then the logarithm of the density is returned.
 
+
   }
 }
 \value{
@@ -40,6 +42,7 @@ rbisa(n, shape, scale = 1)
   \code{qbisa} gives the quantile function, and
   \code{rbisa} generates random deviates.
 
+
 }
 \author{ T. W. Yee }
 \details{
@@ -64,21 +67,21 @@ x <- seq(0, 6, len = 400)
 plot(x, dbisa(x, shape = 1), type = "l", col = "blue",
      ylab = "Density", lwd = 2, ylim = c(0,1.3), lty = 3,
      main = "X ~ Birnbaum-Saunders(shape, scale = 1)")
-lines(x, dbisa(x, shape = 2), col = "red", lty = 2, lwd = 2)
+lines(x, dbisa(x, shape = 2), col = "orange", lty = 2, lwd = 2)
 lines(x, dbisa(x, shape = 0.5), col = "green", lty = 1, lwd = 2)
 legend(x = 3, y = 0.9, legend = paste("shape  = ",c(0.5, 1,2)),
-       col = c("green","blue","red"), lty = 1:3, lwd = 2)
+       col = c("green","blue","orange"), lty = 1:3, lwd = 2)
 
 shape <- 1; x <- seq(0.0, 4, len = 401)
 plot(x, dbisa(x, shape = shape), type = "l", col = "blue", las = 1, ylab = "",
-     main = "Blue is density, red is cumulative distribution function",
+     main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1)
 abline(h = 0, col = "blue", lty = 2)
-lines(x, pbisa(x, shape = shape), col = "red")
+lines(x, pbisa(x, shape = shape), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qbisa(probs, shape = shape)
 lines(Q, dbisa(Q, shape = shape), col = "purple", lty = 3, type = "h")
-pbisa(Q, shape = shape) - probs # Should be all zero
+pbisa(Q, shape = shape) - probs  # Should be all zero
 abline(h = probs, col = "purple", lty = 3)
 lines(Q, pbisa(Q, shape), col = "purple", lty = 3, type = "h")
 }
diff --git a/man/bistudentt.Rd b/man/bistudentt.Rd
new file mode 100644
index 0000000..2d89492
--- /dev/null
+++ b/man/bistudentt.Rd
@@ -0,0 +1,125 @@
+\name{bistudentt}
+\alias{bistudentt}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bivariate Student-t Family Function }
+\description{
+  Estimate the degrees of freedom and correlation parameters of 
+  the (bivariate) Student-t
+  distribution by maximum likelihood estimation.
+
+}
+\usage{
+bistudentt(ldf = "loglog", lrho = "rhobit",
+           idf = NULL, irho = NULL, imethod = 1,
+           parallel = FALSE, zero = -1)
+}
+%- maybe also 'usage' for other objects documented here.
+%apply.parint = TRUE,
+\arguments{
+  \item{ldf, lrho, idf, irho, imethod}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+  See \code{\link{Links}} for more link function choices.
+
+
+  }
+  \item{parallel, zero}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+
+
+  }
+}
+\details{
+  The density function is
+  \deqn{f(y_1, y_2; \nu, \rho) =
+        \frac{1}{2\pi\sqrt{1-\rho^2}}
+        \left( 1 + (y_1^2 + y_2^2 - 2\rho y_1 y_2) / (\nu (1-\rho^2)) \right)^{-(\nu+2)/2} }{%
+        f(y1, y2; nu, rho) =
+        (1/(2*pi*sqrt(1-rho^2))) *
+        (1 + (y1^2 + y2^2 - 2*rho*y1*y2) / (nu*(1-rho^2)))^(-(nu+2)/2) }
+  for \eqn{-1 < \rho < 1}{-1 < rho < 1},
+  and real \eqn{y_1}{y1} and \eqn{y_2}{y2}.
+
+% The support of the function is the interior of the unit square;
+% however, values of 0 and/or 1 are not allowed.
+% The marginal distributions are the standard uniform distributions.
+% When \eqn{\rho = 0}{rho=0} the random variables are
+% independent.
+
+
+  This \pkg{VGAM} family function can handle multiple responses,
+  for example, a six-column matrix where the first 2 columns
+  are the first of three responses,
+  the next 2 columns being the next response, etc.
+
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}}
+  and \code{\link{vgam}}.
+
+
+}
+
+\references{
+
+Schepsmeier, U. and Stober, J. (2013)
+Derivatives and Fisher information of bivariate copulas.
+\emph{Statistical Papers}.
+
+
+}
+\author{ T. W. Yee,
+with help from Thibault Vatter.
+
+
+}
+\note{
+  The response matrix must have a multiple of two columns.
+  Currently, the fitted value is a matrix with the same number of
+  columns as the response, with all values equal to 0.0.
+
+}
+\section{Warning }{
+
+  The working weight matrices have not been fully checked.
+
+}
+
+\seealso{
+  \code{\link{dbistudentt}},
+  \code{\link{binormal}},
+  \code{\link[stats]{pt}}.
+
+
+}
+\examples{
+nn <- 1000
+mydof <- loglog(1, inverse = TRUE)
+ymat <- cbind(rt(nn, df = mydof), rt(nn, df = mydof))
+
+bdata <- data.frame(y1 = ymat[, 1],
+                    y2 = ymat[, 2],
+                    y3 = ymat[, 1],
+                    y4 = ymat[, 2],
+                    x2 = runif(nn))
+
+summary(bdata)
+\dontrun{ plot(ymat, col = "blue") }
+fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1,  # 2 responses, e.g., (y1,y2) is the first
+             fam = bistudentt,
+#            crit = "coef",  # Sometimes a good idea
+             data = bdata, trace = TRUE)
+
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+head(fitted(fit1))
+summary(fit1)
+}
+\keyword{models}
+\keyword{regression}
+
+%
+
+
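
As a sanity check of the bivariate Student-t density written out above (a
sketch only; the helper name bit.dens is not part of VGAM), the density can
be coded directly and numerically integrated, which should give roughly 1:

bit.dens <- function(y1, y2, df, rho)
  (1 / (2 * pi * sqrt(1 - rho^2))) *
  (1 + (y1^2 + y2^2 - 2 * rho * y1 * y2) / (df * (1 - rho^2)))^(-(df + 2) / 2)
yy <- seq(-30, 30, by = 0.1)
sum(outer(yy, yy, bit.dens, df = 4, rho = 0.7)) * 0.1^2  # Should be close to 1
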
diff --git a/man/bistudenttUC.Rd b/man/bistudenttUC.Rd
new file mode 100644
index 0000000..c7e081d
--- /dev/null
+++ b/man/bistudenttUC.Rd
@@ -0,0 +1,120 @@
+\name{Bistudentt}
+\alias{Bistudentt}
+\alias{dbistudentt}
+%\alias{rbistudentt}
+\title{Bivariate Student-t distribution cumulative distribution function}
+\description{
+  Density
+% cumulative distribution function
+% quantile function
+% and
+% random generation
+  for the bivariate Student-t distribution.
+
+}
+\usage{
+dbistudentt(x1, x2, df, rho = 0, log = FALSE)
+}
+\arguments{
+  \item{x1, x2}{vector of quantiles.}
+  \item{df, rho}{
+  vector of degrees of freedom and correlation parameter.
+  For \code{df}, a value of \code{Inf} does not currently work.
+
+% standard deviations and correlation parameter.
+
+  }
+% \item{n}{number of observations.
+% Same as \code{\link[stats]{rt}}.
+
+
+%  }
+
+  \item{log}{
+  Logical.
+  If \code{log = TRUE} then the logarithm of the density is returned.
+
+
+  }
+
+
+% \item{rho}{
+% See \code{\link{bistudenttal}}.
+% } 
+
+
+
+}
+\value{
+  \code{dbistudentt} gives the density.
+% \code{pnorm2} gives the cumulative distribution function,
+% \code{qnorm2} gives the quantile function, and
+% \code{rbistudentt} generates random deviates (\eqn{n} by 2 matrix).
+
+
+}
+% \author{ T. W. Yee }
+\details{
+
+% The default arguments correspond to the standard bivariate Student-t
+% distribution with correlation parameter \eqn{\rho = 0}{rho = 0}.
+% That is, two independent standard Student-t distributions.
+% Let \code{sd1} be \code{sqrt(var1)} and
+% written \eqn{\sigma_1}{sigma_1}, etc.
+% Then the general formula for the correlation coefficient is
+% \eqn{\rho = cov / (\sigma_1 \sigma_2)}{rho = cov / (sigma_1 * sigma_2)}
+% where \eqn{cov} is argument \code{cov12}.
+% Thus if arguments \code{var1} and \code{var2} are left alone then
+% \code{cov12} can be inputted with \eqn{\rho}{rho}.
+
+
+  One can think of this function as an extension of
+  \code{\link[stats]{dt}} to two dimensions.
+  See \code{\link{bistudentt}} for more information.
+
+
+}
+\references{
+
+Schepsmeier, U. and Stober, J. (2013)
+Derivatives and Fisher information of bivariate copulas.
+\emph{Statistical Papers}.
+
+
+}
+
+
+%\section{Warning}{
+%
+%
+%}
+
+
+
+
+%\note{
+%  For \code{rbistudentt()},
+%  if the \eqn{i}th variance-covariance matrix is not
+%  positive-definite then the \eqn{i}th row is all \code{NA}s.
+
+
+
+%}
+\seealso{
+  \code{\link{bistudentt}},
+  \code{\link[stats]{dt}}.
+
+
+}
+
+\examples{
+\dontrun{ N <- 101; x <- seq(-4, 4, len = N); Rho <- 0.7; mydf <- 10
+ox <- expand.grid(x, x)
+zedd <- dbistudentt(ox[, 1], ox[, 2], df = mydf, rho = Rho, log = TRUE)
+contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5)
+}
+}
+
+\keyword{distribution}
+
+
diff --git a/man/bivgamma.mckay.Rd b/man/bivgamma.mckay.Rd
index 3d8f874..8b96b2f 100644
--- a/man/bivgamma.mckay.Rd
+++ b/man/bivgamma.mckay.Rd
@@ -1,5 +1,5 @@
-\name{bivgamma.mckay}
-\alias{bivgamma.mckay}
+\name{bigamma.mckay}
+\alias{bigamma.mckay}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Bivariate Gamma: McKay's Distribution }
 \description{
@@ -8,9 +8,9 @@
 
 }
 \usage{
-bivgamma.mckay(lscale = "loge", lshape1 = "loge", lshape2 = "loge",
-               iscale = NULL, ishape1 = NULL, ishape2 = NULL,
-               imethod = 1, zero = 1)
+bigamma.mckay(lscale = "loge", lshape1 = "loge", lshape2 = "loge",
+              iscale = NULL, ishape1 = NULL, ishape2 = NULL,
+              imethod = 1, zero = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -91,6 +91,7 @@ Balakrishnan, N. and Lai, C.-D. (2009)
 2nd edition.
 New York: Springer.
 
+
 }
 \author{ T. W. Yee }
 \note{
@@ -116,13 +117,14 @@ New York: Springer.
 \seealso{
   \code{\link{gamma2}}.
 
+
 }
 \examples{
-shape1 <- exp(1); shape2 = exp(2); scalepar = exp(3)
+shape1 <- exp(1); shape2 <- exp(2); scalepar <- exp(3)
 mdata <- data.frame(y1 = rgamma(nn <- 1000, shape = shape1, scale = scalepar))
 mdata <- transform(mdata, zedd = rgamma(nn, shape = shape2, scale = scalepar))
 mdata <- transform(mdata, y2 = y1 + zedd)  # Z is defined as Y2-y1|Y1=y1
-fit <- vglm(cbind(y1, y2) ~ 1, bivgamma.mckay, mdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, bigamma.mckay, mdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 vcov(fit)
diff --git a/man/brat.Rd b/man/brat.Rd
index fdf87d3..12f5c7a 100644
--- a/man/brat.Rd
+++ b/man/brat.Rd
@@ -145,11 +145,11 @@ dimnames(mat) <- list(winner = journal, loser = journal)
 fit <- vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE)
 fit <- vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE, crit = "coef")
 summary(fit)
-c(0, coef(fit)) # Log-abilities (in order of "journal")
-c(1, Coef(fit)) # Abilities (in order of "journal")
+c(0, coef(fit))  # Log-abilities (in order of "journal")
+c(1, Coef(fit))  # Abilities (in order of "journal")
 fitted(fit)     # Probabilities of winning in awkward form
-(check <- InverseBrat(fitted(fit))) # Probabilities of winning 
-check + t(check) # Should be 1's in the off-diagonals 
+(check <- InverseBrat(fitted(fit)))  # Probabilities of winning 
+check + t(check)  # Should be 1's in the off-diagonals 
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/bratUC.Rd b/man/bratUC.Rd
index f569270..89f2f80 100644
--- a/man/bratUC.Rd
+++ b/man/bratUC.Rd
@@ -9,7 +9,7 @@
 
 }
 \usage{
-Brat(mat, ties = 0 * mat, string = c(">","=="), whitespace = FALSE)
+Brat(mat, ties = 0 * mat, string = c(">", "=="), whitespace = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -96,12 +96,12 @@ Agresti, A. (2002)
 
 }
 \examples{
-journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
-mat = matrix(c( NA, 33, 320, 284,   730, NA, 813, 276,
-             498, 68,  NA, 325,   221, 17, 142, NA), 4, 4)
-dimnames(mat) = list(winner = journal, loser = journal)
-Brat(mat) # Less readable
-Brat(mat, whitespace = TRUE) # More readable
+journal <- c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
+mat <- matrix(c( NA, 33, 320, 284,   730, NA, 813, 276,
+                498, 68,  NA, 325,   221, 17, 142, NA), 4, 4)
+dimnames(mat) <- list(winner = journal, loser = journal)
+Brat(mat)  # Less readable
+Brat(mat, whitespace = TRUE)  # More readable
 vglm(Brat(mat, whitespace = TRUE) ~ 1, brat, trace = TRUE)
 }
 \keyword{models}
diff --git a/man/bratt.Rd b/man/bratt.Rd
index 79f559e..34d5b6b 100644
--- a/man/bratt.Rd
+++ b/man/bratt.Rd
@@ -141,26 +141,26 @@ mat <- matrix(c( NA, 33, 320, 284,
 dimnames(mat) <- list(winner = journal, loser = journal)
 
 # Add some ties. This is fictitional data.
-ties = 5 + 0*mat
-ties[2,1] = ties[1,2] = 9
+ties <- 5 + 0 * mat
+ties[2, 1] <- ties[1, 2] <- 9
 
 # Now fit the model
 fit <- vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE)
 fit <- vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE, crit = "coef")
 
 summary(fit)
-c(0, coef(fit)) # Log-abilities (in order of "journal"); last is log(alpha0)
-c(1, Coef(fit)) # Abilities (in order of "journal"); last is alpha0
+c(0, coef(fit))  # Log-abilities (in order of "journal"); last is log(alpha0)
+c(1, Coef(fit))  #     Abilities (in order of "journal"); last is alpha0
 
-fit at misc$alpha # alpha_1,...,alpha_M
-fit at misc$alpha0 # alpha_0
+fit at misc$alpha   # alpha_1,...,alpha_M
+fit at misc$alpha0  # alpha_0
 
-fitted(fit) # Probabilities of winning and tying, in awkward form
+fitted(fit)  # Probabilities of winning and tying, in awkward form
 predict(fit)
-(check <- InverseBrat(fitted(fit)))   # Probabilities of winning 
-qprob <- attr(fitted(fit), "probtie") # Probabilities of a tie 
-qprobmat <- InverseBrat(c(qprob), NCo=nrow(ties)) # Probabilities of a tie 
-check + t(check) + qprobmat # Should be 1's in the off-diagonals 
+(check <- InverseBrat(fitted(fit)))    # Probabilities of winning 
+qprob <- attr(fitted(fit), "probtie")  # Probabilities of a tie 
+qprobmat <- InverseBrat(c(qprob), NCo = nrow(ties))  # Probabilities of a tie
+check + t(check) + qprobmat  # Should be 1s in the off-diagonals 
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/calibrate.Rd b/man/calibrate.Rd
index 6416a69..25c44ca 100644
--- a/man/calibrate.Rd
+++ b/man/calibrate.Rd
@@ -71,7 +71,7 @@ calibrate(object, ...)
 
 \examples{
 \dontrun{
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 set.seed(123)
 p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
           WaterCon + BareSand + FallTwig + 
@@ -81,13 +81,14 @@ p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
           Bestof = 3, Crow1positive = TRUE)
 
 siteNos <- 1:2  # Calibrate these sites
-cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos,]), trace = TRUE)
+cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
 
 # Graphically compare the actual site scores with their calibrated values
 persp(p1, main = "Solid=actual, dashed=calibrated site scores",
       label = TRUE, col = "blue", las = 1)
-abline(v = lv(p1)[siteNos], lty = 1, col = 1:length(siteNos)) # actual site scores
-abline(v = cp1, lty = 2, col = 1:length(siteNos)) # calibrated values
+# Actual site scores:
+abline(v = latvar(p1)[siteNos], lty = 1, col = 1:length(siteNos))
+abline(v = cp1, lty = 2, col = 1:length(siteNos))  # Calibrated values
 }
 }
 \keyword{models}
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
index 012dda2..c8c0aa2 100644
--- a/man/calibrate.qrrvglm.Rd
+++ b/man/calibrate.qrrvglm.Rd
@@ -1,7 +1,7 @@
 \name{calibrate.qrrvglm}
 \alias{calibrate.qrrvglm}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Calibration for CQO, UQO and CAO models }
+\title{ Calibration for CQO and CAO models }
 \description{
   Performs maximum likelihood calibration for constrained and
   unconstrained quadratic and additive ordination models (CQO and CAO
@@ -10,7 +10,7 @@
 }
 \usage{
 calibrate.qrrvglm(object, newdata = NULL,
-        type = c("lv", "predictors", "response", "vcov", "all3or4"),
+        type = c("latvar", "predictors", "response", "vcov", "all3or4"),
         initial.vals = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -84,7 +84,7 @@ calibrate.qrrvglm(object, newdata = NULL,
   Each component has length \code{nrow(newdata)}.
 
 
-  \item{lv}{Calibrated latent variables or site scores.
+  \item{latvar}{Calibrated latent variables or site scores.
 
 
   }
@@ -124,10 +124,14 @@ Cambridge.
 }
 \author{T. W. Yee}
 \note{
-  Despite the name of this function, UQO and CAO models are handled 
+  Despite the name of this function, CAO models are handled 
   as well.
 
 
+
+% Despite the name of this function, UQO and CAO models are handled 
+
+
 }
 \section{Warning }{
   This function is computationally expensive.
@@ -140,14 +144,14 @@ Cambridge.
   \code{\link{calibrate.qrrvglm.control}},
   \code{\link{calibrate}},
   \code{\link{cqo}},
-  \code{\link{uqo}},
   \code{\link{cao}}.
+% \code{\link{uqo}},
 
 
 }
 \examples{
 \dontrun{
-hspider[,1:6] <- scale(hspider[, 1:6]) # Standardize the environmental variables
+hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardize the environmental variables
 set.seed(123)
 p1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
          WaterCon + BareSand + FallTwig +
@@ -155,7 +159,7 @@ p1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
          family = poissonff, data = hspider, Rank = 1,
          IToler = TRUE, Crow1positive = TRUE)
 
-siteNos <- 1:2  # Calibrate these sites
+siteNos <- 3:4  # Calibrate these sites
 cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
 }
 
@@ -163,8 +167,9 @@ cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
 # Graphically compare the actual site scores with their calibrated values
 persp(p1, main = "Site scores: solid=actual, dashed=calibrated",
       label = TRUE, col = "blue", las = 1)
-abline(v = lv(p1)[siteNos], lty = 1, col = 1:length(siteNos)) # actual site scores
-abline(v = cp1, lty = 2, col = 1:length(siteNos)) # calibrated values
+# Actual site scores:
+abline(v = latvar(p1)[siteNos], lty = 1, col = 1:length(siteNos))
+abline(v = cp1, lty = 2, col = 1:length(siteNos))  # Calibrated values
 }
 }
 \keyword{models}
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index 4c253b2..3a42df2 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -1,7 +1,7 @@
 \name{calibrate.qrrvglm.control}
 \alias{calibrate.qrrvglm.control}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Control function for CQO/UQO/CAO calibration }
+\title{ Control function for CQO/CAO calibration }
 \description{
   Algorithmic constants and parameters for running
   \code{\link{calibrate.qrrvglm}} are set using this function.
@@ -9,12 +9,16 @@
 }
 \usage{
 calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
-    gridSize = if (Rank == 1) 9 else 5, varlvI = FALSE, ...)
+    gridSize = if (Rank == 1) 9 else 5, varI.latvar = FALSE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{object}{
-    The fitted CQO/UQO/CAO model. The user should ignore this argument.
+    The fitted CQO/CAO model. The user should ignore this argument.
+
+
+%   The fitted CQO/UQO/CAO model. The user should ignore this argument.
+
 
   }
   \item{trace}{ 
@@ -22,11 +26,13 @@ calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
     is a good idea to set this argument to be \code{TRUE} since the
     computations are expensive.
 
+
   }
 \item{Method.optim}{ 
   Character. Fed into the \code{method} argument of
   \code{\link[stats]{optim}}.
 
+
   }
 \item{gridSize}{ 
   Numeric, recycled to length \code{Rank}.  Controls the resolution
@@ -38,15 +44,18 @@ calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
   the chance of obtaining the global solution, however, the computing
   time increases proportionately.
 
+
   }
-  \item{varlvI}{ 
+  \item{varI.latvar}{ 
   Logical. For CQO objects only, this argument is fed into
   \code{\link{Coef.qrrvglm}}.
 
-    }
+
+  }
   \item{\dots}{ 
   Avoids an error message for extraneous arguments.
 
+
   }
 }
 \details{
@@ -55,17 +64,19 @@ calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
   call to \code{\link{calibrate.qrrvglm}}, not this function
   directly.
 
+
   }
 \value{
  A list with the following components.
-  \item{trace }{Numeric (even though the input can be logical). }
-  \item{gridSize }{Positive integer. }
-  \item{varlvI }{Logical.}
+  \item{trace}{Numeric (even though the input can be logical). }
+  \item{gridSize}{Positive integer. }
+  \item{varI.latvar}{Logical.}
+
 
 }
 \references{
 
-Yee, T. W. (2012)
+Yee, T. W. (2013)
 On constrained and unconstrained quadratic ordination.
 \emph{Manuscript in preparation}.
 
@@ -73,11 +84,16 @@ On constrained and unconstrained quadratic ordination.
 }
 \author{T. W. Yee}
 \note{
-  Despite the name of this function, UQO and CAO models are handled
+  Despite the name of this function, CAO models are handled
   as well.
 
+
+% Despite the name of this function, UQO and CAO models are handled
+
 }
 
+
+
 \seealso{
   \code{\link{calibrate.qrrvglm}},
   \code{\link{Coef.qrrvglm}}.
@@ -85,16 +101,16 @@ On constrained and unconstrained quadratic ordination.
 
 }
 \examples{
-\dontrun{ hspider[,1:6] <- scale(hspider[,1:6]) # Needed when ITol = TRUE
+\dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6])  # Needed when ITol = TRUE
 set.seed(123)
 p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr, 
                 Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig +
           CoveMoss + CoveHerb + ReflLux,
           family = poissonff, data = hspider, ITol = TRUE)
-sort(p1 at misc$deviance.Bestof) # A history of all the iterations
+sort(p1 at misc$deviance.Bestof)  # A history of all the iterations
 
-siteNos <- 1:2  # Calibrate these sites
+siteNos <- 3:4  # Calibrate these sites
 cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
 }
 
@@ -102,8 +118,9 @@ cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
 # Graphically compare the actual site scores with their calibrated values
 persp(p1, main = "Site scores: solid=actual, dashed=calibrated",
       label = TRUE, col = "blue", las = 1)
-abline(v = lv(p1)[siteNos], lty = 1, col = 1:length(siteNos)) # actual site scores
-abline(v = cp1, lty = 2, col = 1:length(siteNos)) # calibrated values
+abline(v = latvar(p1)[siteNos], lty = 1,
+       col = 1:length(siteNos))  # Actual site scores
+abline(v = cp1, lty = 2, col = 1:length(siteNos))  # Calibrated values
 }
 }
 \keyword{models}
diff --git a/man/cao.Rd b/man/cao.Rd
index a665c1b..8d13d63 100644
--- a/man/cao.Rd
+++ b/man/cao.Rd
@@ -29,6 +29,7 @@ cao(formula, family, data = list(),
     of the formula contains the response variables, which should be a
     matrix with each column being a response (species).
 
+
   }
   \item{family}{ 
   a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
@@ -38,6 +39,7 @@ cao(formula, family, data = list(),
   type of function.
     See \code{\link{cqo}} for a list of those presently implemented.
 
+
   }
 
   \item{data}{
@@ -45,17 +47,20 @@ cao(formula, family, data = list(),
     By default the variables are taken from \code{environment(formula)},
     typically the environment from which \code{cao} is called.
 
+
   }
   \item{weights}{ 
     an optional vector or matrix of (prior) weights to be used in the
     fitting process.  For \code{cao}, this argument currently should
     not be used.
 
+
   }
   \item{subset}{ 
     an optional logical vector specifying a subset of observations to
     be used in the fitting process.
 
+
   }
   \item{na.action}{ 
     a function which indicates what should happen when the data contain
@@ -63,34 +68,40 @@ cao(formula, family, data = list(),
     \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
     The ``factory-fresh'' default is \code{na.omit}.
 
+
   }
   \item{etastart}{ 
     starting values for the linear predictors.  It is a \eqn{M}-column
     matrix. If \eqn{M=1} then it may be a vector.  For \code{cao},
     this argument currently should not be used.
 
+
     }
   \item{mustart}{ 
     starting values for the fitted values. It can be a vector or a
     matrix.  Some family functions do not make use of this argument.
     For \code{cao}, this argument currently should not be used.
 
+
   }
   \item{coefstart}{ 
     starting values for the coefficient vector.  For \code{cao}, this
     argument currently should not be used.
 
+
   }
   \item{control}{ 
     a list of parameters for controlling the fitting process.
     See \code{\link{cao.control}} for details.
 
+
   }
   \item{offset}{ 
     a vector or \eqn{M}-column matrix of offset values.  These are
     \emph{a priori} known and are added to the linear predictors during
     fitting.  For \code{cao}, this argument currently should not be used.
 
+
   }
   \item{method}{
     the method to be used in fitting the model.  The default
@@ -98,11 +109,13 @@ cao(formula, family, data = list(),
     reweighted least squares (IRLS) within FORTRAN code called from
     \code{\link[stats]{optim}}.
 
+
   }
   \item{model}{ 
     a logical value indicating whether the \emph{model frame} should be
     assigned in the \code{model} slot.
 
+
   }
   \item{x.arg, y.arg}{ 
     logical values indicating whether the model matrix and response
@@ -110,11 +123,13 @@ cao(formula, family, data = list(),
     \code{x} and \code{y} slots.  Note the model matrix is the linear
     model (LM) matrix.
 
+
   }
   \item{contrasts}{ 
     an optional list. See the \code{contrasts.arg} of
     \code{\link{model.matrix.default}}.
 
+
   }
   \item{constraints}{ 
     an optional list  of constraint matrices.  For \code{cao}, this
@@ -127,25 +142,30 @@ cao(formula, family, data = list(),
     is used it must contain \emph{all} the terms; an incomplete list is
     not accepted.
 
+
   }
   \item{extra}{ 
     an optional list with any extra information that might be needed by
     the family function.  For \code{cao}, this argument currently should
     not be used.
 
+
   }
   \item{qr.arg}{ 
     For \code{cao}, this argument currently should not be used.
 
+
   }
   \item{smart}{ 
     logical value indicating whether smart prediction
     (\code{\link{smartpred}}) will be used.
 
+
   }
   \item{\dots}{ 
     further arguments passed into \code{\link{cao.control}}.
 
+
   }
 }
 \details{
@@ -225,9 +245,10 @@ cao(formula, family, data = list(),
   An object of class \code{"cao"}
   (this may change to \code{"rrvgam"} in the future).
   Several generic functions can be applied to the object, e.g.,
-  \code{\link{Coef}}, \code{\link{ccoef}}, \code{\link{lvplot}},
+  \code{\link{Coef}}, \code{\link{concoef}}, \code{\link{lvplot}},
   \code{\link{summary}}.
 
+
 }
 
 \references{
@@ -242,9 +263,9 @@ Constrained additive ordination.
 }   
 \author{T. W. Yee}
 \note{
-  CAO models are computationally expensive, therefore setting \code{trace
-  = TRUE} is a good idea, as well as running it on a simple random sample
-  of the data set instead.
+  CAO models are computationally expensive, so
+  setting \code{trace = TRUE} is a good idea, as is fitting the model
+  to a simple random sample of the data set instead.
 
 
   Sometimes the IRLS algorithm does not converge within the FORTRAN
@@ -289,10 +310,9 @@ Constrained additive ordination.
   \code{\link{cao.control}},
   \code{Coef.cao},
   \code{\link{cqo}},
-  \code{\link{lv}},
+  \code{\link{latvar}},
   \code{\link{Opt}},
   \code{\link{Max}},
-  \code{\link{lv}},
   \code{persp.cao},
   \code{\link{poissonff}},
   \code{\link{binomialff}},
@@ -306,31 +326,30 @@ Constrained additive ordination.
 
 \examples{
 \dontrun{
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
-set.seed(149) # For reproducible results 
+hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardized environmental vars
+set.seed(149)  # For reproducible results 
 ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
-           WaterCon + BareSand + FallTwig +
-           CoveMoss + CoveHerb + ReflLux,
+           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
            family = poissonff, data = hspider, Rank = 1,
-           df1.nl = c(Pardpull=2.7, 2.5),
+           df1.nl = c(Pardpull = 2.7, 2.5),
            Bestof = 7, Crow1positive = FALSE)
-sort(ap1 at misc$deviance.Bestof) # A history of all the iterations
+sort(ap1 at misc$deviance.Bestof)  # A history of all the iterations
 
 Coef(ap1)
-ccoef(ap1)
+concoef(ap1)
 
 par(mfrow = c(2, 2))
-plot(ap1)   # All the curves are unimodal; some quite symmetric
+plot(ap1)  # All the curves are unimodal; some quite symmetric
 
 par(mfrow = c(1, 1), las = 1)
 index <- 1:ncol(depvar(ap1))
 lvplot(ap1, lcol = index, pcol = index, y = TRUE)
 
 trplot(ap1, label = TRUE, col = index)
-abline(a=0, b = 1, lty = 2)
+abline(a = 0, b = 1, lty = 2)
 
-trplot(ap1, label = TRUE, col = "blue", log = "xy", whichSp = c(1,3))
-abline(a=0, b = 1, lty = 2)
+trplot(ap1, label = TRUE, col = "blue", log = "xy", which.sp = c(1, 3))
+abline(a = 0, b = 1, lty = 2)
 
 persp(ap1, col = index, lwd = 2, label = TRUE)
 abline(v = Opt(ap1), lty = 2, col = index)
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
index 9c06465..c816b58 100644
--- a/man/cao.control.Rd
+++ b/man/cao.control.Rd
@@ -17,8 +17,9 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
             SmallNo = 5.0e-13, Use.Init.Poisson.QO = TRUE,
             Bestof = if (length(Cinit)) 1 else 10, maxitl = 10,
             imethod = 1, bf.epsilon = 1.0e-7, bf.maxit = 10,
-            Maxit.optim = 250, optim.maxit = 20, SD.sitescores = 1.0,
-            SD.Cinit = 0.02, trace = TRUE, df1.nl = 2.5, df2.nl = 2.5,
+            Maxit.optim = 250, optim.maxit = 20, sd.sitescores = 1.0,
+            sd.Cinit = 0.02, suppress.warnings = TRUE,
+            trace = TRUE, df1.nl = 2.5, df2.nl = 2.5,
             spar1 = 0, spar2 = 0, ...)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -30,43 +31,46 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 
   }
   \item{all.knots}{
-    Logical indicating if all distinct points of the smoothing variables
-    are to be used as knots.  Assigning the value \code{FALSE} means
-    fewer knots are chosen when the number of distinct points is large,
-    meaning less computational expense. See \code{\link{vgam.control}}
-    for details.
+    Logical indicating if all distinct points of the smoothing
+    variables are to be used as knots.  Assigning the value
+    \code{FALSE} means fewer knots are chosen when the number
+    of distinct points is large, meaning less computational
+    expense. See \code{\link{vgam.control}} for details.
 
   }
   \item{criterion}{ 
-    Convergence criterion. Currently, only one is supported: the deviance
-    is minimized.
+    Convergence criterion. Currently, only one is supported:
+    the deviance is minimized.
 
   }
   \item{Cinit}{
-    Optional initial \bold{C} matrix which may speed up convergence.
+    Optional initial \bold{C} matrix which may speed up
+    convergence.
 
   }
   \item{Crow1positive}{ 
-    Logical vector of length \code{Rank} (recycled if necessary): are
-    the elements of the first row of \bold{C} positive?  For example,
-    if \code{Rank} is 4, then specifying \code{Crow1positive = c(FALSE,
-    TRUE)} will force \bold{C[1,1]} and \bold{C[1,3]} to be negative,
-    and \bold{C[1,2]} and \bold{C[1,4]} to be positive.
+    Logical vector of length \code{Rank} (recycled if
+    necessary): are the elements of the first row of \bold{C}
+    positive?  For example, if \code{Rank} is 4, then specifying
+    \code{Crow1positive = c(FALSE, TRUE)} will force \bold{C[1,1]}
+    and \bold{C[1,3]} to be negative, and \bold{C[1,2]} and
+    \bold{C[1,4]} to be positive.
 
   }
     \item{epsilon}{
-    Positive numeric. Used to test for convergence for GLMs fitted in
-    FORTRAN.  Larger values mean a loosening of the convergence criterion.
+    Positive numeric. Used to test for convergence for GLMs
+    fitted in FORTRAN.  Larger values mean a loosening of the
+    convergence criterion.
 
 %   Used only if \code{FastAlgorithm} is \code{TRUE}.
 
     }
   \item{Etamat.colmax}{
-    Positive integer, no smaller than \code{Rank}.  Controls the amount
-    of memory used by \code{.Init.Poisson.QO()}.  It is the maximum
-    number of columns allowed for the pseudo-response and its weights.
-    In general, the larger the value, the better the initial value.
-    Used only if \code{Use.Init.Poisson.QO = TRUE}.
+    Positive integer, no smaller than \code{Rank}.  Controls the
+    amount of memory used by \code{.Init.Poisson.QO()}.  It is the
+    maximum number of columns allowed for the pseudo-response and
+    its weights.  In general, the larger the value, the better the
+    initial value.  Used only if \code{Use.Init.Poisson.QO = TRUE}.
 
   }
 
@@ -80,8 +84,10 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 \item{GradientFunction}{
   Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr}
   is used or not, i.e., to compute gradient values.  Used only if
-  \code{FastAlgorithm} is \code{TRUE}.  Currently, this argument must
-  be set to \code{FALSE}.
+  \code{FastAlgorithm} is \code{TRUE}.  Currently, this argument
+  must be set to \code{FALSE}.
+
+
 
 }
   \item{iKvector, iShape}{
@@ -105,10 +111,11 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 %}
 
   \item{noRRR}{
-    Formula giving terms that are \emph{not} to be included in the
-    reduced-rank regression (or formation of the latent variables).
-    The default is to omit the intercept term from the latent variables.
-    Currently, only \code{noRRR = ~ 1} is implemented.
+  Formula giving terms that are \emph{not} to be included in the
+  reduced-rank regression (or formation of the latent variables).
+  The default is to omit the intercept term from the latent
+  variables.  Currently, only \code{noRRR = ~ 1} is implemented.
+
 
   }
   \item{Norrr}{
@@ -131,16 +138,21 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 % }
 
   \item{SmallNo}{
-   Positive numeric between \code{.Machine$double.eps} and \code{0.0001}.
-   Used to avoid under- or over-flow in the IRLS algorithm.
+   Positive numeric between \code{.Machine$double.eps} and
+   \code{0.0001}.  Used to avoid under- or over-flow in the
+   IRLS algorithm.
+
 
 %  Used only if \code{FastAlgorithm} is \code{TRUE}.
 
+
   }
   \item{Use.Init.Poisson.QO }{
-    Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO} is
-    used to obtain initial values for the canonical coefficients \bold{C}.
-    If \code{FALSE} then random numbers are used instead.
+  Logical. If \code{TRUE} then the function
+  \code{.Init.Poisson.QO} is used to obtain initial values
+  for the canonical coefficients \bold{C}.  If \code{FALSE}
+  then random numbers are used instead.
+
 
   }
 
@@ -153,11 +165,13 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
     The default is only a convenient minimal number and users are urged
     to increase this value.
 
+
   }
   \item{maxitl}{ 
     Positive integer. Maximum number of
     Newton-Raphson/Fisher-scoring/local-scoring iterations allowed.
 
+
   }
   \item{imethod}{
   See \code{\link{qrrvglm.control}}.
@@ -192,20 +206,26 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 %   Currently this argument must have the value \code{FALSE}.
 
 % }
-  \item{SD.sitescores}{ 
+  \item{sd.sitescores}{ 
     Numeric. Standard deviation of the
     initial values of the site scores, which are generated from
     a normal distribution.
     Used when \code{Use.Init.Poisson.QO} is \code{FALSE}.
 
+
     }
-  \item{SD.Cinit}{ 
+  \item{sd.Cinit}{ 
     Standard deviation of the initial values for the elements
     of \bold{C}.
     These are normally distributed with mean zero.
     This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
 
   }   
+  \item{suppress.warnings}{ 
+    Logical. Suppress warnings?
+ 
+
+  }   
   \item{trace}{ 
     Logical indicating if output should be produced for each
     iteration. Having the value \code{TRUE} is a good idea for large
@@ -290,7 +310,7 @@ London: Chapman & Hall.
 }
 
 \examples{\dontrun{
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 set.seed(123)
 ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
            WaterCon + BareSand + FallTwig +
@@ -298,7 +318,7 @@ ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
            family = poissonff, data = hspider,
            df1.nl = c(Zoraspin = 2.3, 2.1),
            Bestof = 10, Crow1positive = FALSE)
-sort(ap1 at misc$deviance.Bestof) # A history of all the iterations
+sort(ap1 at misc$deviance.Bestof)  # A history of all the iterations
 
 Coef(ap1)
 
@@ -306,7 +326,7 @@ par(mfrow = c(2, 3))  # All or most of the curves are unimodal; some are
 plot(ap1, lcol = "blue")  # quite symmetric. Hence a CQO model should be ok
 
 par(mfrow = c(1, 1), las = 1)
-index = 1:ncol(depvar(ap1))  # lvplot is jagged because only 28 sites
+index <- 1:ncol(depvar(ap1))  # lvplot is jagged because only 28 sites
 lvplot(ap1, lcol = index, pcol = index, y = TRUE)
 
 trplot(ap1, label = TRUE, col = index)
@@ -339,6 +359,6 @@ persp(ap1, label = TRUE, col = 1:4)
 %            bf.epsilon = 1.0e-7, bf.maxit = 40,
 %            Maxit.optim = 250, optim.maxit = 20,
 %%           se.fit = FALSE, 
-%            SD.sitescores = 1,
-%            SD.Cinit = 0.02, trace = TRUE,
+%            sd.sitescores = 1,
+%            sd.Cinit = 0.02, trace = TRUE,
 %%            df1.nl = 2.5, spar1 = 0, ...)
diff --git a/man/cardUC.Rd b/man/cardUC.Rd
index 9d2f938..d3fafc6 100644
--- a/man/cardUC.Rd
+++ b/man/cardUC.Rd
@@ -25,6 +25,7 @@ rcard(n, mu, rho, ...)
   \item{mu, rho}{
   See \code{\link{cardioid}} for more information.
 
+
   }
   \item{tolerance, maxits, ...}{
   The first two are control parameters for the algorithm
@@ -34,10 +35,12 @@ rcard(n, mu, rho, ...)
   \code{rcard} calls \code{qcard} so the \code{...} can be used
   to vary the two arguments.
 
+
   }
   \item{log}{
   Logical.
-  If \code{log=TRUE} then the logarithm of the density is returned.
+  If \code{log = TRUE} then the logarithm of the density is returned.
+
 
   }
 
@@ -47,40 +50,44 @@ rcard(n, mu, rho, ...)
   for estimating the two parameters by maximum likelihood estimation,
   for the formula of the probability density function and other details.
 
+
 }
 \value{
   \code{dcard} gives the density,
   \code{pcard} gives the distribution function,
   \code{qcard} gives the quantile function, and
   \code{rcard} generates random deviates.
+
+
 }
 %\references{ }
 \author{ Thomas W. Yee }
 \note{ 
-    Convergence problems might occur with \code{rcard}.
+  Convergence problems might occur with \code{rcard}.
+
 
 }
 
 \seealso{ 
-    \code{\link{cardioid}}.
+  \code{\link{cardioid}}.
+
 
 }
 \examples{
 \dontrun{
-mu = 4; rho = 0.4
-x = seq(0, 2*pi, len=501)
-plot(x, dcard(x, mu, rho), type="l", las=1, ylim=c(0,1), col="blue",
-     ylab=paste("[dp]card(mu=", mu, ", rho=", rho, ")"),
-     main="Blue is density, red is cumulative distribution function",
-     sub="Purple lines are the 10,20,...,90 percentiles")
-lines(x, pcard(x, mu, rho), col="red")
-
-probs = seq(0.1, 0.9, by=0.1)
-Q = qcard(probs, mu, rho)
-lines(Q, dcard(Q, mu, rho), col="purple", lty=3, type="h")
-lines(Q, pcard(Q, mu, rho), col="purple", lty=3, type="h")
-abline(h=c(0,probs,1), v=c(0,2*pi), col="purple", lty=3)
-max(abs(pcard(Q, mu, rho) - probs)) # Should be 0
+mu <- 4; rho <- 0.4; x <- seq(0, 2*pi, len = 501)
+plot(x, dcard(x, mu, rho), type = "l", las = 1, ylim = c(0, 1), col = "blue",
+     ylab = paste("[dp]card(mu=", mu, ", rho=", rho, ")"),
+     main = "Blue is density, orange is cumulative distribution function",
+     sub = "Purple lines are the 10,20,...,90 percentiles")
+lines(x, pcard(x, mu, rho), col = "orange")
+
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qcard(probs, mu, rho)
+lines(Q, dcard(Q, mu, rho), col = "purple", lty = 3, type = "h")
+lines(Q, pcard(Q, mu, rho), col = "purple", lty = 3, type = "h")
+abline(h = c(0, probs, 1), v = c(0, 2*pi), col = "purple", lty = 3)
+max(abs(pcard(Q, mu, rho) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index 2c55a7a..cf4f833 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -18,16 +18,19 @@ cardioid(lmu = elogit(min = 0, max = 2*pi),
   and \eqn{\rho}{rho} parameters, respectively.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imu, irho}{
   Initial values.
   A \code{NULL} means an initial value is chosen internally.
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
   \item{nsimEIM, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 
 }
@@ -72,6 +75,7 @@ Singapore: World Scientific.
 \note{
  Fisher scoring using simulation is used.
 
+
 }
 \section{Warning }{
   Numerically, this distribution can be difficult to fit because of a
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
index 1169e68..b7565a2 100644
--- a/man/cauchit.Rd
+++ b/man/cauchit.Rd
@@ -85,7 +85,7 @@ cauchit(theta, bvalue = .Machine$double.eps,
 \examples{
 p <- seq(0.01, 0.99, by=0.01)
 cauchit(p)
-max(abs(cauchit(cauchit(p), inverse = TRUE) - p)) # Should be 0
+max(abs(cauchit(cauchit(p), inverse = TRUE) - p))  # Should be 0
 
 p <- c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by = 0.01))
 cauchit(p)  # Has no NAs
@@ -95,7 +95,7 @@ par(mfrow = c(2, 2), lwd = (mylwd <- 2))
 y <- seq(-4, 4, length = 100)
 p <- seq(0.01, 0.99, by = 0.01)
 
-for(d in 0:1) {
+for (d in 0:1) {
   matplot(p, cbind(logit(p, deriv = d), probit(p, deriv = d)),
           type = "n", col = "purple", ylab = "transformation",
           las = 1, main = if (d == 0) "Some probability link functions"
@@ -112,7 +112,7 @@ for(d in 0:1) {
     abline(v = 0.5, lty = "dashed")
 }
 
-for(d in 0) {
+for (d in 0) {
   matplot(y, cbind( logit(y, deriv = d, inverse = TRUE),
                    probit(y, deriv = d, inverse = TRUE)),
           type  = "n", col = "purple", xlab = "transformation", ylab = "p",
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index fe99e07..102755b 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -92,9 +92,9 @@ cauchy1(scale.arg = 1, llocation = "identity",
 
 \references{ 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 Barnett, V. D. (1966)
@@ -144,7 +144,7 @@ cdata1 <- transform(cdata1, loc = exp(1+0.5*x), scale = exp(1))
 cdata1 <- transform(cdata1, y = rcauchy(nn, loc, scale))
 fit <- vglm(y ~ x, cauchy(lloc = "loge"), cdata1, trace = TRUE)
 coef(fit, matrix = TRUE)
-head(fitted(fit)) # Location estimates
+head(fitted(fit))  # Location estimates
 summary(fit)
 
 # Location parameter unknown
diff --git a/man/cdf.lmscreg.Rd b/man/cdf.lmscreg.Rd
index 63e9a07..2920eff 100644
--- a/man/cdf.lmscreg.Rd
+++ b/man/cdf.lmscreg.Rd
@@ -81,7 +81,7 @@ The CDF values of the model have been placed in
 \examples{
 fit <- vgam(BMI ~ s(age, df=c(4, 2)), lms.bcn(zero = 1), data = bmi.nz)
 head(fit at post$cdf)
-head(cdf(fit)) # Same 
+head(cdf(fit))  # Same 
 head(depvar(fit))
 head(fitted(fit))
 
diff --git a/man/cennormal1.Rd b/man/cennormal.Rd
similarity index 71%
rename from man/cennormal1.Rd
rename to man/cennormal.Rd
index 153f3e0..75d4e09 100644
--- a/man/cennormal1.Rd
+++ b/man/cennormal.Rd
@@ -1,4 +1,6 @@
-\name{cennormal1}
+\name{cennormal}
+\alias{cennormal}
+% 20131111: just for \pkg{cg}:
 \alias{cennormal1}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Censored Normal Distribution }
@@ -6,9 +8,10 @@
   Maximum likelihood estimation for the normal distribution with
   left and right censoring.
 
+
 }
 \usage{
-cennormal1(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
+cennormal(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -19,11 +22,13 @@ cennormal1(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
   The standard deviation is a positive quantity, therefore a log link 
   is the default.
 
+
   }
   \item{imethod}{
   Initialization method. Either 1 or 2, this specifies
   two methods for obtaining initial values for the parameters.
 
+
   }
   \item{zero}{
   An integer vector, containing the value 1 or 2. If so,
@@ -32,10 +37,11 @@ cennormal1(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
   Setting \code{zero = NULL} means both linear/additive predictors
   are modelled as functions of the explanatory variables.
 
+
   }
 }
 \details{
-  This function is like \code{\link{normal1}} but handles observations
+  This function is like \code{\link{uninormal}} but handles observations
   that are left-censored (so that the true value would be less than
  the observed value) or right-censored (so that the true value would be
   greater than the observed value). To indicate which type of censoring,
@@ -54,6 +60,7 @@ cennormal1(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
   The object is used by modelling functions such as \code{\link{vglm}},
   and \code{\link{vgam}}.
 
+
 }
 %\references{
 
@@ -64,35 +71,41 @@ cennormal1(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
   This function is an alternative to \code{\link{tobit}}
   but cannot handle a matrix response
   and uses different working weights.
-  If there are no censored observations then \code{\link{normal1}}
+  If there are no censored observations then \code{\link{uninormal}}
   is recommended instead.
 
 
+
+% Function \code{\link{cennormal1}} will be deprecated soon.
+% It is exactly the same as \code{\link{cennormal}}.
+
+
 }
 \seealso{
-    \code{\link{tobit}},
-    \code{\link{normal1}},
-    \code{\link{dcennormal1}}.
+  \code{\link{tobit}},
+  \code{\link{uninormal}},
+  \code{\link{double.cennormal}}.
+
 
 }
 
 \examples{
 \dontrun{
-cdata <- data.frame(x2 = runif(nn <- 1000)) # ystar are true values
+cdata <- data.frame(x2 = runif(nn <- 1000))  # ystar are true values
 cdata <- transform(cdata, ystar = rnorm(nn, m = 100 + 15 * x2, sd = exp(3)))
 with(cdata, hist(ystar))
-cdata <- transform(cdata, L = runif(nn,  80,  90), # Lower censoring points
-                          U = runif(nn, 130, 140)) # Upper censoring points
-cdata <- transform(cdata, y = pmax(L, ystar)) # Left  censored
-cdata <- transform(cdata, y = pmin(U, y))     # Right censored
+cdata <- transform(cdata, L = runif(nn,  80,  90),  # Lower censoring points
+                          U = runif(nn, 130, 140))  # Upper censoring points
+cdata <- transform(cdata, y = pmax(L, ystar))  # Left  censored
+cdata <- transform(cdata, y = pmin(U, y))      # Right censored
 with(cdata, hist(y))
 Extra <- list(leftcensored = with(cdata, ystar < L),
-             rightcensored = with(cdata, ystar > U))
-fit1 <- vglm(y ~ x2, cennormal1, cdata, crit = "c", extra = Extra, trace = TRUE)
+              rightcensored = with(cdata, ystar > U))
+fit1 <- vglm(y ~ x2, cennormal, cdata, crit = "c", extra = Extra, trace = TRUE)
 fit2 <- vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
             cdata, crit = "c", trace = TRUE)
 coef(fit1, matrix = TRUE)
-max(abs(coef(fit1, matrix = TRUE) - coef(fit2, matrix = TRUE))) # Should be 0
+max(abs(coef(fit1, matrix = TRUE) - coef(fit2, matrix = TRUE)))  # Should be 0
 names(fit1 at extra)
 }
 }
diff --git a/man/cenpoisson.Rd b/man/cenpoisson.Rd
index ed123db..0eca698 100644
--- a/man/cenpoisson.Rd
+++ b/man/cenpoisson.Rd
@@ -87,16 +87,16 @@ cdata <- transform(cdata, cy = pmin(U, y),
 cdata <- transform(cdata, status = ifelse(rcensored, 0, 1))
 with(cdata, table(cy))
 with(cdata, table(rcensored))
-with(cdata, table(ii <- print(SurvS4(cy, status)))) # Check; U+ means >= U
+with(cdata, table(ii <- print(SurvS4(cy, status))))  # Check; U+ means >= U
 fit <- vglm(SurvS4(cy, status) ~ 1, cenpoisson, cdata, trace = TRUE)
 coef(fit, matrix = TRUE)
-table(print(depvar(fit))) # Another check; U+ means >= U
+table(print(depvar(fit)))  # Another check; U+ means >= U
 
 
 # Example 2: left censored data
 L <- 15
 cdata <- transform(cdata, cY = pmax(L, y),
-                          lcensored = y <  L) # Note y < L, not cY == L or y <= L
+                          lcensored = y <  L)  # Note y < L, not cY == L or y <= L
 cdata <- transform(cdata, status = ifelse(lcensored, 0, 1))
 with(cdata, table(cY))
 with(cdata, table(lcensored))
@@ -108,17 +108,17 @@ coef(fit, matrix = TRUE)
 # Example 3: interval censored data
 cdata <- transform(cdata, Lvec = rep(L, len = N),
                           Uvec = rep(U, len = N))
-cdata <- transform(cdata, icensored = Lvec <= y & y < Uvec) # Not lcensored or rcensored
+cdata <- transform(cdata, icensored = Lvec <= y & y < Uvec)  # Not lcensored or rcensored
 with(cdata, table(icensored))
 cdata <- transform(cdata, status = rep(3, N))       # 3 means interval censored
-cdata <- transform(cdata, status = ifelse(rcensored, 0, status)) # 0 means right censored
-cdata <- transform(cdata, status = ifelse(lcensored, 2, status)) # 2 means left  censored
+cdata <- transform(cdata, status = ifelse(rcensored, 0, status))  # 0 means right censored
+cdata <- transform(cdata, status = ifelse(lcensored, 2, status))  # 2 means left  censored
 # Have to adjust Lvec and Uvec because of the (start, end] format:
 cdata$Lvec[with(cdata, icensored)] <- cdata$Lvec[with(cdata, icensored)] - 1
 cdata$Uvec[with(cdata, icensored)] <- cdata$Uvec[with(cdata, icensored)] - 1
-cdata$Lvec[with(cdata, lcensored)] <- cdata$Lvec[with(cdata, lcensored)] # Unchanged
-cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)] # Unchanged
-with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) # Check
+cdata$Lvec[with(cdata, lcensored)] <- cdata$Lvec[with(cdata, lcensored)]  # Unchanged
+cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)]  # Unchanged
+with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval"))))  # Check
 
 fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
             cenpoisson, cdata, trace = TRUE)
@@ -129,10 +129,10 @@ table(print(depvar(fit)))  # Another check
 # Example 4: Add in some uncensored observations
 index <- (1:N)[with(cdata, icensored)]
 index <- head(index, 4)
-cdata$status[index] <- 1 # actual or uncensored value
+cdata$status[index] <- 1  # actual or uncensored value
 cdata$Lvec[index] <- cdata$y[index]
 with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status,
-                                     type = "interval")))) # Check
+                                     type = "interval"))))  # Check
 
 fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
             cenpoisson, cdata, trace = TRUE, crit = "c")
diff --git a/man/cgumbel.Rd b/man/cgumbel.Rd
index 2e2da41..6215520 100644
--- a/man/cgumbel.Rd
+++ b/man/cgumbel.Rd
@@ -7,6 +7,7 @@
    when there are censored observations.
    A matrix response is not allowed.
 
+
 }
 \usage{
 cgumbel(llocation = "identity", lscale = "loge",
@@ -20,6 +21,7 @@ cgumbel(llocation = "identity", lscale = "loge",
   (positive) \eqn{scale} parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iscale}{
   Numeric and positive. 
@@ -27,17 +29,20 @@ cgumbel(llocation = "identity", lscale = "loge",
   In general, a larger value is better than a smaller value.
   The default is to choose the value internally.
 
+
   }
   \item{mean}{
   Logical. Return the mean? If \code{TRUE} then the mean is returned,
   otherwise percentiles given by the \code{percentiles} argument.
 
+
   }
   \item{percentiles}{
   Numeric with values between 0 and 100.
   If \code{mean=FALSE} then the fitted values are percentiles which must
   be specified by this argument.
 
+
   }
 
   \item{zero}{
@@ -63,22 +68,28 @@ cgumbel(llocation = "identity", lscale = "loge",
   values are taken to be \code{FALSE}.  The fitted object has these two
   components stored in the \code{extra} slot.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
+
+
 }
 \references{
+
 Coles, S. (2001)
 \emph{An Introduction to Statistical Modeling of Extreme Values}.
 London: Springer-Verlag.
 
+
 }
 \author{ T. W. Yee }
 \section{Warning}{
   Numerical problems may occur if the amount of censoring is excessive.
 
+
 }
 
 \note{ 
@@ -86,6 +97,7 @@ London: Springer-Verlag.
  The initial values are based on the assumption that all observations
  are uncensored, and therefore could be improved upon.
 
+
 }
 
 \seealso{
@@ -95,16 +107,18 @@ London: Springer-Verlag.
   \code{\link{guplot}},
   \code{\link{gev}},
   \code{\link{venice}}.
+
+
 }
 
 \examples{
 # Example 1
 ystar <- venice[["r1"]]  # Use the first order statistic as the response
 nn <- length(ystar)
-L <- runif(nn, 100, 104) # Lower censoring points
-U <- runif(nn, 130, 135) # Upper censoring points
-y <- pmax(L, ystar) # Left  censored
-y <- pmin(U, y)     # Right censored
+L <- runif(nn, 100, 104)  # Lower censoring points
+U <- runif(nn, 130, 135)  # Upper censoring points
+y <- pmax(L, ystar)  # Left  censored
+y <- pmin(U, y)      # Right censored
 extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
 fit <- vglm(y ~ scale(year), data = venice, trace = TRUE, extra = extra,
             cgumbel(mean = FALSE, perc = c(5, 25, 50, 75, 95)))
@@ -117,8 +131,8 @@ nn <- 1000
 ystar <- rgumbel(nn, loc = 1, scale = exp(0.5))  # The uncensored data
 L <- runif(nn, -1, 1)  # Lower censoring points
 U <- runif(nn,  2, 5)  # Upper censoring points
-y <- pmax(L, ystar) # Left  censored
-y <- pmin(U, y)     # Right censored
+y <- pmax(L, ystar)  # Left  censored
+y <- pmin(U, y)      # Right censored
 \dontrun{par(mfrow = c(1, 2)); hist(ystar); hist(y);}
 extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
 fit <- vglm(y ~ 1, trace = TRUE, extra = extra, cgumbel)
diff --git a/man/chinese.nz.Rd b/man/chinese.nz.Rd
index ffa1d79..fc3e120 100644
--- a/man/chinese.nz.Rd
+++ b/man/chinese.nz.Rd
@@ -22,7 +22,7 @@
   region starting in the mid-1800s to the gold fields of
   South Island of New Zealand,
   California,
-  and Southern Australia, etc.
+  and southern Australia, etc.
   Discrimination then meant that only men were allowed
   entry, to hinder permanent settlement.
   In the case of New Zealand, the government relaxed its
@@ -57,6 +57,8 @@
 \dontrun{ par(mfrow = c(1, 2))
 plot(female / (male + female) ~ year, chinese.nz, type = "b",
      ylab = "Proportion", col = "blue", las = 1,
+     cex = 0.015 * sqrt(male + female),
+#    cex = 0.10 * sqrt((male + female)^1.5 / sqrt(female) / sqrt(male)),
      main = "Proportion of NZ Chinese that are female")
 abline(h = 0.5, lty = "dashed", col = "gray")
 
@@ -64,11 +66,11 @@ fit1.cnz <- vglm(cbind(female, male) ~ year,          binomialff, chinese.nz)
 fit2.cnz <- vglm(cbind(female, male) ~ poly(year, 2), binomialff, chinese.nz)
 fit4.cnz <- vglm(cbind(female, male) ~   bs(year, 5), binomialff, chinese.nz)
 
-lines(fitted(fit1.cnz) ~ year, chinese.nz, col = "purple")
-lines(fitted(fit2.cnz) ~ year, chinese.nz, col = "green")
-lines(fitted(fit4.cnz) ~ year, chinese.nz, col = "orange", lwd = 2)
+lines(fitted(fit1.cnz) ~ year, chinese.nz, col = "purple", lty = 1)
+lines(fitted(fit2.cnz) ~ year, chinese.nz, col = "green", lty = 2)
+lines(fitted(fit4.cnz) ~ year, chinese.nz, col = "orange", lwd = 2, lty = 1)
 legend("bottomright", col = c("purple", "green", "orange"),
-       lty = 1, leg = c("linear", "quadratic", "B-spline"))
+       lty = c(1, 2, 1), leg = c("linear", "quadratic", "B-spline"))
 
 plot(100*(male+female)/nz ~ year, chinese.nz, type = "b", ylab = "Percent",
      ylim = c(0, max(100*(male+female)/nz)), col = "blue", las = 1,
diff --git a/man/chisq.Rd b/man/chisq.Rd
index 3d43f2e..51afbc5 100644
--- a/man/chisq.Rd
+++ b/man/chisq.Rd
@@ -33,9 +33,9 @@ chisq(link = "loge", zero = NULL)
 
 }
 \references{
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -50,7 +50,7 @@ New York: Wiley-Interscience, Third edition.
 
 \seealso{
   \code{\link[stats]{Chisquare}}.
-  \code{\link{normal1}}.
+  \code{\link{uninormal}}.
 
 
 }
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index 6cbef03..7564f9a 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -42,13 +42,16 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
   i.e., \code{log(-log(1 - theta))} when \code{inverse = FALSE}, and if
  \code{inverse = TRUE} then \code{1-exp(-exp(theta))}.
 
+
   For \code{deriv = 1}, then the function returns
   \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
+
   Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
   
+
 }
 \references{
     McCullagh, P. and Nelder, J. A. (1989)
@@ -87,15 +90,16 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
     \code{\link{probit}},
     \code{\link{cauchit}}.
 
+
 }
 \examples{
 p <- seq(0.01, 0.99, by = 0.01)
 cloglog(p)
-max(abs(cloglog(cloglog(p), inverse = TRUE) - p)) # Should be 0
+max(abs(cloglog(cloglog(p), inverse = TRUE) - p))  # Should be 0
 
 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-cloglog(p) # Has NAs
-cloglog(p, bvalue = .Machine$double.eps) # Has no NAs
+cloglog(p)  # Has NAs
+cloglog(p, bvalue = .Machine$double.eps)  # Has no NAs
 
 \dontrun{
 p <- seq(0.01, 0.99, by = 0.01)
@@ -106,21 +110,21 @@ lines(p, cloglog(p), col = "chocolate", lwd = 2)
 lines(p, cauchit(p), col = "tan", lwd = 2)
 abline(v = 0.5, h = 0, lty = "dashed")
 legend(0.1, 4, c("logit", "probit", "cloglog", "cauchit"),
-       col=c("limegreen","purple","chocolate", "tan"), lwd = 2)
+       col = c("limegreen", "purple", "chocolate", "tan"), lwd = 2)
 }
 
 \dontrun{
 # This example shows that a cloglog link is preferred over the logit
-n = 500; p = 5; S = 3; Rank = 1  # Species packing model:
-mydata = rcqo(n, p, S, EqualTol = TRUE, ESOpt = TRUE, EqualMax = TRUE,
-              family = "binomial", hiabundance=5, seed = 123, Rank = Rank)
-fitc = cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata, 
-           fam = binomialff(mv = TRUE, link = "cloglog"), Rank = Rank)
-fitl = cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata, 
-           fam = binomialff(mv = TRUE, link = "logit"), Rank = Rank)
+n <- 500; p <- 5; S <- 3; Rank <- 1  # Species packing model:
+mydata <- rcqo(n, p, S, eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE,
+               family = "binomial", hi.abundance = 5, seed = 123, Rank = Rank)
+fitc <- cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata,
+            fam = binomialff(mv = TRUE, link = "cloglog"), Rank = Rank)
+fitl <- cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata,
+            fam = binomialff(mv = TRUE, link = "logit"), Rank = Rank)
 
 # Compare the fitted models (cols 1 and 3) with the truth (col 2)
-cbind(ccoef(fitc), attr(mydata, "ccoefficients"), ccoef(fitl))
+cbind(concoef(fitc), attr(mydata, "ccoefficients"), concoef(fitl))
 }
 }
 \keyword{math}
diff --git a/man/ccoef-methods.Rd b/man/concoef-methods.Rd
similarity index 72%
rename from man/ccoef-methods.Rd
rename to man/concoef-methods.Rd
index 3fc8d28..4570ad1 100644
--- a/man/ccoef-methods.Rd
+++ b/man/concoef-methods.Rd
@@ -1,6 +1,14 @@
-\name{ccoef-methods}
+\name{concoef-methods}
 \docType{methods}
-%\alias{ccoef,ANY-method}
+%\alias{concoef,ANY-method}
+\alias{concoef-method}
+\alias{concoef,cao-method}
+\alias{concoef,Coef.cao-method}
+\alias{concoef,rrvglm-method}
+\alias{concoef,qrrvglm-method}
+\alias{concoef,Coef.rrvglm-method}
+\alias{concoef,Coef.qrrvglm-method}
+%
 \alias{ccoef-method}
 \alias{ccoef,cao-method}
 \alias{ccoef,Coef.cao-method}
@@ -14,7 +22,7 @@
 %
 \title{ Constrained (Canonical) Coefficients }
 \description{
-  \code{ccoef} is a generic function used to return the constrained
+  \code{concoef} is a generic function used to return the constrained
   (canonical) coefficients of a  constrained ordination model.
   The function invokes particular methods which depend on the class of
   the first argument.
@@ -30,6 +38,8 @@
 \item{object}{
   The object from which the constrained coefficients are
   extracted.
+
+
 }
 
 }
diff --git a/man/ccoef.Rd b/man/concoef.Rd
similarity index 73%
rename from man/ccoef.Rd
rename to man/concoef.Rd
index 4b0d168..388d6cc 100644
--- a/man/ccoef.Rd
+++ b/man/concoef.Rd
@@ -1,15 +1,16 @@
-\name{ccoef}
+\name{concoef}
+\alias{concoef}
 \alias{ccoef}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Extract Model Constrained/Canonical Coefficients }
 \description{
-  \code{ccoef} is a generic function which extracts the constrained
+  \code{concoef} is a generic function which extracts the constrained
   (canonical) coefficients from objects returned by certain modelling
   functions.
 
 }
 \usage{
-ccoef(object, ...)
+concoef(object, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -64,6 +65,10 @@ Constrained additive ordination.
 %}
 
 \section{Warning }{
+  \code{\link{concoef}} and \code{\link{ccoef}} are identical,
+  but the latter will be deprecated soon.
+
+
   For QO models, there is a direct inverse relationship between the
   scaling of the latent variables (site scores) and the tolerances.
   One normalization is for the latent variables to have unit variance.
@@ -79,22 +84,22 @@ Constrained additive ordination.
 }
 
 \seealso{
-   \code{\link{ccoef-method}},
-   \code{ccoef.qrrvglm},
-   \code{ccoef.cao},
+   \code{\link{concoef-method}},
+   \code{concoef.qrrvglm},
+   \code{concoef.cao},
    \code{\link[stats]{coef}}.
 
 
 }
 \examples{
-\dontrun{ set.seed(111) # This leads to the global solution
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
-               Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
-               Trocterr, Zoraspin) ~
-         WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-         fam = quasipoissonff, data = hspider, Crow1positive = FALSE)
-ccoef(p1)
+\dontrun{ set.seed(111)  # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+                Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+                Trocterr, Zoraspin) ~
+          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+          fam = quasipoissonff, data = hspider, Crow1positive = FALSE)
+concoef(p1)
 }
 }
 \keyword{models}
diff --git a/man/constraints.Rd b/man/constraints.Rd
index 20a4f42..c781a01 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -11,7 +11,7 @@
 \usage{
 constraints(object, ...)
 constraints.vlm(object, type = c("lm", "term"), all = TRUE, which,
-                matrix.out = FALSE, ...)
+                matrix.out = FALSE, colnames.arg = TRUE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -43,6 +43,12 @@ constraints.vlm(object, type = c("lm", "term"), all = TRUE, which,
 
 
   }
+  \item{colnames.arg}{
+  Logical. If \code{TRUE} then column names are assigned
+  corresponding to the variables.
+
+
+  }
   \item{\dots}{
   Other possible arguments such as \code{type}.
 
@@ -157,21 +163,21 @@ pneumo <- transform(pneumo, let = log(exposure.time))
 (fit1 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
               cumulative(parallel = TRUE, reverse = TRUE), pneumo))
 coef(fit1, matrix = TRUE)
-constraints(fit1) # Parallel assumption results in this
-constraints(fit1, type = "term") # This is the same as the default ("vlm"-type)
+constraints(fit1)  # Parallel assumption results in this
+constraints(fit1, type = "term")  # This is the same as the default ("vlm"-type)
 is.parallel(fit1)
 
 # An equivalent model to fit1 (needs the type "term" constraints):
-clist.term <- constraints(fit1, type = "term") # The "term"-type constraints
+clist.term <- constraints(fit1, type = "term")  # The "term"-type constraints
 (fit2 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
               cumulative(reverse = TRUE), pneumo, constraints = clist.term))
 abs(max(coef(fit1, matrix = TRUE) -
-        coef(fit2, matrix = TRUE))) # Should be zero
+        coef(fit2, matrix = TRUE)))  # Should be zero
 
 # Fit a rank-1 stereotype (RR-multinomial logit) model:
 data(car.all)
 fit <- rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank = 1)
-constraints(fit) # All except the first are the estimated A matrix 
+constraints(fit)  # All except the first are the estimated A matrix 
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/corbet.Rd b/man/corbet.Rd
new file mode 100644
index 0000000..8be6ca8
--- /dev/null
+++ b/man/corbet.Rd
@@ -0,0 +1,57 @@
+\name{corbet}
+\alias{corbet}
+\docType{data}
+\title{
+  Corbet's Butterfly Data
+
+%%   ~~ data name/kind ... ~~
+}
+\description{
+  About 3300 individual butterflies were caught in Malaya
+  by the naturalist Corbet.
+  They were classified into about 500 species.
+
+%%  ~~ A concise (1-5 lines) description of the dataset. ~~
+}
+\usage{data(corbet)}
+\format{
+  A data frame with 24 observations on the following 2 variables.
+\describe{
+    \item{\code{species}}{Number of species. }
+    \item{\code{ofreq}}{Observed frequency of individual
+      butterflies of that species. }
+  }
+}
+%%\format{
+%%  The format is:
+%% chr "corbet"
+%%}
+\details{
+  In the early 1940s Corbet spent two years trapping butterflies
+  in Malaya. Of interest was the total number of species.
+  Some species were so rare (e.g., 118 species had only
+  one specimen) that it was thought likely that there were
+  many unknown species.
+
+
+%%  ~~ If necessary, more details than the __description__ above
+}
+
+
+%%\source{
+%%  ~~ reference to a publication or URL from which the data were obtained ~~
+%%}
+\references{
+
+  Fisher, R. A., Corbet, A. S. and Williams, C. B. (1943)
+  The relation between the number of species and the number of
+  individuals in a random sample of an animal population.
+  \emph{Journal of Animal Ecology},
+  \bold{12}, 42--58.
+
+}
+\examples{
+summary(corbet)
+}
+\keyword{datasets}
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 75bdacd..4290ee1 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -189,8 +189,8 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
   QRR-VGLMs or \emph{constrained quadratic ordination} (CQO) models
   are estimated here by maximum likelihood estimation. Optimal linear
   combinations of the environmental variables are computed, called
-  \emph{latent variables} (these appear as \code{lv} for \eqn{R=1}
-  else \code{lv1}, \code{lv2}, etc. in the output).  Here, \eqn{R}
+  \emph{latent variables} (these appear as \code{latvar} for \eqn{R=1}
+  else \code{latvar1}, \code{latvar2}, etc. in the output).  Here, \eqn{R}
   is the \emph{rank} or the number of ordination axes.  Each species'
   response is then a regression of these latent variables using quadratic
   polynomials on a transformed scale (e.g., log for Poisson counts, logit
@@ -266,6 +266,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
   \code{deviance.Bestof} which gives the history of deviances over all
   the iterations.
 
+
 }
 \references{
 
@@ -302,12 +303,85 @@ original FORTRAN code into C.
 } 
 
 \note{
+  The input requires care, preparation and
+  thought---\emph{a lot more} than other ordination methods.
+  Here is a partial \bold{checklist}.
+  \describe{
+  \item{(1)}{
+  The number of species should be kept reasonably low, e.g., 12 max.
+  Feeding in 100+ species wholesale is a recipe for failure.
+  Choose a few species carefully.
+  Using 10 well-chosen species is better than 100+ species thrown in
+  willy-nilly.
+
+
+  }
+  \item{(2)}{
+  Each species should be screened individually first, e.g.,
+  for presence/absence data, is the species totally absent or
+  totally present at all sites?
+  For presence/absence data \code{sort(colMeans(data))} can help
+  identify (and hence exclude) such species;
+  see the sketch after this checklist.
+
+
+  }
+  \item{(3)}{
+  The number of explanatory variables should be kept low,
+  e.g., 7 max.
+
+
+  }
+  \item{(4)}{
+  Each explanatory variable should be screened individually first, e.g.,
+  is it heavily skewed or are there outliers?
+  They should be plotted and then transformed where needed.
+  They should not be too highly correlated with each other.
+
+
+  }
+  \item{(5)}{
+  Each explanatory variable should be scaled, e.g.,
+  to mean 0 and unit variance.
+  This is especially needed for \code{ITolerance = TRUE}.
+
+
+  }
+  \item{(6)}{
+  Keep the rank low.
+  Only if the data is very good should a rank-2 model be attempted.
+  Usually a rank-1 model is all that is practically possible even
+  after a lot of work.
+  The rank-1 model should always be attempted first.
+  Then it might be sensible to use this fit to obtain
+  initial values for a rank-2 model.
+
+
+  }
+  \item{(7)}{
+  If the number of sites is large then choose a random sample of them.
+  For example, choose a maximum of 500 sites.
+  This will reduce the memory and time expense of the computations.
+
+
+  }
+  \item{(8)}{
+  Try \code{ITolerance = TRUE} or \code{EqualTolerance = FALSE}
+  if the input data set is large,
+  so as to reduce the computational expense.
+  That is because the default, \code{ITolerance = FALSE} and
+  \code{EqualTolerance = TRUE}, is very memory-hungry.
+
+
+  }
+  }
+
+
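
To make points (2) and (5) concrete, here is a minimal screening and
scaling sketch. It is hypothetical: \code{pa.data} (a data frame of 0/1
presence/absence responses) and \code{env.data} (a data frame of numeric
environmental variables) are illustrative names only, not objects supplied
by the package.

    # Point (2): flag species that are (almost) never or always present
    sort(colMeans(pa.data))  # Values near 0 or 1 suggest dropping that species
    keep <- colMeans(pa.data) > 0.05 & colMeans(pa.data) < 0.95
    pa.data <- pa.data[, keep]

    # Points (4)-(5): inspect, then centre and scale the environmental variables
    summary(env.data)
    env.data <- as.data.frame(scale(env.data))  # Mean 0, unit variance
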
 
   By default, a rank-1 equal-tolerances QRR-VGLM model is fitted
   (see \code{\link{qrrvglm.control}} for the default control
   parameters).
-  The latent variables are always transformed so that they
-  are uncorrelated.
+  If \code{Rank > 1} then the latent variables are always transformed
+  so that they are uncorrelated.
   By default, the argument \code{trace} is \code{TRUE} meaning a running
   log is printed out while the computations are taking place.  This is
   because the algorithm is computationally expensive, therefore users
@@ -344,7 +418,7 @@ original FORTRAN code into C.
   \code{Bestof}, 
   \code{ITolerances},
   \code{EqualTolerances},
-  \code{isdlv}, and
+  \code{isd.latvar}, and
   \code{MUXfactor}.
 
 
@@ -394,7 +468,8 @@ original FORTRAN code into C.
 
 
   Unless \code{ITolerances = TRUE} or \code{EqualTolerances = FALSE},
-  CQO is computationally expensive. It pays to keep the rank down to 1
+  CQO is computationally expensive in terms of memory and time.
+  It pays to keep the rank down to 1
   or 2.  If \code{EqualTolerances = TRUE} and \code{ITolerances = FALSE} then
   the cost grows quickly with the number of species and sites (in terms of
   memory requirements and time).  The data needs to conform quite closely
@@ -438,9 +513,9 @@ original FORTRAN code into C.
   \code{\link{predictqrrvglm}},
   \code{\link{rcqo}},
   \code{\link{cao}},
-  \code{\link{uqo}},
+% \code{\link{uqo}},
   \code{\link{rrvglm}},
-%   \code{\link{rrvglm.control}},
+% \code{\link{rrvglm.control}},
   \code{\link{poissonff}},
   \code{\link{binomialff}},
   \code{\link{negbinomial}},
@@ -463,40 +538,40 @@ contains further information and examples.
 \examples{
 \dontrun{
 # Example 1; Fit an unequal tolerances model to the hunting spiders data
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental variables
-set.seed(1234) # For reproducibility of the results
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental variables
+set.seed(1234)  # For reproducibility of the results
 p1ut <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                   Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
                   Trocterr, Zoraspin) ~
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
             fam = poissonff, data = hspider, Crow1positive = FALSE,
             EqualTol = FALSE)
-sort(p1ut@misc$deviance.Bestof) # A history of all the iterations
+sort(p1ut@misc$deviance.Bestof)  # A history of all the iterations
 if(deviance(p1ut) > 1177) warning("suboptimal fit obtained")
 
-S <- ncol(depvar(p1ut)) # Number of species
-clr <- (1:(S+1))[-7] # Omits yellow
-lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr, las = 1) # ordination diagram
+S <- ncol(depvar(p1ut))  # Number of species
+clr <- (1:(S+1))[-7]  # Omits yellow
+lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr, las = 1)  # Ordination diagram
 legend("topright", leg = colnames(depvar(p1ut)), col = clr,
        pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2)
 (cp <- Coef(p1ut))
 
-(a <- cp@lv[cp@lvOrder])  # The ordered site scores along the gradient
+(a <- cp@latvar[cp@latvar.order])  # The ordered site scores along the gradient
 # Names of the ordered sites along the gradient:
-rownames(cp@lv)[cp@lvOrder]
-(aa <- (cp@Optimum)[,cp@OptimumOrder]) # The ordered optima along the gradient
-aa <- aa[!is.na(aa)] # Delete the species that is not unimodal
-names(aa) # Names of the ordered optima along the gradient
+rownames(cp@latvar)[cp@latvar.order]
+(aa <- (cp@Optimum)[,cp@Optimum.order])  # The ordered optima along the gradient
+aa <- aa[!is.na(aa)]  # Delete the species that is not unimodal
+names(aa)  # Names of the ordered optima along the gradient
 
-trplot(p1ut, whichSpecies = 1:3, log = "xy", type = "b", lty = 1, lwd = 2,
-       col = c("blue","red","green"), label = TRUE) -> ii # trajectory plot
+trplot(p1ut, which.species = 1:3, log = "xy", type = "b", lty = 1, lwd = 2,
+       col = c("blue","red","green"), label = TRUE) -> ii  # Trajectory plot
 legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "),
        lwd = 2, lty = 1, col = c("blue", "red", "green"))
 abline(a = 0, b = 1, lty = "dashed")
 
-S <- ncol(depvar(p1ut)) # Number of species
-clr <- (1:(S+1))[-7] # Omits yellow
-persp(p1ut, col = clr, label = TRUE, las = 1) # perspective plot
+S <- ncol(depvar(p1ut))  # Number of species
+clr <- (1:(S+1))[-7]  # Omits yellow
+persp(p1ut, col = clr, label = TRUE, las = 1)  # Perspective plot
 
 
 # Example 2; Fit an equal tolerances model. Less numerically fraught.
@@ -506,10 +581,10 @@ p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                   Trocterr, Zoraspin) ~
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
             poissonff, data = hspider, Crow1positive = FALSE)
-sort(p1et@misc$deviance.Bestof) # A history of all the iterations
+sort(p1et@misc$deviance.Bestof)  # A history of all the iterations
 if (deviance(p1et) > 1586) warning("suboptimal fit obtained")
-S <- ncol(depvar(p1et)) # Number of species
-clr <- (1:(S+1))[-7] # Omits yellow
+S <- ncol(depvar(p1et))  # Number of species
+clr <- (1:(S+1))[-7]  # Omits yellow
 persp(p1et, col = clr, label = TRUE, las = 1)
 
 
@@ -521,8 +596,8 @@ p2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                 Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           poissonff, data = hspider, Crow1positive = FALSE,
-          IToler = TRUE, Rank = 2, Bestof = 3, isdlv = c(2.1, 0.9))
-sort(p2@misc$deviance.Bestof) # A history of all the iterations
+          IToler = TRUE, Rank = 2, Bestof = 3, isd.latvar = c(2.1, 0.9))
+sort(p2@misc$deviance.Bestof)  # A history of all the iterations
 if(deviance(p2) > 1127) warning("suboptimal fit obtained")
 lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
        C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey", 
@@ -532,17 +607,17 @@ lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
 # Example 4: species packing model with presence/absence data
 set.seed(2345)
 n <- 200; p <- 5; S <- 5
-mydata <- rcqo(n, p, S, fam = "binomial", hiabundance = 4,
-               EqualTol = TRUE, ESOpt = TRUE, EqualMax = TRUE)
+mydata <- rcqo(n, p, S, fam = "binomial", hi.abundance = 4,
+               eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE)
 myform <- attr(mydata, "formula")
 set.seed(1234)
 b1et <- cqo(myform, binomialff(mv = TRUE, link = "cloglog"), data = mydata)
-sort(b1et@misc$deviance.Bestof) # A history of all the iterations
+sort(b1et@misc$deviance.Bestof)  # A history of all the iterations
 lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1)
 Coef(b1et)
 
 # Compare the fitted model with the 'truth'
-cbind(truth = attr(mydata, "ccoefficients"), fitted = ccoef(b1et))
+cbind(truth = attr(mydata, "concoefficients"), fitted = concoef(b1et))
 
 
 # Example 5: Plot the deviance residuals for diagnostic purposes
@@ -552,29 +627,29 @@ p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                   Trocterr, Zoraspin) ~
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
             poissonff, data = hspider, EqualTol = TRUE, trace = FALSE)
-sort(p1et@misc$deviance.Bestof) # A history of all the iterations
+sort(p1et@misc$deviance.Bestof)  # A history of all the iterations
 if(deviance(p1et) > 1586) warning("suboptimal fit obtained")
 S <- ncol(depvar(p1et))
 par(mfrow = c(3, 4))
-for(ii in 1:S) {
-  tempdata <- data.frame(lv1 = c(lv(p1et)), sppCounts = depvar(p1et)[, ii])
-  tempdata <- transform(tempdata, myOffset = -0.5 * lv1^2)
+for (ii in 1:S) {
+  tempdata <- data.frame(latvar1 = c(latvar(p1et)), sppCounts = depvar(p1et)[, ii])
+  tempdata <- transform(tempdata, myOffset = -0.5 * latvar1^2)
 
 # For species ii, refit the model to get the deviance residuals
-  fit1 <- vglm(sppCounts ~ offset(myOffset) + lv1, fam = poissonff,
+  fit1 <- vglm(sppCounts ~ offset(myOffset) + latvar1, poissonff,
                data = tempdata, trace = FALSE)
 
 # For checking: this should be 0
-  print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
-  print( max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1))) )
+# print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
+# print( max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1))) )
 
 # Plot the deviance residuals
   devresid <- resid(fit1, type = "deviance")
   predvalues <- predict(fit1) + fit1@offset
-  ooo <- with(tempdata, order(lv1))
-  with(tempdata, plot(lv1, predvalues + devresid, col = "darkgreen",
-                      xlab = "lv1", ylab = "", main = colnames(depvar(p1et))[ii]))
-  with(tempdata, lines(lv1[ooo], predvalues[ooo], col = "blue"))
+  ooo <- with(tempdata, order(latvar1))
+  with(tempdata, plot(latvar1, predvalues + devresid, col = "darkgreen",
+                      xlab = "latvar1", ylab = "", main = colnames(depvar(p1et))[ii]))
+  with(tempdata, lines(latvar1[ooo], predvalues[ooo], col = "blue"))
 }
 }
 }
diff --git a/man/crashes.Rd b/man/crashes.Rd
index 174a860..91d0c22 100644
--- a/man/crashes.Rd
+++ b/man/crashes.Rd
@@ -39,7 +39,8 @@ data(alclevels)
 
     }
 
-    \item{0-30, 31-50, 51-80, 81-100, 101-120, 121-150, 151-200, 201-250, 251-300, 301-350, 350+}{
+    \item{0-30, 31-50, 51-80, 81-100, 101-120, 121-150, 151-200,
+          201-250, 251-300, 301-350, 350+}{
     Blood alcohol level (milligrams alcohol per 100 millilitres of blood).
 
 
@@ -59,7 +60,8 @@ data(alclevels)
      
   For crashes,
      \code{chrashi} are the number of injuries by car,
-     \code{crashf}  are the number of fatalities by car (not included in \code{chrashi}),
+     \code{crashf}  are the number of fatalities by car
+     (not included in \code{chrashi}),
      \code{crashtr} are the number of crashes involving trucks, 
      \code{crashmc} are the number of crashes involving motorcyclists,
      \code{crashbc} are the number of crashes involving bicycles,
@@ -80,12 +82,14 @@ data(alclevels)
 
 }
 \references{
+
   Motor Vehicles Crashes in New Zealand 2009;
   Statistical Statement Calendar Year 2009.
   Ministry of Transport, NZ Government;
   Yearly Report 2010.
   ISSN: 1176-3949
 
+
 }
 \seealso{
   \code{\link[VGAM]{rrvglm}},
@@ -108,7 +112,7 @@ abline(v = sort(1 + c((0:7) * 24, (0:6) * 24 + 12)), lty = "dashed",
 
 # Goodmans RC models
 \dontrun{
-fitgrc1 <- grc(alcoff) # Rank-1 model
+fitgrc1 <- grc(alcoff)  # Rank-1 model
 fitgrc2 <- grc(alcoff, Rank = 2, Corner = FALSE, Uncor = TRUE)
 Coef(fitgrc2)
 }
@@ -129,6 +133,6 @@ Coef(fitgrc2)
 %\alias{crashp}     Table 45, p.84
 %\alias{alcoff}     Table  3, p.121
 %\alias{alclevels}  Table  2, p.132
-% print(Coef(fitgrc2), dig = 2)
+% print(Coef(fitgrc2), digits = 2)
 
 
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index a851de8..337dc44 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -10,8 +10,9 @@
 }
 \usage{
 cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
-           mv = FALSE, apply.parint = FALSE, whitespace = FALSE)
+           mv = FALSE, whitespace = FALSE)
 }
+%apply.parint = FALSE,
 %scumulative(link = "logit",
 %            lscale = "loge", escale = list(),
 %            parallel = FALSE, sparallel = TRUE, reverse = FALSE, iscale = 1)
@@ -40,6 +41,9 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
   See below for more information about the parallelism assumption.
   The default results in what some people call the
   \emph{generalized ordered logit model} to be fitted.
+  If \code{parallel = TRUE} then the parallelism constraint
+  does not apply to the intercept.
+
+
 
 
   }
@@ -82,17 +86,17 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
 
 
   }
-  \item{apply.parint}{
-  Logical.
-  Whether the \code{parallel} argument should be applied to the intercept term.
-  This should be set to \code{TRUE} for \code{link=}
-  \code{\link{golf}},
-  \code{\link{polf}},
-  \code{\link{nbolf}}.
-  See \code{\link{CommonVGAMffArguments}} for more information.
-
-
-  }
+%  \item{apply.parint}{
+%  Logical.
+%  Whether the \code{parallel} argument should be applied to the intercept term.
+%  This should be set to \code{TRUE} for \code{link=}
+%  \code{\link{golf}},
+%  \code{\link{polf}},
+%  \code{\link{nbolf}}.
+%  See \code{\link{CommonVGAMffArguments}} for more information.
+%
+%
+%  }
 % \item{iscale}{
 % Numeric. Initial values for the scale parameters.
 
@@ -292,17 +296,20 @@ by the \pkg{VGAM} package can be found at
   \code{\link{nbolf}},
   \code{\link{logistic1}}.
 
+
 }
 \examples{
 # Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
 pneumo <- transform(pneumo, let = log(exposure.time))
 (fit <- vglm(cbind(normal, mild, severe) ~ let,
              cumulative(parallel = TRUE, reverse = TRUE), pneumo))
-depvar(fit)   # Sample proportions (good technique)
-fit@y         # Sample proportions (bad technique)
-weights(fit, type = "prior")   # Number of observations
+depvar(fit)  # Sample proportions (good technique)
+fit@y        # Sample proportions (bad technique)
+weights(fit, type = "prior")  # Number of observations
 coef(fit, matrix = TRUE)
-constraints(fit)   # Constraint matrices
+constraints(fit)  # Constraint matrices
+apply(fitted(fit), 1, which.max)  # Classification
+apply(predict(fit, newdata = pneumo, type = "response"), 1, which.max)  # Classification
 
 # Check that the model is linear in let ----------------------
 fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
@@ -319,7 +326,7 @@ lrtest(fit3, fit)  # More elegant
 # A factor() version of fit ----------------------------------
 # This is in long format (cf. wide format above)
 Nobs <- round(depvar(fit) * c(weights(fit, type = "prior")))
-sumNobs <- colSums(Nobs) # apply(Nobs, 2, sum)
+sumNobs <- colSums(Nobs)  # apply(Nobs, 2, sum)
 
 pneumo.long <-
   data.frame(symptoms = ordered(rep(rep(colnames(Nobs), nrow(Nobs)),
@@ -327,19 +334,19 @@ pneumo.long <-
                                 levels = colnames(Nobs)),
              let = rep(rep(with(pneumo, let), each = ncol(Nobs)),
                        times = c(t(Nobs))))
-with(pneumo.long, table(let, symptoms)) # Check it; should be same as pneumo
+with(pneumo.long, table(let, symptoms))  # Check it; should be same as pneumo
 
 
-(fit.long1 <- vglm(symptoms ~ let, data = pneumo.long,
-             cumulative(parallel = TRUE, reverse = TRUE), trace = TRUE))
-coef(fit.long1, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
+(fit.long1 <- vglm(symptoms ~ let, data = pneumo.long, trace = TRUE,
+                   cumulative(parallel = TRUE, reverse = TRUE)))
+coef(fit.long1, matrix = TRUE)  # Should be same as coef(fit, matrix = TRUE)
 # Could try using mustart if fit.long1 failed to converge.
 mymustart <- matrix(sumNobs / sum(sumNobs),
-                   nrow(pneumo.long), ncol(Nobs), byrow = TRUE)
-fit.long2 <- vglm(symptoms ~ let,
-                  fam = cumulative(parallel = TRUE, reverse = TRUE),
-                  mustart = mymustart, data = pneumo.long, trace = TRUE)
-coef(fit.long2, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
+                    nrow(pneumo.long), ncol(Nobs), byrow = TRUE)
+fit.long2 <- vglm(symptoms ~ let, mustart = mymustart,
+                  cumulative(parallel = TRUE, reverse = TRUE),
+                  data = pneumo.long, trace = TRUE)
+coef(fit.long2, matrix = TRUE)  # Should be same as coef(fit, matrix = TRUE)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/Perom.Rd b/man/deermice.Rd
similarity index 59%
rename from man/Perom.Rd
rename to man/deermice.Rd
index 400e8bf..4d2188e 100644
--- a/man/Perom.Rd
+++ b/man/deermice.Rd
@@ -1,28 +1,39 @@
-\name{Perom}
-\alias{Perom}
+\name{deermice}
+%\alias{Perom}
+\alias{deermice}
 \docType{data}
 \title{
-  Captures of peromyscus maniculatus
+  Captures of Peromyscus maniculatus, also known as deer mice.
 
 
 %%   ~~ data name/kind ... ~~
 }
 \description{
 
-  Captures of \emph{peromyscus maniculatus} collected at East
+  Captures of \emph{Peromyscus maniculatus} collected at East
   Stuart Gulch, Colorado, USA.
 
+
 %%  ~~ A concise (1-5 lines) description of the dataset. ~~
 }
-\usage{data(Perom)}
+% data(Perom)
+\usage{
+  data(deermice)
+}
 \format{
-  The format is:
- chr "Perom"
+  The format is a data frame.
+
 
 }
 \details{
 
-  The columns
+  \emph{Peromyscus maniculatus} is a rodent native to North America.
+  The deer mouse is small in size, only about 8 to 10 cm long,
+  not counting the length of the tail.
+
+
+  Originally,
+  the columns of this data frame
   represent the sex (\code{m} or \code{f}),
   the ages (\code{y}: young, \code{sa}: semi-adult, \code{a}: adult),
   the weights in grams, and the
@@ -34,6 +45,13 @@
   with the \pkg{CAPTURE} program of Otis et al. (1978).
 
 
+  \code{deermice} has 38 deermice whereas
+  \code{Perom} had 36
+  (\code{Perom} has been withdrawn).
+  In \code{deermice} the two semi-adults have been classified as adults.
+  The \code{sex} variable has 1 for female and 0 for male.
+
+
 %%  ~~ If necessary, more details than the __description__ above ~~
 }
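
A quick look at the recoding described above (a sketch; it assumes only
the row count and the 0/1 coding of \code{sex} stated in this section):

    library(VGAM)
    data(deermice)
    nrow(deermice)              # 38 deermice
    with(deermice, table(sex))  # 0 = male, 1 = female
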
 %\source{
@@ -49,7 +67,6 @@ approach to capture experiments.
 \bold{47}, 725--732.
 
 
-
   Otis, D. L. et al. (1978)
   Statistical inference from capture data on closed animal populations,
   \emph{Wildlife Monographs},
@@ -60,16 +77,19 @@ approach to capture experiments.
 }
 
 \seealso{
+    \code{\link[VGAM:posbernoulli.b]{posbernoulli.b}},
     \code{\link[VGAM:posbernoulli.t]{posbernoulli.t}}.
 
+
 }
 
 \examples{
-head(Perom)
+head(deermice)
 \dontrun{
 fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + age,
-             posbernoulli.t(parallel.t = TRUE), data = Perom, trace = TRUE)
+             posbernoulli.t(parallel.t = TRUE), data = deermice, trace = TRUE)
 coef(fit1)
 coef(fit1, matrix = TRUE)
-}}
+}
+}
 \keyword{datasets}
diff --git a/man/deplot.lmscreg.Rd b/man/deplot.lmscreg.Rd
index 5d65a1d..135e96a 100644
--- a/man/deplot.lmscreg.Rd
+++ b/man/deplot.lmscreg.Rd
@@ -7,7 +7,7 @@
   associated with a LMS quantile regression.
 }
 \usage{
-deplot.lmscreg(object, newdata = NULL, x0, y.arg, plot.it = TRUE, ...)
+deplot.lmscreg(object, newdata = NULL, x0, y.arg, show.plot = TRUE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -32,7 +32,7 @@ deplot.lmscreg(object, newdata = NULL, x0, y.arg, plot.it = TRUE, ...)
   \item{y.arg}{ Numerical vector. The values of the response variable 
   at which to evaluate the density. This should be a grid that is fine
   enough to ensure the plotted curves are smooth.  }
-  \item{plot.it}{ Logical. Plot it? If \code{FALSE} no plot will
+  \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will
   be done.
 
 
diff --git a/man/depvar.Rd b/man/depvar.Rd
index 5d0213e..96c91cd 100644
--- a/man/depvar.Rd
+++ b/man/depvar.Rd
@@ -20,12 +20,18 @@ depvar(object, ...)
 \item{\dots}{
   Other arguments fed into the specific methods function of
   the model.
+  In particular, sometimes \code{type = c("lm", "lm2")} is
+  available, in which case the first value is used if the
+  user does not specify one.
+  The value \code{"lm2"} corresponds to the argument \code{form2},
+  whose response is sometimes optional.
 
 
 }
 }
 \details{
-  This function is preferred to calling \code{fit@y}, say.
+  By default,
+  this function is preferred to calling \code{fit@y}, say.
 
 
 }
@@ -58,9 +64,9 @@ depvar(object, ...)
 \examples{
 pneumo <- transform(pneumo, let = log(exposure.time))
 (fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-fit@y       # Sample proportions (not recommended)
-depvar(fit) # Better than using fit@y; dependent variable (response)
-weights(fit, type = "prior") # Number of observations
+fit@y        # Sample proportions (not recommended)
+depvar(fit)  # Better than using fit@y; dependent variable (response)
+weights(fit, type = "prior")  # Number of observations
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/df.residual.Rd b/man/df.residual.Rd
index 0ec788c..13f66aa 100644
--- a/man/df.residual.Rd
+++ b/man/df.residual.Rd
@@ -68,14 +68,14 @@ pneumo <- transform(pneumo, let = log(exposure.time))
 head(model.matrix(fit, type = "vlm"))
 head(model.matrix(fit, type = "lm"))
 
-df.residual(fit, type = "vlm") # n * M - p_VLM
-nobs(fit, type = "vlm") # n * M
-nvar(fit, type = "vlm") # p_VLM
+df.residual(fit, type = "vlm")  # n * M - p_VLM
+nobs(fit, type = "vlm")  # n * M
+nvar(fit, type = "vlm")  # p_VLM
 
-df.residual(fit, type = "lm") # n - p_LM(j); Useful in some situations
-nobs(fit, type = "lm") # n
-nvar(fit, type = "lm") # p_LM
-nvar_vlm(fit, type = "lm") # p_LM(j) (<= p_LM elementwise)
+df.residual(fit, type = "lm")  # n - p_LM(j); Useful in some situations
+nobs(fit, type = "lm")  # n
+nvar(fit, type = "lm")  # p_LM
+nvar_vlm(fit, type = "lm")  # p_LM(j) (<= p_LM elementwise)
 }
 
 \keyword{models}
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index a97ea95..cb000aa 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -88,9 +88,9 @@ Lange, K. (2002)
 2nd ed. New York: Springer-Verlag.
 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 %Documentation accompanying the \pkg{VGAM} package at
diff --git a/man/dirmul.old.Rd b/man/dirmul.old.Rd
index 5c906d6..93dfec8 100644
--- a/man/dirmul.old.Rd
+++ b/man/dirmul.old.Rd
@@ -81,9 +81,9 @@ Lange, K. (2002)
 2nd ed.  New York: Springer-Verlag.
 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 Paul, S. R., Balasooriya, U. and Banerjee, T. (2005)
@@ -144,9 +144,9 @@ fit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
 
 (sfit <- summary(fit))
 vcov(sfit)
-round(eta2theta(coef(fit), fit@misc$link, fit@misc$earg), dig = 2) # not preferred
-round(Coef(fit), dig = 2) # preferred
-round(t(fitted(fit)), dig = 4) # 2nd row of Table 3.5 of Lange (2002)
+round(eta2theta(coef(fit), fit@misc$link, fit@misc$earg), digits = 2)  # not preferred
+round(Coef(fit), digits = 2)  # preferred
+round(t(fitted(fit)), digits = 4)  # 2nd row of Table 3.5 of Lange (2002)
 coef(fit, matrix = TRUE)
 
 
@@ -155,8 +155,8 @@ pfit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
              dirmul.old(parallel = TRUE), trace = TRUE,
              data = alleleCounts)
 round(eta2theta(coef(pfit, matrix = TRUE), pfit@misc$link,
-                pfit@misc$earg), dig = 2) # 'Right' answer
-round(Coef(pfit), dig = 2) # 'Wrong' answer due to parallelism constraint
+                pfit@misc$earg), digits = 2)  # 'Right' answer
+round(Coef(pfit), digits = 2)  # 'Wrong' answer due to parallelism constraint
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index 68daa9d..cd4c850 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -177,13 +177,13 @@ Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetic
 
 \examples{
 nn <- 10; M <- 5
-ydata <- data.frame(round(matrix(runif(nn * M, max = 10), nn, M))) # Integer counts
+ydata <- data.frame(round(matrix(runif(nn * M, max = 10), nn, M)))  # Integer counts
 colnames(ydata) <- paste("y", 1:M, sep = "")
 
 fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial, ydata, trace = TRUE)
 head(fitted(fit))
-depvar(fit) # Sample proportions
-weights(fit, type = "prior", matrix = FALSE) # Total counts per row
+depvar(fit)  # Sample proportions
+weights(fit, type = "prior", matrix = FALSE)  # Total counts per row
 
 ydata <- transform(ydata, x2 = runif(nn))
 fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial, ydata, trace = TRUE)
diff --git a/man/dcennormal1.Rd b/man/double.cennormal.Rd
similarity index 78%
rename from man/dcennormal1.Rd
rename to man/double.cennormal.Rd
index 342b1d5..9bf720e 100644
--- a/man/dcennormal1.Rd
+++ b/man/double.cennormal.Rd
@@ -1,5 +1,5 @@
-\name{dcennormal1}
-\alias{dcennormal1}
+\name{double.cennormal}
+\alias{double.cennormal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Univariate Normal Distribution with Double Censoring }
 \description{
@@ -8,24 +8,27 @@
 
 }
 \usage{
-dcennormal1(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
-            imu = NULL, isd = NULL, zero = 2)
+double.cennormal(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
+                 imu = NULL, isd = NULL, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{r1, r2}{
   Integers. Number of smallest and largest values censored, respectively.
 
+
   }
   \item{lmu, lsd}{
   Parameter link functions applied to the
   mean and standard deviation.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imu, isd, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -44,6 +47,7 @@ dcennormal1(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
   The object is used by modelling functions such as \code{\link{vglm}},
   and \code{\link{vgam}}.
 
+
 }
 \references{
   Harter, H. L. and Moore, A. H. (1966)
@@ -62,28 +66,28 @@ dcennormal1(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
 
 
   With no censoring at all (the default), it is better (and
-  equivalent) to use \code{\link{normal1}}.
+  equivalent) to use \code{\link{uninormal}}.
 
 
 }
 
 \seealso{
-  \code{\link{normal1}},
-  \code{\link{cennormal1}},
+  \code{\link{uninormal}},
+  \code{\link{cennormal}},
   \code{\link{tobit}}.
 
 
 }
-\examples{\dontrun{# Repeat the simulations described in Harter and Moore (1966)
+\examples{\dontrun{ # Repeat the simulations described in Harter and Moore (1966)
 SIMS <- 100  # Number of simulations (change this to 1000)
 mu.save <- sd.save <- rep(NA, len = SIMS)
 r1 <- 0; r2 <- 4; nn <- 20  
-for(sim in 1:SIMS) {
+for (sim in 1:SIMS) {
   y <- sort(rnorm(nn))
   y <- y[(1+r1):(nn-r2)]  # Delete r1 smallest and r2 largest
-  fit <- vglm(y ~ 1, dcennormal1(r1 = r1, r2 = r2))
-  mu.save[sim] <- predict(fit)[1,1]
-  sd.save[sim] <- exp(predict(fit)[1,2])  # Assumes a log link and ~ 1
+  fit <- vglm(y ~ 1, double.cennormal(r1 = r1, r2 = r2))
+  mu.save[sim] <- predict(fit)[1, 1]
+  sd.save[sim] <- exp(predict(fit)[1, 2])  # Assumes a log link and ~ 1
 }
 c(mean(mu.save), mean(sd.save))  # Should be c(0,1)
 c(sd(mu.save), sd(sd.save))
@@ -91,7 +95,7 @@ c(sd(mu.save), sd(sd.save))
 
 # Data from Sarhan and Greenberg (1962); MLEs are mu = 9.2606, sd = 1.3754
 strontium90 <- data.frame(y = c(8.2, 8.4, 9.1, 9.8, 9.9))
-fit <- vglm(y ~ 1, dcennormal1(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
+fit <- vglm(y ~ 1, double.cennormal(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
diff --git a/man/dexpbinomial.Rd b/man/double.expbinomial.Rd
similarity index 88%
rename from man/dexpbinomial.Rd
rename to man/double.expbinomial.Rd
index 92b0406..e7a848c 100644
--- a/man/dexpbinomial.Rd
+++ b/man/double.expbinomial.Rd
@@ -1,5 +1,5 @@
-\name{dexpbinomial}
-\alias{dexpbinomial}
+\name{double.expbinomial}
+\alias{double.expbinomial}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Double Exponential Binomial Distribution Family Function }
 \description{
@@ -7,10 +7,11 @@
   maximum likelihood estimation.
   The two parameters here are the mean and dispersion parameter.
 
+
 }
 \usage{
-dexpbinomial(lmean = "logit", ldispersion = "logit",
-             idispersion = 0.25, zero = 2)
+double.expbinomial(lmean = "logit", ldispersion = "logit",
+                   idispersion = 0.25, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,12 +21,14 @@ dexpbinomial(lmean = "logit", ldispersion = "logit",
   See \code{\link{Links}} for more choices.
   The defaults cause the parameters to be restricted to \eqn{(0,1)}. 
 
+
   }
   \item{idispersion}{ 
   Initial value for the dispersion parameter.
   If given, it must be in range, and is recycled to the necessary length.
   Use this argument if convergence failure occurs.
 
+
   }
   \item{zero}{ 
   An integer specifying which
@@ -35,6 +38,7 @@ dexpbinomial(lmean = "logit", ldispersion = "logit",
   To model both parameters as functions of the covariates assign
   \code{zero = NULL}.
 
+
   }
 }
 \details{
@@ -116,8 +120,8 @@ dexpbinomial(lmean = "logit", ldispersion = "logit",
 
 }
 \examples{
-# This example mimics the example in Efron (1986). The results here
-# differ slightly.
+# This example mimics the example in Efron (1986).
+# The results here differ slightly.
 
 # Scale the variables
 toxop <- transform(toxop,
@@ -126,9 +130,9 @@ toxop <- transform(toxop,
                    sN = scale(ssize))            # (6.2)
 
 # A fit similar (should be identical) to Section 6 of Efron (1986).
-# But does not use poly(), and M=1.25 here, as in (5.3)
-cmlist <- list("(Intercept)" = diag(2),
-               "I(srainfall)" = rbind(1,0),
+# But does not use poly(), and M = 1.25 here, as in (5.3)
+cmlist <- list("(Intercept)"    = diag(2),
+               "I(srainfall)"   = rbind(1,0),
                "I(srainfall^2)" = rbind(1,0),
                "I(srainfall^3)" = rbind(1,0),
                "I(sN)" = rbind(0,1),
@@ -136,7 +140,7 @@ cmlist <- list("(Intercept)" = diag(2),
 fit <- vglm(cbind(phat, 1 - phat) * ssize ~
             I(srainfall) + I(srainfall^2) + I(srainfall^3) +
             I(sN) + I(sN^2),
-            dexpbinomial(ldisp = elogit(min = 0, max = 1.25),
+            double.expbinomial(ldisp = elogit(min = 0, max = 1.25),
                          idisp = 0.2, zero = NULL),
             toxop, trace = TRUE, constraints = cmlist)
 
@@ -145,12 +149,12 @@ coef(fit, matrix = TRUE)
 head(fitted(fit))
 summary(fit)
 vcov(fit)
-sqrt(diag(vcov(fit))) # Standard errors
+sqrt(diag(vcov(fit)))  # Standard errors
 
 # Effective sample size (not quite the last column of Table 1)
 head(predict(fit))
 Dispersion <- elogit(predict(fit)[,2], min = 0, max = 1.25, inverse = TRUE)
-c(round(weights(fit, type = "prior") * Dispersion, dig = 1))
+c(round(weights(fit, type = "prior") * Dispersion, digits = 1))
 
 
 # Ordinary logistic regression (gives same results as (6.5))
@@ -165,18 +169,18 @@ cmlist2 <- list("(Intercept)"                 = diag(2),
                 "poly(sN, degree = 2)"        = rbind(0, 1))
 fit2 <- vglm(cbind(phat, 1 - phat) * ssize ~
              poly(srainfall, degree = 3) + poly(sN, degree = 2),
-             dexpbinomial(ldisp = elogit(min = 0, max = 1.25),
+             double.expbinomial(ldisp = elogit(min = 0, max = 1.25),
                           idisp = 0.2, zero = NULL),
              toxop, trace = TRUE, constraints = cmlist2)
 \dontrun{ par(mfrow = c(1, 2))
-plotvgam(fit2, se = TRUE, lcol = "blue", scol = "red")  # Cf. Figure 1
+plotvgam(fit2, se = TRUE, lcol = "blue", scol = "orange")  # Cf. Figure 1
 
 # Cf. Figure 1(a)
 par(mfrow = c(1,2))
 ooo <- with(toxop, sort.list(rainfall))
 with(toxop, plot(rainfall[ooo], fitted(fit2)[ooo], type = "l",
                  col = "blue", las = 1, ylim = c(0.3, 0.65)))
-with(toxop, points(rainfall[ooo], fitted(ofit)[ooo], col = "red",
+with(toxop, points(rainfall[ooo], fitted(ofit)[ooo], col = "orange",
                    type = "b", pch = 19))
 
 # Cf. Figure 1(b)
diff --git a/man/eexpUC.Rd b/man/eexpUC.Rd
index cc00cdc..1641d74 100644
--- a/man/eexpUC.Rd
+++ b/man/eexpUC.Rd
@@ -16,7 +16,7 @@
 \usage{
 deexp(x, rate = 1, log = FALSE)
 peexp(q, rate = 1, log = FALSE)
-qeexp(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6)
+qeexp(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6)
 reexp(n, rate = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -31,7 +31,7 @@ reexp(n, rate = 1)
 
 
   }
-  \item{Maxit_nr, Tol_nr}{
+  \item{Maxit.nr, Tol.nr}{
   See \code{\link{deunif}}.
 
 
@@ -107,7 +107,7 @@ sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
 yy <- seq(-0, 4, len = nn)
 plot(yy, deexp(yy),  col = "blue", ylim = 0:1, xlab = "y", ylab = "g(y)",
      type = "l", main = "g(y) for Exp(1); dotted green is f(y) = dexp(y)")
-lines(yy, dexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
+lines(yy, dexp(yy), col = "darkgreen", lty = "dotted", lwd = 2)  # 'original'
 
 plot(yy, peexp(yy), type = "l", col = "blue", ylim = 0:1,
      xlab = "y", ylab = "G(y)", main = "G(y) for Exp(1)")
@@ -129,13 +129,13 @@ lines(yy, pexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) }
 %myrate <- 8
 %yy <- rexp(nn, rate = myrate)
 %(myexp <- qeexp(my_p, rate = myrate))
-%sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
-%peexp(-Inf, rate = myrate)     #  Should be 0
-%peexp( Inf, rate = myrate)     #  Should be 1
-%peexp(mean(yy), rate = myrate) #  Should be 0.5
-%abs(qeexp(0.5, rate = myrate) - mean(yy)) #  Should be 0
-%abs(peexp(myexp, rate = myrate) - my_p) #  Should be 0
-%integrate(f = deexp, lower = -1, upper = Inf, rate = myrate) #  Should be 1
+%sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
+%peexp(-Inf, rate = myrate)      #  Should be 0
+%peexp( Inf, rate = myrate)      #  Should be 1
+%peexp(mean(yy), rate = myrate)  #  Should be 0.5
+%abs(qeexp(0.5, rate = myrate) - mean(yy))  #  Should be 0
+%abs(peexp(myexp, rate = myrate) - my_p)  #  Should be 0
+%integrate(f = deexp, lower = -1, upper = Inf, rate = myrate)  #  Should be 1
 
 
 
diff --git a/man/enormUC.Rd b/man/enormUC.Rd
index 23dcbfb..c3fbeff 100644
--- a/man/enormUC.Rd
+++ b/man/enormUC.Rd
@@ -16,7 +16,7 @@
 \usage{
 denorm(x, mean = 0, sd = 1, log = FALSE)
 penorm(q, mean = 0, sd = 1, log = FALSE)
-qenorm(p, mean = 0, sd = 1, Maxit_nr = 10, Tol_nr = 1.0e-6)
+qenorm(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6)
 renorm(n, mean = 0, sd = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -31,7 +31,7 @@ renorm(n, mean = 0, sd = 1)
 
 
   }
-  \item{Maxit_nr, Tol_nr}{
+  \item{Maxit.nr, Tol.nr}{
   See \code{\link{deunif}}.
 
 
@@ -102,27 +102,27 @@ very close to 0 or 1.
 \examples{
 my_p <- 0.25; y <- rnorm(nn <- 1000)
 (myexp <- qenorm(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
 
 # Non-standard normal
 mymean <- 1; mysd <- 2
 yy <- rnorm(nn, mymean, mysd)
 (myexp <- qenorm(my_p, mymean, mysd))
-sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
-penorm(-Inf, mymean, mysd)     #  Should be 0
-penorm( Inf, mymean, mysd)     #  Should be 1
-penorm(mean(yy), mymean, mysd) #  Should be 0.5
-abs(qenorm(0.5, mymean, mysd) - mean(yy)) #  Should be 0
-abs(penorm(myexp, mymean, mysd) - my_p)   #  Should be 0
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
+penorm(-Inf, mymean, mysd)      #  Should be 0
+penorm( Inf, mymean, mysd)      #  Should be 1
+penorm(mean(yy), mymean, mysd)  #  Should be 0.5
+abs(qenorm(0.5, mymean, mysd) - mean(yy))  #  Should be 0
+abs(penorm(myexp, mymean, mysd) - my_p)    #  Should be 0
 integrate(f = denorm, lower = -Inf, upper = Inf,
-          mymean, mysd) #  Should be 1
+          mymean, mysd)  #  Should be 1
 
 \dontrun{
 par(mfrow = c(2, 1))
 yy <- seq(-3, 3, len = nn)
 plot(yy, denorm(yy), type = "l", col="blue", xlab = "y", ylab = "g(y)",
      main = "g(y) for N(0,1); dotted green is f(y) = dnorm(y)")
-lines(yy, dnorm(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
+lines(yy, dnorm(yy), col = "darkgreen", lty = "dotted", lwd = 2)  # 'original'
 
 plot(yy, penorm(yy), type = "l", col = "blue", ylim = 0:1,
      xlab = "y", ylab = "G(y)", main = "G(y) for N(0,1)")
diff --git a/man/erf.Rd b/man/erf.Rd
index 32109e4..0699e48 100644
--- a/man/erf.Rd
+++ b/man/erf.Rd
@@ -12,6 +12,8 @@ erf(x)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{x}{ Numeric. }
+
+
 }
 \details{
   \eqn{Erf(x)} is defined as
@@ -19,18 +21,22 @@ erf(x)
     Erf(x) = (2/sqrt(pi)) int_0^x exp(-t^2) dt}
   so that it is closely related to \code{\link[stats:Normal]{pnorm}}.
 
+
 }
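
The "closely related" remark can be made concrete via the standard
identity erf(x) = 2 * pnorm(x * sqrt(2)) - 1. A minimal numerical check
(a sketch; it assumes \code{erf()} follows the definition given above):

    library(VGAM)
    x <- seq(-2, 2, by = 0.25)
    max(abs(erf(x) - (2 * pnorm(x * sqrt(2)) - 1)))  # Should be essentially 0
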
 \value{
   Returns the value of the function evaluated at \code{x}.
 
+
 }
 \references{
 
+
 Abramowitz, M. and Stegun, I. A. (1972)
 \emph{Handbook of Mathematical Functions with Formulas,
   Graphs, and Mathematical Tables},
 New York: Dover Publications Inc.
 
+
 }
 \author{ T. W. Yee}
 \note{
@@ -38,22 +44,25 @@ New York: Dover Publications Inc.
   definition of \eqn{Erf(x)}. Although defined for complex
   arguments, this function only works for real arguments.
 
+
   The \emph{complementary error function} \eqn{erfc(x)} is defined
   as \eqn{1-erf(x)}, and is implemented by \code{erfc}.
 
+
 }
 
 \seealso{
   \code{\link[stats:Normal]{pnorm}}.
 
+
 }
 
 \examples{
 \dontrun{
-curve(erf,   -3, 3, col="red", ylab="", las=1)
-curve(pnorm, -3, 3, add=TRUE, col="blue", lty="dotted", lwd=2)
-abline(v=0, h=0, lty="dashed")
-legend("topleft", c("erf(x)", "pnorm(x)"), col=c("red", "blue"),
-       lty=c("solid", "dotted"), lwd=1:2) }
+curve(erf,   -3, 3, col = "orange", ylab = "", las = 1)
+curve(pnorm, -3, 3, add = TRUE, col = "blue", lty = "dotted", lwd = 2)
+abline(v = 0, h = 0, lty = "dashed")
+legend("topleft", c("erf(x)", "pnorm(x)"), col = c("orange", "blue"),
+       lty = c("solid", "dotted"), lwd = 1:2) }
 }
 \keyword{math}
diff --git a/man/erlang.Rd b/man/erlang.Rd
index 46a0791..2ee090f 100644
--- a/man/erlang.Rd
+++ b/man/erlang.Rd
@@ -5,6 +5,8 @@
 \description{
   Estimates the scale parameter of the Erlang distribution
   by maximum likelihood estimation.
+
+
 }
 \usage{
 erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
@@ -15,21 +17,24 @@ erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
   The shape parameter.
   The user must specify a positive integer.
 
+
   }
   \item{link}{
   Link function applied to the (positive) \eqn{scale} parameter.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for more details.
 
+
   }
 
 }
 \details{
   The Erlang distribution is a special case of the gamma distribution
-  with \emph{shape} that is a positive integer.  If \code{shape.arg=1}
+  with \emph{shape} that is a positive integer. If \code{shape.arg = 1}
   then it simplifies to the exponential distribution. As illustrated
   in the example below, the Erlang distribution is the distribution of
   the sum of \code{shape.arg} independent and identically distributed
@@ -65,9 +70,9 @@ erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
   this distribution, e.g.,
 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -85,15 +90,16 @@ New York: Wiley-Interscience, Third edition.
   \code{\link{gamma2.ab}},
   \code{\link{exponential}}.
 
+
 }
 \examples{
-rate <- exp(2); myshape = 3
+rate <- exp(2); myshape <- 3
 edata <- data.frame(y = rep(0, nn <- 1000))
-for(ii in 1:myshape)
+for (ii in 1:myshape)
   edata <- transform(edata, y = y + rexp(nn, rate = rate))
 fit <- vglm(y ~ 1, erlang(shape = myshape), edata, trace = TRUE) 
 coef(fit, matrix = TRUE)
-Coef(fit) # Answer = 1/rate
+Coef(fit)  # Answer = 1/rate
 1/rate
 summary(fit)
 }
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index d9bfe28..4e240f6 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -15,7 +15,7 @@
 \usage{
 deunif(x, min = 0, max = 1, log = FALSE)
 peunif(q, min = 0, max = 1, log = FALSE)
-qeunif(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6)
+qeunif(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6)
 reunif(n, min = 0, max = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -35,14 +35,14 @@ reunif(n, min = 0, max = 1)
 
 
   }
-  \item{Maxit_nr}{
+  \item{Maxit.nr}{
   Numeric.
   Maximum number of Newton-Raphson iterations allowed.
   A warning is issued if convergence is not obtained for all \code{p}
   values.
 
   }
-  \item{Tol_nr}{
+  \item{Tol.nr}{
   Numeric.
   Small positive value specifying the tolerance or precision to which
   the expectiles are computed.
@@ -148,11 +148,11 @@ quantile and expectile regression.
 \examples{
 my_p <- 0.25; y <- runif(nn <- 1000)
 (myexp <- qeunif(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
 # Equivalently:
 I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
 I2 <- mean(y >  myexp) * mean(-myexp + y[y >  myexp])
-I1 / (I1 + I2) # Should be my_p
+I1 / (I1 + I2)  # Should be my_p
 # Or:
 I1 <- sum( myexp - y[y <= myexp])
 I2 <- sum(-myexp + y[y >  myexp])
@@ -161,22 +161,22 @@ I2 <- sum(-myexp + y[y >  myexp])
 mymin <- 1; mymax <- 8
 yy <- runif(nn, mymin, mymax)
 (myexp <- qeunif(my_p, mymin, mymax))
-sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
 peunif(mymin, mymin, mymax)     #  Should be 0
 peunif(mymax, mymin, mymax)     #  Should be 1
 peunif(mean(yy), mymin, mymax)  #  Should be 0.5
-abs(qeunif(0.5, mymin, mymax) - mean(yy)) #  Should be 0
-abs(qeunif(0.5, mymin, mymax) - (mymin+mymax)/2) #  Should be 0
+abs(qeunif(0.5, mymin, mymax) - mean(yy))  #  Should be 0
+abs(qeunif(0.5, mymin, mymax) - (mymin+mymax)/2)  #  Should be 0
 abs(peunif(myexp, mymin, mymax) - my_p)  #  Should be 0
 integrate(f = deunif, lower = mymin - 3, upper = mymax + 3,
-          min = mymin, max = mymax) # Should be 1
+          min = mymin, max = mymax)  # Should be 1
 
 \dontrun{
 par(mfrow = c(2,1))
 yy <- seq(0.0, 1.0, len = nn)
 plot(yy, deunif(yy), type = "l", col = "blue", ylim = c(0, 2),
      xlab = "y", ylab = "g(y)", main = "g(y) for Uniform(0,1)")
-lines(yy, dunif(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
+lines(yy, dunif(yy), col = "darkgreen", lty = "dotted", lwd = 2)  # 'original'
 
 plot(yy, peunif(yy), type = "l", col = "blue", ylim = 0:1,
      xlab = "y", ylab = "G(y)", main = "G(y) for Uniform(0,1)")
diff --git a/man/expexp.Rd b/man/expexp.Rd
index d95396d..3bab67f 100644
--- a/man/expexp.Rd
+++ b/man/expexp.Rd
@@ -6,6 +6,7 @@
   Estimates the two parameters of the exponentiated exponential
   distribution by maximum likelihood estimation.
 
+
 }
 \usage{
 expexp(lshape = "loge", lscale = "loge",
@@ -19,23 +20,27 @@ expexp(lshape = "loge", lscale = "loge",
   See \code{\link{Links}} for more choices.
   The defaults ensure both parameters are positive.
 
+
   }
   \item{ishape}{
   Initial value for the \eqn{\alpha}{shape} 
   parameter. If convergence fails try setting a different
   value for this argument.
 
+
   }
   \item{iscale}{
   Initial value for the \eqn{\lambda}{scale} parameter.
   By default, an initial value is chosen internally using
   \code{ishape}.
 
+
   }
   \item{tolerance}{
   Numeric. Small positive value for testing whether values
   are close enough to 1 and 2.
 
+
   }
   \item{zero}{ An integer-valued vector specifying which
   linear/additive predictors are modelled as intercepts only.
@@ -149,8 +154,8 @@ bbearings <- c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
 fit <- vglm(bbearings ~ 1, fam = expexp(iscale = 0.05, ish = 5),
             trace = TRUE, maxit = 300)
 coef(fit, matrix = TRUE)
-Coef(fit)   # Authors get c(shape=5.2589, scale=0.0314)
-logLik(fit) # Authors get -112.9763
+Coef(fit)    # Authors get c(shape=5.2589, scale=0.0314)
+logLik(fit)  # Authors get -112.9763
 
 
 # Failure times of the airconditioning system of an airplane
@@ -160,8 +165,8 @@ acplane <- c(23, 261, 87, 7, 120, 14, 62, 47,
 fit <- vglm(acplane ~ 1, fam = expexp(ishape = 0.8, isc = 0.15),
             trace = TRUE, maxit = 99)
 coef(fit, matrix = TRUE)
-Coef(fit)   # Authors get c(shape=0.8130, scale=0.0145)
-logLik(fit) # Authors get log-lik -152.264
+Coef(fit)    # Authors get c(shape=0.8130, scale=0.0145)
+logLik(fit)  # Authors get log-lik -152.264
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/expexp1.Rd b/man/expexp1.Rd
index 69b3bf8..539f8e3 100644
--- a/man/expexp1.Rd
+++ b/man/expexp1.Rd
@@ -6,6 +6,7 @@
   Estimates the two parameters of the exponentiated exponential
   distribution by maximizing a profile (concentrated) likelihood.
 
+
 }
 \usage{
 expexp1(lscale = "loge", iscale = NULL, ishape = 1)
@@ -16,16 +17,19 @@ expexp1(lscale = "loge", iscale = NULL, ishape = 1)
   Parameter link function for the (positive) \eqn{\lambda}{scale} parameter.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iscale}{
   Initial value for the \eqn{\lambda}{scale} parameter.
   By default, an initial value is chosen internally using \code{ishape}.
 
+
   }
   \item{ishape}{
   Initial value for the \eqn{\alpha}{shape} parameter. If convergence
   fails try setting a different value for this argument.
 
+
   }
 }
 \details{
@@ -102,10 +106,10 @@ bbearings <- data.frame(y = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
 68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
 128.04, 173.40))
 fit <- vglm(y ~ 1, expexp1(ishape = 4), bbearings, trace = TRUE,
-           maxit = 50, checkwz = FALSE)
+            maxit = 50, checkwz = FALSE)
 coef(fit, matrix = TRUE)
-Coef(fit) # Authors get c(0.0314, 5.2589) with log-lik -112.9763
-fit@misc$shape    # Estimate of shape
+Coef(fit)  # Authors get c(0.0314, 5.2589) with log-lik -112.9763
+fit@misc$shape  # Estimate of shape
 logLik(fit)
 
 
@@ -114,10 +118,10 @@ acplane <- data.frame(y = c(23, 261, 87, 7, 120, 14, 62, 47,
 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
 71, 11, 14, 11, 16, 90, 1, 16, 52, 95))
 fit <- vglm(y ~ 1, expexp1(ishape = 0.8), acplane, trace = TRUE,
-           maxit = 50, checkwz = FALSE)
+            maxit = 50, checkwz = FALSE)
 coef(fit, matrix = TRUE)
-Coef(fit) # Authors get c(0.0145, 0.8130) with log-lik -152.264
-fit@misc$shape # Estimate of shape
+Coef(fit)  # Authors get c(0.0145, 0.8130) with log-lik -152.264
+fit@misc$shape  # Estimate of shape
 logLik(fit)
 }
 \keyword{models}
diff --git a/man/expgeometric.Rd b/man/expgeometric.Rd
index dccb109..603353a 100644
--- a/man/expgeometric.Rd
+++ b/man/expgeometric.Rd
@@ -54,6 +54,7 @@ expgeometric(lscale = "loge", lshape = "logit",
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{
   Adamidis, K., Loukas, S. (1998).
@@ -61,6 +62,7 @@ expgeometric(lscale = "loge", lshape = "logit",
   \emph{Statistics and Probability Letters},
   \bold{39}, 35--42.
 
+
 }
 \author{ J. G. Lauder and T. W. Yee }
 \note{
@@ -79,9 +81,9 @@ expgeometric(lscale = "loge", lshape = "logit",
 }
 \examples{
 \dontrun{
-scale = exp(2); shape = logit(-1, inverse = TRUE);
-edata = data.frame(y = rexpgeom(n = 2000, scale = scale, shape = shape))
-fit = vglm(y ~ 1, expgeometric, edata, trace = TRUE)
+scale <- exp(2); shape <- logit(-1, inverse = TRUE)
+edata <- data.frame(y = rexpgeom(n = 2000, scale = scale, shape = shape))
+fit <- vglm(y ~ 1, expgeometric, edata, trace = TRUE)
 c(with(edata, mean(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/expgeometricUC.Rd b/man/expgeometricUC.Rd
index cc91cc2..a2e2492 100644
--- a/man/expgeometricUC.Rd
+++ b/man/expgeometricUC.Rd
@@ -9,6 +9,7 @@
   Density, distribution function, quantile function and random
   generation for the exponential geometric distribution.
 
+
 }
 \usage{
 dexpgeom(x, scale = 1, shape, log = FALSE)
@@ -43,11 +44,13 @@ rexpgeom(n, scale = 1, shape)
   for estimating the parameters, 
   for the formula of the probability density function and other details.
 
+
 }
 \note{
   We define \code{scale} as the reciprocal of the scale parameter
   used by Adamidis and Loukas (1998).
 
+
 }
 \seealso{
   \code{\link{expgeometric}},
@@ -58,20 +61,20 @@ rexpgeom(n, scale = 1, shape)
 }
 \examples{
 \dontrun{
-shape = 0.5; scale = 1; nn = 501
-x = seq(-0.10, 3.0, len = nn)
+shape <- 0.5; scale <- 1; nn <- 501
+x <- seq(-0.10, 3.0, len = nn)
 plot(x, dexpgeom(x, scale, shape), type = "l", las = 1, ylim = c(0, 2),
      ylab = paste("[dp]expgeom(shape = ", shape, ", scale = ", scale, ")"),
      col = "blue", cex.main = 0.8,
      main = "Blue is density, red is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
 lines(x, pexpgeom(x, scale, shape), col = "red")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qexpgeom(probs, scale, shape)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qexpgeom(probs, scale, shape)
 lines(Q, dexpgeom(Q, scale, shape), col = "purple", lty = 3, type = "h")
 lines(Q, pexpgeom(Q, scale, shape), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pexpgeom(Q, scale, shape) - probs)) # Should be 0
+max(abs(pexpgeom(Q, scale, shape) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/expint.Rd b/man/expint.Rd
new file mode 100644
index 0000000..5b9cad1
--- /dev/null
+++ b/man/expint.Rd
@@ -0,0 +1,94 @@
+\name{expint}
+\alias{expint}
+\alias{expexpint}
+\alias{expint.E1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+The Exponential Integral and Variants
+
+}
+\description{
+  Computes the exponential integral \eqn{Ei(x)} for real values,
+  as well as \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)} and
+  \eqn{E_1(x)}.
+
+
+}
+\usage{
+expint(x)
+expexpint(x)
+expint.E1(x)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x}{
+  Numeric. Ideally a vector of positive reals.
+
+
+}
+}
+\details{
+  The exponential integral \eqn{Ei(x)} is the Cauchy principal value
+  of the integral of
+  \eqn{exp(t) / t}
+  from \eqn{-\infty}{-Inf} to \eqn{x}, for positive real \eqn{x}.
+  The function \eqn{E_1(x)} is the integral of
+  \eqn{exp(-t) / t}
+  from \eqn{x} to infinity, for positive real \eqn{x}.
+
+
+}
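
A small numerical sanity check of these definitions (a sketch; it assumes
\code{expint()}, \code{expexpint()} and \code{expint.E1()} behave as
documented above, and uses R's \code{integrate()}):

    library(VGAM)
    # E_1(2) as a direct numerical integral of exp(-t)/t from 2 to infinity
    integrate(function(t) exp(-t) / t, lower = 2, upper = Inf)$value
    expint.E1(2)                        # Should agree closely
    expexpint(2) - exp(-2) * expint(2)  # Should be essentially 0
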
+\value{
+  Function \code{expint(x)} returns \eqn{Ei(x)},
+  function \code{expexpint(x)} returns \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)},
+  function \code{expint.E1(x)} returns \eqn{E_1(x)}.
+
+
+}
+\references{
+
+\url{http://www.netlib.org/specfun/ei}.
+
+
+
+}
+\author{
+T. W. Yee has simply written a small wrapper function to call the
+above FORTRAN code.
+
+
+}
+\note{
+This function has not been tested thoroughly.
+
+
+}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+  \code{\link[base:log]{log}},
+  \code{\link[base:log]{exp}}.
+
+
+}
+\examples{ \dontrun{
+par(mfrow = c(2, 2))
+curve(expint, 0.01, 2, xlim = c(0, 2), ylim = c(-3, 5),
+      las = 1, col = "orange")
+abline(v = (-3):5, h = (-4):5, lwd = 2, lty = "dotted", col = "gray")
+abline(h = 0, v = 0, lty = "dashed", col = "blue")
+
+curve(expexpint, 0.01, 2, xlim = c(0, 2), ylim = c(-3, 2),
+      las = 1, col = "orange")
+abline(v = (-3):2, h = (-4):5, lwd = 2, lty = "dotted", col = "gray")
+abline(h = 0, v = 0, lty = "dashed", col = "blue")
+
+curve(expint.E1, 0.01, 2, xlim = c(0, 2), ylim = c(0, 5),
+      las = 1, col = "orange")
+abline(v = (-3):2, h = (-4):5, lwd = 2, lty = "dotted", col = "gray")
+abline(h = 0, v = 0, lty = "dashed", col = "blue")
+}
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{math}
diff --git a/man/explink.Rd b/man/explink.Rd
index df8ed3f..7586535 100644
--- a/man/explink.Rd
+++ b/man/explink.Rd
@@ -72,7 +72,7 @@ explink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FA
 
 \note{
   This function has particular use for computing quasi-variances when
-  used with \code{\link{rcim}} and \code{\link{normal1}}.
+  used with \code{\link{rcim}} and \code{\link{uninormal}}.
 
 
   Numerical instability may occur when \code{theta} is
@@ -87,13 +87,13 @@ explink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FA
     \code{\link{loge}},
     \code{\link{rcim}},
     \code{\link{Qvar}},
-    \code{\link{normal1}}.
+    \code{\link{uninormal}}.
 
 }
 \examples{
 theta <- rnorm(30)
 explink(theta)
-max(abs(explink(explink(theta), inverse = TRUE) - theta)) # Should be 0
+max(abs(explink(explink(theta), inverse = TRUE) - theta))  # Should be 0
 }
 \keyword{math}
 \keyword{models}
diff --git a/man/explogarithmicUC.Rd b/man/explogUC.Rd
similarity index 89%
rename from man/explogarithmicUC.Rd
rename to man/explogUC.Rd
index 266c13c..9e640fe 100644
--- a/man/explogarithmicUC.Rd
+++ b/man/explogUC.Rd
@@ -28,6 +28,7 @@ rexplog(n, scale = 1, shape)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -37,37 +38,40 @@ rexplog(n, scale = 1, shape)
   \code{qexplog} gives the quantile function, and
   \code{rexplog} generates random deviates.
 
+
 }
 \author{ J. G. Lauder and T. W. Yee }
 \details{
-  See \code{\link{explogarithmic}}, the \pkg{VGAM} family function
+  See \code{\link{explogff}}, the \pkg{VGAM} family function
   for estimating the parameters, 
   for the formula of the probability density function and other details.
 
+
 }
 \note{
   We define \code{scale} as the reciprocal of the scale parameter
   used by Tahmasabi and Rezaei (2008).
 
+
 }
 \seealso{
-  \code{\link{explogarithmic}},
+  \code{\link{explogff}},
   \code{\link{exponential}}.  
 
 
 }
 \examples{
 \dontrun{
-shape = 0.5; scale = 2; nn = 501
-x = seq(-0.50, 6.0, len = nn)
+shape <- 0.5; scale <- 2; nn <- 501
+x <- seq(-0.50, 6.0, len = nn)
 plot(x, dexplog(x, scale, shape), type = "l", las = 1, ylim = c(0, 1.1),
      ylab = paste("[dp]explog(shape = ", shape, ", scale = ", scale, ")"),
      col = "blue", cex.main = 0.8,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
 lines(x, pexplog(x, scale, shape), col = "orange")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qexplog(probs, scale, shape = shape)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qexplog(probs, scale, shape = shape)
 lines(Q, dexplog(Q, scale, shape = shape), col = "purple", lty = 3, type = "h")
 lines(Q, pexplog(Q, scale, shape = shape), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
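A quantile/distribution round-trip check in the spirit of the other [dpqr] pages could be appended here (a sketch; it reuses the scale and shape values above and assumes the scale-then-shape argument order shown in the example):

probs <- seq(0.1, 0.9, by = 0.1)
max(abs(pexplog(qexplog(probs, scale, shape = shape),
                scale, shape = shape) - probs))  # Should be 0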
diff --git a/man/explogarithmic.Rd b/man/explogff.Rd
similarity index 84%
rename from man/explogarithmic.Rd
rename to man/explogff.Rd
index 3235c1d..2b5ba57 100644
--- a/man/explogarithmic.Rd
+++ b/man/explogff.Rd
@@ -1,5 +1,5 @@
-\name{explogarithmic}
-\alias{explogarithmic}
+\name{explogff}
+\alias{explogff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Exponential Logarithmic Distribution Family Function}
 \description{
@@ -8,9 +8,9 @@
 
 }
 \usage{
-explogarithmic(lscale = "loge", lshape = "logit",
-               iscale = NULL,   ishape = NULL,
-               tol12 = 1e-05, zero = 1, nsimEIM = 400)
+explogff(lscale = "loge", lshape = "logit",
+         iscale = NULL,   ishape = NULL,
+         tol12 = 1e-05, zero = 1, nsimEIM = 400)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -52,6 +52,7 @@ explogarithmic(lscale = "loge", lshape = "logit",
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{
  Tahmasbi, R. and Rezaei, S. (2008).
@@ -59,6 +60,7 @@ explogarithmic(lscale = "loge", lshape = "logit",
   \emph{Computational Statistics and Data Analysis},
   \bold{52}, 3889--3901.
  
+ 
 }
 \author{ J. G. Lauder and T. W. Yee }
 \note{
@@ -78,9 +80,9 @@ explogarithmic(lscale = "loge", lshape = "logit",
 
 }
 \examples{
-\dontrun{ scale = exp(2); shape = logit(-1, inverse = TRUE);
-edata = data.frame(y = rexplog(n = 2000, scale = scale, shape = shape))
-fit = vglm(y ~ 1, explogarithmic, edata, trace = TRUE)
+\dontrun{ scale <- exp(2); shape <- logit(-1, inverse = TRUE)
+edata <- data.frame(y = rexplog(n = 2000, scale = scale, shape = shape))
+fit <- vglm(y ~ 1, explogff, edata, trace = TRUE)
 c(with(edata, median(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/exponential.Rd b/man/exponential.Rd
index 8e1ea9f..1786313 100644
--- a/man/exponential.Rd
+++ b/man/exponential.Rd
@@ -52,9 +52,9 @@ exponential(link = "loge", location = 0, expected = TRUE,
 }
 \references{
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -78,7 +78,7 @@ New York: Wiley-Interscience, Third edition.
     \code{\link{amlexponential}},
     \code{\link{laplace}},
     \code{\link{expgeometric}},
-    \code{\link{explogarithmic}},
+    \code{\link{explogff}},
     \code{\link{poissonff}},
     \code{\link{mix2exp}},
     \code{\link{freund61}}.
diff --git a/man/exppoisson.Rd b/man/exppoisson.Rd
index 8dc698c..dc10113 100644
--- a/man/exppoisson.Rd
+++ b/man/exppoisson.Rd
@@ -17,6 +17,7 @@ exppoisson(llambda = "loge", lbetave = "loge",
   Link function for the two positive parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{ilambda, ibetave}{
   Numeric.
@@ -24,10 +25,12 @@ exppoisson(llambda = "loge", lbetave = "loge",
   Currently this function is not intelligent enough to
   obtain better initial values.
 
+
   }
   \item{zero}{
   See \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -55,12 +58,14 @@ exppoisson(llambda = "loge", lbetave = "loge",
 \section{Warning }{
   This \pkg{VGAM} family function does not work properly!
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{
  Kus, C. (2007).
@@ -82,10 +87,10 @@ exppoisson(llambda = "loge", lbetave = "loge",
 }
 \examples{
 \dontrun{
-lambda = exp(1); betave = exp(2)
-rdata = data.frame(y = rexppois(n = 1000, lambda, betave))
+lambda <- exp(1); betave <- exp(2)
+rdata <- data.frame(y = rexppois(n = 1000, lambda, betave))
 library(hypergeo)
-fit = vglm(y ~ 1, exppoisson, rdata, trace = TRUE)
+fit <- vglm(y ~ 1, exppoisson, rdata, trace = TRUE)
 c(with(rdata, mean(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/exppoissonUC.Rd b/man/exppoissonUC.Rd
index ccda42f..ad4feea 100644
--- a/man/exppoissonUC.Rd
+++ b/man/exppoissonUC.Rd
@@ -26,6 +26,7 @@ rexppois(n, lambda, betave = 1)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -35,6 +36,7 @@ rexppois(n, lambda, betave = 1)
   \code{qexppois} gives the quantile function, and
   \code{rexppois} generates random deviates.
 
+
 }
 \author{ J. G. Lauder, jamesglauder at gmail.com }
 \details{
@@ -42,6 +44,7 @@ rexppois(n, lambda, betave = 1)
   for estimating the parameters, 
   for the formula of the probability density function and other details.
 
+
 }
 %\note{
 %}
@@ -52,20 +55,20 @@ rexppois(n, lambda, betave = 1)
 }
 \examples{
 \dontrun{
-lambda = 2; betave = 2; nn = 201
-x = seq(-0.05, 1.05, len = nn)
+lambda <- 2; betave <- 2; nn <- 201
+x <- seq(-0.05, 1.05, len = nn)
 plot(x, dexppois(x, lambda, betave), type = "l", las = 1, ylim = c(0, 5),
      ylab = paste("[dp]exppoisson(lambda = ", lambda, ", betave = ", betave, ")"),
      col = "blue", cex.main = 0.8,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
 lines(x, pexppois(x, lambda, betave), col = "orange")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qexppois(probs, lambda, betave)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qexppois(probs, lambda, betave)
 lines(Q, dexppois(Q, lambda, betave), col = "purple", lty = 3, type = "h")
 lines(Q, pexppois(Q, lambda, betave), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pexppois(Q, lambda, betave) - probs)) # Should be 0
+max(abs(pexppois(Q, lambda, betave) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/felix.Rd b/man/felix.Rd
index 594b822..a87ab19 100644
--- a/man/felix.Rd
+++ b/man/felix.Rd
@@ -44,6 +44,7 @@ felix(link = elogit(min = 0, max = 0.5), imethod = 1)
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{
 
@@ -64,9 +65,9 @@ Boston: Birkhauser.
 
 }
 \examples{
-fdata <- data.frame(y = 2*rpois(n = 200, 1) + 1) # Not real data!
-fit <- vglm(y ~ 1, felix, fdata, trace = TRUE, crit = "c")
-coef(fit, matrix=TRUE)
+fdata <- data.frame(y = 2 * rpois(n = 200, 1) + 1)  # Not real data!
+fit <- vglm(y ~ 1, felix, fdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
 }
diff --git a/man/felixUC.Rd b/man/felixUC.Rd
index 6c5908e..18149b9 100644
--- a/man/felixUC.Rd
+++ b/man/felixUC.Rd
@@ -27,7 +27,7 @@ dfelix(x, a = 0.25, log = FALSE)
     }
   \item{log}{
   Logical.
-  If \code{log=TRUE} then the logarithm of the density is returned.
+  If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
 
@@ -45,21 +45,24 @@ dfelix(x, a = 0.25, log = FALSE)
   for estimating the parameter,
   for the formula of the probability density function and other details.
 
+
 }
 \section{Warning }{
   The default value of \code{a} is subjective.
 
+
 }
 \seealso{
   \code{\link{felix}}.
 
+
 }
 \examples{
 \dontrun{
-a = 0.25; x = 1:15
-plot(x, dfelix(x, a), type="h", las=1, col="blue",
-     ylab=paste("dfelix(a=", a, ")"),
-     main="Felix density function")
+a <- 0.25; x <- 1:15
+plot(x, dfelix(x, a), type = "h", las = 1, col = "blue",
+     ylab = paste("dfelix(a=", a, ")"),
+     main = "Felix density function")
 }
 }
 \keyword{distribution}
diff --git a/man/fff.Rd b/man/fff.Rd
index ecb1f3d..bd2b5fd 100644
--- a/man/fff.Rd
+++ b/man/fff.Rd
@@ -73,9 +73,9 @@ fff(link = "loge", idf1 = NULL, idf2 = NULL, nsimEIM = 100,
 }
 \references{
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 
diff --git a/man/fgm.Rd b/man/fgm.Rd
index 8b9c483..b64317c 100644
--- a/man/fgm.Rd
+++ b/man/fgm.Rd
@@ -9,11 +9,11 @@
 
 }
 \usage{
-fgm(lapar="rhobit", iapar = NULL, imethod = 1, nsimEIM = 200)
+fgm(lapar = "rhobit", iapar = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lapar, iapar, imethod, nsimEIM}{
+  \item{lapar, iapar, imethod}{
   Details at \code{\link{CommonVGAMffArguments}}.
   See \code{\link{Links}} for more link function choices.
 
@@ -55,6 +55,13 @@ Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
 Hoboken, NJ, USA: Wiley-Interscience.
 
 
+Smith, M. D. (2007)
+Invariance theorems for Fisher information.
+\emph{Communications in Statistics---Theory and Methods},
+\bold{36}(12), 2213--2222.
+
+
+
 }
 \author{ T. W. Yee }
 \note{
@@ -71,7 +78,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 \seealso{
   \code{\link{rfgm}},
-  \code{\link{frank}},
+  \code{\link{bifrankcop}},
   \code{\link{morgenstern}}.
 
 
diff --git a/man/fgmUC.Rd b/man/fgmUC.Rd
index 7a7ee66..0c2c5a1 100644
--- a/man/fgmUC.Rd
+++ b/man/fgmUC.Rd
@@ -9,6 +9,7 @@
   generation for the (one parameter) bivariate 
   Farlie-Gumbel-Morgenstern's distribution.
 
+
 }
 \usage{
 dfgm(x1, x2, alpha, log = FALSE)
@@ -49,20 +50,20 @@ rfgm(n, alpha)
 \seealso{
   \code{\link{fgm}}.
 
+
 }
 \examples{
-\dontrun{
-N = 101; x = seq(0.0, 1.0, len = N); alpha = 0.7
-ox = expand.grid(x, x)
-z = dfgm(ox[,1], ox[,2], alpha=alpha)
-contour(x, x, matrix(z, N, N), col="blue")
-z = pfgm(ox[,1], ox[,2], alpha=alpha)
-contour(x, x, matrix(z, N, N), col="blue")
+\dontrun{ N <- 101; x <- seq(0.0, 1.0, len = N); alpha <- 0.7
+ox <- expand.grid(x, x)
+zedd <- dfgm(ox[, 1], ox[, 2], alpha = alpha)
+contour(x, x, matrix(zedd, N, N), col = "blue")
+zedd <- pfgm(ox[, 1], ox[, 2], alpha = alpha)
+contour(x, x, matrix(zedd, N, N), col = "blue")
 
 plot(r <- rfgm(n = 3000, alpha = alpha), col = "blue")
 par(mfrow = c(1, 2))
-hist(r[, 1]) # Should be uniform
-hist(r[, 2]) # Should be uniform
+hist(r[, 1])  # Should be uniform
+hist(r[, 2])  # Should be uniform
 }
 }
 \keyword{distribution}
diff --git a/man/fill.Rd b/man/fill.Rd
index 05eec65..deae150 100644
--- a/man/fill.Rd
+++ b/man/fill.Rd
@@ -155,11 +155,11 @@ eyesdat = data.frame(lop = round(runif(nn), 2),
                      rop = round(runif(nn), 2),
                      age = round(rnorm(nn, 40, 10)))
 eyesdat <- transform(eyesdat,
-    mop = (lop + rop) / 2,       # Mean ocular pressure
-    op  = (lop + rop) / 2,       # Value unimportant unless plotting
-#   op  =  lop,                  # Choose this if plotting
-    eta1 = 0 - 2*lop + 0.04*age, # Linear predictor for left eye
-    eta2 = 0 - 2*rop + 0.04*age) # Linear predictor for right eye
+    mop = (lop + rop) / 2,        # Mean ocular pressure
+    op  = (lop + rop) / 2,        # Value unimportant unless plotting
+#   op  =  lop,                   # Choose this if plotting
+    eta1 = 0 - 2*lop + 0.04*age,  # Linear predictor for left eye
+    eta2 = 0 - 2*rop + 0.04*age)  # Linear predictor for right eye
 eyesdat <- transform(eyesdat,
     leye = rbinom(nn, size = 1, prob = logit(eta1, inverse = TRUE)),
     reye = rbinom(nn, size = 1, prob = logit(eta2, inverse = TRUE)))
@@ -178,7 +178,7 @@ coef(fit1, matrix = TRUE)  # Unchanged with 'xij'
 constraints(fit1)
 max(abs(predict(fit1)-predict(fit1, new = eyesdat)))  # Predicts correctly
 summary(fit1)
-\dontrun{ plotvgam(fit1, se = TRUE) # Wrong, e.g., because it plots against op, not lop.
+\dontrun{ plotvgam(fit1, se = TRUE)  # Wrong, e.g., because it plots against op, not lop.
 # So set op=lop in the above for a correct plot.
 }
 
@@ -194,16 +194,16 @@ head(model.matrix(fit2, type = "lm"))   # LM model matrix
 head(model.matrix(fit2, type = "vlm"))  # Big VLM model matrix
 coef(fit2)
 coef(fit2, matrix = TRUE)  # Unchanged with 'xij'
-max(abs(predict(fit2) - predict(fit2, new = eyesdat))) # Predicts correctly
+max(abs(predict(fit2) - predict(fit2, new = eyesdat)))  # Predicts correctly
 summary(fit2)
-\dontrun{ plotvgam(fit2, se = TRUE) # Wrong because it plots against op, not lop.
+\dontrun{ plotvgam(fit2, se = TRUE)  # Wrong because it plots against op, not lop.
 }
 
 
 # Example 3. This model uses regression splines on ocular pressure.
 # It uses a trick to ensure common basis functions.
 BS <- function(x, ...)
-  bs(c(x,...), df = 3)[1:length(x), , drop = FALSE] # trick
+  bs(c(x,...), df = 3)[1:length(x), , drop = FALSE]  # trick
 
 fit3 <- vglm(cbind(leye,reye) ~ BS(lop,rop) + age,
              family = binom2.or(exchangeable = TRUE, zero = 3),
@@ -219,9 +219,9 @@ coef(fit3)
 coef(fit3, matrix = TRUE)
 summary(fit3)
 fit3 at smart.prediction
-max(abs(predict(fit3) - predict(fit3, new = eyesdat))) # Predicts correctly
+max(abs(predict(fit3) - predict(fit3, new = eyesdat)))  # Predicts correctly
 predict(fit3, new = head(eyesdat))  # Note the 'scalar' OR, i.e., zero=3
-max(abs(head(predict(fit3)) - predict(fit3, new = head(eyesdat)))) # Should be 0
+max(abs(head(predict(fit3)) - predict(fit3, new = head(eyesdat))))  # Should be 0
 \dontrun{
 plotvgam(fit3, se = TRUE, xlab = "lop")  # Correct
 }
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
index 2035bb3..880eb30 100644
--- a/man/fisherz.Rd
+++ b/man/fisherz.Rd
@@ -105,9 +105,9 @@ y <- fisherz(theta)
 abline(v = (-1):1, h = 0, lty = 2, col = "gray") }
 
 x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01))
-fisherz(x) # Has NAs
+fisherz(x)  # Has NAs
 fisherz(x, bminvalue = -1 + .Machine$double.eps,
-           bmaxvalue =  1 - .Machine$double.eps) # Has no NAs
+           bmaxvalue =  1 - .Machine$double.eps)  # Has no NAs
 }
 \keyword{math}
 \keyword{models}
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index 2d29ac7..0f79a02 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -11,7 +11,7 @@
 
 }
 \usage{
-fittedvlm(object, matrix.arg = TRUE, ...)
+fittedvlm(object, matrix.arg = TRUE, type.fitted = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -26,6 +26,17 @@ fittedvlm(object, matrix.arg = TRUE, ...)
 
 
   }
+  \item{type.fitted}{
+  Character.
+  Some \pkg{VGAM} family functions have a \code{type.fitted} argument.
+  If so then a different type of fitted value can be returned.
+  It is recomputed from the model after convergence.
+  Note: this is an experimental feature and not all
+  \pkg{VGAM} family functions have this implemented yet.
+
+
+
+  }
   \item{\dots}{
   Currently unused.
 
@@ -90,15 +101,26 @@ Chambers, J. M. and T. J. Hastie (eds) (1992)
 \examples{
 # Categorical regression example 1
 pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-fitted(fit)
+(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+fitted(fit1)
 
 # LMS quantile regression example 2
-fit <- vgam(BMI ~ s(age, df = c(4, 2)), 
-            lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
-head(predict(fit, type = "response")) # Equal to the the following two:
-head(fitted(fit))
-predict(fit, type = "response", newdata = head(bmi.nz))
+fit2 <- vgam(BMI ~ s(age, df = c(4, 2)), 
+             lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
+head(predict(fit2, type = "response"))  # Equal to the following two:
+head(fitted(fit2))
+predict(fit2, type = "response", newdata = head(bmi.nz))
+
+# Zero-inflated example 3
+zdata <- data.frame(x2 = runif(nn <- 1000))
+zdata <- transform(zdata, pstr0.3  = logit(-0.5       , inverse = TRUE),
+                          lambda.3 =  loge(-0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda.3, pstr0 = pstr0.3))
+fit3 <- vglm(y1 ~ x2, zipoisson  (zero = NULL), data = zdata, crit = "coef")
+head(fitted(fit3, type.fitted = "mean" ))      # E(Y), which is the default
+head(fitted(fit3, type.fitted = "pobs0"))      # P(Y = 0)
+head(fitted(fit3, type.fitted = "pstr0"))      #     Prob of a structural 0
+head(fitted(fit3, type.fitted = "onempstr0"))  # 1 - prob of a structural 0
 }
 \keyword{models}
 \keyword{regression}
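Since "pstr0" and "onempstr0" are complementary by definition, a one-line sanity check could follow the zero-inflated example (a sketch, reusing fit3 from above):

max(abs(fitted(fit3, type.fitted = "pstr0") +
        fitted(fit3, type.fitted = "onempstr0") - 1))  # Should be 0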
diff --git a/man/fnormUC.Rd b/man/fnormUC.Rd
deleted file mode 100644
index 8ebd1ed..0000000
--- a/man/fnormUC.Rd
+++ /dev/null
@@ -1,76 +0,0 @@
-\name{Fnorm}
-\alias{Fnorm}
-\alias{dfnorm}
-\alias{pfnorm}
-\alias{qfnorm}
-\alias{rfnorm}
-\title{The Folded-Normal Distribution}
-\description{
-  Density, distribution function, quantile function and random
-  generation for the (generalized) folded-normal distribution.
-
-}
-\usage{
-dfnorm(x, mean = 0, sd = 1, a1 = 1, a2 = 1)
-pfnorm(q, mean = 0, sd = 1, a1 = 1, a2 = 1)
-qfnorm(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...)
-rfnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
-}
-\arguments{
-  \item{x, q}{vector of quantiles.}
-  \item{p}{vector of probabilities.}
-  \item{n}{number of observations.
-    Must be a positive integer of length 1.}
-  \item{mean, sd}{ see \code{\link[stats:Normal]{rnorm}}. }
-  \item{a1, a2}{ see \code{\link{fnormal1}}. }
-  \item{\ldots}{
-  Arguments that can be passed into \code{\link[stats]{uniroot}}.
-
-  }
-}
-\value{
-  \code{dfnorm} gives the density,
-  \code{pfnorm} gives the distribution function,
-  \code{qfnorm} gives the quantile function, and
-  \code{rfnorm} generates random deviates.
-}
-\author{ T. W. Yee }
-\details{
-  See \code{\link{fnormal1}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
-  for the formula of the probability density function and other details.
-
-}
-\note{
-  \code{qfnorm} runs very slowly because it calls
-  \code{\link[stats]{uniroot}} for each value of the argument \code{p}.
-  The solution is consequently not exact; the \code{...} can be used
-  to obtain a more accurate solution if necessary.
-
-}
-\seealso{
-  \code{\link{fnormal1}},
-  \code{\link[stats]{uniroot}}.
-
-}
-\examples{
-\dontrun{
-m <- 1.5; SD<-exp(0)
-x <- seq(-1, 4, len = 501)
-plot(x, dfnorm(x, m = m, sd = SD), type = "l", ylim = 0:1, las = 1,
-     ylab = paste("fnorm(m = ", m, ", sd = ", round(SD, dig = 3), ")"),
-     main = "Blue is density, red is cumulative distribution function",
-     sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue")
-lines(x, pfnorm(x, m = m, sd = SD), col = "red")
-abline(h = 0)
-probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qfnorm(probs, m = m, sd = SD)
-lines(Q, dfnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
-lines(Q, pfnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
-abline(h = probs, col = "purple", lty = 3)
-max(abs(pfnorm(Q, m = m, sd = SD) - probs)) # Should be 0
-}
-}
-\keyword{distribution}
-
-
diff --git a/man/foldnormUC.Rd b/man/foldnormUC.Rd
new file mode 100644
index 0000000..99659a4
--- /dev/null
+++ b/man/foldnormUC.Rd
@@ -0,0 +1,92 @@
+\name{Foldnorm}
+\alias{Foldnorm}
+\alias{dfoldnorm}
+\alias{pfoldnorm}
+\alias{qfoldnorm}
+\alias{rfoldnorm}
+\title{The Folded-Normal Distribution}
+\description{
+  Density, distribution function, quantile function and random
+  generation for the (generalized) folded-normal distribution.
+
+
+}
+\usage{
+dfoldnorm(x, mean = 0, sd = 1, a1 = 1, a2 = 1, log = FALSE)
+pfoldnorm(q, mean = 0, sd = 1, a1 = 1, a2 = 1)
+qfoldnorm(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...)
+rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
+}
+\arguments{
+  \item{x, q}{vector of quantiles.}
+  \item{p}{vector of probabilities.}
+  \item{n}{number of observations.
+    Same as \code{\link[stats:Normal]{rnorm}}.
+
+
+  }
+  \item{mean, sd}{ see \code{\link[stats:Normal]{rnorm}}. }
+  \item{a1, a2}{ see \code{\link{foldnormal}}. }
+  \item{log}{
+  Logical.
+  If \code{TRUE} then the log density is returned.
+
+
+  }
+  \item{\ldots}{
+  Arguments that can be passed into \code{\link[stats]{uniroot}}.
+
+
+  }
+}
+\value{
+  \code{dfoldnorm} gives the density,
+  \code{pfoldnorm} gives the distribution function,
+  \code{qfoldnorm} gives the quantile function, and
+  \code{rfoldnorm} generates random deviates.
+
+
+}
+\author{ T. W. Yee }
+\details{
+  See \code{\link{foldnormal}}, the \pkg{VGAM} family function
+  for estimating the parameters, 
+  for the formula of the probability density function and other details.
+
+
+}
+\note{
+  \code{qfoldnorm} runs very slowly because it calls
+  \code{\link[stats]{uniroot}} for each value of the argument \code{p}.
+  The solution is consequently not exact; the \code{...} can be used
+  to obtain a more accurate solution if necessary.
+
+
+}
+\seealso{
+  \code{\link{foldnormal}},
+  \code{\link[stats]{uniroot}}.
+
+
+}
+\examples{
+\dontrun{
+m <- 1.5; SD <- exp(0)
+x <- seq(-1, 4, len = 501)
+plot(x, dfoldnorm(x, m = m, sd = SD), type = "l", ylim = 0:1, las = 1,
+     ylab = paste("foldnorm(m = ", m, ", sd = ", round(SD, digits = 3), ")"),
+     main = "Blue is density, orange is cumulative distribution function",
+     sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue")
+lines(x, pfoldnorm(x, m = m, sd = SD), col = "orange")
+abline(h = 0)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qfoldnorm(probs, m = m, sd = SD)
+lines(Q, dfoldnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
+lines(Q, pfoldnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+max(abs(pfoldnorm(Q, m = m, sd = SD) - probs))  # Should be 0
+}
+}
+\keyword{distribution}
+
+
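Because qfoldnorm() solves each quantile with uniroot(), the note above says extra accuracy can be requested through the '...' argument; a small sketch (it assumes uniroot()'s 'tol' argument is simply forwarded):

probs <- seq(0.1, 0.9, by = 0.1)
Q <- qfoldnorm(probs, mean = 1.5, sd = 1, tol = 1e-10)  # Tighter tolerance
max(abs(pfoldnorm(Q, mean = 1.5, sd = 1) - probs))      # Should be very close to 0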
diff --git a/man/fnormal1.Rd b/man/foldnormal.Rd
similarity index 84%
rename from man/fnormal1.Rd
rename to man/foldnormal.Rd
index 52d135c..a23b742 100644
--- a/man/fnormal1.Rd
+++ b/man/foldnormal.Rd
@@ -1,5 +1,5 @@
-\name{fnormal1}
-\alias{fnormal1}
+\name{foldnormal}
+\alias{foldnormal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Folded Normal Distribution Family Function }
 \description{
@@ -7,8 +7,8 @@
 
 }
 \usage{
-fnormal1(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
-         a1 = 1, a2 = 1, nsimEIM = 500, imethod = 1, zero = NULL)
+foldnormal(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
+           a1 = 1, a2 = 1, nsimEIM = 500, imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -104,7 +104,7 @@ fnormal1(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
 \author{ Thomas W. Yee }
 \note{
   The response variable for this family function is the same as
-  \code{\link{normal1}} except positive values are required.
+  \code{\link{uninormal}} except positive values are required.
   Reasonably good initial values are needed.
   Fisher scoring using simulation is implemented.
 
@@ -123,24 +123,24 @@ fnormal1(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
 
 }
 \seealso{ 
-    \code{\link{rfnorm}},
-    \code{\link{normal1}},
+    \code{\link{rfoldnorm}},
+    \code{\link{uninormal}},
     \code{\link[stats:Normal]{dnorm}},
-    \code{\link{skewnormal1}}.
+    \code{\link{skewnormal}}.
 
 
 }
 
 \examples{
 \dontrun{ m <-  2; SD <- exp(1)
-y <- rfnorm(n <- 1000, m = m, sd = SD)
-hist(y, prob = TRUE, main = paste("fnormal1(m = ", m,
+fdata <- data.frame(y = rfoldnorm(n <- 1000, m = m, sd = SD))
+hist(with(fdata, y), prob = TRUE, main = paste("foldnormal(m = ", m,
      ", sd = ", round(SD, 2), ")"))
-fit <- vglm(y ~ 1, fam = fnormal1, trace = TRUE)
+fit <- vglm(y ~ 1, fam = foldnormal, fdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 (Cfit <- Coef(fit))
-mygrid <- seq(min(y), max(y), len = 200) # Add the fit to the histogram
-lines(mygrid, dfnorm(mygrid, Cfit[1], Cfit[2]), col = "orange")
+mygrid <- with(fdata, seq(min(y), max(y), len = 200))  # Add the fit to the histogram
+lines(mygrid, dfoldnorm(mygrid, Cfit[1], Cfit[2]), col = "orange")
 }
 }
 \keyword{models}
diff --git a/man/frank.Rd b/man/frank.Rd
index c8e6571..01fd41f 100644
--- a/man/frank.Rd
+++ b/man/frank.Rd
@@ -1,5 +1,5 @@
-\name{frank}
-\alias{frank}
+\name{bifrankcop}
+\alias{bifrankcop}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Frank's Bivariate Distribution Family Function }
 \description{
@@ -8,7 +8,7 @@
 
 }
 \usage{
-frank(lapar = "loge", iapar = 2, nsimEIM = 250)
+bifrankcop(lapar = "loge", iapar = 2, nsimEIM = 250)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -61,7 +61,7 @@ frank(lapar = "loge", iapar = 2, nsimEIM = 250)
 
 
   The default is to use Fisher scoring implemented using
-  \code{\link{rfrank}}.
+  \code{\link{rbifrankcop}}.
   For intercept-only models an alternative is to set \code{nsimEIM=NULL}
   so that a variant of Newton-Raphson is used.
 
@@ -96,16 +96,16 @@ Frank's family of bivariate distributions.
 }
 
 \seealso{
-  \code{\link{rfrank}},
+  \code{\link{rbifrankcop}},
   \code{\link{fgm}}.
 
 
 }
 \examples{
 \dontrun{
-ymat <- rfrank(n = 2000, alpha = exp(4))
+ymat <- rbifrankcop(n = 2000, alpha = exp(4))
 plot(ymat, col = "blue")
-fit <- vglm(ymat ~ 1, fam = frank, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = bifrankcop, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 vcov(fit)
diff --git a/man/frankUC.Rd b/man/frankUC.Rd
index 2f10fbc..0b3955d 100644
--- a/man/frankUC.Rd
+++ b/man/frankUC.Rd
@@ -1,8 +1,8 @@
 \name{Frank}
 \alias{Frank}
-\alias{dfrank}
-\alias{pfrank}
-\alias{rfrank}
+\alias{dbifrankcop}
+\alias{pbifrankcop}
+\alias{rbifrankcop}
 \title{Frank's Bivariate Distribution}
 \description{
   Density, distribution function, and random
@@ -10,9 +10,9 @@
 
 }
 \usage{
-dfrank(x1, x2, alpha, log = FALSE)
-pfrank(q1, q2, alpha)
-rfrank(n, alpha)
+dbifrankcop(x1, x2, alpha, log = FALSE)
+pbifrankcop(q1, q2, alpha)
+rbifrankcop(n, alpha)
 }
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
@@ -23,13 +23,16 @@ rfrank(n, alpha)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
 \value{
-  \code{dfrank} gives the density,
-  \code{pfrank} gives the distribution function, and
-  \code{rfrank} generates random deviates (a two-column matrix).
+  \code{dbifrankcop} gives the density,
+  \code{pbifrankcop} gives the distribution function, and
+  \code{rbifrankcop} generates random deviates (a two-column matrix).
+
+
 }
 \references{
 
@@ -41,32 +44,34 @@ Frank's family of bivariate distributions.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{frank}}, the \pkg{VGAM}
+  See \code{\link{bifrankcop}}, the \pkg{VGAM}
  family function for estimating the association
   parameter by maximum likelihood estimation, for the formula of the
   cumulative distribution function and other details.
 
+
 }
 %\note{
 %}
 \seealso{
-  \code{\link{frank}}.
+  \code{\link{bifrankcop}}.
+
+
 }
 \examples{
-\dontrun{
-N <- 100; alpha <- 8
-x <- seq(-0.30, 1.30, len = N)
-ox <- expand.grid(x, x)
-z <- dfrank(ox[, 1], ox[, 2], alpha = alpha)
-contour(x, x, matrix(z, N, N))
-z <- pfrank(ox[, 1], ox[, 2], alpha = alpha)
-contour(x, x, matrix(z, N, N))
+\dontrun{N <- 100; alpha <- exp(2)
+xx <- seq(-0.30, 1.30, len = N)
+ox <- expand.grid(xx, xx)
+zedd <- dbifrankcop(ox[, 1], ox[, 2], alpha = alpha)
+contour(xx, xx, matrix(zedd, N, N))
+zedd <- pbifrankcop(ox[, 1], ox[, 2], alpha = alpha)
+contour(xx, xx, matrix(zedd, N, N))
 
 alpha <- exp(4)
-plot(r <- rfrank(n = 3000, alpha = alpha))
+plot(rr <- rbifrankcop(n = 3000, alpha = alpha))
 par(mfrow = c(1, 2))
-hist(r[, 1]) # Should be uniform
-hist(r[, 2]) # Should be uniform
+hist(rr[, 1])  # Should be uniform
+hist(rr[, 2])  # Should be uniform
 }
 }
 \keyword{distribution}
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 77f5625..0419b16 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -25,15 +25,18 @@ frechet2(location = 0, lscale = "loge", lshape = logoff(offset = -2),
   Numeric. Location parameter.
   It is called \eqn{a} below.
 
+
   }
   \item{lscale, lshape}{
   Link functions for the parameters;
   see \code{\link{Links}} for more choices.
 
+
   }
   \item{iscale, ishape, zero, nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
+
   }
 
 
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index 775bcdd..d1ba520 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -73,7 +73,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 \examples{
 \dontrun{ shape <- 5
-x <- seq(-0.1, 3.5, len = 401)
+x <- seq(-0.1, 3.5, length = 401)
 plot(x, dfrechet(x, shape = shape), type = "l", ylab = "", las = 1,
      main = "Frechet density divided into 10 equal areas; orange = cdf")
 abline(h = 0, col = "blue", lty = 2)
diff --git a/man/freund61.Rd b/man/freund61.Rd
index d99d498..98932b3 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -114,12 +114,14 @@ freund61(la = "loge",  lap = "loge",  lb = "loge", lbp = "loge",
   and the initial values correspond to the MLEs of an intercept model.
   Consequently, convergence may take only one iteration.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 
 \references{
@@ -162,6 +164,7 @@ A bivariate extension of the exponential distribution.
               [alpha' * beta' - alpha * beta]/
               [alpha' * beta' * (alpha + beta)^2]. }
 
+
 }
 %\section{Warning}{
 %}
@@ -172,22 +175,23 @@ A bivariate extension of the exponential distribution.
 
 }
 \examples{
-fdata <- data.frame(y1 = rexp(nn <- 200, rate = 4))
-fdata <- transform(fdata, y2 = rexp(nn, rate = 8))
-fit <-  vglm(cbind(y1, y2) ~ 1, fam = freund61, fdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-Coef(fit)
-vcov(fit)
-head(fitted(fit))
-summary(fit)
+fdata <- data.frame(y1 = rexp(nn <- 1000, rate = exp(1)))
+fdata <- transform(fdata, y2 = rexp(nn, rate = exp(2)))
+fit1 <- vglm(cbind(y1, y2) ~ 1, fam = freund61, fdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+vcov(fit1)
+head(fitted(fit1))
+summary(fit1)
 
 # y1 and y2 are independent, so fit an independence model
-fit2 <- vglm(cbind(y1, y2) ~ 1, fam = freund61(indep = TRUE),
-             fdata, trace = TRUE)
+fit2 <- vglm(cbind(y1, y2) ~ 1, freund61(indep = TRUE),
+             data = fdata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 constraints(fit2)
-pchisq(2 * (logLik(fit)-logLik(fit2)),  # p-value
-       df = df.residual(fit2) - df.residual(fit), lower.tail = FALSE)
+pchisq(2 * (logLik(fit1) - logLik(fit2)),  # p-value
+       df = df.residual(fit2) - df.residual(fit1), lower.tail = FALSE)
+lrtest(fit1, fit2)  # Better alternative
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/fsqrt.Rd b/man/fsqrt.Rd
index 1c2cc79..05119ad 100644
--- a/man/fsqrt.Rd
+++ b/man/fsqrt.Rd
@@ -80,18 +80,18 @@ fsqrt(theta, min = 0, max = 1, mux = sqrt(2),
 
 }
 \examples{
-p = seq(0.01, 0.99, by = 0.01)
+p <- seq(0.01, 0.99, by = 0.01)
 fsqrt(p)
-max(abs(fsqrt(fsqrt(p), inverse = TRUE) - p)) # Should be 0
+max(abs(fsqrt(fsqrt(p), inverse = TRUE) - p))  # Should be 0
 
-p = c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-fsqrt(p) # Has NAs
+p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
+fsqrt(p)  # Has NAs
 
 \dontrun{
-p = seq(0.01, 0.99, by = 0.01)
+p <- seq(0.01, 0.99, by = 0.01)
 par(mfrow = c(2, 2), lwd = (mylwd <- 2))
-y = seq(-4, 4, length = 100)
-for(d in 0:1) {
+y <- seq(-4, 4, length = 100)
+for (d in 0:1) {
   matplot(p, cbind(logit(p, deriv = d), fsqrt(p, deriv = d)),
           type = "n", col = "purple", ylab = "transformation", las = 1,
           main = if (d == 0) "Some probability link functions"
@@ -108,7 +108,7 @@ for(d in 0:1) {
     abline(v = 0.5, lty = "dashed")
 }
 
-for(d in 0) {
+for (d in 0) {
   matplot(y, cbind(logit(y, deriv = d, inverse = TRUE),
                    fsqrt(y, deriv = d, inverse = TRUE)),
           type = "n", col = "purple", xlab = "transformation", ylab = "p",
@@ -130,7 +130,7 @@ par(lwd = 1)
 
 # This is lucky to converge
 fit.h <- vglm(agaaus ~ bs(altitude), binomialff(link = fsqrt(mux = 5)),
-             data = hunua, trace = TRUE)
+              data = hunua, trace = TRUE)
 \dontrun{
 plotvgam(fit.h, se = TRUE, lcol = "orange", scol = "orange",
          main = "Orange is Hunua, Blue is Waitakere") }
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
index 48df8ff..a54ff13 100644
--- a/man/gamma1.Rd
+++ b/man/gamma1.Rd
@@ -16,6 +16,7 @@ gamma1(link = "loge", zero = NULL)
   Link function applied to the (positive) \emph{shape} parameter.
   See \code{\link{Links}} for more choices and general information.
 
+
   }
   \item{zero}{
   Details at \code{\link{CommonVGAMffArguments}}.
@@ -48,9 +49,9 @@ gamma1(link = "loge", zero = NULL)
   the 1-parameter gamma distribution, e.g.,
 
   
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index ce7c9f3..16fce87 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -10,10 +10,10 @@
 \usage{
 gamma2(lmu = "loge", lshape = "loge",
        imethod = 1,  ishape = NULL,
-       parallel = FALSE, apply.parint = FALSE,
-       deviance.arg = FALSE, zero = -2)
+       parallel = FALSE, deviance.arg = FALSE, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
+% apply.parint = FALSE,
 \arguments{
   \item{lmu, lshape}{
   Link functions applied to the (positive) \emph{mu} and \emph{shape}
@@ -69,8 +69,10 @@ gamma2(lmu = "loge", lshape = "loge",
 
 
   }
-  \item{parallel, apply.parint}{
-    Details at \code{\link{CommonVGAMffArguments}}.
+  \item{parallel}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+  If \code{parallel = TRUE} then the constraint is not applied to the intercept.
+
 
 
   }
@@ -155,7 +157,7 @@ McCullagh, P. and Nelder, J. A. (1989)
   \code{\link{gamma1}} for the 1-parameter gamma distribution,
   \code{\link{gamma2.ab}} for another parameterization of
   the 2-parameter gamma distribution,
-  \code{\link{bivgamma.mckay}} for \emph{a} bivariate gamma distribution,
+  \code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution,
   \code{\link{expexp}},
   \code{\link[stats]{GammaDist}},
   \code{\link{golf}},
@@ -171,7 +173,6 @@ fit2 <- vglm(y ~ 1, gamma2, gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
 Coef(fit2)
 
-
 # Essentially a 2-parameter gamma
 gdata <- data.frame(y = rgamma(n = 500, rate = exp(1), shape = exp(2)))
 fit2 <- vglm(y ~ 1, gamma2, gdata, trace = TRUE, crit = "coef")
diff --git a/man/gamma2.ab.Rd b/man/gamma2.ab.Rd
index 22bd6fd..5829f22 100644
--- a/man/gamma2.ab.Rd
+++ b/man/gamma2.ab.Rd
@@ -16,17 +16,20 @@ gamma2.ab(lrate = "loge", lshape = "loge",
   parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{expected}{
   Logical. Use Fisher scoring? The default is yes, otherwise
   Newton-Raphson is used.
 
+
   }
   \item{irate, ishape}{
   Optional initial values for \emph{rate} and \emph{shape}.
   A \code{NULL} means a value is computed internally.
   If a failure to converge occurs, try using these arguments.
 
+
   }
   \item{zero}{
   An integer specifying which
@@ -35,6 +38,7 @@ gamma2.ab(lrate = "loge", lshape = "loge",
   The default is to model \eqn{shape} as an intercept only.
   A value \code{NULL} means neither 1 or 2.
 
+
   }
 }
 \details{
@@ -75,9 +79,9 @@ gamma2.ab(lrate = "loge", lshape = "loge",
   the 2-parameter gamma distribution, e.g.,
   
 
-  Evans, M., Hastings, N. and Peacock, B. (2000)
-  \emph{Statistical Distributions},
-  New York: Wiley-Interscience, Third edition.
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
+\emph{Statistical Distributions},
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -98,23 +102,22 @@ gamma2.ab(lrate = "loge", lshape = "loge",
   \code{\link{gamma1}} for the 1-parameter gamma distribution,
   \code{\link{gamma2}} for another parameterization of
   the 2-parameter gamma distribution,
-  \code{\link{bivgamma.mckay}} for \emph{a} bivariate gamma distribution,
+  \code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution,
   \code{\link{expexp}}.
 
 
 }
 \examples{
 # Essentially a 1-parameter gamma
-gdata <- data.frame(y = rgamma(n <- 100, shape =  exp(1)))
-fit1 <- vglm(y ~ 1, gamma1, gdata, trace = TRUE)
-fit2 <- vglm(y ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "c")
+gdata <- data.frame(y1 = rgamma(n <- 100, shape =  exp(1)))
+fit1 <- vglm(y1 ~ 1, gamma1, gdata, trace = TRUE)
+fit2 <- vglm(y1 ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
 Coef(fit2)
 
-
 # Essentially a 2-parameter gamma
-gdata <- data.frame(y = rgamma(n = 500, rate = exp(1), shape = exp(2)))
-fit2 <- vglm(y ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "c")
+gdata <- data.frame(y2 = rgamma(n = 500, rate = exp(1), shape = exp(2)))
+fit2 <- vglm(y2 ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
 Coef(fit2)
 summary(fit2)
diff --git a/man/gammahyp.Rd b/man/gammahyp.Rd
index 8c373e7..0b05bd5 100644
--- a/man/gammahyp.Rd
+++ b/man/gammahyp.Rd
@@ -5,6 +5,8 @@
 \description{
   Estimate the parameter of a gamma hyperbola bivariate distribution
   by maximum likelihood estimation.
+
+
 }
 \usage{
 gammahyp(ltheta = "loge", itheta = NULL, expected = FALSE)
@@ -15,17 +17,20 @@ gammahyp(ltheta = "loge", itheta = NULL, expected = FALSE)
   Link function applied to the (positive) parameter \eqn{\theta}{theta}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{itheta}{
   Initial value for the parameter.
   The default is to estimate it internally.
 
+
   }
   \item{expected}{
   Logical. \code{FALSE} means the Newton-Raphson (using
   the observed information matrix) algorithm, otherwise the expected
   information matrix is used (Fisher scoring algorithm).
 
+
   }
 }
 \details{
@@ -43,13 +48,19 @@ gammahyp(ltheta = "loge", itheta = NULL, expected = FALSE)
   means, which are \eqn{\theta \exp(\theta)}{theta * exp(theta)} and
   \eqn{1 + 1/\theta}{1 + 1/theta}.
 
+
   The default algorithm is Newton-Raphson because Fisher scoring tends to
   be much slower for this distribution.
+
+
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
+
+
 }
 
 \references{
@@ -59,22 +70,26 @@ Asymptotics and the theory of inference.
 \emph{Annals of Statistics},
 \bold{31}, 1695--1731.
 
+
 }
 \author{ T. W. Yee }
 \note{
   The response must be a two column matrix.
 
+
 }
 \seealso{
   \code{\link{exponential}}.
+
+
 }
 \examples{
-gdata <- data.frame(x = runif(nn <- 1000))
-gdata <- transform(gdata, theta = exp(-2+x))
+gdata <- data.frame(x2 = runif(nn <- 1000))
+gdata <- transform(gdata, theta = exp(-2 + x2))
 gdata <- transform(gdata, y1 = rexp(nn, rate = exp(-theta)/theta),
-                         y2 = rexp(nn, rate = theta) + 1)
-fit <- vglm(cbind(y1,y2) ~ x, fam = gammahyp(expected = TRUE), gdata)
-fit <- vglm(cbind(y1,y2) ~ x, fam = gammahyp, gdata, trace = TRUE, crit = "coef")
+                          y2 = rexp(nn, rate = theta) + 1)
+fit <- vglm(cbind(y1, y2) ~ x2, fam = gammahyp(expected = TRUE), gdata)
+fit <- vglm(cbind(y1, y2) ~ x2, fam = gammahyp, gdata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
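The fitted values can be compared directly against the two marginal means quoted in the details, theta * exp(theta) and 1 + 1/theta (a sketch; it assumes fitted() returns the two means in that order and reuses the simulated 'theta' column from gdata):

head(cbind(fitted(fit),
           with(gdata, cbind(theta * exp(theta), 1 + 1/theta))))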
diff --git a/man/garma.Rd b/man/garma.Rd
index 40e4ebc..463cfd7 100644
--- a/man/garma.Rd
+++ b/man/garma.Rd
@@ -186,15 +186,15 @@ gdata <- data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57,  41,  27, 78,
  2, 30, 18, 17,  28,  9, 28,  20,  17, 12,
 19, 18, 14, 23,  18, 22, 18,  19,  26, 27,
 23, 24, 35, 22,  29, 28, 17,  30,  34, 17,
-20, 49, 29, 35,  49, 25, 55,  42,  29, 16)) # See Zeger and Qaqish (1988)
+20, 49, 29, 35,  49, 25, 55,  42,  29, 16))  # See Zeger and Qaqish (1988)
 gdata <- transform(gdata, spikenum = seq(interspike))
-bvalue <- 0.1 # .Machine$double.xmin # Boundary value
+bvalue <- 0.1  # .Machine$double.xmin # Boundary value
 fit <- vglm(interspike ~ 1, trace = TRUE, data = gdata,
             garma(loge(bvalue = bvalue),
                  p = 2, coefstart = c(4, 0.3, 0.4)))
 summary(fit)
 coef(fit, matrix = TRUE)
-Coef(fit) # A bug here
+Coef(fit)  # A bug here
 \dontrun{ with(gdata, plot(interspike, ylim = c(0, 120), las = 1,
      xlab = "Spike Number", ylab = "Inter-Spike Time (ms)", col = "blue"))
 with(gdata, lines(spikenum[-(1:fit at misc$plag)], fitted(fit), col = "orange"))
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index 2991dfa..d8aa612 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -116,7 +116,7 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
 % }
 
 \seealso{
-  \code{\link{normal1}},
+  \code{\link{uninormal}},
   \code{\link{huber2}},
   \code{\link{lqnorm}},
   \code{\link{binormal}},
@@ -143,8 +143,8 @@ coef(glmfit <- glm(y2 ~ x2, data = gdata, gaussian))
 vcov(fit)
 vcov(lmfit)
 
-t(weights(fit, type = "prior"))      # Unweighted observations
-head(weights(fit, type = "working")) # Identity matrices
+t(weights(fit, type = "prior"))       # Unweighted observations
+head(weights(fit, type = "working"))  # Identity matrices
 
 # Reduced-rank VLM (rank-1)
 fit2 <- rrvglm(cbind(y1, y2, y3) ~ x2, gaussianff, data = gdata)
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index c36fb7f..68e8dae 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -102,6 +102,7 @@ Fisher information matrix for the Feller-Pareto distribution.
 
 \author{ T. W. Yee }
 \note{
+
 If the self-starting initial values fail, try experimenting
 with the initial value arguments, especially those whose
 default value is not \code{NULL}.
@@ -133,7 +134,7 @@ More improvements could be made here.
 
 \examples{
 \dontrun{
-gdata <- data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1))) # A special case!
+gdata <- data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1)))  # A special case!
 fit <- vglm(y ~ 1, genbetaII, gdata, trace = TRUE)
 fit <- vglm(y ~ 1, data = gdata, trace = TRUE,
             genbetaII(ishape1.a = 4, ishape2.p = 2.2, iscale = 7, ishape3.q = 2.3))
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index 0742075..c5dee3e 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -18,11 +18,13 @@ gengamma(lscale = "loge", ld = "loge", lk = "loge",
   \eqn{b}, \eqn{d} and \eqn{k}, respectively.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iscale, id, ik}{
   Initial value for \eqn{b}, \eqn{d} and \eqn{k}, respectively.
   The defaults mean an initial value is determined internally for each.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
@@ -31,6 +33,7 @@ gengamma(lscale = "loge", ld = "loge", lk = "loge",
   The default value means none are modelled as intercept-only terms.
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -66,6 +69,7 @@ Rayleigh          \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
   The object is used by modelling functions such as \code{\link{vglm}},
   and \code{\link{vgam}}.
 
+
 }
 \references{
   Stacy, E. W. (1962)
diff --git a/man/gengammaUC.Rd b/man/gengammaUC.Rd
index 3a3cc93..e957c19 100644
--- a/man/gengammaUC.Rd
+++ b/man/gengammaUC.Rd
@@ -11,6 +11,7 @@
   scale parameter \code{scale},
   and parameters \code{d} and \code{k}.
 
+
 }
 \usage{
 dgengamma(x, scale = 1, d = 1, k = 1, log = FALSE)
@@ -28,6 +29,7 @@ rgengamma(n, scale = 1, d = 1, k = 1)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -36,8 +38,11 @@ rgengamma(n, scale = 1, d = 1, k = 1)
   \code{pgengamma} gives the distribution function,
   \code{qgengamma} gives the quantile function, and
   \code{rgengamma} generates random deviates.
+
+
 }
 \references{
+
 Stacy, E. W. and Mihram, G. A. (1965)
 Parameter estimation for a generalized gamma distribution.
 \emph{Technometrics}, \bold{7}, 349--358.
@@ -63,15 +68,15 @@ Parameter estimation for a generalized gamma distribution.
 
 }
 \examples{
-\dontrun{ x = seq(0, 14, by = 0.01); d = 1.5; Scale = 2; k = 6
+\dontrun{ x <- seq(0, 14, by = 0.01); d <- 1.5; Scale <- 2; k <- 6
 plot(x, dgengamma(x, Scale, d, k), type = "l", col = "blue", ylim = 0:1,
-     main = "Blue is density, red is cumulative distribution function",
+     main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
 abline(h = 0, col = "blue", lty = 2)
 lines(qgengamma(seq(0.05,0.95,by = 0.05), Scale, d, k),
       dgengamma(qgengamma(seq(0.05,0.95,by = 0.05), Scale, d, k),
                 Scale, d, k), col = "purple", lty = 3, type = "h")
-lines(x, pgengamma(x, Scale, d, k), type = "l", col = "red")
+lines(x, pgengamma(x, Scale, d, k), type = "l", col = "orange")
 abline(h = 0, lty = 2) }
 }
 \keyword{distribution}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index 0e2752c..334b949 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -4,6 +4,7 @@
 \title{ Generalized Poisson distribution }
 \description{
   Estimation of the two parameters of a generalized Poisson distribution.
+
 }
 \usage{
 genpoisson(llambda = elogit(min = -1, max = 1), ltheta = "loge",
@@ -20,16 +21,19 @@ genpoisson(llambda = elogit(min = -1, max = 1), ltheta = "loge",
   The \eqn{\theta} parameter is positive, therefore the default is the
   log link.
 
+
   }
   \item{ilambda, itheta}{
   Optional initial values for \eqn{\lambda} and \eqn{\theta}.
   The default is to choose values internally.
 
+
   }
   \item{use.approx}{
   Logical. If \code{TRUE} then an approximation to the expected
   information matrix is used, otherwise Newton-Raphson is used.
 
+
   }
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
@@ -37,6 +41,7 @@ genpoisson(llambda = elogit(min = -1, max = 1), ltheta = "loge",
   If failure to converge occurs try another value
   and/or else specify a value for \code{ilambda} and/or \code{itheta}.
 
+
   }
   \item{zero}{
   An integer vector, containing the value 1 or 2.
@@ -45,6 +50,7 @@ genpoisson(llambda = elogit(min = -1, max = 1), ltheta = "loge",
   If set to \code{NULL} then both linear/additive predictors are modelled
   as functions of the explanatory variables.
 
+
   }
 }
 \details{
@@ -114,7 +120,7 @@ New York: Marcel Dekker.
 }
 \examples{
 gdata <- data.frame(x2 = runif(nn <- 200))
-gdata <- transform(gdata, y = rpois(nn, exp(2 - x2))) # Ordinary Poisson data
+gdata <- transform(gdata, y = rpois(nn, exp(2 - x2)))  # Ordinary Poisson data
 fit <- vglm(y ~ x2, genpoisson(zero = 1), gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 summary(fit)
diff --git a/man/genrayleigh.Rd b/man/genrayleigh.Rd
index a5b72c9..df0ccd0 100644
--- a/man/genrayleigh.Rd
+++ b/man/genrayleigh.Rd
@@ -81,9 +81,9 @@ genrayleigh(lshape = "loge", lscale = "loge",
 
 }
 \examples{
-shape = exp(1); scale = exp(1);
-rdata = data.frame(y = rgenray(n = 1000, shape, scale))
-fit = vglm(y ~ 1, genrayleigh, rdata, trace = TRUE)
+shape <- exp(1); scale <- exp(1)
+rdata <- data.frame(y = rgenray(n = 1000, shape, scale))
+fit <- vglm(y ~ 1, genrayleigh, rdata, trace = TRUE)
 c(with(rdata, mean(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/genrayleighUC.Rd b/man/genrayleighUC.Rd
index 92ada5a..665f552 100644
--- a/man/genrayleighUC.Rd
+++ b/man/genrayleighUC.Rd
@@ -9,6 +9,7 @@
   Density, distribution function, quantile function and random
   generation for the generalized Rayleigh distribution.
 
+
 }
 \usage{
 dgenray(x, shape, scale = 1, log = FALSE)
@@ -27,6 +28,7 @@ rgenray(n, shape, scale = 1)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -59,20 +61,20 @@ rgenray(n, shape, scale = 1)
 }
 \examples{
 \dontrun{
-shape = 0.5; scale = 1; nn = 501
-x = seq(-0.10, 3.0, len = nn)
+shape <- 0.5; scale <- 1; nn <- 501
+x <- seq(-0.10, 3.0, len = nn)
 plot(x, dgenray(x, shape, scale), type = "l", las = 1, ylim = c(0, 1.2),
      ylab = paste("[dp]genray(shape = ", shape, ", scale = ", scale, ")"),
      col = "blue", cex.main = 0.8,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
 lines(x, pgenray(x, shape, scale), col = "orange")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qgenray(probs, shape, scale)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qgenray(probs, shape, scale)
 lines(Q, dgenray(Q, shape, scale), col = "purple", lty = 3, type = "h")
 lines(Q, pgenray(Q, shape, scale), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pgenray(Q, shape, scale) - probs)) # Should be 0
+max(abs(pgenray(Q, shape, scale) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/geometric.Rd b/man/geometric.Rd
index 0f11db8..2c0b39f 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -85,9 +85,9 @@ truncgeometric(upper.limit = Inf,
 
 }
 \references{
-  Evans, M., Hastings, N. and Peacock, B. (2000)
-  \emph{Statistical Distributions},
-  New York: Wiley-Interscience, Third edition.
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
+\emph{Statistical Distributions},
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
diff --git a/man/gev.Rd b/man/gev.Rd
index a549d86..d34ffba 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -286,7 +286,7 @@ coef(fit, matrix = TRUE)
 Coef(fit)
 vcov(fit)
 vcov(fit, untransform = TRUE)
-sqrt(diag(vcov(fit))) # Approximate standard errors
+sqrt(diag(vcov(fit)))  # Approximate standard errors
 rlplot(fit)
 }
 }
diff --git a/man/golf.Rd b/man/golf.Rd
index 72ac5be..2a57832 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -28,9 +28,9 @@ golf(theta, lambda = 1, cutpoint = NULL,
   If \code{golf()} is used as the link function in
   \code{\link{cumulative}} then, if the cutpoints are known, then
   one should choose
-  \code{reverse = TRUE, parallel = TRUE, apply.parint = TRUE}.
+  \code{reverse = TRUE, parallel = FALSE ~ -1}.
   If the cutpoints are unknown, then choose
-  \code{reverse = TRUE, parallel = TRUE, apply.parint = FALSE}.
+  \code{reverse = TRUE, parallel = TRUE}.
 
 
   }
@@ -110,7 +110,7 @@ golf("p", lambda = 1, tag = TRUE)
 p <- seq(0.02, 0.98, len = 201)
 y <- golf(p, lambda = 1)
 y. <- golf(p, lambda = 1, deriv = 1)
-max(abs(golf(y, lambda = 1, inv = TRUE) - p)) # Should be 0
+max(abs(golf(y, lambda = 1, inv = TRUE) - p))  # Should be 0
 
 \dontrun{par(mfrow = c(2, 1), las = 1)
 plot(p, y, type = "l", col = "blue", main = "golf()")
@@ -133,7 +133,7 @@ gdata <- transform(gdata, cuty = Cut(y1, breaks = cutpoints))
 with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
 with(gdata, table(cuty) / sum(table(cuty)))
 fit <- vglm(cuty ~ x2 + x3, cumulative(mv = TRUE,
-           reverse = TRUE, parallel = TRUE, apply.parint = TRUE,
+           reverse = TRUE, parallel = FALSE ~ -1,
            link = golf(cutpoint = cutpoints[2:3], lambda = lambda)),
            data = gdata, trace = TRUE)
 head(depvar(fit))
diff --git a/man/gompertzUC.Rd b/man/gompertzUC.Rd
index 9c793b6..5d4469b 100644
--- a/man/gompertzUC.Rd
+++ b/man/gompertzUC.Rd
@@ -12,6 +12,7 @@
   random generation for
   the Gompertz distribution.
 
+
 }
 \usage{
 dgompertz(x, shape, scale = 1, log = FALSE)
@@ -27,6 +28,7 @@ rgompertz(n, shape, scale = 1)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
   \item{shape, scale}{positive shape and scale parameters. }
 
@@ -43,6 +45,7 @@ rgompertz(n, shape, scale = 1)
 \details{
   See \code{\link{gompertz}} for details.
 
+
 }
 %\note{
 %
@@ -56,11 +59,11 @@ rgompertz(n, shape, scale = 1)
 }
 \examples{
 probs <- seq(0.01, 0.99, by = 0.01)
-Shape <- exp(1); Scale <- exp(1);
+Shape <- exp(1); Scale <- exp(1)
 max(abs(pgompertz(qgompertz(p = probs, Shape, Scale),
-                  Shape, Scale) - probs)) # Should be 0
+                  Shape, Scale) - probs))  # Should be 0
 
-\dontrun{ x <- seq(-0.1, 1.0, by = 0.01);
+\dontrun{ x <- seq(-0.1, 1.0, by = 0.01)
 plot(x, dgompertz(x, Shape, Scale), type = "l", col = "blue", las = 1,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles",
@@ -70,7 +73,7 @@ lines(x, pgompertz(x, Shape, Scale), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qgompertz(probs, Shape, Scale)
 lines(Q, dgompertz(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
-pgompertz(Q, Shape, Scale) - probs # Should be all zero
+pgompertz(Q, Shape, Scale) - probs  # Should be all zero
 abline(h = probs, col = "purple", lty = 3) }
 }
 \keyword{distribution}
diff --git a/man/gpd.Rd b/man/gpd.Rd
index d60a2eb..d5def8b 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -216,7 +216,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   \code{\link{rgpd}},
   \code{\link{meplot}},
   \code{\link{gev}},
-  \code{\link{pareto1}},
+  \code{\link{paretoff}},
   \code{\link{vglm}},
   \code{\link{vgam}},
   \code{\link{s}}.
@@ -237,11 +237,11 @@ fit at extra$threshold # Note the threshold is stored here
 
 # Check the 90 percentile
 ii <- depvar(fit) < fitted(fit)[1, "90\%"]
-100 * table(ii) / sum(table(ii)) # Should be 90%
+100 * table(ii) / sum(table(ii))  # Should be 90%
 
 # Check the 95 percentile
 ii <- depvar(fit) < fitted(fit)[1, "95\%"]
-100 * table(ii) / sum(table(ii)) # Should be 95%
+100 * table(ii) / sum(table(ii))  # Should be 95%
 
 \dontrun{ plot(depvar(fit), col = "blue", las = 1,
                main = "Fitted 90\% and 95\% quantiles")
diff --git a/man/grain.us.Rd b/man/grain.us.Rd
index 35d2202..44688f8 100644
--- a/man/grain.us.Rd
+++ b/man/grain.us.Rd
@@ -40,7 +40,7 @@ Nested reduced-rank autoregressive models for multiple time series.
 }
 \examples{
 \dontrun{
-cgrain <- scale(grain.us, scale = FALSE) # Center the time series only
+cgrain <- scale(grain.us, scale = FALSE)  # Center the time series only
 fit <- vglm(cgrain ~ 1, rrar(Rank = c(4, 1)),
             epsilon = 1e-3, stepsize = 0.5, trace = TRUE, maxit = 50)
 summary(fit)
diff --git a/man/grc.Rd b/man/grc.Rd
index 4f3010b..dd1608a 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -10,17 +10,20 @@
 }
 \usage{
 grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
-    szero = 1, summary.arg = FALSE, h.step = 1e-04, ...)
+    str0 = 1, summary.arg = FALSE, h.step = 1e-04, ...)
 rcim(y, family = poissonff, Rank = 0, Musual = NULL,
-     weights = NULL, which.lp = 1,
-     Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
-     rprefix = "Row.", cprefix = "Col.", offset = 0,
-     szero = if (!Rank) NULL else {
-       if (Musual == 1) 1 else setdiff(1:(Musual * ncol(y)),
-                    c(1 + (1:ncol(y)) * Musual, Index.corner))
-     },
+     weights = NULL, which.linpred = 1,
+     Index.corner = ifelse(is.null(str0), 0, max(str0)) + 1:Rank,
+     rprefix = "Row.", cprefix = "Col.", iprefix = "X2.",
+     offset = 0, str0 = if (Rank) 1 else NULL,
      summary.arg = FALSE, h.step = 0.0001,
-     rbaseline = 1, cbaseline = 1, ...)
+     rbaseline = 1, cbaseline = 1, 
+     has.intercept = TRUE,
+     M = NULL,
+     rindex = 2:nrow(y),
+     cindex = 2:ncol(y),
+     iindex = 2:nrow(y),
+     ...)
 }
 %- maybe also `usage' for other objects documented here.
 \arguments{
@@ -30,6 +33,7 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
   Output from \code{table()} is acceptable; it is converted into a matrix.
   Note that \code{y} should be at least 3 by 3 in dimension.
 
+
   }
   \item{family}{
   A \pkg{VGAM} family function.
@@ -60,6 +64,7 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
   A value of 0 means no interactions (i.e., main effects only);
   each row and column is represented by an indicator variable.
 
+
   }
   \item{weights}{
   Prior weights. Fed into 
@@ -69,11 +74,11 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
 
 
   }
-  \item{which.lp}{
+  \item{which.linpred}{
   Single integer.
-  Specifies which linear predictor is modelled as the sum of
-  an intercept, row effect, column effect plus an optional interaction term.
-  It should be one value from the set \code{1:Musual}.
+  Specifies which linear predictor is modelled as the sum of an
+  intercept, row effect, column effect plus an optional interaction
+  term. It should be one value from the set \code{1:Musual}.
 
 
   }
@@ -85,19 +90,25 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
 
 
   }
-  \item{rprefix, cprefix}{ 
-  Character, for rows and columns resp.
+  \item{rprefix, cprefix, iprefix}{ 
+  Character, for the rows, columns and interactions, respectively.
   For labelling the indicator variables.
 
+
   }
   \item{offset}{ 
   Numeric. Either a matrix of the right dimension, else
   a single numeric expanded into such a matrix.
 
+
   }
-  \item{szero}{ 
-  An integer from the set \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
+  \item{str0}{ 
+  Ignored if \code{Rank = 0}, else
+  an integer from the set \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
   specifying the row that is used as the structural zero.
+  Passed into \code{\link{rrvglm.control}} if \code{Rank > 0}.
+  Set \code{str0 = NULL} for none.
+
 
   }
   \item{summary.arg}{
@@ -105,11 +116,17 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
   If \code{TRUE}, \code{y} may be the output (fitted
   object) of \code{grc()}.
 
+
   }
   \item{h.step}{
   A small positive value that is passed into
-  \code{summary.rrvglm()}. Only used when \code{summary.arg = TRUE}. }
-  \item{\dots}{ Arguments that are passed into \code{rrvglm.control()}.
+  \code{summary.rrvglm()}. Only used when \code{summary.arg = TRUE}.
+
+
+  }
+  \item{\dots}{ Arguments that are passed
+  into \code{rrvglm.control()}.
+
 
   }
   \item{Musual}{
@@ -129,6 +146,35 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
   Baseline reference levels for the rows and columns.
   Currently stored on the object but not used.
 
+
+  }
+  \item{has.intercept}{
+  Logical. Include an intercept?
+
+
+  }
+  \item{M, cindex}{
+  \eqn{M}  is the usual \pkg{VGAM} \eqn{M}, viz. the number of linear/additive
+  predictors in total.
+  Also, \code{cindex} means column index, and these point to the columns
+  of \code{y} which are part of the vector of linear/additive predictor
+  \emph{main effects}.
+
+
+  For \code{family = multinomial} it is necessary to input these arguments
+  as \code{M = ncol(y)-1} and
+  \code{cindex = 2:(ncol(y)-1)}.
+
+
+% except for the possibly the first one (due to identifiability constraints).
+
+
+  }
+  \item{rindex, iindex}{
+  \code{rindex} means row index, and these are similar to \code{cindex}.
+  \code{iindex} means interaction index, and these are similar to \code{cindex}.
+
+
   }
 }
 \details{
@@ -140,7 +186,7 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
   Indeed, \code{A} and \code{C} have \code{Rank} columns.
 By default, the first column and row of the interaction matrix
 \code{A \%*\% t(C)} are chosen
-to be structural zeros, because \code{szero = 1}.
+to be structural zeros, because \code{str0 = 1}.
 This means that the first row of \code{A} consists of all zeros.
 
 
@@ -159,7 +205,7 @@ The first row and column is baseline.
 The power of \code{rcim()} is that many \pkg{VGAM} family functions
 can be assigned to its \code{family} argument.
 For example, 
-\code{\link{normal1}} fits something in between a 2-way
+\code{\link{uninormal}} fits something in between a 2-way
 ANOVA with and without interactions,
 \code{\link{alaplace2}} with \code{Rank = 0} is something like
 \code{\link[stats]{medpolish}}.
@@ -186,13 +232,14 @@ result may not have meaning.
 
 }
 \references{
+
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
 \emph{Statistical Modelling},
 \bold{3}, 15--41.
 
 
-Yee, T. W. and Hadi, A. F. (2012)
+Yee, T. W. and Hadi, A. F. (2013)
 Row-column interaction models
 \emph{In preparation}.
 
@@ -235,9 +282,12 @@ assistance from Alfian F. Hadi.
 
 
   If \code{summary = TRUE}, then \code{y} can be a
-  \code{"grc"} object, in which case a summary can be
-  returned. That is, \code{grc(y, summary = TRUE)} is
+  \code{"grc"} object, in which case a summary can be returned.
+  That is, \code{grc(y, summary = TRUE)} is
   equivalent to \code{summary(grc(y))}.
+  It is not possible to plot a
+   \code{grc(y, summary = TRUE)} or
+  \code{rcim(y, summary = TRUE)} object.
 
 
 }
@@ -248,13 +298,13 @@ assistance from Alfian F. Hadi.
   Quite a lot of expertise is needed when fitting such models and in
   interpreting them. For example, the constraint
   matrices apply the reduced-rank regression to the first
-  (see \code{which.lp})
+  (see \code{which.linpred})
   linear predictor and the other linear predictors are intercept-only and
   have a common value throughout the entire data set.
   This means that, by default, \code{family =} \code{\link{zipoissonff}} is
   appropriate but not
   \code{family =} \code{\link{zipoisson}}.
-  Else set \code{family =} \code{\link{zipoisson}} and \code{which.lp = 2}.
+  Else set \code{family =} \code{\link{zipoisson}} and \code{which.linpred = 2}.
   To understand what is going on, do examine the constraint
   matrices of the fitted object, and reconcile this with
   Equations (4.3) to (4.5) of Yee and Hastie (2003).
@@ -280,6 +330,7 @@ assistance from Alfian F. Hadi.
   \code{\link{Rcim}},
   \code{\link{Qvar}},
   \code{\link{plotrcim0}},
+  \code{\link{multinomial}},
   \code{\link{alcoff}},
   \code{\link{crashi}},
   \code{\link{auuc}},
@@ -299,12 +350,17 @@ grc2 <- grc(auuc, Rank = 2, Index.corner = c(2, 5))
 fitted(grc2)
 summary(grc2)
 
+model3 <- rcim(auuc, Rank = 1, fam = multinomial,
+               M = ncol(auuc)-1, cindex = 2:(ncol(auuc)-1), trace = TRUE)
+fitted(model3)
+summary(model3)
+
 
 # 2012 Summer Olympic Games in London
-\dontrun{ top10 <- head(oly12, n = 10)
+\dontrun{ top10 <- head(olym12, n = 10)
 grc.oly1 <- with(top10, grc(cbind(gold, silver, bronze)))
 round(fitted(grc.oly1))
-round(resid(grc.oly1, type = "response"), dig = 1)  # Response residuals
+round(resid(grc.oly1, type = "response"), digits = 1)  # Response residuals
 summary(grc.oly1)
 Coef(grc.oly1)
 }
@@ -312,10 +368,10 @@ Coef(grc.oly1)
 
 # Roughly median polish
 rcim0 <- rcim(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
-round(fitted(rcim0), dig = 0)
-round(100 * (fitted(rcim0) - auuc) / auuc, dig = 0)  # Discrepancy
-rcim0@y
-round(coef(rcim0, matrix = TRUE), dig = 2)
+round(fitted(rcim0), digits = 0)
+round(100 * (fitted(rcim0) - auuc) / auuc, digits = 0)  # Discrepancy
+depvar(rcim0)
+round(coef(rcim0, matrix = TRUE), digits = 2)
 Coef(rcim0, matrix = TRUE)
 # constraints(rcim0)
 names(constraints(rcim0))
@@ -328,10 +384,10 @@ round(100 * (fitted(rcim0) - fv) / fv)  # Hopefully should be all 0s
 \keyword{models}
 \keyword{regression}
 % plot(grc.oly1)
-% oly2 <- with(top10, grc(cbind(gold,silver,bronze), Rank = 2)) # Saturated model
+% oly2 <- with(top10, grc(cbind(gold,silver,bronze), Rank = 2))  # Saturated model
 % round(fitted(oly2))
 % round(fitted(oly2)) - with(top10, cbind(gold,silver,bronze))
-% summary(oly2) # Saturated model
+% summary(oly2)  # Saturated model
 
 
 
@@ -339,6 +395,17 @@ round(100 * (fitted(rcim0) - fv) / fv)  # Hopefully should be all 0s
 % Then \code{.grc.df} is deleted before exiting the function.
 
 
-% print(Coef(rcim0, matrix = TRUE), dig = 3)
+% print(Coef(rcim0, matrix = TRUE), digits = 3)
+
+
+% Prior to 201310:
+%    str0 = if (!Rank) NULL else {
+%      if (Musual == 1) 1 else setdiff(1:(Musual * ncol(y)),
+%                   c(1 + (1:ncol(y)) * Musual, Index.corner))
+%    },
+%    str0 = if (Rank > 0) 1 else NULL,
+%    Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
+
+
 
 
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index 0a7645c..7394d5d 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -212,7 +212,7 @@ sqrt(diag(vcov(summary(fit))))   # Standard errors
 y <- as.matrix(venice[, paste("r", 1:10, sep = "")])
 fit1 <- vgam(y ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
              data = venice, trace = TRUE, na.action = na.pass)
-depvar(fit1)[4:5, ] # NAs used to pad the matrix
+depvar(fit1)[4:5, ]  # NAs used to pad the matrix
 
 \dontrun{
 # Plot the component functions
diff --git a/man/gumbelIIUC.Rd b/man/gumbelIIUC.Rd
index a83cb4b..e2ad859 100644
--- a/man/gumbelIIUC.Rd
+++ b/man/gumbelIIUC.Rd
@@ -57,7 +57,7 @@ rgumbelII(n, shape, scale = 1)
 probs <- seq(0.01, 0.99, by = 0.01)
 Shape <- exp( 0.5); Scale <- exp(1);
 max(abs(pgumbelII(qgumbelII(p = probs, Shape, Scale),
-                  Shape, Scale) - probs)) # Should be 0
+                  Shape, Scale) - probs))  # Should be 0
 
 \dontrun{ x <- seq(-0.1, 10, by = 0.01);
 plot(x, dgumbelII(x, Shape, Scale), type = "l", col = "blue", las = 1,
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index 7c1ced6..ef782cb 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -102,8 +102,8 @@ rgumbel(n, location = 0, scale = 1)
 \examples{
 mu <- 1; sigma <- 2;
 y <- rgumbel(n = 100, loc = mu, scale = sigma)
-c(mean(y), mu - sigma * digamma(1)) # Sample and population means
-c(var(y), sigma^2 * pi^2 / 6) # Sample and population variances
+c(mean(y), mu - sigma * digamma(1))  # Sample and population means
+c(var(y), sigma^2 * pi^2 / 6)  # Sample and population variances
 
 \dontrun{ x <- seq(-2.5, 3.5, by = 0.01)
 loc <- 0; sigma <- 1
diff --git a/man/hatvalues.Rd b/man/hatvalues.Rd
index 13d2002..9fb272d 100644
--- a/man/hatvalues.Rd
+++ b/man/hatvalues.Rd
@@ -252,8 +252,8 @@ dfbetavlm(model, maxit.new = 1,
 # Proportional odds model, p.179, in McCullagh and Nelder (1989)
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vglm(cbind(normal, mild, severe) ~ let, cumulative, data = pneumo)
-hatvalues(fit) # n x M matrix, with positive values
-all.equal(sum(hatvalues(fit)), fit@rank) # Should be TRUE
+hatvalues(fit)  # n x M matrix, with positive values
+all.equal(sum(hatvalues(fit)), fit@rank)  # Should be TRUE
 \dontrun{ par(mfrow = c(1, 2))
 hatplot(fit, ylim = c(0, 1), las = 1, col = "blue") }
 }
diff --git a/man/hormone.Rd b/man/hormone.Rd
index 6b39d06..fec4c92 100644
--- a/man/hormone.Rd
+++ b/man/hormone.Rd
@@ -72,7 +72,7 @@ Thus calibration might be of interest for the data.
   New York, USA: Wiley.
 
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -80,7 +80,7 @@ Thus calibration might be of interest for the data.
 }
 
 \seealso{
-  \code{\link[VGAM]{normal1}},
+  \code{\link[VGAM]{uninormal}},
   \code{\link[VGAM]{rrvglm}}.
 
 
@@ -94,18 +94,18 @@ data(hormone)
 summary(hormone)
 
 modelI <- rrvglm(Y ~ 1 + X, data = hormone, trace = TRUE,
-                normal1(zero = NULL, lsd = "identity", imethod = 2))
+                uninormal(zero = NULL, lsd = "identity", imethod = 2))
 
 # Alternative way to fit modelI
 modelI.other <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE,
-                     normal1(zero = NULL, lsd = "identity"))
+                     uninormal(zero = NULL, lsd = "identity"))
 
 # Inferior to modelI
 modelII <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE,
-                family = normal1(zero = NULL))
+                family = uninormal(zero = NULL))
 
 logLik(modelI)
-logLik(modelII) # Less than logLik(modelI)
+logLik(modelII)  # Less than logLik(modelI)
 
 
 # Reproduce the top 3 equations on p.65 of Carroll and Ruppert (1988).
@@ -114,15 +114,15 @@ logLik(modelII) # Less than logLik(modelI)
 # Equation (1)
 hormone <- transform(hormone, rX = 1 / X)
 clist <- list("(Intercept)" = diag(2), X = diag(2), rX = rbind(0, 1))
-fit1 <- vglm(Y ~ 1 + X + rX, family = normal1(zero = NULL),
+fit1 <- vglm(Y ~ 1 + X + rX, family = uninormal(zero = NULL),
              constraints = clist, data = hormone, trace = TRUE)
 coef(fit1, matrix = TRUE)
-summary(fit1) # Actually, the intercepts do not seem significant
+summary(fit1)  # Actually, the intercepts do not seem significant
 plot(Y ~ X, hormone, col = "blue")
 lines(fitted(fit1) ~ X, hormone, col = "orange")
 
 # Equation (2)
-fit2 <- rrvglm(Y ~ 1 + X, normal1(zero = NULL), hormone, trace = TRUE)
+fit2 <- rrvglm(Y ~ 1 + X, uninormal(zero = NULL), hormone, trace = TRUE)
 coef(fit2, matrix = TRUE)
 plot(Y ~ X, hormone, col = "blue")
 lines(fitted(fit2) ~ X, hormone, col = "red")
@@ -135,9 +135,9 @@ lines(fitted(fit2) - 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
 # Equation (3)
 # Does not fit well because the loge link for the mean is not good.
 fit3 <- rrvglm(Y ~ 1 + X, maxit = 300, data = hormone, trace = TRUE,
-               normal1(lmean = "loge", zero = NULL))
+               uninormal(lmean = "loge", zero = NULL))
 coef(fit3, matrix = TRUE)
-plot(Y ~ X, hormone, col = "blue") # Does not look okay.
+plot(Y ~ X, hormone, col = "blue")  # Does not look okay.
 lines(exp(predict(fit3)[, 1]) ~ X, hormone, col = "red")
 # Add +- 2 SEs
 lines(fitted(fit3) + 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
diff --git a/man/hspider.Rd b/man/hspider.Rd
index b4f7011..bbe32b9 100644
--- a/man/hspider.Rd
+++ b/man/hspider.Rd
@@ -32,50 +32,46 @@
 \details{
 The data, which originally came from Van der Aart and Smeek-Enserink
 (1975), consists of abundances (numbers trapped over a 60-week period)
-and 6 environmental variables.  There were 28 sites.
+and 6 environmental variables. There were 28 sites.
+
 
 This data set has been often used to illustrate ordination, e.g., using
-canonical correspondence analysis (CCA).  In the example below, the
+canonical correspondence analysis (CCA). In the example below, the
 data is used for constrained quadratic ordination (CQO; formerly called
 canonical Gaussian ordination or CGO), a numerically intensive method
-that has many superior qualities.  See \code{\link{cqo}} for details.
+that has many superior qualities.
+See \code{\link{cqo}} for details.
+
 
 }
 %\source{
 %}
 \references{
+
+
 Van der Aart, P. J. M. and Smeek-Enserink, N. (1975)
 Correlations between distributions of hunting spiders
 (Lycosidae, Ctenidae) and environmental characteristics
 in a dune area.
 \emph{Netherlands Journal of Zoology},
 \bold{25}, 1--45.
+
+
 }
 \examples{
-str(hspider)
+summary(hspider)
 
 \dontrun{
-# Fit a rank-1 Poisson CQO
-set.seed(111)  # This leads to the global solution
-hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
-# vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
-# vvv                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
-# vvv          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-# vvv          fam = poissonff, data = hspider, Crow1posit=FALSE)
-# vvv nos = ncol(p1@y)
-# vvv lvplot(p1, y=TRUE, lcol=1:nos, pch=1:nos, pcol=1:nos) 
-# vvv Coef(p1)
-# vvv summary(p1)
-
-
+# Standardize the environmental variables:
+hspider[, 1:6] <- scale(subset(hspider, select = WaterCon:ReflLux))
 
 # Fit a rank-1 binomial CAO
-hsbin <- hspider # Binary species data
-hsbin[,-(1:6)] <- as.numeric(hsbin[,-(1:6)] > 0)
+hsbin <- hspider  # Binary species data
+hsbin[, -(1:6)] <- as.numeric(hsbin[, -(1:6)] > 0)
 set.seed(123)
-ahsb1 <- cao(cbind(Alopcune,Arctlute,Auloalbi,Zoraspin) ~
-            WaterCon + ReflLux, family = binomialff(mv = TRUE),
-            df1.nl = 2.2, Bestof=3, data = hsbin)
+ahsb1 <- cao(cbind(Alopcune, Arctlute, Auloalbi, Zoraspin) ~
+             WaterCon + ReflLux, family = binomialff(mv = TRUE),
+             df1.nl = 2.2, Bestof = 3, data = hsbin)
 par(mfrow = 2:1, las = 1)
 lvplot(ahsb1, type = "predictors", llwd = 2, ylab = "logit p", lcol = 1:9)
 persp(ahsb1, rug = TRUE, col = 1:10, lwd = 2)
@@ -85,4 +81,15 @@ coef(ahsb1)
 \keyword{datasets}
 
 
+%# Fit a rank-1 Poisson CQO
+%set.seed(111)  # This leads to the global solution
+%# vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+%# vvv                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+%# vvv          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+%# vvv          fam = poissonff, data = hspider, Crow1posit=FALSE)
+%# vvv nos = ncol(p1@y)
+%# vvv lvplot(p1, y=TRUE, lcol=1:nos, pch=1:nos, pcol=1:nos) 
+%# vvv Coef(p1)
+%# vvv summary(p1)
+
 
diff --git a/man/huber.Rd b/man/huber.Rd
index 11d3254..2a7a906 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -26,6 +26,7 @@ huber2(llocation = "identity", lscale = "loge",
   Tuning constant.
   See \code{\link{rhuber}} for more information.
 
+
   }
   \item{imethod, zero}{ 
   See \code{\link{CommonVGAMffArguments}} for information.
@@ -68,7 +69,8 @@ huber2(llocation = "identity", lscale = "loge",
 }
 
 \author{
-  T. W. Yee. Help was given by Arash Ardalan.
+  T. W. Yee.
+  Help was given by Arash Ardalan.
 
 
 }
@@ -85,7 +87,7 @@ huber2(llocation = "identity", lscale = "loge",
 }
 \seealso{
     \code{\link{rhuber}},
-    \code{\link{normal1}},
+    \code{\link{uninormal}},
     \code{\link{gaussianff}},
     \code{\link{laplace}},
     \code{\link{CommonVGAMffArguments}}.
@@ -97,7 +99,7 @@ set.seed(1231); NN <- 30; coef1 <- 1; coef2 <- 10
 hdata <- data.frame(x2 = sort(runif(NN)))
 hdata <- transform(hdata, y  = rhuber(NN, mu = coef1 + coef2 * x2))
 
-hdata$x2[1] <- 0.0 # Add an outlier
+hdata$x2[1] <- 0.0  # Add an outlier
 hdata$y[1] <- 10  
 
 fit.huber2 <- vglm(y ~ x2, huber2(imethod = 3), hdata, trace = TRUE)
@@ -111,7 +113,7 @@ summary(fit.huber2)
 plot(y ~ x2, hdata, col = "blue", las = 1)
 lines(fitted(fit.huber2) ~ x2, hdata, col = "darkgreen", lwd = 2)
 
-fit.lm <- lm(y ~ x2, hdata) # Compare to a LM:
+fit.lm <- lm(y ~ x2, hdata)  # Compare to a LM:
 lines(fitted(fit.lm) ~ x2, hdata, col = "lavender", lwd = 3)
 
 # Compare to truth:
diff --git a/man/hunua.Rd b/man/hunua.Rd
index 3824729..e875df2 100644
--- a/man/hunua.Rd
+++ b/man/hunua.Rd
@@ -56,13 +56,13 @@
 }
 \examples{
 # Fit a GAM using vgam() and compare it with the Waitakere Ranges one
-fit.h <- vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
+fit.h <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
 \dontrun{
-plot(fit.h, se = TRUE, lcol = "red", scol = "red",
-     main = "Red is Hunua, Blue is Waitakere") }
+plot(fit.h, se = TRUE, lcol = "orange", scol = "orange",
+     llwd = 2, slwd = 2, main = "Orange is Hunua, Blue is Waitakere") }
 head(predict(fit.h, hunua, type = "response"))
 
-fit.w <- vgam(agaaus ~ s(altitude, df = 2), binomialff, waitakere)
+fit.w <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = waitakere)
 \dontrun{
 plot(fit.w, se = TRUE, lcol = "blue", scol = "blue", add = TRUE) }
 head(predict(fit.w, hunua, type = "response"))   # Same as above? 
diff --git a/man/hyperg.Rd b/man/hyperg.Rd
index 79a6b47..a8dbce4 100644
--- a/man/hyperg.Rd
+++ b/man/hyperg.Rd
@@ -8,6 +8,7 @@
   number of white balls or the total number of white and black balls
   are unknown.
 
+
 }
 \usage{
 hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
@@ -19,23 +20,29 @@ hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
     Must be a vector with positive values, and is recycled, if necessary,
     to the same length as the response.
     One of \code{N} and \code{D} must be specified.
+
+
   }
   \item{D}{ 
     Number of white balls in the urn.
     Must be a vector with positive values, and is recycled, if necessary,
     to the same length as the response.
     One of \code{N} and \code{D} must be specified.
+
+
   }
 
   \item{lprob}{ 
   Link function for the probabilities.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iprob}{ 
   Optional initial value for the probabilities.
   The default is to choose initial values internally.
 
+
   }
 }
 
@@ -78,9 +85,9 @@ hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
 
 }
 \references{
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -98,31 +105,31 @@ New York: Wiley-Interscience, Third edition.
 
 }
 \seealso{
-    \code{\link[stats]{dhyper}},
-    \code{\link{binomialff}}.
+  \code{\link[stats]{dhyper}},
+  \code{\link{binomialff}}.
 
 
 }
 \section{Warning }{
-    No checking is done to ensure that certain values are within range,
-    e.g., \eqn{k \leq N}{k <= N}.
+  No checking is done to ensure that certain values are within range,
+  e.g., \eqn{k \leq N}{k <= N}.
 
 }
 
 \examples{
 nn <- 100
-m <- 5   # number of white balls in the population
-k <- rep(4, len = nn)   # sample sizes
-n <- 4   # number of black balls in the population
+m <- 5  # Number of white balls in the population
+k <- rep(4, len = nn)  # Sample sizes
+n <- 4  # Number of black balls in the population
 y  <- rhyper(nn = nn, m = m, n = n, k = k)
-yprop <- y / k  # sample proportions
+yprop <- y / k  # Sample proportions
 
 # N is unknown, D is known. Both models are equivalent:
 fit <- vglm(cbind(y,k-y) ~ 1, hyperg(D = m), trace = TRUE, crit = "c")
-fit <- vglm(yprop ~ 1, hyperg(D=m), weight = k, trace = TRUE, crit = "c")
+fit <- vglm(yprop ~ 1, hyperg(D = m), weight = k, trace = TRUE, crit = "c")
 
 # N is known, D is unknown. Both models are equivalent:
-fit <- vglm(cbind(y,k-y) ~ 1, hyperg(N = m+n), trace = TRUE, crit = "l")
+fit <- vglm(cbind(y, k-y) ~ 1, hyperg(N = m+n), trace = TRUE, crit = "l")
 fit <- vglm(yprop ~ 1, hyperg(N = m+n), weight = k, trace = TRUE, crit = "l")
 
 coef(fit, matrix = TRUE)
diff --git a/man/hypersecant.Rd b/man/hypersecant.Rd
index d063433..bd82554 100644
--- a/man/hypersecant.Rd
+++ b/man/hypersecant.Rd
@@ -1,15 +1,17 @@
 \name{hypersecant}
 \alias{hypersecant}
 \alias{hypersecant.1}
+\alias{nef.hs}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Hyperbolic Secant Distribution Family Function }
 \description{
   Estimation of the parameter of the hyperbolic secant
   distribution.
 
+
 }
 \usage{
-hypersecant(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
+  hypersecant(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
 hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -18,33 +20,42 @@ hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
   Parameter link function applied to the parameter \eqn{\theta}{theta}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{init.theta}{
   Optional initial value for \eqn{\theta}{theta}.
   If failure to converge occurs, try some other value.
   The default means an initial value is determined internally.
 
+
   }
 }
 \details{
   The probability density function of the hyperbolic secant distribution
   is given by
-  \deqn{f(y)=\exp(\theta y + \log(\cos(\theta ))) / (2 \cosh(\pi y/2)),}{%
-        f(y) =exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2)),}
+  \deqn{f(y;\theta) = \exp(\theta y + \log(\cos(\theta ))) / (2 \cosh(\pi y/2)),}{%
+        f(y; theta) = exp(theta*y + log(cos(theta))) / (2*cosh(pi*y/2)),}
   for parameter \eqn{-\pi/2 < \theta < \pi/2}{pi/2 < theta < pi/2}
   and all real \eqn{y}.
-  The mean of \eqn{Y} is \eqn{\tan(\theta)}{tan(theta)} (returned as
-  the fitted values).
-
-
-  Another parameterization is used for \code{hypersecant.1()}.
-  This uses
-  \deqn{f(y)=(\cos(\theta)/\pi) \times y^{-0.5+\theta/\pi} \times
-             (1-y)^{-0.5-\theta/\pi},}{%
-        f(y) =(cos(theta)/pi) * y^(-0.5+theta/pi) * (1-y)^(-0.5-theta/pi),}
+  The mean of \eqn{Y} is \eqn{\tan(\theta)}{tan(theta)}
+  (returned as the fitted values).
+  Morris (1982) calls this model NEF-HS
+  (Natural Exponential Family-Hyperbolic Secant).
+  It is used to generate NEFs, giving rise to the class of NEF-GHS
+  (G for Generalized).
+
+
+
+  Another parameterization is used for \code{hypersecant.1()}:
+  let \eqn{Y = (logit U) / \pi}{Y = (logit U) / pi}.
+  Then this uses
+  \deqn{f(u;\theta)=(\cos(\theta)/\pi) \times
+                         u^{-0.5+\theta/\pi} \times
+                     (1-u)^{-0.5-\theta/\pi},}{%
+        f(u;theta) = (cos(theta)/pi) * u^(-0.5+theta/pi) * (1-u)^(-0.5-theta/pi),}
   for parameter \eqn{-\pi/2 < \theta < \pi/2}{-pi/2 < theta < pi/2}
-  and \eqn{0 < y < 1}.
-  Then the mean of \eqn{Y} is \eqn{0.5 + \theta/\pi}{0.5 + theta/pi}
+  and \eqn{0 < u < 1}.
+  Then the mean of \eqn{U} is \eqn{0.5 + \theta/\pi}{0.5 + theta/pi}
   (returned as the fitted values) and the variance is
   \eqn{(\pi^2 - 4 \theta^2) / (8\pi^2)}{(pi^2 - 4*theta^2) / (8*pi^2)}.
 
@@ -62,10 +73,18 @@ hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
 
 }
 \references{
+
   Jorgensen, B. (1997)
   \emph{The Theory of Dispersion Models}.
   London: Chapman & Hall.
-%  p.101, Eqn (3.37).
+% p.101, Eqn (3.37) for hypersecant().
+% p.101, Eqn (3.38) for hypersecant.1().
+
+
+Morris, C. N. (1982)
+Natural exponential families with quadratic variance functions.
+\emph{The Annals of Statistics},
+\bold{10}(1), 65--80.
 
 
 }
@@ -75,6 +94,7 @@ hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
 
 %}
 \seealso{
+% \code{\link{nefghs}},
   \code{\link{elogit}}.
 
 
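
As a quick sanity check of the NEF-HS density displayed in the hypersecant()
details above, the expression can be integrated numerically: it should
integrate to one and, as stated, have mean tan(theta). This is only an
illustrative base-R sketch (dnefhs is a throwaway name, not a VGAM function):

dnefhs <- function(y, theta = 0.3)  # The density as written above
  exp(theta * y + log(cos(theta))) / (2 * cosh(pi * y / 2))
integrate(dnefhs, lower = -Inf, upper = Inf)$value                     # Should be 1
integrate(function(y) y * dnefhs(y), lower = -Inf, upper = Inf)$value  # Mean; should equal
tan(0.3)                                                               # tan(theta)
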
diff --git a/man/hzeta.Rd b/man/hzeta.Rd
index 2934190..3aebbaf 100644
--- a/man/hzeta.Rd
+++ b/man/hzeta.Rd
@@ -3,11 +3,12 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Haight's Zeta Family Function }
 \description{
-  Estimating the parameter of Haight's Zeta function.
+  Estimating the parameter of Haight's zeta distribution.
+
 
 }
 \usage{
-hzeta(link = "loglog", ialpha = NULL, nsimEIM=100)
+hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -17,16 +18,19 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM=100)
   Here, a log-log link keeps the parameter greater than one, meaning
   the mean is finite.
 
+
   }
   \item{ialpha}{
   Optional initial value for the (positive) parameter. 
   The default is to obtain an initial value internally. Use this argument
   if the default fails.
 
+
   }
   \item{nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -44,12 +48,14 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM=100)
     The mean is infinite if \eqn{\alpha \leq 1}{alpha <= 1}, and
     the variance is infinite if \eqn{\alpha \leq 2}{alpha <= 2}.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{ 
 
@@ -64,17 +70,19 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM=100)
 %\note{ 
 %}
 
+
 \seealso{
-    \code{\link{Hzeta}},
-    \code{\link{zeta}},
-    \code{\link{zetaff}},
-    \code{\link{loglog}}.
+  \code{\link{Hzeta}},
+  \code{\link{zeta}},
+  \code{\link{zetaff}},
+  \code{\link{loglog}}.
+
 
 }
 \examples{
 alpha <- exp(exp(-0.1))  # The parameter
 hdata <- data.frame(y = rhzeta(n = 1000, alpha))
-fit <- vglm(y ~ 1, hzeta, hdata, trace = TRUE, crit = "c")
+fit <- vglm(y ~ 1, hzeta, hdata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)  # Useful for intercept-only models; should be same as alpha
 c(with(hdata, mean(y)), head(fitted(fit), 1))
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index 357cae5..7568dcf 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -4,10 +4,11 @@
 \alias{phzeta}
 \alias{qhzeta}
 \alias{rhzeta}
-\title{ Haight's Zeta Function  }
+\title{ Haight's Zeta Distribution  }
 \description{
   Density, distribution function, quantile function and random generation
-  for Haight's Zeta function distribution with parameter \code{alpha}.
+  for Haight's zeta distribution with parameter \code{alpha}.
+
 
 }
 \usage{
@@ -22,18 +23,25 @@ rhzeta(n, alpha)
    Vector of quantiles. For the density, it should be a vector with
    positive integer values in order for the probabilities to be positive.
 
+
   }
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. A single positive integer.}
+  \item{n}{number of observations.
+  Same as \code{\link[stats]{runif}}.
+
+
+  }
   \item{alpha}{ 
    The parameter value. Must contain positive values and is recycled to
    the length of \code{x} or \code{p} or \code{q} if necessary.
 
+
   }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 }
 \details{
@@ -42,6 +50,7 @@ rhzeta(n, alpha)
          f(x) = (2x-1)^(-alpha) - (2x+1)^(-alpha),}
    where \eqn{\alpha>0}{alpha>0} and \eqn{x=1,2,\ldots}{x=1,2,...}.
 
+
 }
 \value{
   \code{dhzeta} gives the density,
@@ -49,6 +58,7 @@ rhzeta(n, alpha)
   \code{qhzeta} gives the quantile function, and
   \code{rhzeta} generates random deviates.
 
+
 }
 \references{ 
 
@@ -58,17 +68,21 @@ rhzeta(n, alpha)
     3rd edition,
     Hoboken, New Jersey: Wiley.
 
+
 }
 \author{ T. W. Yee }
 \note{ 
-   Given some response data, the \pkg{VGAM} family function
-   \code{\link{hzeta}} estimates the parameter \code{alpha}.
+  Given some response data, the \pkg{VGAM} family function
+  \code{\link{hzeta}} estimates the parameter \code{alpha}.
+
+
 }
 
 \seealso{
-    \code{\link{hzeta}},
-    \code{\link{zeta}},
-    \code{\link{zetaff}}.
+  \code{\link{hzeta}},
+  \code{\link{zeta}},
+  \code{\link{zetaff}}.
+
 
 }
 \examples{
@@ -78,12 +92,11 @@ rhzeta(20, 2.1)
 round(1000 * dhzeta(1:8, 2))
 table(rhzeta(1000, 2))
 
-\dontrun{
-alpha <- 1.1; x <- 1:10
+\dontrun{ alpha <- 1.1; x <- 1:10
 plot(x, dhzeta(x, alpha = alpha), type = "h", ylim = 0:1, lwd = 2,
      sub = paste("alpha =", alpha), las = 1, col = "blue", ylab = "Probability",
-     main = "Haight's zeta: blue = density; red = distribution function")
-lines(x+0.1, phzeta(x, alpha = alpha), col = "red", lty = 3, lwd = 2, type = "h")
+     main = "Haight's zeta: blue = density; orange = distribution function")
+lines(x+0.1, phzeta(x, alpha = alpha), col = "orange", lty = 3, lwd = 2, type = "h")
 }
 }
 \keyword{distribution}
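
The probability function quoted in the hzeta details above,
f(x) = (2x-1)^(-alpha) - (2x+1)^(-alpha), is easy to check directly against
dhzeta(), and its sum telescopes to one. A small illustrative check only
(assumes the VGAM package is attached):

alpha <- 2; x <- 1:8
cbind(direct = (2*x - 1)^(-alpha) - (2*x + 1)^(-alpha),
      dhzeta = dhzeta(x, alpha))                           # The two columns should agree
sum((2*(1:1e5) - 1)^(-alpha) - (2*(1:1e5) + 1)^(-alpha))   # Telescoping sum; about 1
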
diff --git a/man/iam.Rd b/man/iam.Rd
index ec45b2c..d3a0a31 100644
--- a/man/iam.Rd
+++ b/man/iam.Rd
@@ -105,8 +105,8 @@ iam(j, k, M, both = FALSE, diag = TRUE)
 
 }
 \examples{
-iam(1, 2, M = 3) # The 4th column represents element (1,2) of a 3x3 matrix
-iam(NULL, NULL, M = 3, both = TRUE) # Return the row and column indices
+iam(1, 2, M = 3)  # The 4th column represents element (1,2) of a 3x3 matrix
+iam(NULL, NULL, M = 3, both = TRUE)  # Return the row and column indices
 
 dirichlet()@weight
 
@@ -114,12 +114,12 @@ M <- 4
 temp1 <- iam(NA, NA, M = M, both = TRUE)
 mat1 <- matrix(NA, M, M)
 mat1[cbind(temp1$row, temp1$col)] = 1:length(temp1$row)
-mat1 # More commonly used
+mat1  # More commonly used
 
 temp2 <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
 mat2 <- matrix(NA, M, M)
 mat2[cbind(temp2$row, temp2$col)] = 1:length(temp2$row)
-mat2 # Rarely used
+mat2  # Rarely used
 }
 \keyword{manip}
 \keyword{programming}
diff --git a/man/identity.Rd b/man/identity.Rd
index eb3287d..45c5001 100644
--- a/man/identity.Rd
+++ b/man/identity.Rd
@@ -1,6 +1,6 @@
 \name{identity}
 \alias{identity}
-\alias{nidentity}
+\alias{negidentity}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Identity Link Function }
 \description{
@@ -9,8 +9,8 @@
 
 }
 \usage{
- identity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
-nidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+   identity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+negidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -37,7 +37,7 @@ nidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
   \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
 
 
-  The function \code{nidentity} is the negative-identity link function and
+  The function \code{negidentity} is the negative-identity link function and
   corresponds to \eqn{g(\theta)=-\theta}{g(theta)=-theta}.
   This is useful for some models, e.g., in the literature supporting the
   \code{\link{egev}} function it seems that half of the authors use
@@ -57,7 +57,7 @@ nidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
 
-  For \code{nidentity()}: the results are similar to \code{identity()}
+  For \code{negidentity()}: the results are similar to \code{identity()}
   except for a sign change in most cases.
 
 
@@ -75,7 +75,7 @@ nidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
   \code{\link{loge}},
   \code{\link{logit}},
   \code{\link{probit}},
-  \code{\link{powl}}.
+  \code{\link{powerlink}}.
 
 
 }
@@ -83,9 +83,9 @@ nidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 identity((-5):5) 
 identity((-5):5, deriv = 1)
 identity((-5):5, deriv = 2)
-nidentity((-5):5) 
-nidentity((-5):5, deriv = 1)
-nidentity((-5):5, deriv = 2)
+negidentity((-5):5) 
+negidentity((-5):5, deriv = 1)
+negidentity((-5):5, deriv = 2)
 }
 \keyword{math}
 \keyword{models}
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index cd63fdc..612a090 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -6,28 +6,34 @@
   Estimates the two parameters of the inverse Gaussian distribution
   by maximum likelihood estimation.
 
+
 }
 \usage{
 inv.gaussianff(lmu = "loge", llambda = "loge",
                imethod = 1, ilambda = NULL,
-               parallel = FALSE, apply.parint = FALSE,
-               shrinkage.init = 0.99, zero = NULL)
+               parallel = FALSE, shrinkage.init = 0.99, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
+%apply.parint = FALSE,
 \arguments{
   \item{lmu, llambda}{
   Parameter link functions for the \eqn{\mu}{mu} and
   \eqn{\lambda}{lambda} parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
-  \item{ilambda, parallel, apply.parint}{ 
+  \item{ilambda, parallel}{ 
   See \code{\link{CommonVGAMffArguments}} for more information.
+  If \code{parallel = TRUE} then the constraint is not applied to the
+  intercept.
+
 
   }
   \item{imethod, shrinkage.init, zero}{ 
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -62,14 +68,15 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
 
 }
 \references{ 
+
 Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
 \emph{Continuous Univariate Distributions},
 2nd edition, Volume 1, New York: Wiley.
 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -85,7 +92,7 @@ New York: Wiley-Interscience, Third edition.
 
 \seealso{ 
   \code{\link{Inv.gaussian}},
-  \code{\link{wald}},
+  \code{\link{waldff}},
   \code{\link{bisa}}.
 
 
diff --git a/man/invbinomial.Rd b/man/invbinomial.Rd
index 2431f80..73d30cd 100644
--- a/man/invbinomial.Rd
+++ b/man/invbinomial.Rd
@@ -17,15 +17,18 @@ invbinomial(lrho = elogit(min = 0.5, max = 1),
   Link function for the \eqn{\rho}{rho} and \eqn{\lambda}{lambda} parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{irho, ilambda}{
   Numeric.
   Optional initial values for \eqn{\rho}{rho} and \eqn{\lambda}{lambda}.
 
+
   }
   \item{zero}{
   See \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -108,10 +111,10 @@ with(idata, c(mean(y), head(fitted(fit), 1)))
 summary(fit)
 coef(fit, matrix = TRUE)
 Coef(fit)
-sum(weights(fit)) # sum of the prior weights
-sum(weights(fit, type = "work")) # sum of the working weights
+sum(weights(fit))  # Sum of the prior weights
+sum(weights(fit, type = "work"))  # Sum of the working weights
 }
 \keyword{models}
 \keyword{regression}
 
-%fit <- vglm(y ~ 1, invbinomial(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
+%fit <- vglm(y ~ 1, invbinomial(ilambda = 1), trace = TRUE, crit = "c", checkwz = FALSE)
diff --git a/man/is.parallel.Rd b/man/is.parallel.Rd
index 1362279..33ccb1c 100644
--- a/man/is.parallel.Rd
+++ b/man/is.parallel.Rd
@@ -64,7 +64,7 @@ is.parallel.vglm(object, type = c("term", "lm"), \dots)
 fit <- vglm(educ ~ bs(age) * sex + ethnic,
             cumulative(parallel = TRUE), xs.nz[1:200, ])
 is.parallel(fit)
-is.parallel(fit, type = "lm") # For each column of the LM matrix
+is.parallel(fit, type = "lm")  # For each column of the LM matrix
 }
 }
 
diff --git a/man/kendall.tau.Rd b/man/kendall.tau.Rd
new file mode 100644
index 0000000..309162b
--- /dev/null
+++ b/man/kendall.tau.Rd
@@ -0,0 +1,119 @@
+\name{kendall.tau}
+\alias{kendall.tau}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+Kendall's Tau Statistic
+
+}
+\description{
+  Computes Kendall's tau, which is a rank-based correlation measure
+  between two vectors.
+
+
+}
+\usage{
+kendall.tau(x, y, exact = FALSE, max.n = 3000)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, y}{
+    Numeric vectors. Must be of equal length.
+    Ideally their values are continuous and not too discrete.
+    Let \code{length(x)} be \eqn{N}, say.
+
+
+}
+  \item{exact}{
+    Logical. If \code{TRUE} then the exact value is computed.
+    
+
+}
+\item{max.n}{
+  Numeric. If \code{exact = FALSE} and \code{length(x)}
+  is more than \code{max.n} then a random sample
+  of \code{max.n} pairs is chosen.
+
+
+}
+}
+\details{
+  Kendall's tau is a measure of dependency in a bivariate distribution.
+  Loosely, two random variables are \emph{concordant} if large values
+  of one random variable are associated with large values of the
+  other random variable.
+  Similarly, two random variables are \emph{discordant} if large values
+  of one random variable are associated with small values of the
+  other random variable.
+  More formally, if \code{(x[i] - x[j])*(y[i] - y[j]) > 0} then
+  that comparison is concordant \eqn{(i \neq j)}{(i != j)}.
+  And if \code{(x[i] - x[j])*(y[i] - y[j]) < 0} then
+  that comparison is discordant \eqn{(i \neq j)}{(i != j)}.
+  Out of \code{choose(N, 2)} comparisons,
+  let \eqn{c} and \eqn{d} be the
+  number of concordant and discordant pairs.
+  Then Kendall's tau can be estimated by \eqn{(c-d)/(c+d)}.
+  If there are ties then half the ties are deemed concordant and
+  half discordant so that \eqn{(c-d)/(c+d+t)} is used.
+
+  
+}
+\value{
+  Kendall's tau, which lies between \eqn{-1} and \eqn{1}.
+
+
+
+}
+%\references{
+
+
+%}
+%\author{
+%  T. W. Yee.
+
+
+%}
+%\note{
+%This function has not been tested thoroughly.
+
+
+%}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\section{Warning}{
+  If \code{length(x)} is large then
+  the cost is \eqn{O(N^2)}, which is expensive!
+  Under these circumstances
+  it is not advisable to set \code{exact = TRUE} or \code{max.n} to a very
+  large number.
+
+
+
+}
+\seealso{
+  \code{\link{binormalcop}},
+  \code{\link[stats]{cor}}.
+
+
+}
+\examples{
+N <- 5000
+true.rho <- -0.8
+ymat <- rbinorm(N, cov12 = true.rho)  # Bivariate normal, aka N_2
+x <- ymat[, 1]
+y <- ymat[, 2]
+
+\dontrun{plot(x, y, col = "blue")}
+
+kendall.tau(x, y)  # A random sample is taken here
+kendall.tau(x, y)  # A random sample is taken here
+
+kendall.tau(x, y, exact = TRUE)  # Costly if length(x) is large
+kendall.tau(x, y, max.n = N)     # Same as exact = TRUE
+
+(rhohat <- sin(kendall.tau(x, y) * pi / 2))  # This formula holds for N_2 actually
+true.rho  # rhohat should be near this value
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{math}
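
The pairwise counting described in the details section of kendall.tau.Rd can
be spelt out in a few lines of base R. The sketch below is illustrative only
(naive.kendall.tau is a hypothetical helper, not part of VGAM): it implements
the (c-d)/(c+d) estimator naively in O(N^2) time and ignores ties, so with
continuous data it should agree with cor(..., method = "kendall"):

naive.kendall.tau <- function(x, y) {
  N <- length(x); conc <- disc <- 0
  for (i in 1:(N - 1))
    for (j in (i + 1):N) {
      s <- sign((x[i] - x[j]) * (y[i] - y[j]))  # > 0: concordant; < 0: discordant
      if (s > 0) conc <- conc + 1 else if (s < 0) disc <- disc + 1
    }
  (conc - disc) / (conc + disc)
}
set.seed(1)
x <- rnorm(200); y <- x + rnorm(200)
c(naive.kendall.tau(x, y), cor(x, y, method = "kendall"))  # Should be very close
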
diff --git a/man/koenker.Rd b/man/koenker.Rd
index 5fe87f8..28e320e 100644
--- a/man/koenker.Rd
+++ b/man/koenker.Rd
@@ -6,6 +6,7 @@
   Estimates the location and scale parameters of Koenker's
   distribution by maximum likelihood estimation.
 
+
 }
 \usage{
 koenker(percentile = 50, llocation = "identity", lscale = "loge",
@@ -18,15 +19,18 @@ koenker(percentile = 50, llocation = "identity", lscale = "loge",
   which are the quantiles and expectiles.
   They will be returned as `fitted values'.
 
+
   }
   \item{llocation, lscale}{
   See \code{\link{Links}} for more choices,
   and \code{\link{CommonVGAMffArguments}}.
 
+
   }
   \item{ilocation, iscale, imethod, zero}{ 
   See \code{\link{CommonVGAMffArguments}} for details.
 
+
   }
 }
 \details{
@@ -35,6 +39,7 @@ koenker(percentile = 50, llocation = "identity", lscale = "loge",
   here. Its canonical form has mean and mode at 0 and has a heavy
   tail (in fact, its variance is infinite).
 
+
   The standard (``canonical'') form of Koenker's 
   distribution can be endowed with a location and scale parameter.
   The standard form has a density
@@ -71,6 +76,7 @@ koenker(percentile = 50, llocation = "identity", lscale = "loge",
   \code{\link{rrvglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{ 
 
@@ -79,6 +85,7 @@ When are expectiles percentiles? (solution)
 \emph{Econometric Theory},
 \bold{9}, 526--527.
 
+
 }
 \author{ T. W. Yee }
 %\note{
@@ -95,10 +102,10 @@ When are expectiles percentiles? (solution)
 set.seed(123); nn <- 1000
 kdata <- data.frame(x2 = sort(runif(nn)))
 kdata <- transform(kdata, mylocat = 1 + 3 * x2,
-                        myscale = 1)
+                          myscale = 1)
 kdata <- transform(kdata, y = rkoenker(nn, loc = mylocat, scale = myscale))
 fit  <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), kdata, trace = TRUE)
-fit2 <- vglm(y ~ x2, studentt2(df = 2), kdata, trace = TRUE) # 'same' as fit
+fit2 <- vglm(y ~ x2, studentt2(df = 2), kdata, trace = TRUE)  # 'same' as fit
 
 coef(fit, matrix = TRUE)
 head(fitted(fit))
@@ -112,7 +119,7 @@ matplot(with(kdata, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
 legend("bottomright", lty = 1:3, lwd = 3, legend = colnames(fitted(fit)),
        col = 1:3) }
 
-fit@extra$percentile # Sample quantiles
+fit@extra$percentile  # Sample quantiles
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/koenkerUC.Rd b/man/koenkerUC.Rd
index d8744e0..ad34e11 100644
--- a/man/koenkerUC.Rd
+++ b/man/koenkerUC.Rd
@@ -10,6 +10,7 @@
   quantile/expectile function and random generation for the
   Koenker distribution.
 
+
 }
 \usage{
 dkoenker(x, location = 0, scale = 1, log = FALSE)
@@ -22,16 +23,22 @@ rkoenker(n, location = 0, scale = 1)
   \item{x, q}{
   Vector of expectiles/quantiles.
   See the terminology note below.
+
+
   }
   \item{p}{
   Vector of probabilities. % (tau or \eqn{\tau}).
   These should lie in \eqn{(0,1)}.
+
+
   }
   \item{n, log}{See \code{\link[stats:Uniform]{runif}}.}
   \item{location, scale}{
   Location and scale parameters.
   The latter should have positive values.
   Values of these vectors are recycled.
+
+
   }
 }
 \details{
@@ -41,6 +48,7 @@ rkoenker(n, location = 0, scale = 1)
   Further details about this distribution are given in
   \code{\link{koenker}}.
 
+
 }
 \value{
   \code{dkoenker(x)} gives the density function.
@@ -48,6 +56,7 @@ rkoenker(n, location = 0, scale = 1)
   \code{qkoenker(p)} gives the expectile and quantile function.
   \code{rkoenker(n)} gives \eqn{n} random variates.
 
+
 }
 \author{ T. W. Yee }
 
@@ -58,12 +67,13 @@ rkoenker(n, location = 0, scale = 1)
   \code{\link[stats:TDist]{dt}},
   \code{\link{koenker}}.
 
+
 }
 
 \examples{
 my_p <- 0.25; y <- rkoenker(nn <- 5000)
-(myexp = qkoenker(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+(myexp <- qkoenker(my_p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
 # Equivalently:
 I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
 I2 <- mean(y >  myexp) * mean(-myexp + y[y >  myexp])
@@ -76,15 +86,15 @@ I2 <- sum(-myexp + y[y >  myexp])
 myloc <- 1; myscale <- 2
 yy <- rkoenker(nn, myloc, myscale)
 (myexp <- qkoenker(my_p, myloc, myscale))
-sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
-pkoenker(mean(yy), myloc, myscale) #  Should be 0.5
-abs(qkoenker(0.5, myloc, myscale) - mean(yy)) #  Should be 0
-abs(pkoenker(myexp, myloc, myscale) - my_p) #  Should be 0
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
+pkoenker(mean(yy), myloc, myscale)  # Should be 0.5
+abs(qkoenker(0.5, myloc, myscale) - mean(yy))  # Should be 0
+abs(pkoenker(myexp, myloc, myscale) - my_p)  # Should be 0
 integrate(f = dkoenker, lower = -Inf, upper = Inf,
-          locat = myloc, scale = myscale) # Should be 1
+          locat = myloc, scale = myscale)  # Should be 1
 
 y <- seq(-7, 7, len = 201)
-max(abs(dkoenker(y) - dt(y / sqrt(2), df = 2) / sqrt(2))) # Should be 0
+max(abs(dkoenker(y) - dt(y / sqrt(2), df = 2) / sqrt(2)))  # Should be 0
 \dontrun{ plot(y, dkoenker(y), type = "l", col = "blue", las = 1,
      ylim = c(0, 0.4), main = "Blue = Koenker; orange = N(0, 1)")
 lines(y, dnorm(y), type = "l", col = "orange")
diff --git a/man/kumar.Rd b/man/kumar.Rd
index 3e89a20..17b5e43 100644
--- a/man/kumar.Rd
+++ b/man/kumar.Rd
@@ -101,7 +101,7 @@ kumar(lshape1 = "loge", lshape2 = "loge",
 
 }
 \examples{
-shape1 <- exp(1); shape2 <- exp(2);
+shape1 <- exp(1); shape2 <- exp(2)
 kdata <- data.frame(y = rkumar(n = 1000, shape1, shape2))
 fit <- vglm(y ~ 1, kumar, kdata, trace = TRUE)
 c(with(kdata, mean(y)), head(fitted(fit), 1))
diff --git a/man/kumarUC.Rd b/man/kumarUC.Rd
index 568bedc..1fe19c3 100644
--- a/man/kumarUC.Rd
+++ b/man/kumarUC.Rd
@@ -67,7 +67,7 @@ Q <- qkumar(probs, shape1, shape2)
 lines(Q, dkumar(Q, shape1, shape2), col = "purple", lty = 3, type = "h")
 lines(Q, pkumar(Q, shape1, shape2), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pkumar(Q, shape1, shape2) - probs)) # Should be 0
+max(abs(pkumar(Q, shape1, shape2) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/lambertW.Rd b/man/lambertW.Rd
index 9443a65..0a4cf22 100644
--- a/man/lambertW.Rd
+++ b/man/lambertW.Rd
@@ -46,6 +46,8 @@ Maximum number of iterations of third-order Halley's method.
 
 }
 \references{
+
+
 Corless, R. M. and Gonnet, G. H. and
 Hare, D. E. G. and Jeffrey, D. J. and Knuth, D. E. (1996)
 On the Lambert \eqn{W} function.
diff --git a/man/laplace.Rd b/man/laplace.Rd
index 04a8dde..231f658 100644
--- a/man/laplace.Rd
+++ b/man/laplace.Rd
@@ -18,6 +18,7 @@ laplace(llocation = "identity", lscale = "loge",
   scale parameter \eqn{b}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{ilocation, iscale}{
   Optional initial values.
@@ -116,8 +117,8 @@ Coef(fit)
 with(ldata, median(y))
 
 ldata <- data.frame(x = runif(nn <- 1001))
-ldata <- transform(ldata, y = rlaplace(nn, loc = 2, scale = exp(-1+1*x)))
-coef(vglm(y ~ x, laplace(iloc = .2, imethod = 2, zero = 1), ldata,
+ldata <- transform(ldata, y = rlaplace(nn, loc = 2, scale = exp(-1 + 1*x)))
+coef(vglm(y ~ x, laplace(iloc = 0.2, imethod = 2, zero = 1), ldata,
           trace = TRUE), matrix = TRUE)
 }
 \keyword{models}
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
index 0fbb855..c99d76c 100644
--- a/man/laplaceUC.Rd
+++ b/man/laplaceUC.Rd
@@ -21,18 +21,26 @@ rlaplace(n, location = 0, scale = 1)
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. Positive integer of length 1.}
+  \item{n}{number of observations.
+  Same as in \code{\link[stats]{runif}}.
+
+  }
   \item{location}{
     the location parameter \eqn{a}, which is the mean.
+
+
   }
   \item{scale}{
   the scale parameter \eqn{b}.
   Must consist of positive values.
+
+
   }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -49,23 +57,30 @@ rlaplace(n, location = 0, scale = 1)
   \eqn{b>0}.
   The mean is \eqn{a}{a} and the variance is \eqn{2b^2}. 
 
+
   See \code{\link{laplace}}, the \pkg{VGAM} family function
   for estimating the two parameters by maximum likelihood estimation,
   for formulae and details.
   Apart from \code{n}, all the above arguments may be vectors and
   are recycled to the appropriate length if necessary.
 
+
 }
 \value{
   \code{dlaplace} gives the density,
   \code{plaplace} gives the distribution function,
   \code{qlaplace} gives the quantile function, and
   \code{rlaplace} generates random deviates.
+
+
 }
 \references{
-Evans, M., Hastings, N. and Peacock, B. (2000)
+
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
+
+
 }
 \author{ T. W. Yee }
 %\note{
@@ -75,6 +90,8 @@ New York: Wiley-Interscience, Third edition.
 
 \seealso{
   \code{\link{laplace}}.
+
+
 }
 \examples{
 loc <- 1; b <- 2
@@ -84,18 +101,18 @@ loc      # population mean
 var(y)   # sample variance
 2 * b^2  # population variance
 
-\dontrun{ loc = 0; b = 1.5; x = seq(-5, 5, by = 0.01)
+\dontrun{ loc <- 0; b <- 1.5; x <- seq(-5, 5, by = 0.01)
 plot(x, dlaplace(x, loc, b), type = "l", col = "blue", ylim = c(0,1),
-     main = "Blue is density, red is cumulative distribution function",
+     main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
 abline(h = 0, col = "blue", lty = 2)
 lines(qlaplace(seq(0.05,0.95,by = 0.05), loc, b),
-      dlaplace(qlaplace(seq(0.05,0.95,by = 0.05), loc, b), loc, b),
+      dlaplace(qlaplace(seq(0.05, 0.95, by = 0.05), loc, b), loc, b),
       col = "purple", lty = 3, type = "h")
-lines(x, plaplace(x, loc, b), type = "l", col = "red")
+lines(x, plaplace(x, loc, b), type = "l", col = "orange")
 abline(h = 0, lty = 2) }
 
-plaplace(qlaplace(seq(0.05,0.95,by = 0.05), loc, b), loc, b)
+plaplace(qlaplace(seq(0.05, 0.95, by = 0.05), loc, b), loc, b)
 }
 \keyword{distribution}
 
diff --git a/man/latvar.Rd b/man/latvar.Rd
index cdec921..441b254 100644
--- a/man/latvar.Rd
+++ b/man/latvar.Rd
@@ -1,4 +1,4 @@
-\name{lv}
+\name{latvar}
 \alias{lv}
 \alias{latvar}
 %- Also NEED an '\alias' for EACH other topic documented here.
@@ -9,8 +9,8 @@
 
 }
 \usage{
-lv(object, ...)
 latvar(object, ...)
+    lv(object, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -67,10 +67,9 @@ Constrained additive ordination.
 }
 \author{ Thomas W. Yee }
 
-\note{
-  \code{\link{latvar}} and \code{\link{lv}} are identical and will remain
-  available for a short while.
-  But soon \code{\link{lv}} will be withdrawn.
+\section{Warning}{
+  \code{\link{latvar}} and \code{\link{lv}} are identical,
+  but the latter will be deprecated soon.
 
 
   Latent variables are not really applicable to
@@ -91,15 +90,15 @@ Constrained additive ordination.
 
 \examples{
 \dontrun{
-hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars
+hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardized environmental vars
 set.seed(123)
 p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           family = poissonff, data = hspider, Rank = 1, df1.nl =
           c(Zoraspin = 2.5, 3), Bestof = 3, Crow1positive = TRUE)
 
-var(latvar(p1)) # Scaled to unit variance  # Scaled to unit variance
-c(latvar(p1))   # Estimated site scores
+var(latvar(p1))  # Scaled to unit variance
+c(latvar(p1))    # Estimated site scores
 }
 }
 \keyword{models}
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
index 570ced0..d4adbbb 100644
--- a/man/leipnik.Rd
+++ b/man/leipnik.Rd
@@ -6,6 +6,7 @@
   Estimates the two parameters of a (transformed) Leipnik distribution
   by maximum likelihood estimation.
 
+
 }
 \usage{
 leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
@@ -16,11 +17,13 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
   Link function for the \eqn{\mu}{mu} and \eqn{\lambda}{lambda} parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imu, ilambda}{
   Numeric. Optional initial values for \eqn{\mu}{mu} and
   \eqn{\lambda}{lambda}.
 
+
   }
 }
 \details{
@@ -33,15 +36,17 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
   (y(1-y))^(-1/2) * (1 + (y-mu)^2 / (y*(1-y)))^(-lambda/2) / 
   Beta((lambda+1)/2, 1/2)}
   where \eqn{0 < y < 1} and \eqn{\lambda > -1}{lambda > -1}.
-  The mean is \eqn{\mu}{mu} (returned as the fitted values) and the variance is
-  \eqn{1/\lambda}{1/lambda}.
+  The mean is \eqn{\mu}{mu} (returned as the fitted values)
+  and the variance is \eqn{1/\lambda}{1/lambda}.
+
 
   Jorgensen (1997) calls the above the \bold{transformed} Leipnik
-  distribution, and if \eqn{y = (x+1)/2} and \eqn{\mu = (\theta+1)/2}{mu
-  = (theta+1)/2}, then the distribution of \eqn{X} as a function of
+  distribution, and if \eqn{y = (x+1)/2}
+  and \eqn{\mu = (\theta+1)/2}{mu = (theta+1)/2},
+  then the distribution of \eqn{X} as a function of
   \eqn{x} and \eqn{\theta}{theta} is known as the (untransformed)
   Leipnik distribution.  Here, both \eqn{x} and \eqn{\theta}{theta}
-  are in \eqn{(-1,1)}.
+  are in \eqn{(-1, 1)}.
 
 
 }
@@ -83,14 +88,14 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
 \section{Warning }{
   If \code{llambda="identity"} then it is possible that the
   \code{lambda} estimate becomes less than \eqn{-1}, i.e., out of
-  bounds. One way to stop this is to choose \code{llambda="loge"},
+  bounds. One way to stop this is to choose \code{llambda = "loge"},
   however, \code{lambda} is then constrained to be positive.
 
 
 }
 
 \seealso{ 
-    \code{\link{mccullagh89}}.
+  \code{\link{mccullagh89}}.
 
 
 }
@@ -105,10 +110,12 @@ summary(fit)
 coef(fit, matrix = TRUE)
 Coef(fit)
 
-sum(weights(fit)) # sum of the prior weights
-sum(weights(fit, type = "w")) # sum of the working weights
+sum(weights(fit))  # Sum of the prior weights
+sum(weights(fit, type = "work"))  # Sum of the working weights
 }
 \keyword{models}
 \keyword{regression}
 
-%fit <- vglm(y ~ 1, leipnik(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
+%fit <- vglm(y ~ 1, leipnik(ilambda = 1), tr = TRUE, cri = "c", checkwz = FALSE)
+
+
diff --git a/man/lerch.Rd b/man/lerch.Rd
index 9bfb4ab..fbf5509 100644
--- a/man/lerch.Rd
+++ b/man/lerch.Rd
@@ -107,7 +107,7 @@ plot(x, lerch(x, s = s, v = v), type = "l", col = "blue", las = 1,
 abline(v = 0, h = 1, lty = "dashed", col = "gray")
 
 s <- rnorm(n = 100)
-max(abs(zeta(s) - lerch(x = 1, s = s, v = 1))) # This fails (a bug); should be 0
+max(abs(zeta(s) - lerch(x = 1, s = s, v = 1)))  # This fails (a bug); should be 0
 }
 }
 \keyword{math}
diff --git a/man/levy.Rd b/man/levy.Rd
index 18a453c..bf2884a 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -6,6 +6,7 @@
 Estimates the two parameters of the Levy distribution
 by maximum likelihood estimation.
 
+
 }
 \usage{
 levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
@@ -43,11 +44,10 @@ levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
   whose density function has a tractable form. 
   The formula for the density is
  \deqn{f(y;\gamma,\delta) = \sqrt{\frac{\gamma}{2\pi}}
-       \exp \left( \frac{-\gamma}{2(y - \delta)} \right) /
-       (y - \delta)^{3/2} }{%
+       \exp \left( \frac{-\gamma}{2(y - \delta)}
+            \right) / (y - \delta)^{3/2} }{%
   f(y;gamma,delta) = sqrt(gamma / (2 pi))
-       exp( -gamma / (2(y - delta))) /
-       (y - \delta)^{3/2} }
+       exp( -gamma / (2(y - delta))) / (y - delta)^(3/2) }
   where \eqn{\delta<y<\infty}{delta<y<Inf} and \eqn{\gamma>0}{gamma>0}.
   The mean does not exist.
 
@@ -85,20 +85,20 @@ levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
 }
 \examples{
 nn <- 1000; delta <- 0
-mygamma <- 1 # log link ==> 0 is the answer
-ldata <- data.frame(y = delta + mygamma/rnorm(nn)^2) # Levy(mygamma, delta)
+mygamma <- 1  # log link ==> 0 is the answer
+ldata <- data.frame(y = delta + mygamma/rnorm(nn)^2)  # Levy(mygamma, delta)
 
 # Cf. Table 1.1 of Nolan for Levy(1,0)
-with(ldata, sum(y > 1) / length(y)) # Should be 0.6827
-with(ldata, sum(y > 2) / length(y)) # Should be 0.5205
+with(ldata, sum(y > 1) / length(y))  # Should be 0.6827
+with(ldata, sum(y > 2) / length(y))  # Should be 0.5205
 
-fit <- vglm(y ~ 1, levy(delta = delta), ldata, trace = TRUE) # 1 parameter
+fit <- vglm(y ~ 1, levy(delta = delta), ldata, trace = TRUE)  # 1 parameter
 fit <- vglm(y ~ 1, levy(idelta = delta, igamma = mygamma),
-           ldata, trace = TRUE) # 2 parameters
+           ldata, trace = TRUE)  # 2 parameters
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
-head(weights(fit, type = "w"))
+head(weights(fit, type = "work"))
 }
 \keyword{models}
 \keyword{regression}
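
The tail probabilities quoted in the levy() example comments (0.6827 and
0.5205, from Nolan's Table 1.1 for Levy(1, 0)) follow from the density
displayed above, since Y = 1/Z^2 with Z standard normal. A brief numerical
confirmation in base R (illustrative only; dlevy10 is a throwaway name):

dlevy10 <- function(y) sqrt(1 / (2 * pi)) * exp(-1 / (2 * y)) / y^(3/2)  # gamma = 1, delta = 0
integrate(dlevy10, lower = 1, upper = Inf)$value  # Should be about 0.6827
2 * pnorm(1) - 1                                  # Same value: P(|Z| < 1)
integrate(dlevy10, lower = 2, upper = Inf)$value  # Should be about 0.5205
2 * pnorm(1 / sqrt(2)) - 1                        # Same value: P(|Z| < 1/sqrt(2))
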
diff --git a/man/lgammaUC.Rd b/man/lgammaUC.Rd
index 99466a9..a5721b7 100644
--- a/man/lgammaUC.Rd
+++ b/man/lgammaUC.Rd
@@ -31,6 +31,7 @@ rlgamma(n, location = 0, scale = 1, k = 1)
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -40,8 +41,10 @@ rlgamma(n, location = 0, scale = 1, k = 1)
   \code{qlgamma} gives the quantile function, and
   \code{rlgamma} generates random deviates.
 
+
 }
 \references{
+
 Kotz, S. and Nadarajah, S. (2000)
 \emph{Extreme Value Distributions: Theory and Applications},
 pages 48--49,
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index 1cabedd..2cb4ba0 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -21,12 +21,14 @@ lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
   and the positive scale parameter \eqn{b}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{link, lshape}{
   Parameter link function applied to 
   the positive shape parameter \eqn{k}. 
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{init.k, ishape}{
   Initial value for \eqn{k}.
@@ -34,10 +36,12 @@ lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
   If failure to converge occurs, try some other value.
   The default means an initial value is determined internally.
 
+
   }
   \item{ilocation, iscale}{ Initial value for \eqn{a} and \eqn{b}.
   The defaults mean an initial value is determined internally for each.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
@@ -46,6 +50,7 @@ lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
   The default value means none are modelled as intercept-only terms.
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -75,8 +80,10 @@ lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
   The object is used by modelling functions such as \code{\link{vglm}},
   and \code{\link{vgam}}.
 
+
 }
 \references{
+
 Kotz, S. and Nadarajah, S. (2000)
 \emph{Extreme Value Distributions: Theory and Applications},
 pages 48--49,
@@ -93,10 +100,10 @@ New York: Wiley.
 
 \author{ T. W. Yee }
 \note{ 
-  The standard log-gamma distribution can be viewed as a generalization
-  of the standard type 1 extreme value density: when \eqn{k = 1}
-  the distribution of \eqn{-Y} is the standard type 1 extreme value
-  distribution.
+  The standard log-gamma distribution can be viewed as a
+  generalization of the standard type 1 extreme value density:
+  when \eqn{k = 1} the distribution of \eqn{-Y} is the standard
+  type 1 extreme value distribution.
 
 
   The standard log-gamma distribution is fitted with \code{lgammaff}
@@ -112,6 +119,7 @@ New York: Wiley.
   \code{\link{gamma1}},
   \code{\link[base:Special]{lgamma}}.
 
+
 }
 \examples{
 ldata <- data.frame(y = rlgamma(100, k = exp(1)))
@@ -120,7 +128,7 @@ summary(fit)
 coef(fit, matrix = TRUE)
 Coef(fit)
 
-ldata <- data.frame(x = runif(nn <- 5000))     # Another example
+ldata <- data.frame(x = runif(nn <- 5000))  # Another example
 ldata <- transform(ldata, loc = -1 + 2 * x, Scale = exp(1))
 ldata <- transform(ldata, y = rlgamma(nn, loc, scale = Scale, k = exp(0)))
 fit2 <- vglm(y ~ x, lgamma3ff(zero = 2:3), ldata, trace = TRUE, crit = "c")
diff --git a/man/lindUC.Rd b/man/lindUC.Rd
index ec46d0c..2819bc8 100644
--- a/man/lindUC.Rd
+++ b/man/lindUC.Rd
@@ -16,19 +16,26 @@
 \usage{
 dlind(x, theta, log = FALSE)
 plind(q, theta)
-%qlind(p, theta)
 rlind(n, theta)
 }
+%qlind(p, theta)
 \arguments{
   \item{x, q}{vector of quantiles.}
 %  \item{p}{vector of probabilities.}
-  \item{n}{number of observations. }
+  \item{n}{number of observations. 
+
+
+  }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
+  }
+  \item{theta}{positive parameter.
+
+
   }
-  \item{theta}{positive parameter. }
 
 }
 \value{
@@ -43,6 +50,7 @@ rlind(n, theta)
 \details{
   See \code{\link{lindley}} for details.
 
+
 }
 %\note{
 %
@@ -50,6 +58,7 @@ rlind(n, theta)
 \seealso{
   \code{\link{lindley}}.
 
+
 }
 \examples{
 theta <- exp(-1); x <- seq(0.0, 17, length = 700)
@@ -63,7 +72,7 @@ abline(h = 1, col = "grey", lty = "dashed") }
 
 
 % probs <- seq(0.01, 0.99, by = 0.01)
-% max(abs(plind(qlind(p = probs, theta), theta) - probs)) # Should be 0
+% max(abs(plind(qlind(p = probs, theta), theta) - probs))  # Should be 0
 
 
 
diff --git a/man/lindley.Rd b/man/lindley.Rd
index 8b7696d..eb9c839 100644
--- a/man/lindley.Rd
+++ b/man/lindley.Rd
@@ -6,6 +6,7 @@
   Estimates the (1-parameter) Lindley distribution
   by maximum likelihood estimation.
 
+
 }
 \usage{
 lindley(link = "loge", itheta = NULL, zero = NULL)
@@ -17,16 +18,20 @@ lindley(link = "loge", itheta = NULL, zero = NULL)
   Link function applied to the (positive) parameter.
   See \code{\link{Links}} for more choices.
 
+
   }
 
 % \item{earg}{
 % List. Extra argument for the link.
 % See \code{earg} in \code{\link{Links}} for general information.
+
+
 % }
 
   \item{itheta, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
+
   }
 }
 \details{
diff --git a/man/lino.Rd b/man/lino.Rd
index 5f6bf30..9e83b34 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -114,7 +114,7 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
 }
 
 \examples{
-ldata1 <- data.frame(y = rbeta(n = 1000, exp(0.5), exp(1))) # ~ standard beta
+ldata1 <- data.frame(y = rbeta(n = 1000, exp(0.5), exp(1)))  # ~ standard beta
 fit <- vglm(y ~ 1, lino, ldata1, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/linoUC.Rd b/man/linoUC.Rd
index 3ae7215..b4d2e43 100644
--- a/man/linoUC.Rd
+++ b/man/linoUC.Rd
@@ -83,7 +83,7 @@ probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qlino(probs, shape1 = shape1, shape2 = shape2, lambda = lambda)
 lines(Q, dlino(Q, shape1 = shape1, shape2 = shape2, lambda = lambda),
       col = "purple", lty = 3, type = "h")
-plino(Q, shape1 = shape1, shape2 = shape2, l = lambda) - probs # Should be all 0
+plino(Q, shape1 = shape1, shape2 = shape2, l = lambda) - probs  # Should be all 0
 }
 }
 \keyword{distribution}
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index 5a6c690..80abc2a 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -100,7 +100,7 @@ fit0 <- vglm(BMI ~ bs(age, df = 4), lms.bcg, bmi.nz, trace = TRUE)
 coef(fit0, matrix = TRUE)
 \dontrun{
 par(mfrow = c(1, 1))
-plotvgam(fit0, se = TRUE) # Plot mu function (only)
+plotvgam(fit0, se = TRUE)  # Plot mu function (only)
 }
 
 # Use a trick: fit0 is used for initial values for fit1.
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index 16a55f9..eb7f50e 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -242,9 +242,9 @@ contains further information and examples.
 \dontrun{ require(VGAMdata)
 mysubset <- subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
 mysubset <- transform(mysubset, BMI = weight / height^2)
-BMIdata <- mysubset[, c("age", "BMI")]
-BMIdata <- na.omit(BMIdata)
-BMIdata <- subset(BMIdata, BMI < 80 & age < 65) # Delete an outlier
+BMIdata <- na.omit(mysubset)
+BMIdata <- subset(BMIdata, BMI < 80 & age < 65,
+                   select = c(age, BMI))  # Delete an outlier
 summary(BMIdata)
 
 fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata)
@@ -252,12 +252,12 @@ fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata)
 head(predict(fit))
 head(fitted(fit))
 head(BMIdata)
-head(cdf(fit)) # Person 56 is probably overweight, given his age
-100 * colMeans(c(depvar(fit)) < fitted(fit)) # Empirical proportions
+head(cdf(fit))  # Person 56 is probably overweight, given his age
+100 * colMeans(c(depvar(fit)) < fitted(fit))  # Empirical proportions
 
 # Convergence problems? Try this trick: fit0 is a simpler model used for fit1
-fit0 <- vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1,3)), BMIdata)
-fit1 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata, 
+fit0 <- vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1, 3)), data = BMIdata)
+fit1 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = BMIdata,
             etastart = predict(fit0))
 }
 
@@ -281,3 +281,6 @@ aa@post$deplot  # Contains density function values
 \keyword{models}
 \keyword{regression}
 
+% BMIdata <- subset(mysubset, select = c(age, BMI))
+% BMIdata <- mysubset[, c("age", "BMI")]
+
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index 40ea48c..e252992 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -173,7 +173,7 @@ par(mfrow = c(1, 1), lwd = 2)
     main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
 aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
 aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue", Attach = TRUE)
-with(aa@post, deplot) # Contains density function values; == a@post$deplot
+with(aa@post, deplot)  # Contains density function values; == a@post$deplot
 }
 }
 \keyword{models}
diff --git a/man/log1pexp.Rd b/man/log1pexp.Rd
new file mode 100644
index 0000000..e588800
--- /dev/null
+++ b/man/log1pexp.Rd
@@ -0,0 +1,66 @@
+\name{log1pexp}
+\alias{log1pexp}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+Logarithms with a Unit Offset and Exponential Term
+
+
+}
+\description{
+Computes \code{log(1 + exp(x))} accurately.
+
+}
+\usage{
+log1pexp(x)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x}{
+A vector of reals (numeric).
+Complex numbers are not allowed since \code{\link{log1p}} does
+not handle them.
+
+
+}
+}
+\details{
+  Computes \code{log(1 + exp(x))} accurately.
+  An adjustment is made when \code{x} is positive and large in value.
+
+
+}
+\value{
+  Returns \code{log(1 + exp(x))}.
+
+
+}
+%\references{
+%
+%}
+%\author{
+%T. W. Yee
+%
+%}
+%\note{
+%
+%}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+  \code{\link[base:log]{log1p}},
+  \code{\link[base:log]{exp}}.
+
+
+}
+\examples{
+x <-  c(10, 50, 100, 200, 400, 500, 800, 1000, 1e4, 1e5, 1e20, Inf)
+log1pexp(x)
+log(1 + exp(x))  # Naive; suffers from overflow
+x <- -c(10, 50, 100, 200, 400, 500, 800, 1000, 1e4, 1e5, 1e20, Inf)
+log1pexp(x)
+log(1 + exp(x))  # Naive; suffers from inaccuracy
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{math}
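A minimal sketch of the usual numerically stable evaluation of
log(1 + exp(x)) described above; the cut-off values are illustrative only,
and the exact adjustment used by VGAM's log1pexp() may differ.

log1pexp.sketch <- function(x) {
  ans <- x                       # For very large x, log(1 + exp(x)) ~= x
  small <- x <= 18               # Illustrative thresholds only
  mid   <- x > 18 & x <= 33.3
  ans[small] <- log1p(exp(x[small]))
  ans[mid]   <- x[mid] + exp(-x[mid])
  ans
}
log1pexp.sketch(c(-1000, 0, 10, 50, 1000))  # No overflow, unlike log(1 + exp(x))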
diff --git a/man/logF.Rd b/man/logF.Rd
new file mode 100644
index 0000000..5573bf7
--- /dev/null
+++ b/man/logF.Rd
@@ -0,0 +1,110 @@
+\name{logF}
+\alias{logF}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Natural Exponential Family Generalized Hyperbolic Secant Distribution
+        Family Function
+
+}
+\description{
+   Maximum likelihood estimation of
+   the 1-parameter log F distribution.
+
+
+}
+\usage{
+ logF(lshape1 = "loge", lshape2 = "loge",
+      ishape1 = NULL, ishape2 = 1, imethod = 1) 
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lshape1, lshape2}{
+% Character.
+  Parameter link functions for
+  the shape parameters.
+  Called \eqn{\alpha}{alpha} and \eqn{\beta}{beta} respectively.
+  See \code{\link{Links}} for more choices.
+
+
+  }
+  \item{ishape1, ishape2}{
+  Optional initial values for the shape parameters.
+  If given, it must be numeric and values are recycled to the
+  appropriate length.
+  The default is to choose the value internally.
+  See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+  }
+  \item{imethod}{
+  Initialization method.
+  Either the value 1, 2, or \ldots.
+  See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+  }
+}
+\details{
+  The density for this distribution is
+ \deqn{f(y; \alpha, \beta) = \exp(\alpha y) / [B(\alpha,\beta)
+                             (1 + e^y)^{\alpha + \beta}] }{%
+       f(y;  alpha,  beta) =  exp(alpha y) / [B(alpha, beta) *
+                             (1 + exp(y))^(alpha + beta)] }
+  where \eqn{y} is real,
+  \eqn{\alpha > 0},
+  \eqn{\beta > 0},
+  and \eqn{B(., .)} is the beta function \code{\link[base:Special]{beta}}.
+
+
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}}
+  and \code{\link{vgam}}.
+
+
+}
+\references{
+
+  Jones, M. C. (2008).
+  On a class of distributions with simple exponential tails.
+  \emph{Statistica Sinica},
+  \bold{18}(3), 1101--1110.
+% Section 3.2.
+
+
+}
+\author{ Thomas W. Yee }
+%\section{Warning}{
+%
+%}
+
+%\note{ 
+%}
+
+\seealso{
+  \code{\link{dlogF}},
+  \code{\link{logff}}.
+
+
+}
+
+\examples{
+nn <- 1000
+ldata <- data.frame(y1 = rnorm(nn, m = +1, sd = exp(2)),  # Not proper data
+                    x2 = rnorm(nn, m = -1, sd = exp(2)),
+                    y2 = rnorm(nn, m = -1, sd = exp(2)))  # Not proper data
+fit1 <- vglm(y1 ~ 1 , logF, data = ldata, trace = TRUE)
+fit2 <- vglm(y2 ~ x2, logF, data = ldata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+summary(fit2)
+vcov(fit2)
+
+head(fitted(fit1))
+with(ldata, mean(y1))
+max(abs(head(fitted(fit1)) - with(ldata, mean(y1))))
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/logF.UC.Rd b/man/logF.UC.Rd
new file mode 100644
index 0000000..f07745f
--- /dev/null
+++ b/man/logF.UC.Rd
@@ -0,0 +1,75 @@
+\name{dlogF}
+\alias{dlogF}
+% \alias{qnefghs}
+\title{ log F Distribution }
+\description{
+  Density
+  for the log F distribution.
+
+% quantile function
+
+
+}
+\usage{
+dlogF(x, shape1, shape2, log = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x}{
+   Vector of quantiles.
+
+
+  }
+  \item{shape1, shape2}{Positive shape parameters.
+
+
+  }
+% \item{p}{vector of probabilities.}
+% \item{n}{number of observations. A single positive integer.}
+
+  \item{log}{
+  If \code{TRUE} then the log density is returned,
+  otherwise the density.
+
+
+  }
+
+}
+\details{
+   The details are given in \code{\link{logF}}.
+
+
+}
+\value{
+  \code{dlogF} gives the density.
+% \code{pnefghs} gives the distribution function, and
+% \code{qnefghs} gives the quantile function, and
+% \code{rnefghs} generates random deviates.
+
+
+}
+
+%\references{ 
+%
+%
+%
+%}
+
+\author{ T. W. Yee }
+%\note{ 
+%
+%}
+
+\seealso{
+  \code{\link{hypersecant}}.
+
+
+}
+\examples{
+\dontrun{ shape1 <- 1.5; shape2 <- 0.5; x <- seq(-5, 8, length = 1001)
+plot(x, dlogF(x, shape1, shape2), type = "l",
+     las = 1, col = "blue", ylab = "pdf",
+     main = "log F density function")
+}
+}
+\keyword{distribution}
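A minimal sketch of the log F density quoted in man/logF.Rd, evaluated on
the log scale; the implementation of dlogF() in the package may differ in
its numerical details.

dlogF.sketch <- function(x, shape1, shape2, log = FALSE) {
  logdens <- shape1 * x - lbeta(shape1, shape2) -
             (shape1 + shape2) * log1p(exp(x))
  if (log) logdens else exp(logdens)
}
integrate(dlogF.sketch, -Inf, Inf, shape1 = 1.5, shape2 = 0.5)$value  # Near 1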
diff --git a/man/logUC.Rd b/man/logUC.Rd
index 251a6f9..6fbb2f5 100644
--- a/man/logUC.Rd
+++ b/man/logUC.Rd
@@ -12,6 +12,7 @@
 
 % quantile function
 
+
 }
 \usage{
 dlog(x, prob, log = FALSE)
@@ -23,6 +24,8 @@ rlog(n, prob, Smallno = 1.0e-6)
   \item{x, q}{
    Vector of quantiles. For the density, it should be a vector with
    positive integer values in order for the probabilities to be positive.
+
+
   }
 % \item{p}{vector of probabilities.}
   \item{n}{number of observations. A single positive integer.}
@@ -30,21 +33,28 @@ rlog(n, prob, Smallno = 1.0e-6)
   The parameter value \eqn{c} described in \code{\link{logff}}.
    Here it is called \code{prob} because \eqn{0<c<1} is the range.
    For \code{rlog()} this parameter must be of length 1.
+
+
   }
   \item{log, log.p}{
   Logical.
   If \code{log.p = TRUE} then all probabilities \code{p} are
   given as \code{log(p)}.
+
+
   }
   \item{Smallno}{
   Numeric, a small value used by the rejection method for determining
   the upper limit of the distribution.
   That is, \code{plog(U, prob) > 1-Smallno} where \code{U} is the upper limit.
+
+
   }
 }
 \details{
    The details are given in \code{\link{logff}}.
 
+
 }
 \value{
   \code{dlog} gives the density,
@@ -55,15 +65,16 @@ rlog(n, prob, Smallno = 1.0e-6)
 }
 \references{ 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
+
 
 }
 \author{ T. W. Yee }
 \note{ 
-   Given some response data, the \pkg{VGAM} family function
-   \code{\link{logff}} estimates the parameter \code{prob}.
+  Given some response data, the \pkg{VGAM} family function
+  \code{\link{logff}} estimates the parameter \code{prob}.
   For \code{plog()}, if argument \code{q} contains large values
   and/or \code{q} is long in length
   then the memory requirements may be very high.
@@ -85,7 +96,7 @@ rlog(20, 0.5)
 \dontrun{ prob <- 0.8; x <- 1:10
 plot(x, dlog(x, prob = prob), type = "h", ylim = 0:1,
      sub = "prob=0.8", las = 1, col = "blue", ylab = "Probability",
-     main="Logarithmic distribution: blue=density; red=distribution function")
-lines(x + 0.1, plog(x, prob = prob), col = "red", lty = 3, type = "h") }
+     main = "Logarithmic distribution: blue=density; orange=distribution function")
+lines(x + 0.1, plog(x, prob = prob), col = "orange", lty = 3, type = "h") }
 }
 \keyword{distribution}
diff --git a/man/logc.Rd b/man/logc.Rd
index 767fcf5..7822998 100644
--- a/man/logc.Rd
+++ b/man/logc.Rd
@@ -85,9 +85,9 @@ logc(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 }
 \examples{
 \dontrun{
-logc(seq(-0.2, 1.1, by = 0.1)) # Has NAs
+logc(seq(-0.2, 1.1, by = 0.1))  # Has NAs
 }
-logc(seq(-0.2, 1.1, by = 0.1), bvalue = 1 - .Machine$double.eps) # Has no NAs
+logc(seq(-0.2, 1.1, by = 0.1), bvalue = 1 - .Machine$double.eps)  # Has no NAs
 }
 \keyword{math}
 \keyword{models}
diff --git a/man/loge.Rd b/man/loge.Rd
index e14ca76..5394939 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -1,18 +1,22 @@
 \name{loge}
 \alias{loge}
-\alias{nloge}
+\alias{negloge}
+\alias{logneg}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Log link function }
+\title{ Log link function, and variants }
 \description{
   Computes the log transformation, including its inverse and the first
   two derivatives.
 
+
 }
 \usage{
 loge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
      short = TRUE, tag = FALSE)
-nloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
-      short = TRUE, tag = FALSE)
+negloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
+        short = TRUE, tag = FALSE)
+logneg(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
+       short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -45,10 +49,16 @@ nloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 
   The function \code{loge} computes
-  \eqn{\log(\theta)}{log(theta)} whereas \code{nloge} computes
+  \eqn{\log(\theta)}{log(theta)} whereas \code{negloge} computes
   \eqn{-\log(\theta)=\log(1/\theta)}{-log(theta)=log(1/theta)}.
 
 
+  The function \code{logneg} computes
+  \eqn{\log(-\theta)}{log(-theta)}, hence is suitable for parameters
+  that are negative, e.g.,
+  a trap-shy effect in \code{\link{posbernoulli.b}}.
+
+
 }
 \value{
   The following concerns \code{loge}.
@@ -89,15 +99,17 @@ nloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
     \code{\link{loglog}},
     \code{\link[base:Log]{log}},
     \code{\link{logoff}},
-    \code{\link{lambertW}}.
+    \code{\link{lambertW}},
+    \code{\link{posbernoulli.b}}.
 
 
 }
 \examples{
 \dontrun{ loge(seq(-0.2, 0.5, by = 0.1))
  loge(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin)
-nloge(seq(-0.2, 0.5, by = 0.1))
-nloge(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin) }
+negloge(seq(-0.2, 0.5, by = 0.1))
+negloge(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin) }
+logneg(seq(-0.5, -0.2, by = 0.1))
 }
 \keyword{math}
 \keyword{models}
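A minimal sketch of the three transformations documented in man/loge.Rd,
written out directly rather than through the VGAM link functions:

theta <- c(0.5, 1, 2)
cbind(loge    = log(theta),      # loge(theta)
      negloge = -log(theta))     # negloge(theta) = log(1/theta)
log(-seq(-0.5, -0.2, by = 0.1))  # logneg(theta), for negative theta only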
diff --git a/man/logff.Rd b/man/logff.Rd
index ebd862c..d621858 100644
--- a/man/logff.Rd
+++ b/man/logff.Rd
@@ -16,16 +16,19 @@ logff(link = "logit", init.c = NULL, zero = NULL)
   which lies between 0 and 1.
   See \code{\link{Links}} for more choices and information.
 
+
   }
   \item{init.c}{
   Optional initial value for the \eqn{c} parameter.
   If given, it often pays to start with a larger value, e.g., 0.95.
   The default is to choose an initial value internally.
 
+
   }
   \item{zero}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -57,9 +60,9 @@ Chapter 7 of
     Hoboken, New Jersey: Wiley.
 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -86,7 +89,7 @@ New York: Wiley-Interscience, Third edition.
   \code{\link[base:Log]{log}},
   \code{\link{loge}},
   \code{\link{logoff}},
-  \code{\link{explogarithmic}}.
+  \code{\link{explogff}}.
 
 
 }
@@ -110,7 +113,7 @@ fit <- vglm(nindiv ~ 1, logff, data = corbet, weights = ofreq)
 coef(fit, matrix = TRUE)
 chat <- Coef(fit)["c"]
 pdf2 <- dlog(x = with(corbet, nindiv), prob = chat)
-print(with(corbet, cbind(nindiv, ofreq, fitted = pdf2 * sum(ofreq))), dig = 1)
+print(with(corbet, cbind(nindiv, ofreq, fitted = pdf2 * sum(ofreq))), digits = 1)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index 43dba2a..511d605 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -8,6 +8,7 @@
   Estimates the location and scale parameters of the logistic
   distribution by maximum likelihood estimation.
 
+
 }
 \usage{
 logistic1(llocation = "identity", scale.arg = 1, imethod = 1)
@@ -22,18 +23,22 @@ logistic2(llocation = "identity", lscale = "loge",
   See \code{\link{Links}} for more choices, and
   \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
   \item{scale.arg}{
   Known positive scale parameter (called \eqn{s} below).
 
+
   }
   \item{ilocation, iscale}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
   \item{imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -77,14 +82,15 @@ logistic2(llocation = "identity", lscale = "loge",
 
 }
 \references{ 
+
 Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
 \emph{Continuous Univariate Distributions},
 2nd edition, Volume 1, New York: Wiley.  Chapter 15.
 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
@@ -104,6 +110,7 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
 \note{
   Fisher scoring is used, and the Fisher information matrix is diagonal.
 
+
 }
 %\section{Warning }{
 %}
@@ -114,16 +121,17 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
   \code{\link{cumulative}},
   \code{\link{bilogistic4}}.
 
+
 }
 \examples{
 # Location unknown, scale known
 ldata <- data.frame(x2 = runif(nn <- 500))
-ldata <- transform(ldata, y1 = rlogis(nn, loc = 1+5*x2, scale = exp(2)))
+ldata <- transform(ldata, y1 = rlogis(nn, loc = 1 + 5*x2, scale = exp(2)))
 fit1 <- vglm(y1 ~ x2, logistic1(scale = 4), ldata, trace = TRUE, crit = "c")
 coef(fit1, matrix = TRUE)
 
 # Both location and scale unknown
-ldata <- transform(ldata, y2 = rlogis(nn, loc = 1+5*x2, scale = exp(0+1*x2)))
+ldata <- transform(ldata, y2 = rlogis(nn, loc = 1 + 5*x2, scale = exp(0 + 1*x2)))
 fit2 <- vglm(cbind(y1, y2) ~ x2, logistic2, ldata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 vcov(fit2)
diff --git a/man/logit.Rd b/man/logit.Rd
index 4dcb6f4..cf82e94 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -129,21 +129,21 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 \examples{
 p <- seq(0.01, 0.99, by = 0.01)
 logit(p)
-max(abs(logit(logit(p), inverse = TRUE) - p)) # Should be 0
+max(abs(logit(logit(p), inverse = TRUE) - p))  # Should be 0
 
 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-logit(p) # Has NAs
-logit(p, bvalue = .Machine$double.eps) # Has no NAs
+logit(p)  # Has NAs
+logit(p, bvalue = .Machine$double.eps)  # Has no NAs
 
 p <- seq(0.9, 2.2, by = 0.1)
 elogit(p, min = 1, max = 2,
           bminvalue = 1 + .Machine$double.eps,
-          bmaxvalue = 2 - .Machine$double.eps) # Has no NAs
+          bmaxvalue = 2 - .Machine$double.eps)  # Has no NAs
 
 \dontrun{ par(mfrow = c(2,2), lwd = (mylwd <- 2))
 y <- seq(-4, 4, length = 100)
 p <- seq(0.01, 0.99, by = 0.01)
-for(d in 0:1) {
+for (d in 0:1) {
   matplot(p, cbind(logit(p, deriv = d), probit(p, deriv = d)),
           type = "n", col = "purple", ylab = "transformation", las = 1,
           main = if (d ==  0) "Some probability link functions"
@@ -160,7 +160,7 @@ for(d in 0:1) {
     abline(v = 0.5, lty = "dashed")
 }
 
-for(d in 0) {
+for (d in 0) {
   matplot(y, cbind(logit(y, deriv = d, inverse = TRUE),
                    probit(y, deriv = d, inverse = TRUE)), las = 1,
           type = "n", col = "purple", xlab = "transformation", ylab = "p",
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index 6d99040..d209b7f 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -204,7 +204,7 @@ mymu <- function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2)
 alldat <- transform(alldat, y = rnbinom(n, mu = mymu(x2), size = my.k))
 mytau <- c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
 fitp <- vglm(y ~ bs(x2, df = mydof), data=alldat, trace = TRUE,
-            loglaplace1(tau = mytau, parallelLoc = TRUE)) # halfstepping is usual
+            loglaplace1(tau = mytau, parallelLoc = TRUE))  # halfstepping is usual
  
 \dontrun{
 par(las = 1)  # Plot on a log1p() scale
@@ -212,10 +212,10 @@ mylwd <- 1.5
 with(alldat, plot(x2, jitter(log1p(y), factor = 1.5), col = "red", pch = "o",
      main = "Example 1; darkgreen=truth, blue=estimated", cex = 0.75))
 with(alldat, matlines(x2, log1p(fitted(fitp)), col = "blue", lty = 1, lwd = mylwd))
-finexgrid <- seq(0, 1, len=201)
-for(ii in 1:length(mytau))
-    lines(finexgrid, col = "darkgreen", lwd = mylwd,
-          log1p(qnbinom(p = mytau[ii], mu = mymu(finexgrid), si = my.k)))
+finexgrid <- seq(0, 1, len = 201)
+for (ii in 1:length(mytau))
+  lines(finexgrid, col = "darkgreen", lwd = mylwd,
+        log1p(qnbinom(p = mytau[ii], mu = mymu(finexgrid), si = my.k)))
 }
 fitp@extra  # Contains useful information
 
@@ -243,22 +243,22 @@ with(alldat, lines(x2, trueFunction - mean(trueFunction), col = "darkgreen"))
 
 # Plot the data + fitted quantiles (on the original scale)
 myylim <- with(alldat, range(y2))
-with(alldat, plot(x2, y2, col = "blue", ylim = myylim, las = 1, pch = ".", cex=2.5))
+with(alldat, plot(x2, y2, col = "blue", ylim = myylim, las = 1, pch = ".", cex = 2.5))
 with(alldat, matplot(x2, fitted(fit1), add = TRUE, lwd = 3, type = "l"))
-truecol <- rep(1:3, len=fit1 at misc$M) # Add the 'truth'
-smallxgrid <- seq(0, 1, len=501)
-for(ii in 1:length(mytau))
-    lines(smallxgrid, col=truecol[ii], lwd=2,
-          qbinom(p = mytau[ii], prob = mymu(smallxgrid), size=ssize) / ssize)
+truecol <- rep(1:3, len = fit1 at misc$M)  # Add the 'truth'
+smallxgrid <- seq(0, 1, len = 501)
+for (ii in 1:length(mytau))
+    lines(smallxgrid, col = truecol[ii], lwd = 2,
+          qbinom(p = mytau[ii], prob = mymu(smallxgrid), size = ssize) / ssize)
 
 
 # Plot on the eta (== logit()/probit()/...) scale
 with(alldat, matplot(x2, predict(fit1), add = FALSE, lwd = 3, type = "l"))
 # Add the 'truth'
-for(ii in 1:length(mytau)) {
-    true.quant <- qbinom(p = mytau[ii], pr = mymu(smallxgrid), si=ssize)/ssize
-    lines(smallxgrid, theta2eta(theta=true.quant, link=linkFunctionChar),
-          col=truecol[ii], lwd=2)
+for (ii in 1:length(mytau)) {
+  true.quant <- qbinom(p = mytau[ii], pr = mymu(smallxgrid), si = ssize) / ssize
+  lines(smallxgrid, theta2eta(theta = true.quant, link = linkFunctionChar),
+        col = truecol[ii], lwd = 2)
 }
 }
 }
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
index 38811a8..cab41ed 100644
--- a/man/loglinb2.Rd
+++ b/man/loglinb2.Rd
@@ -7,7 +7,7 @@
 
 }
 \usage{
-loglinb2(exchangeable = FALSE, zero = NULL)
+loglinb2(exchangeable = FALSE, zero = 3)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -91,20 +91,21 @@ McCullagh, P. and Nelder, J. A. (1989)
 coalminers <- transform(coalminers, Age = (age - 42) / 5)
 
 # Get the n x 4 matrix of counts 
-fit.temp <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, coalminers)
-counts <- round(c(weights(fit.temp, type = "prior")) * depvar(fit.temp))
+fit0 <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, data = coalminers)
+counts <- round(c(weights(fit0, type = "prior")) * depvar(fit0))
 
 # Create a n x 2 matrix response for loglinb2()
 # bwmat <- matrix(c(0,0, 0,1, 1,0, 1,1), 4, 2, byrow = TRUE)
 bwmat <- cbind(bln = c(0,0,1,1), wheeze = c(0,1,0,1))
 matof1 <- matrix(1, nrow(counts), 1)
-newminers <- data.frame(bln = kronecker(matof1, bwmat[,1]),
-                       wheeze = kronecker(matof1, bwmat[,2]),
-                       wt = c(t(counts)),
-                       Age = with(coalminers, rep(age, rep(4, length(age)))))
+newminers <- data.frame(bln    = kronecker(matof1, bwmat[, 1]),
+                        wheeze = kronecker(matof1, bwmat[, 2]),
+                        wt     = c(t(counts)),
+                        Age    = with(coalminers, rep(age, rep(4, length(age)))))
 newminers <- newminers[with(newminers, wt) > 0,]
 
-fit <- vglm(cbind(bln,wheeze) ~ Age, loglinb2, weight = wt, data = newminers)
+fit <- vglm(cbind(bln,wheeze) ~ Age, loglinb2(zero = NULL),
+            weight = wt, data = newminers)
 coef(fit, matrix = TRUE)  # Same! (at least for the log odds-ratio) 
 summary(fit)
 
diff --git a/man/loglinb3.Rd b/man/loglinb3.Rd
index ef6eb08..8c8b273 100644
--- a/man/loglinb3.Rd
+++ b/man/loglinb3.Rd
@@ -7,7 +7,7 @@
 
 }
 \usage{
-loglinb3(exchangeable = FALSE, zero = NULL)
+loglinb3(exchangeable = FALSE, zero = 4:6)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -98,7 +98,7 @@ contains further information and examples.
 
 }
 \examples{
-fit <- vglm(cbind(cyadea, beitaw, kniexc) ~ altitude, loglinb3, hunua)
+fit <- vglm(cbind(cyadea, beitaw, kniexc) ~ altitude, loglinb3, data = hunua)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
 summary(fit)
diff --git a/man/loglog.Rd b/man/loglog.Rd
index 63a5c6b..1a9e91c 100644
--- a/man/loglog.Rd
+++ b/man/loglog.Rd
@@ -82,12 +82,12 @@ loglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 }
 \examples{
 x <- seq(0.8, 1.5, by = 0.1)
-loglog(x) # Has NAs
-loglog(x, bvalue = 1.0 + .Machine$double.eps) # Has no NAs
+loglog(x)  # Has NAs
+loglog(x, bvalue = 1.0 + .Machine$double.eps)  # Has no NAs
 
 x <- seq(1.01, 10, len = 100)
 loglog(x)
-max(abs(loglog(loglog(x), inverse = TRUE) - x)) # Should be 0
+max(abs(loglog(loglog(x), inverse = TRUE) - x))  # Should be 0
 }
 \keyword{math}
 \keyword{models}
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index fa25f6e..5e9ab66 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -21,6 +21,7 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
   Both of these are on the log scale. 
   See \code{\link{Links}} for more choices.
 
+
   }
 
 
@@ -93,6 +94,7 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
 
 }
 \references{
+
 Kleiber, C. and Kotz, S. (2003)
 \emph{Statistical Size Distributions in Economics and
              Actuarial Sciences},
@@ -106,34 +108,45 @@ Hoboken, NJ, USA: Wiley-Interscience.
 %  3-parameter lognormal distribution with \eqn{\lambda}{lambda} equal
 %  to zero---see \code{\link{lognormal3}}.
 %
+%
 %}
 
+
+\section{Warning}{
+  Regularity conditions are not satisfied for the 3-parameter case:
+  results may be erroneous.
+  It may be withdrawn in later versions.
+
+
+}
+
+
 \seealso{
 % \code{\link{lognormal3}},
   \code{\link[stats]{rlnorm}},
-  \code{\link{normal1}},
+  \code{\link{uninormal}},
   \code{\link{CommonVGAMffArguments}}.
 
 
 }
 
 \examples{
-ldat <- data.frame(y = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
-fit <- vglm(y ~ 1, lognormal, ldat, trace = TRUE)
-coef(fit, matrix = TRUE)
-Coef(fit)
+ldata <- data.frame(y1 = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
+fit1 <- vglm(y1 ~ 1, lognormal, ldata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1)
 
-ldat2 <- data.frame(x2 = runif(nn <- 1000))
-ldat2 <- transform(ldat2, y = rlnorm(nn, mean = 0.5, sd = exp(x2)))
-fit <- vglm(y ~ x2, lognormal(zero = 1), ldat2, trace = TRUE, crit = "c")
-coef(fit, matrix = TRUE)
-Coef(fit)
+ldata2 <- data.frame(x2 = runif(nn <- 1000))
+ldata2 <- transform(ldata2, y2 = rlnorm(nn, mean = 0.5, sd = exp(x2)))
+fit2 <- vglm(y2 ~ x2, lognormal(zero = 1), ldata2, trace = TRUE, crit = "c")
+coef(fit2, matrix = TRUE)
+Coef(fit2)
 
 lambda <- 4
-ldat3 <- data.frame(y = lambda + rlnorm(n = 1000, mean = 1.5, sd = exp(-0.8)))
-fit <- vglm(y ~ 1, lognormal3, ldat3, trace = TRUE, crit = "c")
-coef(fit, matrix = TRUE)
-summary(fit)
+ldata3 <- data.frame(y3 = lambda + rlnorm(n = 1000, mean = 1.5, sd = exp(-0.8)))
+fit3 <- vglm(y3 ~ 1, lognormal3, ldata3, trace = TRUE, crit = "c")
+coef(fit3, matrix = TRUE)
+summary(fit3)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/lomaxUC.Rd b/man/lomaxUC.Rd
index 61d1fba..07a875e 100644
--- a/man/lomaxUC.Rd
+++ b/man/lomaxUC.Rd
@@ -69,7 +69,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 \examples{
 probs <- seq(0.1, 0.9, by = 0.1)
-max(abs(plomax(qlomax(p = probs, shape3.q =  1), shape3.q = 1) - probs)) # Should be 0
+max(abs(plomax(qlomax(p = probs, shape3.q =  1), shape3.q = 1) - probs))  # Should be 0
 
 \dontrun{ par(mfrow = c(1, 2))
 x <- seq(-0.01, 5, len = 401)
diff --git a/man/lrtest.Rd b/man/lrtest.Rd
index ee455fa..f59ff75 100644
--- a/man/lrtest.Rd
+++ b/man/lrtest.Rd
@@ -155,7 +155,7 @@ c(all.equal(ans1, ans2), all.equal(ans1, ans3), all.equal(ans1, ans4))
 (mypval <- pchisq(testStatistic, df = length(coef(fit2)) - length(coef(fit1)),
                   lower.tail = FALSE))
 
-(ans4 <- lrtest(fit3, fit1)) # Test proportional odds (parallelism) assumption
+(ans4 <- lrtest(fit3, fit1))  # Test proportional odds (parallelism) assumption
 }
 
 \keyword{htest}
diff --git a/man/lvplot.Rd b/man/lvplot.Rd
index 5b93bc9..a054af1 100644
--- a/man/lvplot.Rd
+++ b/man/lvplot.Rd
@@ -59,7 +59,7 @@ Constrained additive ordination.
 \seealso{
   \code{\link{lvplot.qrrvglm}},
   \code{lvplot.cao},
-  \code{\link{lv}},
+  \code{\link{latvar}},
   \code{\link{trplot}}.
 
 
@@ -67,7 +67,7 @@ Constrained additive ordination.
 
 \examples{
 \dontrun{
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 set.seed(123)
 p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
           WaterCon + BareSand + FallTwig +
@@ -76,7 +76,6 @@ p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
           df1.nl = c(Zoraspin = 2.5, 3), Crow1positive = TRUE)
 index <- 1:ncol(depvar(p1))
 lvplot(p1, lcol = index, pcol = index, y = TRUE, las = 1)
-
 }
 }
 \keyword{models}
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
index e397120..22cf5f2 100644
--- a/man/lvplot.qrrvglm.Rd
+++ b/man/lvplot.qrrvglm.Rd
@@ -10,8 +10,8 @@ y-axis are the first and second ordination axes respectively.
 
 }
 \usage{
-lvplot.qrrvglm(object, varlvI = FALSE, reference = NULL,
-    add = FALSE, plot.it = TRUE, 
+lvplot.qrrvglm(object, varI.latvar = FALSE, reference = NULL,
+    add = FALSE, show.plot = TRUE, 
     rug = TRUE, y = FALSE, type = c("fitted.values", "predictors"), 
     xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""), 
     ylab = if (Rank == 1) switch(type, predictors = "Predictors", 
@@ -31,8 +31,13 @@ lvplot.qrrvglm(object, varlvI = FALSE, reference = NULL,
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{object}{ A CQO or UQO object. }
-  \item{varlvI}{
+  \item{object}{
+  A CQO object.
+% A CQO or UQO object.
+
+
+  }
+  \item{varI.latvar}{
   Logical that is fed into \code{\link{Coef.qrrvglm}}. 
 
   }
@@ -44,7 +49,7 @@ lvplot.qrrvglm(object, varlvI = FALSE, reference = NULL,
   plot is made.
 
  }
-  \item{plot.it}{ Logical. Plot it?
+  \item{show.plot}{ Logical. Plot it?
 
  }
   \item{rug}{ Logical. If \code{TRUE}, a rug plot is plotted at the
@@ -122,7 +127,7 @@ For rank-2 models, points are the optima.
       = 90\% of the maximum will be plotted about each optimum.
       If \code{ellipse} is a negative value, then the function checks
       that the model is an equal-tolerances model and
-      \code{varlvI = FALSE}, and if so, plots circles with
+      \code{varI.latvar = FALSE}, and if so, plots circles with
       radius \code{-ellipse}. For example, setting \code{ellipse = -1}
       will result in circular contours that have unit radius (in latent
       variable units).  If \code{ellipse} is \code{NULL} or \code{FALSE}
@@ -337,15 +342,17 @@ nn <- 200
 cdata <- data.frame(x2 = rnorm(nn),   # Has mean 0 (needed when ITol=TRUE)
                     x3 = rnorm(nn),   # Has mean 0 (needed when ITol=TRUE)
                     x4 = rnorm(nn))   # Has mean 0 (needed when ITol=TRUE)
-cdata <- transform(cdata, lv1 =  x2 + x3 - 2*x4,
-                          lv2 = -x2 + x3 + 0*x4)
-# Nb. lv2 is weakly correlated with lv1
-cdata <- transform(cdata, lambda1 = exp(6 - 0.5 * (lv1-0)^2 - 0.5 * (lv2-0)^2),
-                          lambda2 = exp(5 - 0.5 * (lv1-1)^2 - 0.5 * (lv2-1)^2),
-                          lambda3 = exp(5 - 0.5 * (lv1+2)^2 - 0.5 * (lv2-0)^2))
-cdata <- transform(cdata, spp1 = rpois(nn, lambda1),
-                          spp2 = rpois(nn, lambda2),
-                          spp3 = rpois(nn, lambda3))
+cdata <- transform(cdata, latvar1 =  x2 + x3 - 2*x4,
+                          latvar2 = -x2 + x3 + 0*x4)
+# Nb. latvar2 is weakly correlated with latvar1
+cdata <- transform(cdata,
+            lambda1 = exp(6 - 0.5 * (latvar1-0)^2 - 0.5 * (latvar2-0)^2),
+            lambda2 = exp(5 - 0.5 * (latvar1-1)^2 - 0.5 * (latvar2-1)^2),
+            lambda3 = exp(5 - 0.5 * (latvar1+2)^2 - 0.5 * (latvar2-0)^2))
+cdata <- transform(cdata,
+            spp1 = rpois(nn, lambda1),
+            spp2 = rpois(nn, lambda2),
+            spp3 = rpois(nn, lambda3))
 set.seed(111)
 # vvv p2 <- cqo(cbind(spp1,spp2,spp3) ~ x2 + x3 + x4, poissonff, 
 # vvv          data = cdata,
@@ -362,10 +369,10 @@ lvplot(p2, sites = TRUE, spch = "*", scol = "darkgreen", scex = 1.5,
        C = TRUE, Cadj = c(-.3,-.3,1), Clwd = 2, Ccex = 1.4, Ccol = "red",
        main = paste("Contours at Abundance = 140 with",
                   "convex hull of the site scores")) }
-# vvv var(lv(p2)) # A diagonal matrix, i.e., uncorrelated latent variables
-# vvv var(lv(p2, varlvI = TRUE)) # Identity matrix
-# vvv Tol(p2)[,,1:2] # Identity matrix
-# vvv Tol(p2, varlvI = TRUE)[,,1:2] # A diagonal matrix
+# vvv var(latvar(p2))  # A diagonal matrix, i.e., uncorrelated latent variables
+# vvv var(latvar(p2, varI.latvar = TRUE))  # Identity matrix
+# vvv Tol(p2)[,,1:2]  # Identity matrix
+# vvv Tol(p2, varI.latvar = TRUE)[,,1:2]  # A diagonal matrix
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/lvplot.rrvglm.Rd b/man/lvplot.rrvglm.Rd
index 3fb8cb7..c8e176d 100644
--- a/man/lvplot.rrvglm.Rd
+++ b/man/lvplot.rrvglm.Rd
@@ -12,7 +12,7 @@
 }
 \usage{
 lvplot.rrvglm(object, 
-              A = TRUE, C = TRUE, scores = FALSE, plot.it = TRUE,
+              A = TRUE, C = TRUE, scores = FALSE, show.plot = TRUE,
               groups = rep(1, n), gapC = sqrt(sum(par()$cxy^2)), 
               scaleA = 1, 
               xlab = "Latent Variable 1", ylab = "Latent Variable 2", 
@@ -37,7 +37,7 @@ lvplot.rrvglm(object,
   \item{scores}{ Logical. Allow the plotting of the \eqn{n} scores? 
                  The scores are the values of the latent variables for each
                  observation. }
-  \item{plot.it}{ Logical. Plot it? If \code{FALSE}, no plot is produced
+  \item{show.plot}{ Logical. Plot it? If \code{FALSE}, no plot is produced
   and the matrix of scores (\eqn{n} latent variable values) is returned.
   If \code{TRUE}, the rank of \code{object} need not be 2.
   }
@@ -98,6 +98,8 @@ lvplot.rrvglm(object,
   \item{\dots}{ Arguments passed into the \code{plot} function
   when setting up the entire plot. Useful arguments here include
   \code{xlim} and \code{ylim}.
+
+
  }
 }
 \details{
@@ -111,18 +113,23 @@ lvplot.rrvglm(object,
   As the result is a biplot, its interpretation is based on the inner
   product.
 
+
 }
 \value{
   The matrix of scores (\eqn{n} latent variable values) is returned
   regardless of whether a plot was produced or not.
 
+
 }
 
 \references{
+
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
 \emph{Statistical Modelling},
 \bold{3}, 15--41.
+
+
 }
 \author{ Thomas W. Yee }
 \note{
@@ -130,22 +137,28 @@ Reduced-rank vector generalized linear models.
 %  to the normalization code of \code{\link{rrvglm}} to allow uncorrelated
 %  latent variables etc.
 
+
    The functions \code{\link{lvplot.rrvglm}} and
    \code{\link{biplot.rrvglm}} are equivalent.
 
+
    In the example below the predictor variables are centered, which
    is a good idea.
+
+
 }
 
 \seealso{
-    \code{\link{lvplot}},
-    \code{\link[graphics]{par}},
-    \code{\link{rrvglm}},
-    \code{\link{Coef.rrvglm}},
-    \code{\link{rrvglm.control}}.
+  \code{\link{lvplot}},
+  \code{\link[graphics]{par}},
+  \code{\link{rrvglm}},
+  \code{\link{Coef.rrvglm}},
+  \code{\link{rrvglm.control}}.
+
+
 }
 \examples{
-nn <- nrow(pneumo) # x1, x2 and x3 are some unrelated covariates
+nn <- nrow(pneumo)  # x1, x2 and x3 are some unrelated covariates
 pneumo <- transform(pneumo, slet = scale(log(exposure.time)),
                               x1 = rnorm(nn), x2 = rnorm(nn), x3 = rnorm(nn))
 fit <- rrvglm(cbind(normal, mild, severe) ~ slet + x1 + x2 + x3,
diff --git a/man/machinists.Rd b/man/machinists.Rd
new file mode 100644
index 0000000..d28af90
--- /dev/null
+++ b/man/machinists.Rd
@@ -0,0 +1,80 @@
+\name{machinists}
+\alias{machinists}
+\docType{data}
+\title{ Machinists Accidents }
+
+\description{
+  A small count data set
+  involving 414 machinists in a three-month study
+  of accidents around the end of WWI.
+
+}
+\usage{
+data(machinists)
+}
+\format{
+  A data frame with the following variables.
+
+  \describe{
+   
+    \item{accidents}{
+      The number of accidents.
+
+    }
+    \item{ofreq}{
+      Observed frequency, i.e., the number of machinists
+      with that many accidents.
+
+    }
+
+  }
+}
+\details{
+  The data were collected from 414 machinists over a period of
+  three months.
+  Data were also collected over six months, but they are not
+  given here.
+
+
+}
+\source{
+
+Incidence of Industrial Accidents.
+Report No. 4 (Industrial Fatigue Research Board),
+Stationery Office, London, 1919.
+
+
+}
+\references{
+
+Greenwood, M. and Yule, G. U. (1920).
+An Inquiry into the Nature of Frequency Distributions
+Representative of Multiple Happenings with Particular
+Reference to the Occurrence of Multiple Attacks of Disease
+or of Repeated Accidents.
+\emph{Journal of the Royal Statistical Society},
+\bold{83}, 255--279.
+
+
+}
+\seealso{
+  \code{\link[VGAM]{negbinomial}},
+  \code{\link[VGAM]{poissonff}}.
+
+
+}
+\examples{
+machinists
+mean(with(machinists, rep(accidents, times = ofreq)))
+ var(with(machinists, rep(accidents, times = ofreq)))
+\dontrun{ barplot(with(machinists, ofreq),
+          names.arg = as.character(with(machinists, accidents)),
+          main = "Machinists accidents",
+          col = "lightblue", las = 1,
+          ylab = "Frequency", xlab = "accidents") }
+}
+\keyword{datasets}
+
+
+%
+%
diff --git a/man/makehamUC.Rd b/man/makehamUC.Rd
index 31c3b51..a81b788 100644
--- a/man/makehamUC.Rd
+++ b/man/makehamUC.Rd
@@ -76,7 +76,7 @@ Gompertz-Makeham distribution.
 probs <- seq(0.01, 0.99, by = 0.01)
 Shape <- exp(-1); Scale <- exp(1); eps = Epsilon <- exp(-1)
 max(abs(pmakeham(qmakeham(p = probs, Shape, sca = Scale, eps = Epsilon),
-                 Shape, sca = Scale, eps = Epsilon) - probs)) # Should be 0
+                 Shape, sca = Scale, eps = Epsilon) - probs))  # Should be 0
 
 \dontrun{ x <- seq(-0.1, 2.0, by = 0.01);
 plot(x, dmakeham(x, Shape, sca = Scale, eps = Epsilon), type = "l",
diff --git a/man/margeff.Rd b/man/margeff.Rd
index af04ad6..bdadf0f 100644
--- a/man/margeff.Rd
+++ b/man/margeff.Rd
@@ -111,14 +111,14 @@ mynewdata <- with(pneumo, data.frame(let = let[ii]+hh))
 (newp <- predict(fit, newdata = mynewdata, type = "response"))
 
 # Compare the difference. Should be the same as hh --> 0.
-round(dig = 3, (newp-fitted(fit)[ii, ])/hh) # Finite-difference approxn
-round(dig = 3, margeff(fit, subset = ii)["let",])
+round(digits = 3, (newp-fitted(fit)[ii, ])/hh)  # Finite-difference approxn
+round(digits = 3, margeff(fit, subset = ii)["let",])
 
 # Other examples
-round(dig = 3, margeff(fit))
-round(dig = 3, margeff(fit, subset = 2)["let",])
-round(dig = 3, margeff(fit, subset = c(FALSE, TRUE))["let",,]) # recycling
-round(dig = 3, margeff(fit, subset = c(2, 4, 6, 8))["let",,])
+round(digits = 3, margeff(fit))
+round(digits = 3, margeff(fit, subset = 2)["let",])
+round(digits = 3, margeff(fit, subset = c(FALSE, TRUE))["let",,])  # recycling
+round(digits = 3, margeff(fit, subset = c(2, 4, 6, 8))["let",,])
 }
 
 
diff --git a/man/mbinomial.Rd b/man/matched.binomial.Rd
similarity index 92%
rename from man/mbinomial.Rd
rename to man/matched.binomial.Rd
index 71ae2f7..a4fadfc 100644
--- a/man/mbinomial.Rd
+++ b/man/matched.binomial.Rd
@@ -1,5 +1,5 @@
-\name{mbinomial}
-\alias{mbinomial}
+\name{matched.binomial}
+\alias{matched.binomial}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The Matched Binomial Distribution Family Function }
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-mbinomial(mvar = NULL, link = "logit",
-          parallel = TRUE, smallno = .Machine$double.eps^(3/4))
+matched.binomial(mvar = NULL, link = "logit",
+                 parallel = TRUE, smallno = .Machine$double.eps^(3/4))
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -140,8 +140,8 @@ mbinomial(mvar = NULL, link = "logit",
 # Cf. Hastie and Tibshirani (1990) p.209. The variable n must be even.
 # Here, the intercept for each matched set accounts for x3 which is
 # the confounder or matching variable.
-n <- 700 # Requires a big machine with lots of memory. Expensive wrt time
-n <- 100 # This requires a reasonably big machine.
+n <- 700  # Requires a big machine with lots of memory. Expensive wrt time
+n <- 100  # This requires a reasonably big machine.
 mydat <- data.frame(x2 = rnorm(n), x3 = rep(rnorm(n/2), each = 2))
 xmat <- with(mydat, cbind(x2, x3))
 mydat <- transform(mydat, eta = -0.1 + 0.2 * x2 + 0.3 * x3)
@@ -152,17 +152,17 @@ y <- cbind(y1, 1 - y1)
 mydat <- transform(mydat, y = c(y1, 1-y1),
                          ID = factor(c(row(etamat))))
 fit <- vglm(y ~ 1 + ID + x2, trace = TRUE,
-            mbinomial(mvar = ~ ID - 1), data = mydat)
+            matched.binomial(mvar = ~ ID - 1), data = mydat)
 dimnames(coef(fit, matrix = TRUE))
 coef(fit, matrix = TRUE)
 summary(fit)
 head(fitted(fit))
-objsizemb <- function(object) round(object.size(object) / 2^20, dig = 2)
-objsizemb(fit) # in Mb
+objsizemb <- function(object) round(object.size(object) / 2^20, digits = 2)
+objsizemb(fit)  # in Mb
 
 VLMX <- model.matrix(fit, type = "vlm")  # The big model matrix
 dim(VLMX)
-objsizemb(VLMX) # in Mb
+objsizemb(VLMX)  # in Mb
 rm(VLMX) }
 }
 \keyword{models}
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
index 9058b84..c612bc6 100644
--- a/man/maxwell.Rd
+++ b/man/maxwell.Rd
@@ -56,13 +56,16 @@ maxwell(link = "loge", zero = NULL)
   Fisher-scoring and Newton-Raphson are the same here.
   A related distribution is the Rayleigh distribution.
   This \pkg{VGAM} family function handles multiple responses.
+  This \pkg{VGAM} family function can be mimicked by
+  \code{poisson.points(ostatistic = 1.5, dimension = 2)}.
 
 
 }
 
 \seealso{ 
   \code{\link{Maxwell}},
-  \code{\link{rayleigh}}.
+  \code{\link{rayleigh}},
+  \code{\link{poisson.points}}.
 
 
 }
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index 73a3c3a..bd9e2c5 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -80,7 +80,7 @@ Q <- qmaxwell(probs, a = a)
 lines(Q, dmaxwell(Q, a), col = "purple", lty = 3, type = "h")
 lines(Q, pmaxwell(Q, a), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pmaxwell(Q, a) - probs)) # Should be zero
+max(abs(pmaxwell(Q, a) - probs))  # Should be zero
 }
 }
 \keyword{distribution}
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index 7da6c99..3c543db 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -110,7 +110,7 @@ all else fails.
 %}
 
 \examples{
-mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
+mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2))  # Limit as theta = 0, nu = Inf
 fit <- vglm(y ~ 1, mccullagh89, mdata, trace = TRUE)
 head(fitted(fit))
 with(mdata, mean(y))
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index b90ccca..d56de90 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -118,7 +118,7 @@ coef(fit, matrix = TRUE)
 
 # Compare the results with the truth
 round(rbind('Estimated' = Coef(fit),
-            'Truth' = c(phi, lambda1, lambda2)), dig = 2)
+            'Truth' = c(phi, lambda1, lambda2)), digits = 2)
 
 with(mdata, hist(Y, prob = TRUE, main = "Orange = estimate, blue = truth"))
 abline(v = 1 / Coef(fit)[c(2, 3)],  lty = 2, col = "orange", lwd = 2)
diff --git a/man/mix2normal1.Rd b/man/mix2normal.Rd
similarity index 90%
rename from man/mix2normal1.Rd
rename to man/mix2normal.Rd
index 2d848f2..7bf8c7b 100644
--- a/man/mix2normal1.Rd
+++ b/man/mix2normal.Rd
@@ -1,5 +1,5 @@
-\name{mix2normal1}
-\alias{mix2normal1}
+\name{mix2normal}
+\alias{mix2normal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Mixture of Two Univariate Normal Distributions }
 \description{
@@ -8,9 +8,9 @@
 
 }
 \usage{
-mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
+mix2normal(lphi = "logit", lmu = "identity", lsd = "loge",
             iphi = 0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
-            qmu = c(0.2, 0.8), equalsd = TRUE, nsimEIM = 100, zero = 1)
+            qmu = c(0.2, 0.8), eq.sd = TRUE, nsimEIM = 100, zero = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -26,7 +26,7 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
 % \item{ephi, emu1, emu2, esd1, esd2}{
 % List. Extra argument for each of the links.
 % See \code{earg} in \code{\link{Links}} for general information.
-% If \code{equalsd = TRUE} then \code{esd1} must equal \code{esd2}.
+% If \code{eq.sd = TRUE} then \code{esd1} must equal \code{esd2}.
 % }
 
   \item{iphi}{
@@ -60,7 +60,7 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
 
 
   }
-  \item{equalsd}{
+  \item{eq.sd}{
     Logical indicating whether the two standard deviations should be 
     constrained to be equal. If \code{TRUE} then the appropriate
     constraint matrices will be used.
@@ -100,7 +100,7 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
   By default, the five linear/additive predictors are
   \eqn{(logit(\phi), \mu_1, \log(\sigma_1), \mu_2, \log(\sigma_2))^T}{
        (logit(phi),   mu1,   log(sd1), mu2, log(sd2))^T}.
-  If \code{equalsd = TRUE} then \eqn{\sigma_1 = \sigma_2}{sd1=sd2}
+  If \code{eq.sd = TRUE} then \eqn{\sigma_1 = \sigma_2}{sd1=sd2}
   is enforced.
 
 
@@ -166,14 +166,14 @@ London: Chapman & Hall.
   distributions are not well separated.
   The default control argument \code{trace = TRUE} is to encourage
   monitoring convergence.
-  Having \code{equalsd = TRUE} often makes the overall optimization problem
+  Having \code{eq.sd = TRUE} often makes the overall optimization problem
   easier.
 
 
 }
 
 \seealso{
-  \code{\link{normal1}},
+  \code{\link{uninormal}},
   \code{\link[stats:Normal]{Normal}},
   \code{\link{mix2poisson}}.
 
@@ -186,17 +186,17 @@ sd1 <- sd2 <- exp(3)
 (phi <- logit(-1, inverse = TRUE))
 mdata <- data.frame(y = ifelse(runif(nn) < phi, rnorm(nn, mu1, sd1),
                                                 rnorm(nn, mu2, sd2)))
-fit <- vglm(y ~ 1, mix2normal1(equalsd = TRUE), mdata)
+fit <- vglm(y ~ 1, mix2normal(eq.sd = TRUE), mdata)
 
 # Compare the results
 cfit <- coef(fit)
 round(rbind('Estimated' = c(logit(cfit[1], inverse = TRUE),
-      cfit[2], exp(cfit[3]), cfit[4]),
-      'Truth' = c(phi, mu1, sd1, mu2)), dig = 2)
+            cfit[2], exp(cfit[3]), cfit[4]),
+            'Truth' = c(phi, mu1, sd1, mu2)), digits = 2)
 
 # Plot the results
 xx <- with(mdata, seq(min(y), max(y), len = 200))
-plot(xx, (1-phi)*dnorm(xx, mu2, sd2), type = "l", xlab = "y",
+plot(xx, (1-phi) * dnorm(xx, mu2, sd2), type = "l", xlab = "y",
      main = "Orange = estimate, blue = truth", col = "blue", ylab = "Density")
 phi.est <- logit(coef(fit)[1], inverse = TRUE)
 sd.est <- exp(coef(fit)[3])
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
index 2671cf5..e60f539 100644
--- a/man/mix2poisson.Rd
+++ b/man/mix2poisson.Rd
@@ -117,7 +117,7 @@ mix2poisson(lphi = "logit", llambda = "loge",
 \seealso{
   \code{\link[stats:Poisson]{rpois}},
   \code{\link{poissonff}},
-  \code{\link{mix2normal1}}.
+  \code{\link{mix2normal}}.
 
 
 }
@@ -125,7 +125,7 @@ mix2poisson(lphi = "logit", llambda = "loge",
 \examples{
 \dontrun{ # Example 1: simulated data
 nn <- 1000
-mu1 <- exp(2.5) # also known as lambda1
+mu1 <- exp(2.5)  # also known as lambda1
 mu2 <- exp(3)
 (phi <- logit(-0.5, inverse = TRUE))
 mdata <- data.frame(y = rpois(nn, ifelse(runif(nn) < phi, mu1, mu2)))
@@ -133,7 +133,7 @@ fit <- vglm(y ~ 1, mix2poisson, mdata)
 coef(fit, matrix = TRUE)
 
 # Compare the results with the truth
-round(rbind('Estimated' = Coef(fit), 'Truth' = c(phi, mu1, mu2)), dig = 2)
+round(rbind('Estimated' = Coef(fit), 'Truth' = c(phi, mu1, mu2)), digits = 2)
 
 ty <- with(mdata, table(y))
 plot(names(ty), ty, type = "h", main = "Orange=estimate, blue=truth",
diff --git a/man/mlogit.Rd b/man/mlogit.Rd
index b5a1b32..9c35158 100644
--- a/man/mlogit.Rd
+++ b/man/mlogit.Rd
@@ -1,7 +1,7 @@
 \name{mlogit}
 \alias{mlogit}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Multinomial Logit Link Function }
+\title{ Multi-logit Link Function }
 \description{
   Computes the mlogit transformation, including its inverse and the
   first two derivatives.
@@ -41,14 +41,19 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
   The \code{mlogit()} link function is a generalization of the
   \code{\link{logit}} link to \eqn{M} levels/classes.
   It forms the basis of the \code{\link{multinomial}} logit model.
+  It is sometimes called the \emph{multi-logit} link
+  or the \emph{multinomial logit} link.
+  When its inverse function is computed it returns values which
+  are positive and add to unity.
 
 
 }
 \value{
-  For \code{mlogit} with \code{deriv = 0}, the mlogit of \code{theta}, i.e.,
-  \code{log(theta[,j]/theta[,M+1])} when \code{inverse = FALSE},
+  For \code{mlogit} with \code{deriv = 0}, the mlogit of \code{theta},
+  i.e.,
+  \code{log(theta[, j]/theta[, M+1])} when \code{inverse = FALSE},
   and if \code{inverse = TRUE} then
-  \code{exp(theta[,j])/(1+rowSums(exp(theta)))}.
+  \code{exp(theta[, j])/(1+rowSums(exp(theta)))}.
 
 
   For \code{deriv = 1}, then the function returns
@@ -82,23 +87,25 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
 \seealso{ 
     \code{\link{Links}},
     \code{\link{multinomial}},
-    \code{\link{logit}}.
+    \code{\link{logit}},
+    \code{\link{normal.vcm}},
+    \code{\link{CommonVGAMffArguments}}.
 
 
  }
 \examples{
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vglm(cbind(normal, mild, severe) ~ let,
-            multinomial, trace = TRUE, pneumo) # For illustration only!
+            multinomial, trace = TRUE, pneumo)  # For illustration only!
 fitted(fit)
 predict(fit)
 
 mlogit(fitted(fit))
-mlogit(fitted(fit)) - predict(fit) # Should be all 0s
+mlogit(fitted(fit)) - predict(fit)  # Should be all 0s
 
-mlogit(predict(fit), inverse = TRUE)
-mlogit(predict(fit), inverse = TRUE, refLevel = 1) # For illustration only
-mlogit(predict(fit), inverse = TRUE) - fitted(fit) # Should be all 0s
+mlogit(predict(fit), inverse = TRUE)  # rowSums() add to unity
+mlogit(predict(fit), inverse = TRUE, refLevel = 1)  # For illustration only
+mlogit(predict(fit), inverse = TRUE) - fitted(fit)  # Should be all 0s
 
 mlogit(fitted(fit), deriv = 1)
 mlogit(fitted(fit), deriv = 2)
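A minimal sketch of the multi-logit map and its inverse as stated in the
\value section above, with the last column as the reference level; the
helper names are hypothetical, and mlogit() itself also handles refLevel,
derivatives and the other arguments shown in its usage.

to.mlogit   <- function(p) log(p[, -ncol(p), drop = FALSE] / p[, ncol(p)])
from.mlogit <- function(eta) {
  expeta <- exp(eta)
  cbind(expeta, 1) / (1 + rowSums(expeta))
}
p <- matrix(c(0.2, 0.3, 0.5,
              0.1, 0.6, 0.3), 2, 3, byrow = TRUE)
max(abs(from.mlogit(to.mlogit(p)) - p))  # Should be 0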
diff --git a/man/mmt.Rd b/man/mmt.Rd
new file mode 100644
index 0000000..b2662bd
--- /dev/null
+++ b/man/mmt.Rd
@@ -0,0 +1,59 @@
+\name{mmt}
+\alias{mmt}
+\docType{data}
+\title{ Melbourne Daily Maximum Temperatures }
+\description{
+  Melbourne daily maximum temperatures in degrees Celsius
+  over the ten-year period 1981--1990.
+
+
+}
+\usage{
+data(mmt)
+}
+\format{
+  A vector with 3650 observations.
+
+}
+\details{
+  This is a daily time series from Melbourne, Australia.
+  It is commonly used as a difficult quantile regression problem
+  because the conditional distribution of today's maximum temperature,
+  given yesterday's, is bimodal: a hot day is likely to be followed
+  by either an equally hot day or one much cooler.
+  However, an independence assumption is typically made.
+
+
+}
+%\source{
+%\url{http://www.london2012.com/medals/medal-count/}.
+%
+%
+%}
+\references{
+  Hyndman, R. J. and Bashtannyk, D. M. and Grunwald, G. K. (1996).
+  Estimating and visualizing conditional densities.
+  \emph{J. Comput. Graph. Statist.},
+  \bold{5}(4),  315--336.
+
+
+}
+\seealso{
+  \code{\link[VGAM]{lms.bcn}}.
+
+
+}
+
+\examples{
+summary(mmt)
+\dontrun{ par(mfrow = c(1, 1), mar = c(5, 4, 0.2, 0.1) + 0.1, las = 1)
+melb <- data.frame(today     = mmt[-1],
+                   yesterday = mmt[-length(mmt)])
+plot(today ~ yesterday, data = melb,
+     xlab = "Yesterday's Max Temperature",
+     ylab = "Today's Max Temperature", cex = 1.4, type = "n")
+points(today ~ yesterday, data = melb, pch = 0, cex = 0.50, col = "blue")
+abline(a = 0, b = 1, lty = 3)
+}
+}
+\keyword{datasets}
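
A small base-R sketch of the bimodality described in the \details section,
assuming the package providing mmt is attached as in the \usage section:
the histogram of today's maximum temperature following a hot day (here,
above 30 degrees, an illustrative threshold) tends to show two modes.

data(mmt)
melb.today     <- mmt[-1]
melb.yesterday <- mmt[-length(mmt)]
hist(melb.today[melb.yesterday > 30], breaks = 20,
     main = "Today's maximum after a hot day (> 30 C)",
     xlab = "Degrees Celsius")
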
diff --git a/man/model.framevlm.Rd b/man/model.framevlm.Rd
index dd6d074..2345d89 100644
--- a/man/model.framevlm.Rd
+++ b/man/model.framevlm.Rd
@@ -79,12 +79,12 @@ check1 <- head(model.frame(fit))
 check1
 check2 <- model.frame(fit, data = head(pneumo))
 check2
-all.equal(unlist(check1), unlist(check2)) # Should be TRUE
+all.equal(unlist(check1), unlist(check2))  # Should be TRUE
 
 q0 <- head(predict(fit))
 q1 <- head(predict(fit, newdata = pneumo))
 q2 <- predict(fit, newdata = head(pneumo))
-all.equal(q0, q1) # Should be TRUE
-all.equal(q1, q2) # Should be TRUE
+all.equal(q0, q1)  # Should be TRUE
+all.equal(q1, q2)  # Should be TRUE
 }
 \keyword{models}
diff --git a/man/model.matrixvlm.Rd b/man/model.matrixvlm.Rd
index a219c9e..053a278 100644
--- a/man/model.matrixvlm.Rd
+++ b/man/model.matrixvlm.Rd
@@ -3,7 +3,7 @@
 \title{Construct the Design Matrix of a VLM Object}
 \usage{
 model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
-                lapred.index = NULL, \dots)
+                linpred.index = NULL, \dots)
 }
 \arguments{
   \item{object}{an object of a class that inherits from the
@@ -21,13 +21,14 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
 
 
   }
-  \item{lapred.index}{
-    Single integer. The index for a linear/additive predictor,
-    it must have a value from the set \code{1:M}, and
-    \code{type = "lm"}  must be assigned.
+  \item{linpred.index}{
+    Single integer.
+    The index for a linear/additive predictor;
+    it must have a value from the set \code{1:M},
+    and \code{type = "lm"} must be assigned.
     Then it returns a subset of the VLM matrix corresponding to
-    the \code{lapred.index}th linear/additive predictor; this
-    is a LM-type matrix.
+    the \code{linpred.index}th linear/additive predictor;
+    this is an LM-type matrix.
 
 
   }    
@@ -100,8 +101,8 @@ fit <- vglm(cbind(normal, mild, severe) ~ poly(c(scale(let)), 2),
 class(fit)
 fit at x # Not saved on the object
 model.matrix(fit)
-model.matrix(fit, lapred.index = 1, type = "lm")
-model.matrix(fit, lapred.index = 2, type = "lm")
+model.matrix(fit, linpred.index = 1, type = "lm")
+model.matrix(fit, linpred.index = 2, type = "lm")
 
 (Check1 <- head(model.matrix(fit, type = "lm")))
 (Check2 <- model.matrix(fit, data = head(pneumo), type = "lm"))
@@ -110,7 +111,7 @@ all.equal(c(Check1), c(Check2))
 q0 <- head(predict(fit))
 q1 <- head(predict(fit, newdata = pneumo))
 q2 <- predict(fit, newdata = head(pneumo))
-all.equal(q0, q1) # Should be TRUE
-all.equal(q1, q2) # Should be TRUE
+all.equal(q0, q1)  # Should be TRUE
+all.equal(q1, q2)  # Should be TRUE
 }
 \keyword{models}
diff --git a/man/moffset.Rd b/man/moffset.Rd
index 5ae1590..86abcfb 100644
--- a/man/moffset.Rd
+++ b/man/moffset.Rd
@@ -10,8 +10,8 @@ Matrix Offset
 
 }
 \usage{
-moffset(mat, roffset = 0, coffset = 0, postfix = "")
-
+moffset(mat, roffset = 0, coffset = 0, postfix = "",
+        rprefix = "Row.", cprefix = "Col.")
 }
 \arguments{
 \item{mat}{
@@ -21,6 +21,7 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   i.e., going down successive columns, as the columns go
   from left to right. Wrapping of values is done.
 
+
 }
 \item{roffset, coffset}{
   Numeric or character.
@@ -33,6 +34,7 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   dataset start from 6:00 am, and this wraps around to 
   include midnight to 05.59 am on the next day.
 
+
 }
 \item{postfix}{
   Character.
@@ -40,6 +42,12 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   to the end of each name.
   The default is no change.
 
+
+}
+\item{rprefix, cprefix}{
+  Same as \code{\link{rcim}}.
+
+
 }
 }
 
@@ -55,9 +63,9 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   \code{\link{alcoff}},
   where it is useful to define the \emph{effective day} as starting
   at some other hour than midnight, e.g., 6.00am.
-  This is because partying on Friday night continues on into Saturday morning,
-  therefore it is more interpretable to use the effective day when
-  considering a daily effect.
+  This is because partying on Friday night continues into
+  Saturday morning; therefore it is more interpretable to use
+  the effective day when considering a daily effect.
 
 
   This is a data preprocessing function for \code{\link{rcim}}
@@ -71,11 +79,13 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   values in one column can be moved to a previous column.
   See the examples below.
 
+
 }
 
 \value{
  A matrix of the same dimension as its input.
 
+
 }
 
 
@@ -83,6 +93,7 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   T. W. Yee,
   Alfian F. Hadi.
 
+
 }
 \note{
 % This function was originally for a 24 x 7 dimensional matrix
@@ -93,6 +104,7 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
 
   The input \code{mat} should have row names and column names.
 
+
 }
 \seealso{
   \code{\link{Rcim}}, 
@@ -101,25 +113,26 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
   \code{\link{alcoff}},
   \code{\link{crashi}}.
 
+
 }
 \examples{
 moffset(alcoff, 3, 2, "*")  # Some day's data is moved to previous day.
 Rcim(alcoff, 3 + 1, 2 + 1)  # Data does not move as much.
 alcoff  # Original data
-moffset(alcoff, 3, 2, "*") - Rcim(alcoff, 3+1, 2+1) # Note the differences
+moffset(alcoff, 3, 2, "*") - Rcim(alcoff, 3+1, 2+1)  # Note the differences
 
 # An 'effective day' data set:
 alcoff.e <- moffset(alcoff, roffset = "6", postfix = "*")
 fit.o <- rcim(alcoff)    # default baselines are first row and col
 fit.e <- rcim(alcoff.e)  # default baselines are first row and col 
 
-\dontrun{ par(mfrow = c(2, 2), mar = c(9,4,2,1))
+\dontrun{ par(mfrow = c(2, 2), mar = c(9, 4, 2, 1))
 plot(fit.o, rsub = "Not very interpretable", csub = "Not very interpretable")
 plot(fit.e, rsub = "More interpretable", csub = "More interpretable")
 }
 
 # Some checking
-all.equal(moffset(alcoff), alcoff)      # Should be no change
+all.equal(moffset(alcoff), alcoff)  # Should be no change
 moffset(alcoff, 1, 1, "*")
 moffset(alcoff, 2, 3, "*")
 moffset(alcoff, 1, 0, "*")
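
The row-wrapping idea behind the "effective day" can also be sketched in
base R on a toy hour-by-day matrix. This hypothetical snippet only
reorders rows and is not a substitute for moffset(), which additionally
moves the wrapped cells into the previous column, as described above.

hours <- paste0("h", 0:23)
days  <- c("Mon", "Tue", "Wed")
tab   <- matrix(seq_len(24 * 3), 24, 3, dimnames = list(hours, days))
roff  <- 6                             # effective day starts at 6 am
idx   <- c((roff + 1):24, 1:roff)      # wrap the early hours to the end
head(tab[idx, ])                       # rows now begin at h6
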
diff --git a/man/morgenstern.Rd b/man/morgenstern.Rd
index b1d6390..0b620f3 100644
--- a/man/morgenstern.Rd
+++ b/man/morgenstern.Rd
@@ -27,6 +27,7 @@ morgenstern(lapar = "rhobit", iapar = NULL, tola0 = 0.01, imethod = 1)
   If a convergence failure occurs try assigning a different value.
   Assigning a value will override the argument \code{imethod}.
 
+
   }
   \item{tola0}{
   Positive numeric.
@@ -35,12 +36,14 @@ morgenstern(lapar = "rhobit", iapar = NULL, tola0 = 0.01, imethod = 1)
   This is an attempt to fix a numerical problem when the estimate
   is too close to zero.
 
+
   }
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
   specifies the initialization method. If failure to converge occurs
   try the other value, or else specify a value for \code{ia}.
 
+
   }
 }
 \details{
@@ -101,7 +104,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 \seealso{
   \code{\link{fgm}},
-  \code{\link{gumbelIbiv}}.
+  \code{\link{bigumbelI}}.
 
 
 }
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index 1112362..6a30845 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -240,27 +240,24 @@ data(iris)
 \dontrun{ fit <- vglm(Species ~ ., multinomial, iris)
 coef(fit, matrix = TRUE) }
 
-
 # Example 2a: a simple example 
-ycounts <- t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8))) # Counts
+ycounts <- t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8)))  # Counts
 fit <- vglm(ycounts ~ 1, multinomial)
 head(fitted(fit))   # Proportions
 fit at prior.weights   # NOT recommended for extraction of prior weights
-weights(fit, type = "prior", matrix = FALSE) # The better method
+weights(fit, type = "prior", matrix = FALSE)  # The better method
 depvar(fit)         # Sample proportions; same as fit at y
 constraints(fit)    # Constraint matrices
 
 # Example 2b: Different reference level used as the baseline 
 fit2 <- vglm(ycounts ~ 1, multinomial(refLevel = 2))
 coef(fit2, matrix = TRUE)
-coef(fit , matrix = TRUE) # Easy to reconcile this output with fit2
-
-
+coef(fit , matrix = TRUE)  # Easy to reconcile this output with fit2
 
 # Example 3: The response is a factor.
 nn <- 10
 dframe3 <- data.frame(yfactor = gl(3, nn, labels = c("Control", "Trt1", "Trt2")),
-                     x2 = runif(3 * nn))
+                      x2 = runif(3 * nn))
 myrefLevel <- with(dframe3, yfactor[12])
 fit3a <- vglm(yfactor ~ x2, multinomial(refLevel = myrefLevel), dframe3)
 fit3b <- vglm(yfactor ~ x2, multinomial(refLevel = 2), dframe3)
@@ -268,7 +265,6 @@ coef(fit3a, matrix = TRUE)  # "Treatment1" is the reference level
 coef(fit3b, matrix = TRUE)  # "Treatment1" is the reference level
 margeff(fit3b)
 
-
 # Example 4: Fit a rank-1 stereotype model 
 data(car.all)
 fit4 <- rrvglm(Country ~ Width + Height + HP, multinomial, car.all)
@@ -276,9 +272,13 @@ coef(fit4)   # Contains the C matrix
 constraints(fit4)$HP       # The A matrix 
 coef(fit4, matrix = TRUE)  # The B matrix
 Coef(fit4)@C               # The C matrix 
-ccoef(fit4)                # Better to get the C matrix this way
+concoef(fit4)              # Better to get the C matrix this way
 Coef(fit4)@A               # The A matrix 
-svd(coef(fit4, matrix = TRUE)[-1, ])$d    # This has rank 1; = C %*% t(A) 
+svd(coef(fit4, matrix = TRUE)[-1, ])$d  # This has rank 1; = C %*% t(A) 
+# Classification (but watch out for NAs in some of the variables):
+apply(fitted(fit4), 1, which.max)  # Classification
+apply(predict(fit4, car.all, type = "response"), 1, which.max)  # Classification
+colnames(fitted(fit4))[apply(fitted(fit4), 1, which.max)]  # Classification
 
 
 # Example 5: The use of the xij argument (aka conditional logit model)
@@ -292,16 +292,16 @@ gotowork <- data.frame(cost.bus  = runif(nn), time.bus  = runif(nn),
                        cost.train= runif(nn), time.train= runif(nn),
                        cost.car  = runif(nn), time.car  = runif(nn),
                        cost.walk = runif(nn), time.walk = runif(nn))
-gotowork <- round(gotowork, dig = 2) # For convenience
+gotowork <- round(gotowork, digits = 2)  # For convenience
 gotowork <- transform(gotowork,
                       Cost.bus   = cost.bus   - cost.walk,
                       Cost.car   = cost.car   - cost.walk,
                       Cost.train = cost.train - cost.walk,
-                      Cost       = cost.train - cost.walk, # for labelling
+                      Cost       = cost.train - cost.walk,  # for labelling
                       Time.bus   = time.bus   - time.walk,
                       Time.car   = time.car   - time.walk,
                       Time.train = time.train - time.walk,
-                      Time       = time.train - time.walk) # for labelling
+                      Time       = time.train - time.walk)  # for labelling
 fit <- vglm(ycounts ~ Cost + Time,
             multinomial(parall = TRUE ~ Cost + Time - 1),
             xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
@@ -315,7 +315,7 @@ coef(fit)
 coef(fit, matrix = TRUE)
 constraints(fit)
 summary(fit)
-max(abs(predict(fit) - predict(fit, new = gotowork))) # Should be 0
+max(abs(predict(fit) - predict(fit, new = gotowork)))  # Should be 0
 }
 \keyword{models}
 \keyword{regression}
@@ -324,7 +324,7 @@ max(abs(predict(fit) - predict(fit, new = gotowork))) # Should be 0
 
 % 20100915; this no longer works:
 % # Example 2c: Different input to Example 2a but same result
-% w <- apply(ycounts, 1, sum) # Prior weights
+% w <- apply(ycounts, 1, sum)  # Prior weights
 % yprop <- ycounts / w    # Sample proportions
 % fitprop <- vglm(yprop ~ 1, multinomial, weights=w)
 % head(fitted(fitprop))   # Proportions
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
index 266328e..a466c2c 100644
--- a/man/nakagami.Rd
+++ b/man/nakagami.Rd
@@ -19,6 +19,7 @@ nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
   See \code{\link{Links}} for more choices
   and information.
 
+
   }
   \item{ishape, iscale}{
   Optional initial values for the shape and scale parameters.
@@ -28,6 +29,7 @@ nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
   \code{initialize} slot, however, setting another numerical
   value is recommended if convergence fails or is too slow.
 
+
   }
 }
 \details{
@@ -94,7 +96,7 @@ nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
 \examples{
 nn <- 1000; shape <- exp(0); Scale <- exp(1)
 ndata <- data.frame(y1 = sqrt(rgamma(nn, shape = shape, scale = Scale/shape)))
-fit <- vglm(y1 ~ 1, nakagami, ndata, trace = TRUE, crit = "c")
+fit <- vglm(y1 ~ 1, nakagami, ndata, trace = TRUE, crit = "coef")
 ndata <- transform(ndata, y2 = rnaka(nn, shape = shape, scale = Scale))
 fit <- vglm(y2 ~ 1, nakagami(iscale = 3), ndata, trace = TRUE)
 head(fitted(fit))
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
index 72d5fba..8efc6e9 100644
--- a/man/nakagamiUC.Rd
+++ b/man/nakagamiUC.Rd
@@ -28,21 +28,25 @@ rnaka(n, shape, scale = 1, Smallno = 1.0e-6)
   For \code{rnaka}, arguments \code{shape} and \code{scale} must be of
   length 1.
 
+
   }
   \item{Smallno}{
   Numeric, a small value used by the rejection method for determining
   the upper limit of the distribution.
   That is, \code{pnaka(U) > 1-Smallno} where \code{U} is the upper limit.
 
+
   }
   \item{\ldots}{
   Arguments that can be passed into \code{\link[stats]{uniroot}}.
 
+
   }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 }
 \value{
@@ -51,6 +55,7 @@ rnaka(n, shape, scale = 1, Smallno = 1.0e-6)
   \code{qnaka} gives the quantile function, and
   \code{rnaka} generates random deviates.
 
+
 }
 \author{ T. W. Yee }
 \details{
@@ -85,7 +90,7 @@ legend(2, 0.6, col = c("orange","blue","green"), lty = rep(1, len = 3),
        legend = paste("shape =", c(1, 2, 3))) }
 
 probs <- seq(0.1, 0.9, by = 0.1)
-pnaka(qnaka(p = probs, shape = 2), shape = 2) - probs # Should be all 0
+pnaka(qnaka(p = probs, shape = 2), shape = 2) - probs  # Should be all 0
 }
 \keyword{distribution}
 
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index 8d2f45f..c79654d 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -74,7 +74,7 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
 }
 \references{
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -99,6 +99,9 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
   see the example below.
 
 
+  Standard errors may be unreliable.
+
+
 }
 \note{
 
@@ -128,11 +131,11 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
 \examples{
 nbcanlink("mu", short = FALSE)
 
-mymu <- 1:10 # Test some basic operations:
+mymu <- 1:10  # Test some basic operations:
 kmatrix <- matrix(runif(length(mymu)), length(mymu), 1)
 eta1 <- nbcanlink(mymu, size = kmatrix)
 ans2 <- nbcanlink(eta1, size = kmatrix, inverse = TRUE)
-max(abs(ans2 - mymu)) # Should be 0
+max(abs(ans2 - mymu))  # Should be 0
 
 \dontrun{ mymu <- c(seq(0.5, 10, length = 101))
 kmatrix <- matrix(10, length(mymu), 1)
@@ -143,7 +146,7 @@ plot(nbcanlink(mymu, size = kmatrix) ~ mymu, las = 1,
 set.seed(123)
 ndata <- data.frame(x2 = runif(nn <- 1000 ))
 size1 <- exp(1); size2 <- exp(2)
-ndata <- transform(ndata, eta1 = -1 - 2 * x2, # eta1 < 0
+ndata <- transform(ndata, eta1 = -1 - 2 * x2,  # eta1 < 0
                           size1 = size1,
                           size2 = size2)
 ndata <- transform(ndata,
@@ -155,8 +158,8 @@ head(ndata)
 summary(ndata)
 
 fit <- vglm(cbind(y1, y2) ~ x2, negbinomial("nbcanlink", imethod = 3),
-            stepsize = 0.5, ndata, # Deliberately slow the convergence rate
-            maxit = 100, trace = TRUE) # Warning: may converge to a local soln
+            stepsize = 0.5, ndata,  # Deliberately slow the convergence rate
+            maxit = 100, trace = TRUE)  # Warning: may converge to a local soln
 coef(fit, matrix = TRUE)
 summary(fit)
 }
@@ -167,4 +170,4 @@ summary(fit)
 % abline(h = 0, col = "lightgray", lty = "dashed", lwd = 2.0)
 % The variance-covariance matrix may be wrong when the
 % canonical link is used.
-% vcov(fit) # May be wrong
+% vcov(fit)  # May be wrong
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index 5be7abe..9da72a7 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -25,7 +25,7 @@ nbolf(theta, cutpoint = NULL, k = NULL,
   The cutpoints should be non-negative integers.
   If \code{nbolf()} is used as the link function in
   \code{\link{cumulative}} then one should choose
-  \code{reverse = TRUE, parallel = TRUE, apply.parint = TRUE}.
+  \code{reverse = TRUE, parallel = TRUE}.
 
 
   }
@@ -105,7 +105,7 @@ nbolf("p", cutpoint = 2, k = 1, tag = TRUE)
 p <- seq(0.02, 0.98, by = 0.01)
 y <- nbolf(p,cutpoint = 2, k = 1)
 y. <- nbolf(p,cutpoint = 2, k = 1, deriv = 1)
-max(abs(nbolf(y,cutpoint = 2, k = 1, inv = TRUE) - p)) # Should be 0
+max(abs(nbolf(y,cutpoint = 2, k = 1, inv = TRUE) - p))  # Should be 0
 
 \dontrun{ par(mfrow = c(2, 1), las = 1)
 plot(p, y, type = "l", col = "blue", main = "nbolf()")
@@ -127,7 +127,7 @@ cuty <- Cut(y1, breaks = cutpoints)
 table(cuty) / sum(table(cuty))
 fit <- vglm(cuty ~ x2 + x3, trace = TRUE,
             cumulative(reverse = TRUE, mv = TRUE,
-                       parallel = TRUE, apply.parint = TRUE,
+                       parallel = TRUE,
                        link = nbolf(cutpoint = cutpoints[2:3], k = k)))
 head(depvar(fit))
 head(fitted(fit))
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index d239712..e568963 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -29,7 +29,7 @@ polya(lprob = "logit", lsize = "loge",
   \code{size} and \code{prob} arguments of 
   \code{\link[stats:NegBinomial]{rnbinom}} respectively.
   Common alternatives for \code{lsize} are
-  \code{\link{nloge}} and
+  \code{\link{negloge}} and
   \code{\link{reciprocal}}.
 
 
@@ -170,7 +170,7 @@ polya(lprob = "logit", lsize = "loge",
   It is common for some to use \eqn{\alpha=1/k}{alpha=1/k} as the
   ancillary or heterogeneity parameter;
   so common alternatives for \code{lsize} are
-  \code{\link{nloge}} and
+  \code{\link{negloge}} and
   \code{\link{reciprocal}}.
 
 
@@ -223,7 +223,7 @@ polya(lprob = "logit", lsize = "loge",
   estimate of the index parameter is fraught (see Lawless,
   1987). In general, the \code{\link{quasipoissonff}} is
   more robust.  Other alternatives to \code{negbinomial} are
-  to fit a NB-1 or RR-NB (aka NB-P) model; see Yee (2012).
+  to fit a NB-1 or RR-NB (aka NB-P) model; see Yee (2014).
   Also available are the NB-C, NB-H and NB-G.
   Assigning values to the \code{isize} argument may lead
   to a local solution, and smaller values are preferred
@@ -262,7 +262,7 @@ Fitting the negative binomial distribution to biological data.
 \bold{9}, 174--200.
 
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -410,13 +410,13 @@ coef(fit1, matrix = TRUE)
 
 # Example 3: large counts so definitely use the nsimEIM argument
 ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(12+x2), size = exp(1)))
-with(ndata, range(y3)) # Large counts
+with(ndata, range(y3))  # Large counts
 fit2 <- vglm(y3 ~ x2, negbinomial(nsimEIM = 100), ndata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 
 # Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu
-nn <- 1000 # Number of observations
-phi0 <- 10 # Specify this; should be greater than unity
+nn <- 1000  # Number of observations
+phi0 <- 10  # Specify this; should be greater than unity
 delta0 <- 1 / (phi0 - 1)
 mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
 mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3))
@@ -431,22 +431,22 @@ cnb1 <- coef(nb1, matrix = TRUE)
 mydiff <- (cnb1["(Intercept)", "log(size)"] -
            cnb1["(Intercept)", "log(mu)"])
 delta0.hat <- exp(mydiff)
-(phi.hat <- 1 + 1 / delta0.hat) # MLE of phi
+(phi.hat <- 1 + 1 / delta0.hat)  # MLE of phi
 summary(nb1)
 # Obtain a 95 percent confidence interval for phi0:
 myvec <- rbind(-1, 1, 0, 0)
 (se.mydiff <- sqrt(t(myvec) \%*\%  vcov(nb1) \%*\%  myvec))
 ci.mydiff <- mydiff + c(-1.96, 1.96) * se.mydiff
 ci.delta0 <- ci.exp.mydiff <- exp(ci.mydiff)
-(ci.phi0 <- 1 + 1 / rev(ci.delta0)) # The 95 percent conf. interval for phi0
+(ci.phi0 <- 1 + 1 / rev(ci.delta0))  # The 95 percent conf. interval for phi0
 
-Confint.nb1(nb1) # Quick way to get it
+Confint.nb1(nb1)  # Quick way to get it
 
-summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper # cf. moment estimator
+summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper  # cf. moment estimator
 }
 \keyword{models}
 \keyword{regression}
 
 
-%y1 = MASS:::rnegbin(n, mu=exp(3+x), theta=exp(1)) # k is theta
+%y1 = MASS:::rnegbin(n, mu=exp(3+x), theta=exp(1))  # k is theta
 %y2 = MASS:::rnegbin(n, mu=exp(2-x), theta=exp(0))
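
Example 4 above relies on the NB-1 identity phi0 = 1 + 1/delta0: when
size = delta0 * mu, Var(Y) = mu + mu^2/size = (1 + 1/delta0) * mu.
A quick base-R simulation check (illustrative only):

phi0   <- 10
delta0 <- 1 / (phi0 - 1)
mu     <- 5
set.seed(1)
y <- rnbinom(1e6, mu = mu, size = delta0 * mu)
c(empirical = var(y), theoretical = phi0 * mu)  # Should agree closely
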
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index 30d7b87..fa54a13 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -76,7 +76,7 @@ Hilbe, J. M. (2011)
 Cambridge: Cambridge University Press.
 
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -110,27 +110,27 @@ Cambridge: Cambridge University Press.
 # Simulated data with various multiple responses
 size1 <- exp(1); size2 <- exp(2); size3 <- exp(0); size4 <- Inf
 ndata <- data.frame(x2 = runif(nn <- 1000))
-ndata <- transform(ndata, eta1  = -1 - 2 * x2, # eta1 must be negative
+ndata <- transform(ndata, eta1  = -1 - 2 * x2,  # eta1 must be negative
                           size1 = size1)
 ndata <- transform(ndata,
                    mu1  = nbcanlink(eta1, size = size1, inv = TRUE))
 ndata <- transform(ndata,
-                   y1 = rnbinom(nn, mu = mu1,         size = size1), # NB-C
+                   y1 = rnbinom(nn, mu = mu1,         size = size1),  # NB-C
                    y2 = rnbinom(nn, mu = exp(2 - x2), size = size2),
-                   y3 = rnbinom(nn, mu = exp(3 + x2), size = size3), # NB-G
+                   y3 = rnbinom(nn, mu = exp(3 + x2), size = size3),  # NB-G
                    y4 = rpois  (nn, la = exp(1 + x2)))
 
 # Also known as NB-C with size known (Hilbe, 2011)
 fit1 <- vglm(y1 ~ x2, negbinomial.size(size = size1, lmu = "nbcanlink"),
              ndata, trace = TRUE, crit = "coef")
 coef(fit1, matrix = TRUE)
-head(fit1 at misc$size) # size saved here
+head(fit1 at misc$size)  # size saved here
 
 fit2 <- vglm(cbind(y2, y3, y4) ~ x2,
              negbinomial.size(size = c(size2, size3, size4)),
              ndata, trace = TRUE)
 coef(fit2, matrix = TRUE)
-head(fit2 at misc$size) # size saved here
+head(fit2 at misc$size)  # size saved here
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/normal.vcm.Rd b/man/normal.vcm.Rd
new file mode 100644
index 0000000..7ba3db6
--- /dev/null
+++ b/man/normal.vcm.Rd
@@ -0,0 +1,271 @@
+\name{normal.vcm}
+\alias{normal.vcm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Univariate Normal Distribution as a Varying-Coefficient Model }
+\description{
+  Maximum likelihood estimation of all the coefficients of a LM
+  where each of the usual regression coefficients is modelled
+  with other explanatory variables via parameter link functions.
+  Thus this is a basic varying-coefficient model.
+
+
+}
+\usage{
+normal.vcm(link.list = list("(Default)" = "identity"),
+           earg.list = list("(Default)" = list()),
+           lsd = "loge", lvar = "loge",
+           esd = list(), evar = list(),
+           var.arg = FALSE, imethod = 1,
+           icoefficients = NULL, isd = NULL, zero = "M")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{link.list, earg.list}{
+  Link functions and extra arguments
+  applied to the coefficients of the LM, excluding
+  the standard deviation/variance.
+  See \code{\link{CommonVGAMffArguments}} for more information.
+  The default is for an identity link to be applied to
+  each of the regression coefficients.
+
+
+  }
+
+
+  \item{lsd, esd, lvar, evar}{
+  Link function and extra argument
+  applied to
+  the standard deviation/variance.
+  See \code{\link{CommonVGAMffArguments}} for more information.
+  Same as \code{\link{uninormal}}.
+
+
+  }
+
+
+  \item{icoefficients}{
+  Optional initial values for the coefficients.
+  Recycled to length \eqn{M-1} (does not include the
+  standard deviation/variance).
+  Try using this argument if there is a link function that is not
+  programmed explicitly to handle range restrictions in
+  the \code{initialize} slot.
+
+
+
+  }
+  \item{var.arg, imethod, isd}{
+  Same as \code{\link{uninormal}}.
+
+
+
+  }
+  \item{zero}{
+  See \code{\link{CommonVGAMffArguments}} for more information.
+  The default applies to the last one,
+  viz. the standard deviation/variance.
+
+
+  }
+
+}
+\details{
+This function allows all the usual LM regression coefficients to be
+modelled as functions of other explanatory variables via parameter link
+functions. For example, we may want some of them to be positive. Or we
+may want a subset of them to be positive and add to unity. So a class
+of such models has been named \emph{varying-coefficient models} (VCMs).
+
+
+  The usual linear model is specified through argument
+  \code{form2}.  As with all other \pkg{VGAM} family
+  functions, the linear/additive predictors are specified
+  through argument \code{formula}.
+
+
+  The \code{\link{mlogit}} link allows a subset of the
+  coefficients to be positive and add to unity.  Either
+  none or more than one call to \code{\link{mlogit}}
+  is allowed. The last variable will be used as the
+  baseline/reference group, and is therefore excluded from
+  the estimation.
+
+  
+  By default,
+  the log of the standard deviation is the last
+  linear/additive predictor. It is recommended that this
+  parameter be estimated as intercept-only, for numerical
+  stability.
+
+
+  Technically,
+  the Fisher information matrix is of unit-rank for all but
+  the last parameter (the standard deviation/variance).
+  Hence an approximation is used that pools over all the
+  observations.
+
+
+  This \pkg{VGAM} family function cannot handle multiple responses.
+  Also, this function will probably not have the
+  full capabilities of the class of varying-coefficient models as
+  described by Hastie and Tibshirani (1993). However, it should
+  be able to manage some simple models, especially involving the
+  following links:
+  \code{\link{identity}},
+  \code{\link{loge}},
+  \code{\link{logoff}},
+  \code{\link{loglog}},
+  \code{\link{logit}},
+  \code{\link{probit}},
+  \code{\link{cauchit}},
+  \code{\link{cloglog}},
+  \code{\link{rhobit}},
+  \code{\link{fisherz}}.
+
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+}
+\references{
+
+  Hastie, T. and Tibshirani, R. (1993)
+  Varying-coefficient models.
+  \emph{J. Roy. Statist. Soc. Ser. B},
+  \bold{55}, 757--796.
+
+
+
+}
+
+\author{ T. W. Yee }
+\section{Warning}{
+  This \pkg{VGAM} family function is fragile.
+  One should monitor convergence, and possibly enter initial values
+  especially when there are non-\code{\link{identity}}-link functions.
+  If the initial value of the standard deviation/variance is too
+  small then numerical problems may occur.
+  One trick is to fit an intercept-only model and feed its
+  \code{predict()} output into argument \code{etastart} of a
+  more complicated model.
+  The use of the \code{zero} argument is recommended in order
+  to keep models as simple as possible.
+
+
+% 20130730; No longer a bug:
+% Inference for an ordinary LM here differs from \code{\link[stats]{lm}}.
+% In particular, the SEs differ.
+
+
+
+}
+
+\note{
+  The standard deviation/variance parameter is best modelled as
+  intercept-only.
+
+
+  Yet to do: allow an argument such as \code{parallel} that enables
+  many of the coefficients to be equal.
+  Fix a bug: \code{Coef()} does not work for intercept-only models.
+
+
+}
+\seealso{
+    \code{\link{uninormal}},
+    \code{\link[stats:lm]{lm}}.
+
+%    \code{link[locfit]{ethanol}}.
+
+
+}
+\examples{
+ndata <- data.frame(x2 = runif(nn <- 2000))
+# Note that coeff1 + coeff2 + coeff5 == 1. So try a "mlogit" link.
+myoffset <- 10
+ndata <- transform(ndata,
+           coeff1 = 0.25,  # "mlogit" link
+           coeff2 = 0.25,  # "mlogit" link
+           coeff3 = exp(-0.5),  # "loge" link
+           coeff4 = logoff(+0.5, offset = myoffset, inverse = TRUE),  # "logoff" link
+           coeff5 = 0.50,  # "mlogit" link
+           coeff6 = 1.00,  # "identity" link
+           v2 = runif(nn),
+           v3 = runif(nn),
+           v4 = runif(nn),
+           v5 = rnorm(nn),
+           v6 = rnorm(nn))
+ndata <- transform(ndata,
+           Coeff1 =          0.25 - 0 * x2,
+           Coeff2 =          0.25 - 0 * x2,
+           Coeff3 =   logit(-0.5  - 1 * x2, inverse = TRUE),
+           Coeff4 =  loglog( 0.5  - 1 * x2, inverse = TRUE),
+           Coeff5 =          0.50 - 0 * x2,
+           Coeff6 =          1.00 + 1 * x2)
+ndata <- transform(ndata,
+                   y1 = coeff1 * 1 +
+                        coeff2 * v2 +
+                        coeff3 * v3 +
+                        coeff4 * v4 +
+                        coeff5 * v5 +
+                        coeff6 * v6 + rnorm(nn, sd = exp(0)),
+                   y2 = Coeff1 * 1 +
+                        Coeff2 * v2 +
+                        Coeff3 * v3 +
+                        Coeff4 * v4 +
+                        Coeff5 * v5 +
+                        Coeff6 * v6 + rnorm(nn, sd = exp(0)))
+
+# An intercept-only model
+fit1 <- vglm(y1 ~ 1,
+             form2 = ~ 1 + v2 + v3 + v4 + v5 + v6,
+             normal.vcm(link.list = list("(Intercept)" = "mlogit",
+                                         "v2"          = "mlogit",
+                                         "v3"          = "loge",
+                                         "v4"          = "logoff",
+                                         "(Default)"   = "identity",
+                                         "v5"          = "mlogit"),
+                        earg.list = list("(Intercept)" = list(),
+                                         "v2"          = list(),
+                                         "v4"          = list(offset = myoffset),
+                                         "v3"          = list(),
+                                         "(Default)"   = list(),
+                                         "v5"          = list()),
+                        zero = c(1:2, 6)),
+             data = ndata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+summary(fit1)
+# This works only for intercept-only models:
+mlogit(rbind(coef(fit1, matrix = TRUE)[1, c(1, 2)]), inverse = TRUE)
+
+# A model with covariate x2 for the regression coefficients
+fit2 <- vglm(y2 ~ 1 + x2,
+             form2 = ~ 1 + v2 + v3 + v4 + v5 + v6,
+             normal.vcm(link.list = list("(Intercept)" = "mlogit",
+                                         "v2"          = "mlogit",
+                                         "v3"          = "logit",
+                                         "v4"          = "loglog",
+                                         "(Default)"   = "identity",
+                                         "v5"          = "mlogit"),
+                        earg.list = list("(Intercept)" = list(),
+                                         "v2"          = list(),
+                                         "v3"          = list(),
+                                         "v4"          = list(),
+                                         "(Default)"   = list(),
+                                         "v5"          = list()),
+                        zero = c(1:2, 6)),
+             data = ndata, trace = TRUE)
+
+coef(fit2, matrix = TRUE)
+summary(fit2)
+}
+\keyword{models}
+\keyword{regression}
+
+
+
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index 8d4e074..6e7839e 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,6 +2,30 @@
 \alias{notdocumentedyet}
 %
 %
+% 201311;
+\alias{family.name}
+\alias{family.name.vlm}
+\alias{family.name.vglmff}
+% 201309;
+\alias{I.col}
+\alias{BIC}
+\alias{check.omit.constant}
+%
+% 201308;
+%\alias{dbiclaytoncop}
+%\alias{rbiclaytoncop}
+%\alias{biclaytoncop}
+%
+% 201307;
+\alias{posnormal.control}
+\alias{recnormal.control}
+%\alias{kendall.tau}
+%\alias{binormalcop}
+%\alias{dbinormcop}
+%\alias{pbinormcop}
+%\alias{rbinormcop}
+%\alias{expint, expexpint, expint.E1}
+%
 % 201302;
 % \alias{pgamma.deriv.unscaled}
 % \alias{pgamma.deriv}
@@ -11,9 +35,8 @@
 \alias{binom2.rho.ss}
 %
 % 20121105;
+% \alias{posbernoulli.b.control}
 \alias{N.hat.posbernoulli}
-\alias{aux.posbernoulli}
-\alias{posbern.aux}
 \alias{Rank}
 \alias{Rank.rrvglm}
 \alias{Rank.qrrvglm}
@@ -72,6 +95,7 @@
 %
 % 20120112
 \alias{AIC}
+\alias{AICc}
 \alias{coef}
 \alias{logLik}
 \alias{plot}
@@ -89,11 +113,11 @@
 \alias{show.summary.qrrvglm}
 % \alias{show.summary.rc.exponential}
 \alias{show.summary.rrvglm}
-\alias{show.summary.uqo}
+%\alias{show.summary.uqo}
 \alias{show.summary.vgam}
 \alias{show.summary.vglm}
 \alias{show.summary.vlm}
-\alias{show.uqo}
+%\alias{show.uqo}
 \alias{show.vanova}
 \alias{show.vsmooth.spline}
 %
@@ -149,7 +173,7 @@
 \alias{Confint.rrnb}
 \alias{Confint.nb1}
 %\alias{gala}
-\alias{mmt}
+% \alias{mmt}
 %
 %
 %
@@ -171,7 +195,8 @@
 %
 %
 %20110411
-\alias{dbinorm}
+%\alias{dbinorm}
+\alias{dnorm2}
 %
 %20090330
 \alias{dclogloglap}
@@ -212,9 +237,9 @@
 \alias{Opt.qrrvglm}
 % \alias{R170.or.later}
 \alias{Tol.Coef.qrrvglm}
-\alias{Tol.Coef.uqo}
+%\alias{Tol.Coef.uqo}
 \alias{Tol.qrrvglm}
-\alias{Tol.uqo}
+%\alias{Tol.uqo}
 \alias{a2m}
 \alias{abbott}
 % \alias{acat.deriv}
@@ -253,10 +278,12 @@
 % \alias{cao.fit}
 \alias{car.all}
 \alias{care.exp}
-\alias{ccoef.Coef.cao}
-\alias{ccoef.Coef.qrrvglm}
-\alias{ccoef.cao}
-\alias{ccoef.qrrvglm}
+%
+\alias{concoef.Coef.cao}
+\alias{concoef.Coef.qrrvglm}
+\alias{concoef.cao}
+\alias{concoef.qrrvglm}
+%
 \alias{cdf}
 \alias{cdf.lms.bcg}
 \alias{cdf.lms.bcn}
@@ -283,7 +310,7 @@
 \alias{deplot.lms.yjn2}
 \alias{deplot.vglm}
 \alias{deviance}
-\alias{deviance.uqo}
+%\alias{deviance.uqo}
 \alias{deviance.vglm}
 \alias{deviance.vlm}
 %\alias{df.residual}
@@ -291,8 +318,8 @@
 \alias{dimm}
 % \alias{dneg.binomial}
 \alias{dnorm2}
-\alias{dotC}
-\alias{dotFortran}
+%\alias{dotC}
+%\alias{dotFortran}
 % \alias{dpsi.dlambda.yjn}
 % \alias{drop1.vgam}
 % \alias{drop1.vglm}
@@ -308,14 +335,12 @@
 \alias{eta2theta}
 %\alias{explink}
 % \alias{extract.arg}
-%\alias{felix}
-%\alias{dfelix}
 \alias{fff.control}
 \alias{fill2}
 \alias{fill3}
 \alias{fitted}
 \alias{fitted.values}
-\alias{fitted.values.uqo}
+%\alias{fitted.values.uqo}
 \alias{fittedvsmooth.spline}
 %
 \alias{variable.names}
@@ -366,9 +391,11 @@
 \alias{lms.yjn.control}
 \alias{lmscreg.control}
 \alias{logLik.vlm}
+\alias{logLik.qrrvglm}
 % \alias{lv.Coef.cao} 20090505
 \alias{latvar.Coef.qrrvglm}
-\alias{lv.cao}
+\alias{latvar.cao}
+\alias{latvar.rrvglm}
 \alias{latvar.qrrvglm}
 \alias{lvplot.cao}
 \alias{m2adefault}
@@ -376,9 +403,10 @@
 % \alias{matrix.power}
 \alias{mbesselI0}
 \alias{mix2exp.control}
-\alias{mix2normal1.control}
+\alias{mix2normal.control}
 \alias{mix2poisson.control}
 \alias{model.matrix.qrrvglm}
+\alias{model.matrixvgam}
 % \alias{mux11}
 % \alias{mux111}
 % \alias{mux15}
@@ -413,7 +441,7 @@
 \alias{persp.cao}
 \alias{plot.cao}
 \alias{plotpreplotvgam}
-\alias{plotvglm}
+%\alias{plotvglm}
 \alias{plotvlm}
 \alias{plotvsmooth.spline}
 % \alias{pnorm2} done 20120910
@@ -426,7 +454,7 @@
 \alias{predict.mlm}
 % \alias{predictqrrvglm}
 \alias{predict.rrvglm}
-\alias{predict.uqo}
+%\alias{predict.uqo}
 \alias{predict.vgam}
 \alias{predict.vlm}
 \alias{predictcao}
@@ -487,7 +515,7 @@
 % \alias{rrr.end.expression}
 % \alias{rrr.init.expression}
 % \alias{rrr.normalize}
-\alias{rrvglm.control.Gaussian}
+% \alias{rrvglm.control.Gaussian}
 % \alias{rrvglm.fit}
 \alias{ResSS.vgam}
 \alias{s.vam}
@@ -507,7 +535,7 @@
 \alias{summary.rc.exponential}
 \alias{summaryrcim}
 \alias{summary.rrvglm}
-\alias{summary.uqo}
+%\alias{summary.uqo}
 \alias{summaryvgam}
 \alias{summaryvglm}
 \alias{summaryvlm}
@@ -540,6 +568,7 @@
 % \alias{vgam.match}
 % \alias{vgam.nlchisq}
 % \alias{vgety}
+\alias{vgam.fit}
 \alias{vglm.fit}
 \alias{vglm.garma.control}
 \alias{vglm.multinomial.control}
@@ -569,7 +598,7 @@
 %
 %
 %
-\alias{Coef.uqo-class}
+%\alias{Coef.uqo-class}
 \alias{cao-class}
 \alias{rcim0-class}
 \alias{rcim-class}
@@ -583,7 +612,7 @@
 %%% 20101216 \alias{summary.rcim-class}
 %\alias{summary.rcim-class}
 %\alias{summaryrcim-class}
-\alias{uqo-class}
+%\alias{uqo-class}
 \alias{vcov.qrrvglm-class}
 \alias{vlm-class}
 \alias{vlmsmall-class}
@@ -602,7 +631,7 @@
 
 }
 %\usage{
-%normal1(lmean = "identity", lsd = "loge", zero = NULL)
+%uninormal(lmean = "identity", lsd = "loge", zero = NULL)
 %}
 %- maybe also 'usage' for other objects documented here.
 %\arguments{
@@ -633,7 +662,7 @@
 %}
 %\seealso{
 %    \code{gaussianff},
-%    \code{\link{posnormal1}}.
+%    \code{\link{posnormal}}.
 %}
 %\examples{
 %}
diff --git a/man/olym.Rd b/man/olym.Rd
index d457c5a..9802abf 100644
--- a/man/olym.Rd
+++ b/man/olym.Rd
@@ -63,14 +63,15 @@ summary(olym12)
 myylim <- c(0, 55)
 with(head(olym08, n = 8),
 barplot(rbind(gold, silver, bronze),
-        col = c("gold", "grey", "brown"), # No "silver" or "bronze"!
+        col = c("gold", "grey", "brown"),  # No "silver" or "bronze"!
+#               "gold", "grey71", "chocolate4",
         names.arg = country, cex.names = 0.5, ylim = myylim,
         beside = TRUE, main = "2008 Summer Olympic Final Medal Count",
         ylab = "Medal count", las = 1,
         sub = "Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze"))
 with(head(olym12, n = 8),
 barplot(rbind(gold, silver, bronze),
-        col = c("gold", "grey", "brown"), # No "silver" or "bronze"!
+        col = c("gold", "grey", "brown"),  # No "silver" or "bronze"!
         names.arg = country, cex.names = 0.5, ylim = myylim,
         beside = TRUE, main = "2012 Summer Olympic Final Medal Count",
         ylab = "Medal count", las = 1,
diff --git a/man/ordpoisson.Rd b/man/ordpoisson.Rd
index a6afba8..2ece5e0 100644
--- a/man/ordpoisson.Rd
+++ b/man/ordpoisson.Rd
@@ -128,19 +128,19 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
 
 }
 \examples{
-set.seed(123) # Example 1
+set.seed(123)  # Example 1
 x2 <- runif(n <- 1000); x3 <- runif(n)
 mymu <- exp(3 - 1 * x2 + 2 * x3)
 y1 <- rpois(n, lambda = mymu)
 cutpts <- c(-Inf, 20, 30, Inf)
-fcutpts <- cutpts[is.finite(cutpts)] # finite cutpoints
+fcutpts <- cutpts[is.finite(cutpts)]  # finite cutpoints
 ystar <- cut(y1, breaks = cutpts, labels = FALSE)
 \dontrun{
 plot(x2, x3, col = ystar, pch = as.character(ystar))
 }
 table(ystar) / sum(table(ystar))
 fit <- vglm(ystar ~ x2 + x3, fam = ordpoisson(cutpoi = fcutpts))
-head(depvar(fit)) # This can be input if countdata = TRUE
+head(depvar(fit))  # This can be input if countdata = TRUE
 head(fitted(fit))
 head(predict(fit))
 coef(fit, matrix = TRUE)
@@ -148,7 +148,7 @@ fit at extra
 
 # Example 2: multivariate and there are no obsns between some cutpoints
 cutpts2 <- c(-Inf, 0, 9, 10, 20, 70, 200, 201, Inf)
-fcutpts2 <- cutpts2[is.finite(cutpts2)] # finite cutpoints
+fcutpts2 <- cutpts2[is.finite(cutpts2)]  # finite cutpoints
 y2 <- rpois(n, lambda = mymu)   # Same model as y1
 ystar2 <- cut(y2, breaks = cutpts2, labels = FALSE)
 table(ystar2) / sum(table(ystar2))
@@ -159,7 +159,7 @@ fit <- vglm(cbind(ystar,ystar2) ~ x2 + x3, fam =
 coef(fit, matrix = TRUE)
 fit at extra
 constraints(fit)
-summary(depvar(fit)) # Some columns have all zeros
+summary(depvar(fit))  # Some columns have all zeros
 }
 \keyword{math}
 \keyword{models}
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
index 9fc6282..e1dcfbd 100644
--- a/man/paretoIV.Rd
+++ b/man/paretoIV.Rd
@@ -9,9 +9,11 @@
   by maximum likelihood estimation. Some special cases of this
   distribution are also handled.
 
+
 }
 \usage{
-paretoIV(location = 0, lscale = "loge", linequality = "loge", lshape = "loge",
+paretoIV(location = 0, lscale = "loge", linequality = "loge",
+         lshape = "loge",
          iscale = 1, iinequality = 1, ishape = NULL, imethod = 1)
 paretoIII(location = 0, lscale = "loge", linequality = "loge",
           iscale = NULL, iinequality = NULL)
@@ -24,6 +26,7 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   Location parameter, called \eqn{a} below.
   It is assumed known.
 
+
   }
   \item{lscale, linequality, lshape}{
   Parameter link functions for the
@@ -34,6 +37,7 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   A log link is the default for all because all these parameters are
   positive.
 
+
   }
   \item{iscale, iinequality, ishape}{
   Initial values for the parameters.
@@ -41,12 +45,14 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   If convergence failure occurs, use these arguments to input
   some alternative initial values.
 
+
   }
   \item{imethod}{
   Method of initialization for the shape parameter.
   Currently only values 1 and 2 are available.
   Try the other value if convergence failure occurs.
 
+
   }
 }
 \details{
@@ -103,9 +109,9 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   and 
   the Pareto(I) distribution \eqn{PI(b,s)} is \eqn{PIV(b,b,g=1,s)}.
   Thus the Burr distribution can be fitted using the
-  \code{\link{nloge}} link
+  \code{\link{negloge}} link
   function and using the default \code{location=0} argument.
-  The Pareto(I) distribution can be fitted using \code{\link{pareto1}}
+  The Pareto(I) distribution can be fitted using \code{\link{paretoff}}
   but there is a slight change in notation: \eqn{s=k} and
   \eqn{b=\alpha}{b=alpha}.
 
@@ -151,7 +157,7 @@ Fairland, Maryland: International Cooperative Publishing House.
 %}
 \seealso{
   \code{\link{ParetoIV}},
-  \code{\link{pareto1}},
+  \code{\link{paretoff}},
   \code{\link{gpd}}.
 
 
@@ -159,8 +165,9 @@ Fairland, Maryland: International Cooperative Publishing House.
 \examples{
 pdata <- data.frame(y = rparetoIV(2000, scale = exp(1),
                                   ineq = exp(-0.3), shape = exp(1)))
-\dontrun{par(mfrow = c(2, 1)); with(pdata, hist(y)); with(pdata, hist(log(y))) }
-fit <- vglm(y ~ 1, paretoIV, pdata, trace = TRUE)
+\dontrun{par(mfrow = c(2, 1))
+with(pdata, hist(y)); with(pdata, hist(log(y))) }
+fit <- vglm(y ~ 1, paretoIV, data = pdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/paretoIVUC.Rd b/man/paretoIVUC.Rd
index dca45c7..0edbad8 100644
--- a/man/paretoIVUC.Rd
+++ b/man/paretoIVUC.Rd
@@ -24,6 +24,7 @@
   Density, distribution function, quantile function and random generation
   for the Pareto(IV/III/II) distributions.
 
+
 }
 \usage{
 dparetoIV(x, location = 0, scale = 1, inequality = 1, shape = 1, log = FALSE)
@@ -38,7 +39,7 @@ dparetoII(x, location = 0, scale = 1, shape = 1, log = FALSE)
 pparetoII(q, location = 0, scale = 1, shape = 1)
 qparetoII(p, location = 0, scale = 1, shape = 1)
 rparetoII(n, location = 0, scale = 1, shape = 1)
-dparetoI(x, scale = 1, shape = 1)
+dparetoI(x, scale = 1, shape = 1, log = FALSE)
 pparetoI(q, scale = 1, shape = 1)
 qparetoI(p, scale = 1, shape = 1)
 rparetoI(n, scale = 1, shape = 1)
@@ -46,7 +47,10 @@ rparetoI(n, scale = 1, shape = 1)
 \arguments{
   \item{x, q}{vector of quantiles. }
   \item{p}{vector of probabilities. }
-  \item{n}{number of observations. Must be a single positive integer. }
+  \item{n}{number of observations. Must be a single positive integer.
+
+
+  }
   \item{location}{the location parameter. }
   \item{scale, shape, inequality}{the (positive) scale,
   inequality and shape parameters. }
@@ -62,6 +66,8 @@ rparetoI(n, scale = 1, shape = 1)
   functions beginning with the letter \code{p} give the distribution function,
   functions beginning with the letter \code{q} give the quantile function, and
   functions beginning with the letter \code{r} generates random deviates.
+
+
 }
 \references{
 
@@ -86,7 +92,7 @@ Fairland, Maryland: International Cooperative Publishing House.
 
 }
 \note{
-  The functions \code{[dpqr]paretoI} are the same as \code{[dpqr]pareto1}
+  The functions \code{[dpqr]paretoI} are the same as \code{[dpqr]pareto}
   except for a slight change in notation: \eqn{s=k} and
   \eqn{b=\alpha}{b=alpha}; see \code{\link{Pareto}}.
 
@@ -101,12 +107,12 @@ Fairland, Maryland: International Cooperative Publishing House.
 \examples{
 \dontrun{
 x <- seq(-0.2, 4, by = 0.01)
-loc <- 0; Scale <- 1; ineq <- 1; shape <- 1.0;
+loc <- 0; Scale <- 1; ineq <- 1; shape <- 1.0
 plot(x, dparetoIV(x, loc, Scale, ineq, shape), type = "l", col = "blue",
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", ylim = 0:1, las = 1, ylab = "")
 abline(h = 0, col = "blue", lty = 2)
-Q <- qparetoIV(seq(0.05,0.95,by = 0.05), loc, Scale, ineq, shape)
+Q <- qparetoIV(seq(0.05, 0.95, by = 0.05), loc, Scale, ineq, shape)
 lines(Q, dparetoIV(Q, loc, Scale, ineq, shape), col = "purple", lty = 3, type = "h")
 lines(x, pparetoIV(x, loc, Scale, ineq, shape), col = "orange")
 abline(h = 0, lty = 2)
diff --git a/man/pareto1.Rd b/man/paretoff.Rd
similarity index 73%
rename from man/pareto1.Rd
rename to man/paretoff.Rd
index 9fc28b4..fc7f375 100644
--- a/man/pareto1.Rd
+++ b/man/paretoff.Rd
@@ -1,6 +1,6 @@
-\name{pareto1}
-\alias{pareto1}
-\alias{tpareto1}
+\name{paretoff}
+\alias{paretoff}
+\alias{truncpareto}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Pareto and Truncated Pareto Distribution Family Functions }
 \description{
@@ -11,8 +11,8 @@
 
 }
 \usage{
- pareto1(lshape = "loge", location = NULL)
-tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
+paretoff(lshape = "loge", location = NULL)
+truncpareto(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -23,12 +23,23 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
 
   }
+  \item{location}{
+  Numeric.
+  The parameter \eqn{\alpha}{alpha} below.
+  If the user inputs a number then it is assumed known with this value.
+  The default means it is estimated by maximum likelihood,
+  in which case \code{min(y)} is used,
+  where \code{y} is the response vector.
+
+
+  }
   \item{lower, upper}{
   Numeric.
   Lower and upper limits for the truncated Pareto distribution.
   Each must be positive and of length 1.
   They are called \eqn{\alpha}{alpha} and \eqn{U} below.
 
+
   }
   \item{ishape}{
   Numeric.
@@ -38,19 +49,10 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
 
   }
-  \item{location}{
-  Numeric. The parameter \eqn{\alpha}{alpha} below.
-  If the user inputs a number then it is assumed known with this value.
-  The default means it is estimated by maximum likelihood
-  estimation, which means \code{min(y)} where \code{y} is the response
-  vector.
-
-
-  }
   \item{imethod}{
-  An integer with value \code{1} or \code{2} which
-  specifies the initialization method. If failure to converge occurs
-  try the other value, or else specify a value for \code{ishape}.
+  See \code{\link{CommonVGAMffArguments}} for information.
+  If failure to converge occurs then try specifying a value for
+  \code{ishape}.
 
 
   }
@@ -66,11 +68,12 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
   The Pareto distribution, which is used a lot in economics,
   has a probability density function that can be written
-  \deqn{f(y) = k  \alpha^k / y^{k+1}}{%
-        f(y) = k * alpha^k / y^(k+1)}
-  for \eqn{0 < \alpha < y}{0< alpha < y} and \eqn{k>0}.
-  The \eqn{\alpha}{alpha} is known as the location parameter, and 
-  \eqn{k} is known as the shape parameter.
+  \deqn{f(y;k,\alpha) = k  \alpha^k / y^{k+1}}{%
+        f(y;k,alpha) = k * alpha^k / y^(k+1)}
+  for \eqn{0<k} and \eqn{0 < \alpha < y}{0< alpha < y}.
+  The \eqn{\alpha}{alpha} is called the location parameter, and
+  it is either assumed \emph{known} or else \code{min(y)} is used.
+  The parameter \eqn{k} is called the shape parameter.
   The mean of \eqn{Y} is
   \eqn{\alpha k/(k-1)}{alpha*k/(k-1)} provided \eqn{k > 1}.
   Its variance is
@@ -102,9 +105,9 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
 }
 \references{ 
-  Evans, M., Hastings, N. and Peacock, B. (2000)
-  \emph{Statistical Distributions},
-  New York: Wiley-Interscience, Third edition.
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
+\emph{Statistical Distributions},
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
   Aban, I. B., Meerschaert, M. M. and Panorska, A. K. (2006)
@@ -121,10 +124,10 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
   Bradford distribution.
 
 
-  For \code{pareto1},
+  For \code{paretoff},
   if the estimate of \eqn{k} is less than or equal to unity
   then the fitted values will be \code{NA}s.
-  Also, \code{pareto1} fits the Pareto(I) distribution.
+  Also, \code{paretoff} fits the Pareto(I) distribution.
   See \code{\link{paretoIV}} for the more general Pareto(IV/III/II)
   distributions, but there is a slight change in notation: \eqn{s = k}
   and \eqn{b=\alpha}{b = alpha}.
@@ -134,7 +137,7 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
   natural upper bound on the probability tail.
   The upper truncated Pareto distribution has three parameters (called
   \eqn{\alpha}{alpha}, \eqn{U} and \eqn{k} here) but the family function
-  \code{tpareto} estimates only \eqn{k}.
+  \code{truncpareto()} estimates only \eqn{k}.
   With known lower and upper limits, the ML estimator of \eqn{k} has
   the usual properties of MLEs. 
  Aban et al. (2006) discuss other inferential details.
@@ -144,7 +147,7 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 \section{Warning }{
   The usual or unbounded Pareto distribution has two
   parameters (called \eqn{\alpha}{alpha} and \eqn{k} here)
-  but the family function \code{pareto1} estimates only
+  but the family function \code{paretoff} estimates only
   \eqn{k} using iteratively reweighted least squares. The
   MLE of the \eqn{\alpha}{alpha} parameter lies on the
   boundary and is \code{min(y)} where \code{y} is the
@@ -160,7 +163,7 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 }
 \seealso{
   \code{\link{Pareto}},
-  \code{\link{Tpareto}},
+  \code{\link{Truncpareto}},
   \code{\link{paretoIV}},
   \code{\link{gpd}}.
 
@@ -168,26 +171,26 @@ tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 }
 \examples{
 alpha <- 2; kay <- exp(3)
-pdat <- data.frame(y = rpareto(n = 1000, location = alpha, shape = kay))
-fit <- vglm(y ~ 1, pareto1, pdat, trace = TRUE)
-fit at extra # The estimate of alpha is here
+pdata <- data.frame(y = rpareto(n = 1000, location = alpha, shape = kay))
+fit <- vglm(y ~ 1, paretoff, pdata, trace = TRUE)
+fit at extra  # The estimate of alpha is here
 head(fitted(fit))
-with(pdat, mean(y))
+with(pdata, mean(y))
 coef(fit, matrix = TRUE)
-summary(fit) # Standard errors are incorrect!!
+summary(fit)  # Standard errors are incorrect!!
 
 # Here, alpha is assumed known
-fit2 <- vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE)
-fit2 at extra # alpha stored here
+fit2 <- vglm(y ~ 1, paretoff(location = alpha), pdata, trace = TRUE)
+fit2 at extra  # alpha stored here
 head(fitted(fit2))
 coef(fit2, matrix = TRUE)
-summary(fit2) # Standard errors are okay
+summary(fit2)  # Standard errors are okay
 
 # Upper truncated Pareto distribution
 lower <- 2; upper <- 8; kay <- exp(2)
-pdat3 <- data.frame(y = rtpareto(n = 100, lower = lower,
-                                 upper = upper, shape = kay))
-fit3 <- vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE)
+pdata3 <- data.frame(y = rtruncpareto(n = 100, lower = lower,
+                                      upper = upper, shape = kay))
+fit3 <- vglm(y ~ 1, truncpareto(lower, upper), pdata3, trace = TRUE)
 coef(fit3, matrix = TRUE)
 c(fit3 at misc$lower, fit3 at misc$upper)
 }
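
The Pareto(I) density and mean quoted in the \details section can be
checked with a short base-R sketch that simulates by inverting the
distribution function, y = alpha * (1 - u)^(-1/k); this does not use
rpareto() itself.

set.seed(1)
alpha <- 2; kay <- 3
u <- runif(1e6)
y <- alpha * (1 - u)^(-1 / kay)
c(empirical = mean(y), theoretical = alpha * kay / (kay - 1))  # Close, since kay > 1
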
diff --git a/man/perks.Rd b/man/perks.Rd
index b600a85..8b5bc00 100644
--- a/man/perks.Rd
+++ b/man/perks.Rd
@@ -126,7 +126,7 @@ Also, monitor convergence by setting \code{trace = TRUE}.
 
 \examples{
 \dontrun{ set.seed(123)
-pdata <- data.frame(x2 = runif(nn <- 1000)) # x2 unused
+pdata <- data.frame(x2 = runif(nn <- 1000))  # x2 unused
 pdata <- transform(pdata, eta1  = -1,
                           ceta1 =  1)
 pdata <- transform(pdata, shape1 = exp(eta1),
diff --git a/man/perksUC.Rd b/man/perksUC.Rd
index 01ecbbc..effcc0f 100644
--- a/man/perksUC.Rd
+++ b/man/perksUC.Rd
@@ -57,7 +57,7 @@ rperks(n, shape, scale = 1)
 probs <- seq(0.01, 0.99, by = 0.01)
 Shape <- exp(-1.0); Scale <- exp(1);
 max(abs(pperks(qperks(p = probs, Shape, Scale),
-                  Shape, Scale) - probs)) # Should be 0
+                  Shape, Scale) - probs))  # Should be 0
 
 \dontrun{ x <- seq(-0.1, 7, by = 0.01);
 plot(x, dperks(x, Shape, Scale), type = "l", col = "blue", las = 1,
@@ -69,7 +69,7 @@ lines(x, pperks(x, Shape, Scale), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qperks(probs, Shape, Scale)
 lines(Q, dperks(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
-pperks(Q, Shape, Scale) - probs # Should be all zero
+pperks(Q, Shape, Scale) - probs  # Should be all zero
 abline(h = probs, col = "purple", lty = 3) }
 }
 \keyword{distribution}
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index 57fd302..a998289 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -8,17 +8,17 @@ applicable for rank-1 or rank-2 models with argument \code{noRRR = ~ 1}.
 
 }
 \usage{
-perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE, 
-              xlim = NULL, ylim = NULL, zlim = NULL,
-              gridlength = if (Rank == 1) 301 else c(51,51),
-              whichSpecies = NULL,
-              xlab = if (Rank == 1) "Latent Variable" else "Latent Variable 1",
-              ylab = if (Rank == 1) "Expected Value" else "Latent Variable 2",
-              zlab = "Expected value", labelSpecies = FALSE,
-              stretch = 1.05, main = "", ticktype = "detailed",
-              col = if (Rank == 1) par()$col else "white",
-              llty = par()$lty, llwd = par()$lwd,
-              add1 = FALSE, ...)
+perspqrrvglm(x, varI.latvar = FALSE, reference = NULL, show.plot = TRUE, 
+             xlim = NULL, ylim = NULL, zlim = NULL,
+             gridlength = if (Rank == 1) 301 else c(51,51),
+             which.species = NULL,
+             xlab = if (Rank == 1) "Latent Variable" else "Latent Variable 1",
+             ylab = if (Rank == 1) "Expected Value" else "Latent Variable 2",
+             zlab = "Expected value", labelSpecies = FALSE,
+             stretch = 1.05, main = "", ticktype = "detailed",
+             col = if (Rank == 1) par()$col else "white",
+             llty = par()$lty, llwd = par()$lwd,
+             add1 = FALSE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -27,7 +27,7 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
   constrained quadratic ordination (CQO) object.
 
   }
-  \item{varlvI}{
+  \item{varI.latvar}{
   Logical that is fed into \code{\link{Coef.qrrvglm}}.
 
   }
@@ -35,7 +35,7 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
   Integer or character that is fed into \code{\link{Coef.qrrvglm}}.
 
   }
-  \item{plot.it}{ Logical. Plot it? }
+  \item{show.plot}{ Logical. Plot it? }
   \item{xlim, ylim}{
   Limits of the x- and y-axis. Both are numeric of length 2.
   See \code{\link[graphics]{par}}.
@@ -54,7 +54,7 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
   are the number of grid points on the  x- and y-axes respectively.
 
   }
-  \item{whichSpecies}{
+  \item{which.species}{
   Numeric or character vector. Indicates which species are to be
   plotted. The default is to plot all of them.  If numeric, it should
   contain values in the set \{1,2,\ldots,\eqn{S}\} where \eqn{S}
@@ -123,6 +123,7 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
   \code{\link{lvplot.qrrvglm}} but plots the curves along a fine grid
   and there is no rugplot to show the site scores.
 
+
   For a rank-2 model, a perspective plot has the first latent variable as
   the x-axis, the second latent variable as the y-axis, and the expected
   value (fitted value) as the z-axis.  The result of a CQO is that each
@@ -130,13 +131,15 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
   will, at each grid point, work out the maximum fitted value over all
   the species. The resulting response surface is plotted. Thus rare
   species will be obscured and abundant species will dominate the plot.
-  To view rare species, use the \code{whichSpecies} argument to select
+  To view rare species, use the \code{which.species} argument to select
   a subset of the species.
 
+
   A perspective  plot will be performed if \code{noRRR = ~ 1}, and
   \code{Rank = 1} or \code{2}.  Also, all the tolerance matrices of
   those species to be plotted must be positive-definite.
 
+
 }
 \value{
   For a rank-2 model, a list with the following components.
@@ -145,16 +148,23 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
   of fitted values on the grid. Here, \eqn{G_1}{G1} and \eqn{G_2}{G2}
   are the two values of \code{gridlength}.
 
+
+  }
+  \item{latvar1grid, latvar2grid}{
+  The grid points for the x-axis and y-axis.
+
+
   }
-  \item{lv1grid, lv2grid}{The grid points for the x-axis and y-axis. }
-  \item{maxfitted}{
+  \item{max.fitted}{
   A \eqn{G_1}{G1} by \eqn{G_2}{G2} matrix of maximum
-  of the fitted values over all species. These are the
-  values that are plotted on the z-axis.
+  of the fitted values over all species.
+  These are the values that are plotted on the z-axis.
+
 
   }
-  For a rank-1 model, the components \code{lv2grid} and \code{maxfitted}
-  are \code{NULL}.
+  For a rank-1 model, the components \code{latvar2grid} and
+  \code{max.fitted} are \code{NULL}.
+
 
 }
 \references{
@@ -173,6 +183,7 @@ canonical Gaussian ordination.
   Yee (2004) does not refer to perspective plots.  Instead, contour plots
   via \code{\link{lvplot.qrrvglm}} are used.
 
+
   For rank-1 models, a similar function to this one is
   \code{\link{lvplot.qrrvglm}}.  It plots the fitted values at the actual
   site score values rather than on a fine grid here.  The result has
@@ -181,28 +192,31 @@ canonical Gaussian ordination.
   bell-shaped curves are the truth because the data is more of a distance
   away.
 
+
 }
 \seealso{
-\code{\link[graphics]{persp}},
-\code{\link{cqo}},
-\code{\link{Coef.qrrvglm}},
-\code{\link{lvplot.qrrvglm}},
-\code{\link[graphics]{par}},
-\code{\link[graphics]{title}}.
+  \code{\link[graphics]{persp}},
+  \code{\link{cqo}},
+  \code{\link{Coef.qrrvglm}},
+  \code{\link{lvplot.qrrvglm}},
+  \code{\link[graphics]{par}},
+  \code{\link[graphics]{title}}.
+
+
 }
 \examples{\dontrun{
-hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+hspider[, 1:6] <- scale(hspider[, 1:6])  # Good idea when ITolerances = TRUE
 set.seed(111)
 r1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                 Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-          poissonff, hspider, trace = FALSE, ITolerances = TRUE)
+          poissonff, data = hspider, trace = FALSE, ITolerances = TRUE)
 set.seed(111)  # r2 below is an ill-conditioned model
 r2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                 Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-          isdlv = c(2.4,1.0), Muxfactor = 3.0, trace = FALSE,
-          poissonff, hspider, Rank = 2, EqualTolerances = TRUE)
+          isd.lv = c(2.4, 1.0), Muxfactor = 3.0, trace = FALSE,
+          poissonff, data = hspider, Rank = 2, EqualTolerances = TRUE)
 
 sort(r1@misc$deviance.Bestof)  # A history of the fits
 sort(r2@misc$deviance.Bestof)  # A history of the fits
@@ -211,10 +225,10 @@ if (deviance(r2) > 857) stop("suboptimal fit obtained")
 persp(r1, xlim = c(-6,5), col = 1:4, label = TRUE)
 
 # Involves all species 
-persp(r2, xlim = c(-6,5), ylim = c(-4,5), theta = 10, phi = 20, zlim = c(0,220))
-# Omit the two dominant species to see what's behind them
-persp(r2, xlim = c(-6,5), ylim = c(-4,5), theta = 10, phi = 20, zlim = c(0,220), 
-      which = (1:10)[-c(8,10)]) # Use zlim to retain the original z-scale
+persp(r2, xlim = c(-6,5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220))
+# Omit the two dominant species to see what is behind them
+persp(r2, xlim = c(-6,5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220),
+      which = (1:10)[-c(8, 10)])  # Use zlim to retain the original z-scale
 }
 }
 \keyword{models}
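
The Details above describe the rank-2 z-axis as the maximum fitted value over
all species at each grid point.  A minimal base-R sketch of that reduction,
using made-up numbers rather than real CQO output, is:

set.seed(1)
fitvals <- array(runif(5 * 4 * 3), dim = c(5, 4, 3))  # G1 x G2 grid, 3 species
max.fitted <- apply(fitvals, c(1, 2), max)  # the values plotted on the z-axis
persp(x = 1:5, y = 1:4, z = max.fitted, ticktype = "detailed",
      xlab = "Latent Variable 1", ylab = "Latent Variable 2",
      zlab = "Expected value")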
diff --git a/man/plackUC.Rd b/man/plackUC.Rd
index 8def019..02d8da5 100644
--- a/man/plackUC.Rd
+++ b/man/plackUC.Rd
@@ -23,6 +23,7 @@ rplack(n, oratio)
   Logical.
   If \code{TRUE} then the logarithm is returned.
 
+
   }
 }
 \value{
@@ -55,7 +56,7 @@ Some contributions to contingency-type distributions.
 %}
 \seealso{
   \code{\link{plackett}},
-  \code{\link{frank}}.
+  \code{\link{bifrankcop}}.
 
 
 }
@@ -63,15 +64,15 @@ Some contributions to contingency-type distributions.
 \dontrun{ N <- 101; oratio <- exp(1)
 x <- seq(0.0, 1.0, len = N)
 ox <- expand.grid(x, x)
-z <- dplack(ox[,1], ox[,2], oratio = oratio)
-contour(x, x, matrix(z, N, N), col = "blue")
-z <- pplack(ox[,1], ox[,2], oratio = oratio)
-contour(x, x, matrix(z, N, N), col = "blue")
+zedd <- dplack(ox[, 1], ox[, 2], oratio = oratio)
+contour(x, x, matrix(zedd, N, N), col = "blue")
+zedd <- pplack(ox[, 1], ox[, 2], oratio = oratio)
+contour(x, x, matrix(zedd, N, N), col = "blue")
 
 plot(rr <- rplack(n = 3000, oratio = oratio))
-par(mfrow = c(1,2))
-hist(rr[,1]) # Should be uniform
-hist(rr[,2]) # Should be uniform
+par(mfrow = c(1, 2))
+hist(rr[, 1])  # Should be uniform
+hist(rr[, 2])  # Should be uniform
 }
 }
 \keyword{distribution}
diff --git a/man/plackett.Rd b/man/plackett.Rd
index 24802ba..da6a1c0 100644
--- a/man/plackett.Rd
+++ b/man/plackett.Rd
@@ -6,6 +6,7 @@
   Estimate the association parameter of Plackett's bivariate distribution
   by maximum likelihood estimation.
 
+
 }
 \usage{
 plackett(link = "loge", ioratio = NULL, imethod = 1, nsimEIM = 200)
@@ -17,15 +18,18 @@ plackett(link = "loge", ioratio = NULL, imethod = 1, nsimEIM = 200)
   See \code{\link{Links}} for more choices
   and information.
 
+
   }
   \item{ioratio}{
   Numeric. Optional initial value for \eqn{\psi}{psi}.
   If a convergence failure occurs try assigning a value or a different value.
 
+
   }
   \item{imethod, nsimEIM}{
   See \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -93,7 +97,7 @@ A class of bivariate distributions.
 
 \seealso{
   \code{\link{rplack}},
-  \code{\link{frank}}.
+  \code{\link{bifrankcop}}.
 
 
 }
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
index c00b230..9c5a02b 100644
--- a/man/plotqrrvglm.Rd
+++ b/man/plotqrrvglm.Rd
@@ -66,8 +66,8 @@ canonical Gaussian ordination.
 \examples{\dontrun{
 # QRR-VGLM on the hunting spiders data
 # This is computationally expensive
-set.seed(111) # This leads to the global solution
-# hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+set.seed(111)  # This leads to the global solution
+# hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardize the environmental variables
 p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                 Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
                 Trocterr, Zoraspin) ~
diff --git a/man/plotrcim0.Rd b/man/plotrcim0.Rd
index cd71d82..82a42ec 100644
--- a/man/plotrcim0.Rd
+++ b/man/plotrcim0.Rd
@@ -11,7 +11,7 @@
 
 }
 \usage{
-  plotrcim0(object, centered = TRUE, whichplots = c(1, 2),
+  plotrcim0(object, centered = TRUE, which.plots = c(1, 2),
             hline0 = TRUE, hlty = "dashed", hcol = par()$col, hlwd = par()$lwd,
             rfirst = 1, cfirst = 1,
             rtype = "h", ctype = "h",
@@ -21,7 +21,8 @@
             rxlab = "", rylab = "Row effects",
             cmain = "Column effects", csub = "",
             cxlab= "", cylab = "Column effects",
-            rcol = par()$col, ccol = par()$col, ...)
+            rcol = par()$col, ccol = par()$col,
+            no.warning = FALSE, ...)
 
 }
 \arguments{
@@ -30,12 +31,14 @@
   This should be of rank-0, i.e., main effects only and no
   interactions.
 
+
 }
-\item{whichplots}{  
+\item{which.plots}{  
   Numeric, describing which plots are to be plotted.
   The row effects plot is 1 and the column effects plot is 2.
   Set the value \code{0}, say, for no plots at all.
 
+
 }
 \item{centered}{  
   Logical.
@@ -44,6 +47,7 @@
   If \code{FALSE} then the raw effects are used (of which
   the first are zero by definition).
 
+
 }
 \item{hline0, hlty, hcol, hlwd}{  
   \code{hline0} is logical. If \code{TRUE} then a horizontal line is
@@ -51,21 +55,25 @@
   Probably having \code{hline0 = TRUE} only makes sense when
   \code{centered = TRUE}.
 
+
 }
 \item{rfirst, cfirst}{  
   \code{rfirst} is the level of row that is placed first in the
   row effects plot, etc.
 
+
 } 
 \item{rmain, cmain}{  
   Character.
   \code{rmain} is the main label in the row effects plot, etc.
 
+
 }
 \item{rtype, ctype, rsub, csub}{
   See the \code{type} and \code{sub} arguments of 
   \code{\link[graphics:plot]{plot}}.
 
+
 }
 %\item{rlabels, clabels}{
 % rlabels = FALSE, clabels = FALSE,
@@ -83,29 +91,40 @@
   see \code{\link[graphics:par]{par}}.
   Ditto for \code{cxlab} and \code{cylab} for the column effects plot.
 
+
 }
 \item{rcex.lab, ccex.lab}{  
   Numeric.
   \code{rcex.lab} is \code{cex} for the row effects plot label,
   etc.
 
+
 }
 \item{rcex.axis, ccex.axis}{
   Numeric.
   \code{rcex.axis} is the \code{cex} argument for the row effects axis label,
   etc.
 
+
 }
 
 \item{rtick, ctick}{
   Logical.
   If \code{rtick = TRUE} then add ticks to the row effects plot, etc.
 
+
 }
 \item{rcol, ccol}{
   \code{rcol} give a colour for the row effects plot,
   etc.
 
+
+}
+\item{no.warning}{
+  Logical. If \code{TRUE} then no warning is issued if the
+  model is not rank-0.
+
+
 }
 %\item{llwd}{
 %  Fed into \code{lwd} of \code{\link[graphics:par]{par}}.
@@ -184,9 +203,9 @@
 
 }
 \examples{
-alcoff.e <- moffset(alcoff, "6", "Mon", postfix = "*") # Effective day
+alcoff.e <- moffset(alcoff, "6", "Mon", postfix = "*")  # Effective day
 fit0 <- rcim(alcoff.e, family = poissonff)
-\dontrun{par(oma = c(0, 0, 4, 0), mfrow = 1:2) # For all plots below too
+\dontrun{par(oma = c(0, 0, 4, 0), mfrow = 1:2)  # For all plots below too
 ii = plot(fit0, rcol = "blue", ccol = "orange",
           lwd = 4, ylim = c(-2, 2),  # A common ylim
           cylab = "Effective daily effects", rylab = "Hourly effects",
@@ -199,7 +218,7 @@ fit1 <- rcim(alcoff.e, negbinomial, trace = TRUE)
 \dontrun{ plot(fit1, ylim = c(-2, 2)) }
 
 # Univariate normal example
-fit2 <- rcim(alcoff.e, normal1, trace = TRUE)
+fit2 <- rcim(alcoff.e, uninormal, trace = TRUE)
 \dontrun{ plot(fit2, ylim = c(-200, 400)) }
 
 # Median-polish example
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index ca39d14..9288072 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -3,9 +3,10 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Default VGAM Plotting }
 \description{
-  Component functions of a \code{\link{vgam-class}} object can be plotted 
-  with \code{plotvgam()}. These are on the scale of the linear/additive
-  predictor. 
+  Component functions of a \code{\link{vgam-class}} object can
+  be plotted with \code{plotvgam()}. These are on the scale of
+  the linear/additive predictor.
+
 }
 \usage{
 plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
@@ -19,73 +20,94 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
 \arguments{
 
   \item{x}{ A fitted \pkg{VGAM} object, e.g., produced by
-    \code{\link{vgam}}, \code{\link{vglm}}, or \code{\link{rrvglm}}.
+  \code{\link{vgam}}, \code{\link{vglm}}, or \code{\link{rrvglm}}.
+
+
   }
   \item{newdata}{ Data frame.
-    May be used to reconstruct the original data set. }
-  \item{y}{ Unused. }
+  May be used to reconstruct the original data set.
+
+
+  }
+  \item{y}{ Unused.
+  }
   \item{residuals}{
   Logical. If \code{TRUE} then residuals are plotted.
-  See \code{type.residuals}}
+  See \code{type.residuals}.
+
+
+  }
 \item{rugplot}{
   Logical. If \code{TRUE} then a rug plot is plotted at the
   foot of each plot. These values are jittered to expose ties.
+
   }
   \item{se}{
-    Logical. If \code{TRUE} then approximate \eqn{\pm 2}{+-2} pointwise
-    standard error bands are included in the plot.
+  Logical. If \code{TRUE} then approximate \eqn{\pm 2}{+-2} pointwise
+  standard error bands are included in the plot.
+
 
   }
   \item{scale}{
-    Numerical. By default, each plot will have its own
-    y-axis scale. However, by specifying a value, each plot's y-axis
-    scale will be at least \code{scale} wide.
+  Numerical. By default, each plot will have its own
+  y-axis scale. However, by specifying a value, each plot's y-axis
+  scale will be at least \code{scale} wide.
+
     }
   \item{raw}{
-    Logical. If \code{TRUE} then the smooth functions are those
-    obtained directly by the algorithm, and are plotted without
-    having to premultiply with the constraint matrices.
-    If \code{FALSE} then the smooth functions have been premultiply by
-    the constraint matrices.
-    The \code{raw} argument is directly fed into \code{predict.vgam()}.
+  Logical. If \code{TRUE} then the smooth functions are those
+  obtained directly by the algorithm, and are plotted without
+  being premultiplied by the constraint matrices.
+  If \code{FALSE} then the smooth functions have been premultiplied by
+  the constraint matrices.
+  The \code{raw} argument is directly fed into \code{predict.vgam()}.
+
 
   }
   \item{offset.arg}{
-   Numerical vector of length \eqn{r}.
-    These are added to the component functions. Useful for
-    separating out the functions when \code{overlay} is \code{TRUE}.
-    If \code{overlay} is \code{TRUE} and there is one covariate then
-    using the intercept values as the offsets can be a good idea.
+  Numerical vector of length \eqn{r}.
+  These are added to the component functions. Useful for
+  separating out the functions when \code{overlay} is \code{TRUE}.
+  If \code{overlay} is \code{TRUE} and there is one covariate then
+  using the intercept values as the offsets can be a good idea.
+
 
   }
   \item{deriv.arg}{
-   Numerical. The order of the derivative.
-    Should be assigned an small 
-    integer such as 0, 1, 2. Only applying to \code{s()} terms,
-    it plots the derivative.
+  Numerical. The order of the derivative.
+  Should be assigned a small
+  integer such as 0, 1, 2. It applies only to \code{s()} terms,
+  for which the derivative is plotted.
+
 
   }
   \item{overlay}{
-   Logical. If \code{TRUE} then component functions of the same
-    covariate are overlaid on each other.
-    The functions are centered, so \code{offset.arg} can be useful
-    when \code{overlay} is \code{TRUE}.
+  Logical. If \code{TRUE} then component functions of the same
+  covariate are overlaid on each other.
+  The functions are centered, so \code{offset.arg} can be useful
+  when \code{overlay} is \code{TRUE}.
+
+
+  }
+  \item{type.residuals}{
+  If \code{residuals} is \code{TRUE} then the first
+  possible value
+  of this vector is used to specify the type of residual.
+
 
-    }
-    \item{type.residuals}{
-   if \code{residuals} is \code{TRUE} then the first
-      possible value
-      of this vector, is used to specify the type of
-      residual.
   }
 
   \item{plot.arg}{
-   Logical. If \code{FALSE} then no plot is produced. }
+   Logical. If \code{FALSE} then no plot is produced.
+
+
+  }
   \item{which.term}{
-   Character or integer vector containing all terms to be
-    plotted, e.g., \code{which.term = c("s(age)", "s(height"))} or
-    \code{which.term = c(2, 5, 9)}.
-    By default, all are plotted.
+  Character or integer vector containing all terms to be
+  plotted, e.g., \code{which.term = c("s(age)", "s(height)")} or
+  \code{which.term = c(2, 5, 9)}.
+  By default, all are plotted.
+
 
   }
   \item{which.cf}{ An integer-valued vector specifying which
@@ -93,27 +115,31 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
   The values must be from the set \{1,2,\ldots,\eqn{r}\}.
   By default, all are plotted.
 
+
   }
   \item{control}{
   Other control parameters. See \code{\link{plotvgam.control}}.
 
+
   }
   \item{\dots}{
   Other arguments that can be fed into
   \code{\link{plotvgam.control}}. This includes line colors,
   line widths, line types, etc.
 
+
   }
 
   \item{varxij}{ Positive integer.
-    Used if \code{xij} of \code{\link{vglm.control}} was used,
-    this chooses which inner argument the component is plotted against.
-    This argument is related to \code{raw = TRUE} and terms such as
-    \code{NS(dum1,dum2)} and constraint matrices that have more than
-    one column. The default would plot the smooth against \code{dum1}
-    but setting \code{varxij = 2} could mean plotting the smooth against
-    \code{dum2}.
-    See the \pkg{VGAM} website for further information.
+  Used if \code{xij} of \code{\link{vglm.control}} was used,
+  this chooses which inner argument the component is plotted against.
+  This argument is related to \code{raw = TRUE} and terms such as
+  \code{NS(dum1,dum2)} and constraint matrices that have more than
+  one column. The default would plot the smooth against \code{dum1}
+  but setting \code{varxij = 2} could mean plotting the smooth against
+  \code{dum2}.
+  See the \pkg{VGAM} website for further information.
+
 
   }
 
@@ -133,22 +159,25 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
 \value{
   The original object, but with the \code{preplot} slot of the object
   assigned information regarding the plot.
-}
-\references{
-
-
-Yee, T. W. and Wild, C. J. (1996)
-Vector generalized additive models.
-\emph{Journal of the Royal Statistical Society, Series B, Methodological},
-\bold{58}, 481--493.
-
 
-Documentation accompanying the \pkg{VGAM} package at
-\url{http://www.stat.auckland.ac.nz/~yee}
-contains further information and examples.
 
 
 }
+%\references{
+%
+%
+%Yee, T. W. and Wild, C. J. (1996)
+%Vector generalized additive models.
+%\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+%\bold{58}, 481--493.
+%
+%
+%Documentation accompanying the \pkg{VGAM} package at
+%\url{http://www.stat.auckland.ac.nz/~yee}
+%contains further information and examples.
+%
+%
+%}
 \author{ Thomas W. Yee }
 
 \note{
@@ -171,6 +200,7 @@ contains further information and examples.
   \code{\link{vgam}},
   \code{\link{plotvgam.control}},
   \code{predict.vgam},
+  \code{\link{plotvglm}},
   \code{\link{vglm}}.
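
As a hedged illustration of the overlay and offset.arg arguments documented
above (a sketch only; the coalminers fit and the offsets c(0, 1, 2) are chosen
purely for display):

library(VGAM)
coalminers <- transform(coalminers, Age = (age - 42) / 5)
fit.cm <- vgam(cbind(nBnW, nBW, BnW, BW) ~ s(Age), binom2.or(zero = NULL),
               data = coalminers)
# Overlay the three centred component functions of s(Age) in one panel;
# offset.arg (one value per linear predictor) separates the curves vertically.
plotvgam(fit.cm, overlay = TRUE, offset.arg = c(0, 1, 2), se = FALSE,
         lcol = c("blue", "orange", "purple"))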
   
   
diff --git a/man/plotvglm.Rd b/man/plotvglm.Rd
new file mode 100644
index 0000000..dd878da
--- /dev/null
+++ b/man/plotvglm.Rd
@@ -0,0 +1,110 @@
+\name{plotvglm}
+\alias{plotvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Plots for VGLMs }
+\description{
+  Although not fully working yet, this function can be used to feed
+  the object to the VGAM plotting function.
+  In the future some diagnostic plots will be produced.
+
+}
+\usage{
+plotvglm(x, type = c("vglm", "vgam"),
+         newdata = NULL, y = NULL, residuals = NULL,
+         rugplot = TRUE, se = FALSE, scale = 0, raw = TRUE,
+         offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+         type.residuals = c("deviance", "working", "pearson", "response"),
+         plot.arg = TRUE, which.term = NULL, which.cf = NULL,
+         control = plotvgam.control(...), varxij = 1, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+
+  \item{x}{
+  Same as \code{\link{plotvgam}}.
+
+
+  }
+  \item{type}{
+  Default is the first choice.
+  Currently the first choice gives an error (not written yet).
+  If \code{"vgam"} then all the arguments are fed into
+  \code{\link{plotvgam}}.
+
+
+  }
+
+  \item{newdata, y, residuals, rugplot}{
+    Same as \code{\link{plotvgam}}.
+
+
+  }
+  \item{se, scale, raw, offset.arg}{
+    Same as \code{\link{plotvgam}}.
+
+  }
+  \item{deriv.arg, overlay, type.residuals}{
+    Same as \code{\link{plotvgam}}.
+
+  }
+  \item{plot.arg, which.term, which.cf, control}{
+    Same as \code{\link{plotvgam}}.
+
+  }
+  \item{\dots, varxij}{
+    Same as \code{\link{plotvgam}}.
+
+  }
+
+}
+\details{
+  Currently this function has not been written.
+  When it is, some diagnostic plots based on
+  residuals and hatvalues will be produced.
+  In the meantime, this function can be used to
+  call the plotting function for \code{\link{vgam}} objects.
+
+
+}
+\value{
+  Same as \code{\link{plotvgam}}.
+
+
+}
+%\references{
+%}
+%\author{ Thomas W. Yee }
+
+%\note{
+% \code{plotvglm()} is quite buggy at the moment.
+
+  
+% \code{plotvglm()} works in a similar
+% manner to S-PLUS's \code{plot.gam()}, however, there is no
+% options for interactive construction of the plots yet. 
+
+  
+%}
+
+\seealso{
+  \code{\link{plotvgam}},
+  \code{\link{plotvgam.control}},
+  \code{\link{vglm}}.
+  
+  
+}
+\examples{
+coalminers <- transform(coalminers, Age = (age - 42) / 5)
+fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ bs(Age),
+            binom2.or(zero = NULL), coalminers)
+\dontrun{ par(mfrow = c(1, 3))
+plot(fit, type = "vgam", se = TRUE, ylim = c(-3, 2), las = 1)
+plot(fit, type = "vgam", se = TRUE, which.cf = 1:2,
+     lcol = "blue", scol = "orange", ylim = c(-3, 2))
+plot(fit, type = "vgam", se = TRUE, which.cf = 1:2,
+     lcol = "blue", scol = "orange", overlay = TRUE) }
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
+\keyword{graphs}
diff --git a/man/poissonp.Rd b/man/poisson.points.Rd
similarity index 71%
rename from man/poissonp.Rd
rename to man/poisson.points.Rd
index 2b4134e..56f9756 100644
--- a/man/poissonp.Rd
+++ b/man/poisson.points.Rd
@@ -1,5 +1,5 @@
-\name{poissonp}
-\alias{poissonp}
+\name{poisson.points}
+\alias{poisson.points}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Poisson-points-on-a-plane/volume Distances Distribution }
 \description{
@@ -8,32 +8,42 @@
 
 }
 \usage{
-poissonp(ostatistic, dimension = 2, link = "loge",
-         idensity = NULL, imethod = 1)
+poisson.points(ostatistic, dimension = 2, link = "loge",
+               idensity = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{ostatistic}{
-  Order statistic. A single positive integer.
-  For example, the value 5 means the response are the distances of the
-  fifth nearest value to that point (usually over many planes or volumes).
+  Order statistic.
+  A single positive value, usually an integer.
+  For example, the value 5 means the responses are the distances
+  to the fifth nearest point (usually over many
+  planes or volumes).
+  Non-integers are allowed because the value 1.5 coincides
+  with \code{\link{maxwell}} when \code{dimension = 2}.
+  Note: if \code{ostatistic = 1} and \code{dimension = 2} then
+  this \pkg{VGAM} family function coincides with \code{\link{rayleigh}}.
+
 
   }
   \item{dimension}{
   The value 2 or 3; 2 meaning a plane and 3 meaning a volume.
 
+
   }
   \item{link}{
   Parameter link function applied to the (positive) density parameter,
   called \eqn{\lambda}{lambda} below.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{idensity}{
   Optional initial value for the parameter.
   A \code{NULL} value means a value is obtained internally.
   Use this argument if convergence failure occurs.
 
+
   }
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
@@ -41,6 +51,7 @@ poissonp(ostatistic, dimension = 2, link = "loge",
   If failure to converge occurs try another value
   and/or else specify a value for \code{idensity}.
 
+
   }
 }
 \details{
@@ -61,26 +72,31 @@ poissonp(ostatistic, dimension = 2, link = "loge",
   is the \emph{density} of the points.
   This \pkg{VGAM} family function estimates \eqn{\lambda}{lambda} by
   specifying the argument \code{ostatistic} and using
-  \code{dimension=3}.
+  \code{dimension = 3}.
 
 
   The mean of \eqn{D_u} is returned as the fitted values.
   Newton-Raphson is the same as Fisher-scoring.
 
+
 }
 \section{Warning}{
   Convergence may be slow if the initial values are far from the
   solution. This often corresponds to the situation when the response
   values are all close to zero, i.e., there is a high density of points.
 
+
   Formulae such as the means have not been fully checked.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}},
   \code{\link{rrvglm}}
   and \code{\link{vgam}}.
+
+
 }
 %\references{ 
 %}
@@ -88,15 +104,21 @@ poissonp(ostatistic, dimension = 2, link = "loge",
 %\note{
 %}
 \seealso{
-     \code{\link{poissonff}}.
+  \code{\link{poissonff}},
+  \code{\link{maxwell}},
+  \code{\link{rayleigh}}.
+
 
 }
 \examples{
-pdata <- data.frame(y = rgamma(10, shape = exp(-1))) # Not proper data!
-os <- 2
-fit <- vglm(y ~ 1, poissonp(os, 2), pdata, tra = TRUE, crit = "c")
-fit <- vglm(y ~ 1, poissonp(os, 3), pdata, tra = TRUE, crit = "c") # Slow convergence?
-fit <- vglm(y ~ 1, poissonp(os, 3, idensi = 1), pdata, trace = TRUE, crit = "c")
+pdata <- data.frame(y = rgamma(10, shape = exp(-1)))  # Not proper data!
+ostat <- 2
+fit <- vglm(y ~ 1, poisson.points(ostat, 2), pdata,
+            trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, poisson.points(ostat, 3), pdata,
+            trace = TRUE, crit = "coef")  # Slow convergence?
+fit <- vglm(y ~ 1, poisson.points(ostat, 3, idensi = 1), pdata,
+            trace = TRUE, crit = "coef")
 head(fitted(fit))
 with(pdata, mean(y))
 coef(fit, matrix = TRUE)
diff --git a/man/poisson.pointsUC.Rd b/man/poisson.pointsUC.Rd
new file mode 100644
index 0000000..d847f5c
--- /dev/null
+++ b/man/poisson.pointsUC.Rd
@@ -0,0 +1,88 @@
+\name{PoissonPoints}
+\alias{PoissonPoints}
+\alias{dpois.points}
+%\alias{ppois.points}
+%\alias{qpois.points}
+\alias{rpois.points}
+\title{Poisson Points Distribution}
+\description{
+  Density
+% distribution function, quantile function
+% and random generation
+  for the
+  PoissonPoints distribution.
+
+}
+\usage{
+dpois.points(x, lambda, ostatistic, dimension = 2, log = FALSE)
+}
+%ppois.points(q, lambda, ostatistic, dimension = 2, log = FALSE)
+%qpois.points(p, lambda, ostatistic, dimension = 2, log = FALSE)
+%rpois.points(n, lambda, ostatistic, dimension = 2, log = FALSE)
+\arguments{
+  \item{x}{vector of quantiles.}
+  \item{lambda}{
+  the mean density of points.
+
+
+  }
+  \item{ostatistic}{
+  positive values, usually integers.
+
+
+  }
+  \item{dimension}{
+  Either 2 and/or 3.
+
+
+  }
+% \item{p}{vector of probabilities.}
+% \item{n}{number of observations.
+% Same as \code{\link[stats:Uniform]{runif}}.
+
+
+% }
+  \item{log}{
+  Logical; if TRUE, the logarithm is returned.
+
+
+  }
+}
+\value{
+  \code{dpois.points} gives the density. % and
+% \code{ppois.points} gives the distribution function,
+% \code{qpois.points} gives the quantile function, and
+% \code{rpois.points} generates random deviates.
+
+
+}
+%\author{ T. W. Yee }
+\details{
+  See \code{\link{poisson.points}}, the \pkg{VGAM} family function
+  for estimating the parameters,
+  for the formula of the probability density function and other details.
+
+
+}
+%\section{Warning }{
+
+
+%}
+\seealso{
+  \code{\link{poisson.points}},
+  \code{\link[stats:Poisson]{dpois}},
+  \code{\link{Maxwell}}.
+
+
+}
+\examples{
+\dontrun{ lambda <- 1; xvec <- seq(0, 2, length = 400)
+plot(xvec, dpois.points(xvec, lambda, ostat = 1, dimension = 2),
+     type = "l", las = 1, col = "blue",
+     sub = "First order statistic",
+     main = paste("PDF of PoissonPoints distribution with lambda = ",
+                  lambda, " and on the plane", sep = "")) }
+}
+\keyword{distribution}
+
+
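
A quick numerical sanity check of the density above (a sketch; it assumes
VGAM is attached so that dpois.points is available):

library(VGAM)
integrate(function(x) dpois.points(x, lambda = 1, ostatistic = 2, dimension = 2),
          lower = 0, upper = Inf)  # should be close to 1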
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index 0391be6..dab71c8 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -162,7 +162,9 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
     \code{\link{binomialff}},
     \code{\link{quasibinomialff}},
     \code{\link[stats]{poisson}},
-    \code{\link{poissonp}}.
+    \code{\link{poisson.points}},
+    \code{\link{ruge}},
+    \code{\link{V1}}.
 
 
 }
@@ -171,8 +173,9 @@ poissonff()
 
 set.seed(123)
 pdata <- data.frame(x2 = rnorm(nn <- 100))
-pdata <- transform(pdata, y1 = rpois(nn, exp(1 + x2)))
-(fit1 <- vglm(y1 ~ x2, family = poissonff, pdata))
+pdata <- transform(pdata, y1 = rpois(nn, exp(1 + x2)),
+                          y2 = rpois(nn, exp(1 + x2)))
+(fit1 <- vglm(cbind(y1, y2) ~ x2, family = poissonff, pdata))
 (fit2 <- vglm(y1 ~ x2, family = poissonff(bred = TRUE), pdata))
 coef(fit1, matrix = TRUE)
 coef(fit2, matrix = TRUE)
diff --git a/man/polf.Rd b/man/polf.Rd
index 5912f52..cc519db 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -22,7 +22,7 @@ polf(theta, cutpoint = NULL,
   The cutpoints should be non-negative integers.
   If \code{polf()} is used as the link function in
   \code{\link{cumulative}} then one should choose
-  \code{reverse = TRUE, parallel = TRUE, apply.parint = TRUE}.
+  \code{reverse = TRUE, parallel = TRUE}.
 
   }
   \item{inverse, deriv, short, tag}{
@@ -100,7 +100,7 @@ polf("p", cutpoint = 2, tag = TRUE)
 p <- seq(0.01, 0.99, by = 0.01)
 y <- polf(p, cutpoint = 2)
 y. <- polf(p, cutpoint = 2, deriv = 1)
-max(abs(polf(y, cutpoint = 2, inv = TRUE) - p)) # Should be 0
+max(abs(polf(y, cutpoint = 2, inv = TRUE) - p))  # Should be 0
 
 \dontrun{par(mfrow = c(2, 1), las = 1)
 plot(p, y, type = "l", col = "blue", main = "polf()")
@@ -132,7 +132,7 @@ pdata <- transform(pdata, cuty = Cut(y1, breaks = cutpoints))
 with(pdata, table(cuty) / sum(table(cuty)))
 fit <- vglm(cuty ~ x2 + x3, data = pdata, trace = TRUE,
             cumulative(reverse = TRUE,
-                       parallel = TRUE, apply.parint = TRUE,
+                       parallel = TRUE,
                        link = polf(cutpoint = cutpoints[2:3]),
                        mv = TRUE))
 head(depvar(fit))
diff --git a/man/polonoUC.Rd b/man/polonoUC.Rd
index 19fc88f..b4d57f3 100644
--- a/man/polonoUC.Rd
+++ b/man/polonoUC.Rd
@@ -6,7 +6,7 @@
 \alias{rpolono}
 \title{The Poisson Lognormal Distribution}
 \description{
-  Density, and random
+  Density, distribution function and random
   generation for the Poisson lognormal distribution.
 
 }
@@ -52,20 +52,24 @@ rpolono(n, meanlog = 0, sdlog = 1)
   Used to test whether the cumulative probabilities have
   effectively reached unity.
 
+
   }
   \item{...}{
   Arguments passed into 
   \code{\link[stats]{integrate}}.
 
+
   }
 }
 \value{
   \code{dpolono} gives the density,
   \code{ppolono} gives the distribution function, and
-% \code{qpolono} gives the quantile function, and
   \code{rpolono} generates random deviates.
 
 
+% \code{qpolono} gives the quantile function, and
+
+
 }
 \references{
   Bulmer, M. G. (1974)
@@ -110,7 +114,7 @@ rpolono(n, meanlog = 0, sdlog = 1)
 
 
   For the maximum likelihood estimation of the 2 parameters a \pkg{VGAM}
-  family function called \code{polono}, say, has not been written yet.
+  family function called \code{polono()}, say, has not been written yet.
 
 
 }
@@ -123,8 +127,8 @@ rpolono(n, meanlog = 0, sdlog = 1)
 }
 \examples{
 meanlog <- 0.5; sdlog <- 0.5; yy <- 0:19
-sum(proby <- dpolono(yy, m = meanlog, sd = sdlog)) # Should be 1
-max(abs(cumsum(proby) - ppolono(yy, m = meanlog, sd = sdlog))) # Should be 0
+sum(proby <- dpolono(yy, m = meanlog, sd = sdlog))  # Should be 1
+max(abs(cumsum(proby) - ppolono(yy, m = meanlog, sd = sdlog)))  # Should be 0
 
 \dontrun{ opar = par(no.readonly = TRUE)
 par(mfrow = c(2, 2))
@@ -132,11 +136,11 @@ plot(yy, proby, type = "h", col = "blue", ylab = "P[Y=y]", log = "",
      main = paste("Poisson lognormal(m = ", meanlog,
                   ", sdl = ", sdlog, ")", sep = ""))
 
-y <- 0:190 # More extreme values; use the approximation and plot on a log scale
-(sum(proby <- dpolono(y, m = meanlog, sd = sdlog, bigx = 100))) # Should be 1
+y <- 0:190  # More extreme values; use the approximation and plot on a log scale
+(sum(proby <- dpolono(y, m = meanlog, sd = sdlog, bigx = 100)))  # Should be 1
 plot(y, proby, type = "h", col = "blue", ylab = "P[Y=y] (log)", log = "y",
      main = paste("Poisson lognormal(m = ", meanlog,
-                  ", sdl = ", sdlog, ")", sep = "")) # Note the kink at bigx
+                  ", sdl = ", sdlog, ")", sep = ""))  # Note the kink at bigx
 
 # Random number generation
 table(y <- rpolono(n = 1000, m = meanlog, sd = sdlog))
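
Because the Poisson lognormal arises as a Poisson density mixed over a
lognormally distributed mean, a small base-R cross-check of the probabilities
above is possible; the result should be close to dpolono(y, meanlog, sdlog):

meanlog <- 0.5; sdlog <- 0.5; y <- 3
# P(Y = y) = integral over mu of dpois(y, mu) * dlnorm(mu, meanlog, sdlog)
integrate(function(mu) dpois(y, mu) * dlnorm(mu, meanlog, sdlog),
          lower = 0, upper = Inf)$value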
diff --git a/man/posbernUC.Rd b/man/posbernUC.Rd
index 76b7a90..6e757e2 100644
--- a/man/posbernUC.Rd
+++ b/man/posbernUC.Rd
@@ -12,7 +12,7 @@
 }
 \usage{
 rposbern(n, nTimePts = 5, pvars = length(xcoeff), xcoeff = c(-2, 1, 2),
-         cap.effect = -1, link = "logit", is.popn = FALSE, earg.link = FALSE)
+         cap.effect = 1, is.popn = FALSE, link = "logit", earg.link = FALSE)
 dposbern(x, prob, prob0 = prob, log = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -40,13 +40,13 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 
   \item{is.popn}{
   Logical.
-  If \code{TRUE} then argument \code{n} is the population
-  size and what is returned may have substantially less
-  rows than \code{n}.
+  If \code{TRUE} then argument \code{n} is the population size
+  and what is returned may have substantially fewer rows than \code{n}.
   That is, if an animal has at least one one in its sequence then
-  it is returned, else that animal is not returned.
-  Put in other words, only animals captured at least once are
-  returned in the sample.
+  it is returned; otherwise that animal is not returned because it
+  was never captured.
+% Put in other words, only animals captured at least once are
+% returned in the sample.
 
 
   }
@@ -112,24 +112,32 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 
 }
 \details{
-  The form of the conditional likelihood is
-  described in \code{\link{posbernoulli.b}}.
-
-
+  The form of the conditional likelihood is described in
+  \code{\link{posbernoulli.b}} and/or
+  \code{\link{posbernoulli.t}} and/or
+  \code{\link{posbernoulli.tb}}.
   The denominator is equally shared among the elements of
   the matrix \code{x}.
 
 
 }
 \value{
-  This function returns a data frame with some attributes.
+  \code{rposbern} returns a data frame with some attributes.
   The function generates random deviates
   (\eqn{\tau} columns labelled \code{y1}, \code{y2}, \ldots)
   for the response.
   Some indicator columns are also included
-  (those starting with \code{ch} are for previous capture history,
-  and those starting with \code{z} are zero),
-  and these are useful for the \code{xij} argument.
+  (those starting with \code{ch} are for previous capture history).
+  The default setting corresponds to an \eqn{M_{bh}} model that
+  has a single trap-happy effect.
+  Covariates \code{x1}, \code{x2}, \ldots have the same
+  effect on capture/recapture at every sampling occasion
+  (see the argument \code{parallel.t} in, e.g.,
+  \code{\link{posbernoulli.tb}}).
+
+
+
+% and these are useful for the \code{xij} argument.
 
 
   The function \code{dposbern} gives the density,
@@ -139,30 +147,37 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 %\references{ }
 \author{ Thomas W. Yee. }
 \note{ 
-  The \code{r}-type function is experimental and does not follow the
+  The \code{r}-type function is experimental only and does not follow the
   usual conventions of \code{r}-type R functions.
-  The \code{d}-type function is more conventional.
+  It may change a lot in the future.
+  The \code{d}-type function is more conventional and is less
+  likely to change.
 
 
 }
 
 \seealso{ 
 % \code{\link{huggins91}},
+  \code{\link{posbernoulli.tb}},
   \code{\link{posbernoulli.b}},
-  \code{\link{posbernoulli.t}},
-  \code{\link{posbernoulli.tb}}.
+  \code{\link{posbernoulli.t}}.
 
 
 }
 \examples{
-set.seed(123); rposbern(n = 10)
-attributes(rposbern(n = 10))
+rposbern(n = 10)
+attributes(pdata <- rposbern(n = 100))
+M.bh <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + x3, posbernoulli.b(I2 = FALSE),
+             data = pdata, trace = TRUE)
+constraints(M.bh)
+summary(M.bh)
 }
 \keyword{distribution} 
 \keyword{datagen} 
 
 
 %double.ch = FALSE,
+% and those starting with \code{z} are zero.
 
 
 
diff --git a/man/posbernoulli.b.Rd b/man/posbernoulli.b.Rd
index 278ebc6..e3fc2d7 100644
--- a/man/posbernoulli.b.Rd
+++ b/man/posbernoulli.b.Rd
@@ -4,7 +4,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Positive Bernoulli Family Function with Behavioural Effects }
 \description{
-  Fits a GLM-like model to multiple Bernoulli responses where
+  Fits a GLM-/GAM-like model to multiple Bernoulli responses where
   each row in the capture history matrix response has at least one success
   (capture).
   Capture history behavioural effects are accommodated.
@@ -12,19 +12,25 @@
 
 }
 \usage{
-posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
-               icap.prob = NULL, irecap.prob = NULL)
+posbernoulli.b(link = "logit", drop.b = FALSE ~ 1,
+               type.fitted = c("likelihood.cond", "mean.uncond"), I2 = FALSE,
+               ipcapture = NULL, iprecapture = NULL,
+               p.small = 1e-4, no.warning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
+% apply.parint = FALSE,
 \arguments{
-  \item{link, parallel.b, apply.parint, icap.prob, irecap.prob}{
+  \item{link, drop.b, ipcapture, iprecapture}{
 
   See \code{\link{CommonVGAMffArguments}} for information about
   these arguments.
+  By default the parallelism assumption does not apply to the intercept.
   With an intercept-only model
-  setting \code{parallel.b = TRUE} results in the \eqn{M_0} model;
-  it just deletes the 2nd column of the constraint matrix corresponding
-  to the intercept.
+  setting \code{drop.b = TRUE ~ 1} results in the \eqn{M_0}/\eqn{M_h} model.
+
+
+% it just deletes the 2nd column of the constraint matrix corresponding
+% to the intercept.
 
 
 % The default value of \code{zero} means that the behavioural
@@ -38,31 +44,52 @@ posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
 
 
   }
-}
-\details{
-  This model
-  (commonly known as \eqn{M_b} in the capture--recapture literature)
-  operates on a capture history matrix response of 0s and 1s.
-  See \code{\link{posbernoulli.t}} for details.
+  \item{I2}{
+  Logical.
+  This argument is used for terms that are not parallel.
+  If \code{TRUE} then the constraint matrix \code{diag(2)}
+  (the general default constraint matrix in \pkg{VGAM}) is used,
+  else \code{cbind(0:1, 1)}. The latter means the first element/column
+  corresponds to the behavioural effect.
+  Consequently it and its standard error etc. can be accessed
+  directly without subtracting two quantities.
 
 
-  Each sampling occasion has the same probability and this is modelled here.
-  But once an animal is captured, it is marked so that its future
-  capture history can be recorded. The effect of the recapture
-  probability is modelled through a second linear/additive predictor,
-  and this usually differs from the first linear/additive predictor
-  by just a different intercept (because \code{parallel.b = TRUE}
-  but the parallelism does not apply to the intercept).
+  }
+  \item{type.fitted}{
+  Details at \code{\link{posbernoulli.tb}}.
 
 
+  }
+  \item{p.small, no.warning}{
+  See \code{\link{posbernoulli.t}}.
+
+
+  }
+
+
+}
+\details{
+  This model
+  (commonly known as \eqn{M_b}/\eqn{M_{bh}} in the
+  capture--recapture literature)
+  operates on a capture history matrix response of 0s and 1s
+  (\eqn{n \times \tau}{n x tau}).
+  See \code{\link{posbernoulli.t}} for details,
+  e.g., common assumptions with other models.
+  Once an animal is captured for the first time,
+  it is marked/tagged so that its future
+  capture history can be recorded. The effect of the recapture
+  probability is modelled through a second linear/additive predictor.
   It is well-known that some species of animals are affected by capture,
   e.g., trap-shy or trap-happy. This \pkg{VGAM} family function
   \emph{does} allow the capture history to be modelled via such
   behavioural effects.
+  So does \code{\link{posbernoulli.tb}} but \code{\link{posbernoulli.t}} cannot.
+
 
 
-  See \code{\link{posbernoulli.t}} for other information,
-  e.g., common assumptions.
+% If \code{drop.b = TRUE} the parallelism does not apply to the intercept.
 
 
   The number of linear/additive predictors is \eqn{M = 2},
@@ -72,14 +99,21 @@ posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
         \eqn{p_r} is the probability of recapture.
   The fitted value returned is of the same dimension as
   the response matrix, and depends on the capture history:
-  prior to being first captured, it is \code{cap.prob}.
-  Afterwards, it is \code{recap.prob}.
+  prior to being first captured, it is \code{pcapture}.
+  Afterwards, it is \code{precapture}.
 
 
-  By default, the constraint matrix for the intercept term
-  is set up so that \eqn{p_r} differs from \eqn{p_c} by a
-  simple binary effect. This allows an estimate of the
-  trap-happy/trap-shy effect.
+  By default, the constraint matrices for the intercept term
+  and the other covariates are set up so that \eqn{p_r}
+  differs from \eqn{p_c} by a simple binary effect,
+  on a logit scale.
+  However, this difference (the behavioural effect) is more
+  directly estimated by having \code{I2 = FALSE}.
+  Then it allows an estimate of the trap-happy/trap-shy effect;
+  these are positive/negative values respectively.
+  If \code{I2 = FALSE} then
+  the (nonstandard) constraint matrix used is \code{cbind(0:1, 1)},
+  meaning the first element can be interpreted as the behavioural effect.
 
 
 }
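
A tiny sketch of why the cbind(0:1, 1) constraint matrix described above lets
the first coefficient be read off directly as the behavioural effect (the
coefficient values below are made up; the rows are taken to be
eta1 = logit p_c and eta2 = logit p_r):

CM <- cbind(0:1, 1)
beta <- c(0.7, -1.2)   # hypothetical (behavioural effect, capture baseline)
eta <- CM %*% beta
eta[2] - eta[1]        # equals beta[1], the behavioural effect on the logit scale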
@@ -91,12 +125,12 @@ posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
 
 }
 
-\section{Warning }{
-
-  See \code{\link{posbernoulli.tb}}.
-
-
-}
+%\section{Warning }{
+%
+%  See \code{\link{posbernoulli.t}}.
+%
+%
+%}
 
 \references{
 
@@ -108,26 +142,33 @@ posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
 \author{ Thomas W. Yee. }
 
 \note{
-  When the number of sampling occasions is large
-  the computation becomes increasingly slower.
-  Monitor convergence by setting \code{trace = TRUE}.
-
-
   The dependent variable is \emph{not} scaled to row proportions.
   This is the same as \code{\link{posbernoulli.t}}
+  and \code{\link{posbernoulli.tb}}
   but different from \code{\link{posbinomial}}
   and \code{\link{binomialff}}.
 
 
+% Monitor convergence by setting \code{trace = TRUE}.
+
+
+% To  fit \eqn{M_{tb}}{M_tb} and \eqn{M_{tbh}}{M_tbh}
+% use \code{\link{posbernoulli.t}} with the \code{xij}
+% argument of \code{\link{vglm.control}}.
+
+
+
 }
 
 \seealso{ 
-  \code{\link{posbernoulli.t}} (including estimating \eqn{N}),
-  \code{\link{posbernoulli.tb}},
-  \code{\link{Perom}},
+  \code{\link{posbernoulli.t}} and
+  \code{\link{posbernoulli.tb}} (including estimating \eqn{N}),
+  \code{\link{deermice}},
   \code{\link{dposbern}},
   \code{\link{rposbern}},
-  \code{\link{posbinomial}}.
+  \code{\link{posbinomial}},
+  \code{\link{aux.posbernoulli.t}},
+  \code{\link{prinia}}.
 % \code{\link{huggins91}}.
 % \code{\link{vglm.control}} for \code{xij},
 
@@ -135,74 +176,77 @@ posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
 }
 
 \examples{
-# Perom data ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+# deermice data ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
 
 # Fit a M_b model
-M_b <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1,
-            data = Perom, posbernoulli.b, trace = TRUE)
-coef(M_b, matrix = TRUE)
-constraints(M_b, matrix = TRUE)
-summary(M_b)
+M.b <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1,
+            posbernoulli.b, data = deermice, trace = TRUE)
+coef(M.b)["(Intercept):1"]  # Behavioural effect on the logit scale
+coef(M.b, matrix = TRUE)
+constraints(M.b, matrix = TRUE)
+summary(M.b, presid = FALSE)
 
 # Fit a M_bh model
-M_bh <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
-             posbernoulli.b, trace = TRUE, data = Perom)
-coef(M_bh, matrix = TRUE)
-constraints(M_bh)  # (2,2) element of "(Intercept)" is the behavioural effect
-summary(M_bh)  # Estimate of behavioural effect is positive (trap-happy)
+M.bh <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
+             posbernoulli.b, data = deermice, trace = TRUE)
+coef(M.bh, matrix = TRUE)
+coef(M.bh)["(Intercept):1"]  # Behavioural effect on the logit scale
+constraints(M.bh)  # (2,1) element of "(Intercept)" is for the behavioural effect
+summary(M.bh, presid = FALSE)  # Significant positive (trap-happy) behavioural effect
+# Approx. 95 percent confidence interval for the behavioural effect:
+SE.M.bh <- coef(summary(M.bh))["(Intercept):1", "Std. Error"]
+coef(M.bh)["(Intercept):1"] + c(-1, 1) * 1.96 * SE.M.bh
 
 # Fit a M_h model
-M_h <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
-            data = Perom,
-            posbernoulli.t(parallel.t = TRUE), trace = TRUE)
-coef(M_h, matrix = TRUE)
-constraints(M_h, matrix = TRUE)
-summary(M_h)
+M.h <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
+            posbernoulli.b(drop.b = TRUE ~ sex + weight),
+            data = deermice, trace = TRUE)
+coef(M.h, matrix = TRUE)
+constraints(M.h, matrix = TRUE)
+summary(M.h, presid = FALSE)
 
 # Fit a M_0 model
-M_0 <- vglm(cbind(    y1 + y2 + y3 + y4 + y5 + y6,
+M.0 <- vglm(cbind(    y1 + y2 + y3 + y4 + y5 + y6,
                   6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1,
-            data = Perom, posbinomial, trace = TRUE)
-coef(M_0, matrix = TRUE)
-constraints(M_0, matrix = TRUE)
-summary(M_0)
+            posbinomial, data = deermice, trace = TRUE)
+coef(M.0, matrix = TRUE)
+summary(M.0, presid = FALSE)
 
 
 # Simulated data set ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-set.seed(123); nTimePts <- 5; N <- 1000
-hdata <- rposbern(n = N, nTimePts = nTimePts, pvars = 2,
-                  is.popn = TRUE)  # N is the popn size
-nrow(hdata)  # Less than N
-# The truth: xcoeffs are c(-2, 1, 2) and cap.effect = -1
-
-model1 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
-               posbernoulli.b, data = hdata, trace = TRUE)
-coef(model1)
-coef(model1, matrix = TRUE)
-constraints(model1, matrix = TRUE)
-summary(model1)
-head(depvar(model1))    # Capture history response matrix
-head(model1@extra$cap.hist1)  # Info on its capture history
-head(model1@extra$cap1)  # When it was first captured
-head(fitted(model1))     # Depends on capture history
-(trap.effect <- coef(model1)["(Intercept):2"])  # Should be -1
-head(model.matrix(model1, type = "vlm"), 21)
-head(hdata)
-summary(hdata)
-dim(depvar(model1))
-vcov(model1)
-
-model1@extra$N.hat     # Estimate of the population size; should be about N
-model1@extra$SE.N.hat  # SE of the estimate of the population size
+set.seed(123); nTimePts <- 5; N <- 1000  # N is the popn size
+pdata <- rposbern(n = N, nTimePts = nTimePts, pvars = 2, is.popn = TRUE)
+nrow(pdata)  # Less than N (because some animals were never captured)
+# The truth: xcoeffs are c(-2, 1, 2) and cap.effect = +1
+
+M.bh.2 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
+               posbernoulli.b, data = pdata, trace = TRUE)
+coef(M.bh.2)
+coef(M.bh.2, matrix = TRUE)
+constraints(M.bh.2, matrix = TRUE)
+summary(M.bh.2, presid = FALSE)
+head(depvar(M.bh.2))    # Capture history response matrix
+head(M.bh.2@extra$cap.hist1)  # Info on its capture history
+head(M.bh.2@extra$cap1)  # When it was first captured
+head(fitted(M.bh.2))     # Depends on capture history
+(trap.effect <- coef(M.bh.2)["(Intercept):1"])  # Should be +1
+head(model.matrix(M.bh.2, type = "vlm"), 21)
+head(pdata)
+summary(pdata)
+dim(depvar(M.bh.2))
+vcov(M.bh.2)
+
+M.bh.2@extra$N.hat     # Estimate of the population size; should be about N
+M.bh.2@extra$SE.N.hat  # SE of the estimate of the population size
 # An approximate 95 percent confidence interval:
-round(model1@extra$N.hat + c(-1, 1) * 1.96 *  model1@extra$SE.N.hat, 1)
+round(M.bh.2@extra$N.hat + c(-1, 1) * 1.96 *  M.bh.2@extra$SE.N.hat, 1)
 }
 \keyword{models}
 \keyword{regression}
 
 %# Compare the models using a LRT
-%lrtest(M_bh, M_h)
-%(wald.pvalue <- 2 * pnorm(abs(summary(M_bh)@coef3["(Intercept):2", "z value"]),
+%lrtest(M.bh, M.h)
+%(wald.pvalue <- 2 * pnorm(abs(summary(M.bh)@coef3["(Intercept):2", "z value"]),
 %                          lower.tail = FALSE))  # Two-sided pvalue
 
 
diff --git a/man/posbernoulli.t.Rd b/man/posbernoulli.t.Rd
index a6cba19..2e6aef9 100644
--- a/man/posbernoulli.t.Rd
+++ b/man/posbernoulli.t.Rd
@@ -4,81 +4,114 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Positive Bernoulli Family Function with Time Effects }
 \description{
-  Fits a GLM-like model to multiple Bernoulli responses where
+  Fits a GLM/GAM-like model to multiple Bernoulli responses where
   each row in the capture history matrix response has at least one success
   (capture).
   Sampling occasion effects are accommodated.
 
 
+% Behavioural effects are accommodated via the \code{xij} argument
+% of \code{\link{vglm.control}}.
+
+
 }
 \usage{
-posbernoulli.t(link = "logit", parallel.t = FALSE, apply.parint = TRUE,
-               iprob = NULL)
+posbernoulli.t(link = "logit", parallel.t = FALSE ~ 1, iprob = NULL,
+               p.small = 1e-4, no.warning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
+%apply.parint = FALSE,
 \arguments{
-  \item{link, iprob, parallel.t, apply.parint}{
+  \item{link, iprob, parallel.t}{
   See \code{\link{CommonVGAMffArguments}} for information.
-  Setting \code{parallel.t = TRUE} results in the \eqn{M_0} model.
+  By default, the parallelism assumption does not apply to the intercept.
+  Setting \code{parallel.t = FALSE ~ -1},
+  or equivalently \code{parallel.t = FALSE ~ 0},
+  results in the \eqn{M_0}/\eqn{M_h} model.
 
 
   }
-}
-\details{
-  This model
-  (commonly known as \eqn{M_t} in the capture--recapture literature)
-  operates on a capture history matrix response of 0s and 1s.
-  Each column is a sampling occasion where animals are potentially
-  captured (e.g., a field trip), and each row is an individual animal.
-  Capture is a 1, else a 0.
-  No removal of animals from the population is made (closed population),
-  e.g., no immigration or emigration.
-  Each row of the response matrix has at least one capture.
-
-
-  A conditional likelihood is maximized using Fisher scoring.
-  Each sampling occasion has a separate probability that is modelled here.
-  The probabilities can be constrained to be equal by setting
-  \code{parallel.t = TRUE};
-  then the results are effectively the same as \code{\link{posbinomial}}
-  except the binomial constants are not included in the log-likelihood.
-  If \code{parallel.t = FALSE} then each column should have
-  at least one 1 and at least one 0.
+  \item{p.small, no.warning}{
+  A small probability value used to give a warning for the
+  Horvitz--Thompson estimator.
+  Any estimated probability value less than \code{p.small} will
+  result in a warning, however, setting \code{no.warning = TRUE}
+  will suppress this warning if it occurs.
+  This is because the Horvitz--Thompson estimator is the sum of the
+  reciprocals of such probabilities; therefore any probability that
+  is too close to 0 will result in an unstable estimate.
 
 
-  It is well-known that some species of animals are affected by capture,
-  e.g., trap-shy or trap-happy. This \pkg{VGAM} family function
-  does \emph{not} allow any behavioral effect to be modelled
-  (\code{\link{posbernoulli.b}} does).
-  However, it \emph{does} allow covariates that are specific to
-  each sampling occasion, e.g., through the \code{xij} argument.
-  Ignoring capture history effects would mean
-  \code{\link{posbinomial}} could be used by aggregating
-  over the sampling occasions.
-
-
-  If there are no covariates that are specific to
-  each occasion then the response matrix can be summed over
-  the columns and \code{\link{posbinomial}} could be used by aggregating
-  over the sampling occasions.
-
 
+  }
+}
+\details{
+  These models (commonly known as \eqn{M_t} or \eqn{M_{th}}
+  (the absence of a subscript \eqn{h} means an intercept-only model)
+  in the capture--recapture literature) operate on a capture
+  history matrix response of 0s and 1s (\eqn{n \times \tau}{n x tau}).
+  Each column is a
+  sampling occasion where animals are potentially captured
+  (e.g., a field trip), and each row is an individual animal.
+  Capture is a 1, else a 0.  No removal of animals from
+  the population is made (closed population), e.g., no
+  immigration or emigration.  Each row of the response
+  matrix has at least one capture.
+  Once an animal is captured for the first time,
+  it is marked/tagged so that its future capture history can be recorded.
+  Then it is released immediately back into the population to remix.
+  It is released immediately after each recapture too.
   It is assumed that the animals are independent and
   that, for a given animal, each sampling occasion is independent.
   And animals do not lose their marks/tags, and
   all marks/tags are correctly recorded.
 
 
+
   The number of linear/additive predictors is equal to the number
   of sampling occasions, i.e., \eqn{M = \tau}, say.
   The default link functions
   are \eqn{(logit \,p_{1},\ldots,logit \,p_{\tau})^T}{(logit p_(1),\ldots,logit p_(tau))^T}
-  where \eqn{p} denotes the probability.
+  where each \eqn{p_{j}} denotes the probability of capture at
+  time point \eqn{j}.
+  The fitted value returned is a matrix of probabilities
+  of the same dimension as the response matrix.
+
 % Thus \eqn{M = \tau}{M = tau}.
 
 
-  The fitted value returned is of the same dimension as the response matrix.
 
+  A conditional likelihood is maximized here using Fisher scoring.
+  Each sampling occasion has a separate probability that
+  is modelled here. The probabilities can be constrained
+  to be equal by setting \code{parallel.t = FALSE ~ 0};
+  then the results are effectively the same as
+  \code{\link{posbinomial}} except the binomial constants are
+  not included in the log-likelihood.
+  If \code{parallel.t = TRUE ~ 0} then each column should have
+  at least one 1 and at least one 0.
+
+
+  It is well-known that some species of animals are affected
+  by capture, e.g., trap-shy or trap-happy. This \pkg{VGAM}
+  family function does \emph{not} allow any behavioural effect to be
+  modelled (\code{\link{posbernoulli.b}}
+  and \code{\link{posbernoulli.tb}} do) because the
+  denominator of the likelihood function must be free of
+  behavioural effects.
+
+
+% via covariates that are specific to each sampling occasion,
+% e.g., through the \code{xij} argument.
+% Ignoring capture history effects would mean
+% \code{\link{posbinomial}} could be used by aggregating over
+% the sampling occasions.
+
+
+% If there are no covariates that are specific to each occasion
+% then the response matrix can be summed over the columns and
+% \code{\link{posbinomial}} could be used by aggregating over
+% the sampling occasions.
 
 
 }
@@ -90,7 +123,8 @@ posbernoulli.t(link = "logit", parallel.t = FALSE, apply.parint = TRUE,
 
   Upon fitting the \code{extra} slot has a (list) component
   called \code{N.hat}
-  which is a point estimate of the population size \eqn{N}.
+  which is a point estimate of the population size \eqn{N}
+  (it is the Horvitz-Thompson (1952) estimator).
   And there is a component called \code{SE.N.hat}
   containing its standard error.
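+
+% A brief sketch (mirroring the examples below) of extracting these
+% components from a fitted model called fit, and forming an approximate
+% 95 percent Wald interval:
+% fit@extra$N.hat
+% fit@extra$SE.N.hat
+% fit@extra$N.hat + c(-1, 1) * 1.96 * fit@extra$SE.N.hat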
 
@@ -123,13 +157,27 @@ capture--recapture experiments.
 \author{ Thomas W. Yee. }
 
 \note{
+% Models \eqn{M_{tbh}}{M_tbh} can be fitted using the
+% \code{xij} argument (see \code{\link{vglm.control}})
+% to input the behavioural effect indicator
+% variables.  Rather than manually setting these
+% up, they may be more conveniently obtained by
+% \code{\link{aux.posbernoulli.t}}.
+% See the example below.
+
+
   The \code{weights} argument of \code{\link{vglm}} need not be
   assigned, and the default is just a matrix of ones.
 
 
-  Numerical problems are more likely to occur if \code{parallel.t = FALSE}.
-  Each sampling occasion may need at least one success
+  Fewer numerical problems are likely to occur
+  for \code{parallel.t = TRUE}.
+  Data-wise, each sampling occasion may need at least one success
   (capture) and one failure.
+  These conditions on the data are less stringent when
+  \code{parallel.t = TRUE}, and likewise when the parallelism
+  is also applied to the intercept.
+% for \code{apply.parint = TRUE}.
 
 
   The response matrix is returned unchanged;
@@ -139,8 +187,19 @@ capture--recapture experiments.
 
 
 
-Data-wise, at each sampling occasion, the \eqn{M_t} model requires at least
-one first capture and at least one noncapture.
+  Using \code{AIC()} or \code{BIC()} to compare
+  \code{\link{posbernoulli.t}},
+  \code{\link{posbernoulli.b}},
+  \code{\link{posbernoulli.tb}}
+  models with a
+  \code{\link{posbinomial}}
+  model requires \code{posbinomial(omit.constant = TRUE)} 
+  because one needs to remove the normalizing constant from the
+  log-likelihood function.
+  See \code{\link{posbinomial}} for an example.
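+
+% A hedged sketch (not from the upstream examples) of such a comparison,
+% assuming the M.t fit of the examples below and an M.0 fit as in the
+% posbinomial() help file:
+% M.0 <- vglm(cbind(    y1 + y2 + y3 + y4 + y5 + y6,
+%                   6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1,
+%             posbinomial(omit.constant = TRUE), data = deermice)
+% sort(c(M.0 = AIC(M.0), M.t = AIC(M.t)))  # Comparable, since omit.constant = TRUE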
+
+
+
 
 % If not all of the \eqn{2^{\tau}-1}{2^(tau) - 1} combinations of
 % the response matrix are not present then it pays to add
@@ -154,63 +213,93 @@ one first capture and at least one noncapture.
 
 }
 
-\section{Warning }{
-
-  See \code{\link{posbernoulli.tb}}.
-
-
-}
+%\section{Warning }{
+%
+%  See \code{\link{posbernoulli.tb}}.
+%
+%
+%}
 
 \seealso{ 
   \code{\link{posbernoulli.b}},
   \code{\link{posbernoulli.tb}},
-  \code{\link{Perom}},
+  \code{\link{deermice}},
+  \code{\link{Huggins89table1}},
   \code{\link{Huggins89.t1}},
-  \code{\link{vglm.control}} for \code{xij},
   \code{\link{dposbern}},
   \code{\link{rposbern}},
-  \code{\link{posbinomial}}.
+  \code{\link{posbinomial}},
+  \code{\link{AICvlm}},
+  \code{\link{BICvlm}},
+  \code{\link{prinia}}.
+% \code{\link{aux.posbernoulli.t}},
+% \code{\link{vglm.control}} for \code{xij},
 % \code{\link{huggins91}}.
 
 
 }
 
 \examples{
-M_t <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1, trace = TRUE,
-            posbernoulli.t, data = Perom)  # Has parallel.t = FALSE
-coef(M_t, matrix = TRUE)
-summary(M_t)
-
-
-M_th.1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
-              posbernoulli.t, data = Perom)  # Has parallel.t = FALSE
-summary(M_th.1)
-head(depvar(M_th.1))  # Response capture history matrix
-dim(depvar(M_th.1))
-
-M_h.2 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
-              posbernoulli.t(parallel.t = TRUE), data = Perom)
-lrtest(M_th.1, M_h.2)  # Test the parallelism assumption
-coef(M_h.2)
-coef(M_h.2, matrix = TRUE)
-constraints(M_h.2, matrix = TRUE)
-summary(M_h.2)
-head(model.matrix(M_h.2, type = "vlm"), 21)
-
-M_h.2 at extra$N.hat     # Estimate of the population size; should be about N
-M_h.2 at extra$SE.N.hat  # SE of the estimate of the population size
+M.t <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1, posbernoulli.t,
+            data = deermice, trace = TRUE)
+coef(M.t, matrix = TRUE)
+constraints(M.t, matrix = TRUE)
+summary(M.t, presid = FALSE)
+
+M.h.1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
+              posbernoulli.t(parallel.t = FALSE ~ -1), data = deermice)
+coef(M.h.1, matrix = TRUE)
+constraints(M.h.1)
+summary(M.h.1, presid = FALSE)
+head(depvar(M.h.1))  # Response capture history matrix
+dim(depvar(M.h.1))
+
+M.th.2 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
+               posbernoulli.t(parallel.t = FALSE), data = deermice)
+lrtest(M.h.1, M.th.2)  # Test the parallelism assumption wrt sex and weight
+coef(M.th.2)
+coef(M.th.2, matrix = TRUE)
+constraints(M.th.2)
+summary(M.th.2, presid = FALSE)
+head(model.matrix(M.th.2, type = "vlm"), 21)
+
+M.th.2@extra$N.hat     # Estimate of the population size; should be about N
+M.th.2@extra$SE.N.hat  # SE of the estimate of the population size
 # An approximate 95 percent confidence interval:
-round(M_h.2 at extra$N.hat + c(-1, 1) * 1.96 *  M_h.2 at extra$SE.N.hat, 1)
+round(M.th.2@extra$N.hat + c(-1, 1) * 1.96 * M.th.2@extra$SE.N.hat, 1)
 
-
-# Fit (effectively) the parallel model using posbinomial()
-Perom <- transform(Perom, ysum = y1 + y2 + y3 + y4 + y5 + y6,
+# Fit an M_h model, effectively the parallel M_t model, using posbinomial()
+deermice <- transform(deermice, ysum = y1 + y2 + y3 + y4 + y5 + y6,
                           tau  = 6)
-M_h.3 <- vglm(cbind(ysum, tau - ysum) ~ sex + weight,
-              posbinomial, data = Perom, trace = TRUE)
-max(abs(coef(M_h.2) - coef(M_h.3)))  # Should be zero
-logLik(M_h.3) - logLik(M_h.2)  # Difference is due to the binomial constants
+M.h.3 <- vglm(cbind(ysum, tau - ysum) ~ sex + weight,
+              posbinomial(omit.constant = TRUE), data = deermice, trace = TRUE)
+max(abs(coef(M.h.1) - coef(M.h.3)))  # Should be zero
+logLik(M.h.3) - logLik(M.h.1)  # Difference is due to the binomial constants
 }
 \keyword{models}
 \keyword{regression}
 
+
+
+%# Fit a M_tbh model:
+%pdata <- aux.posbernoulli.t(with(deermice, cbind(y1, y2, y3, y4, y5, y6)))  # Convenient
+%deermice <- data.frame(deermice, bei = 0, pdata$cap.hist1)  # Put all into 1 dataframe
+%head(deermice)  # Augmented with behavioural effect indicator variables
+%M.tbh.1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight + age + bei,
+%                posbernoulli.t(parallel.t = TRUE ~ sex + weight + age + bei - 1),
+%                data = deermice, trace = TRUE,
+%                xij = list(bei ~ bei1 + bei2 + bei3 + bei4 + bei5 + bei6 - 1),
+%                form2 = ~        bei1 + bei2 + bei3 + bei4 + bei5 + bei6 +
+%                           sex + weight + age + bei)
+%coef(M.tbh.1, matrix = TRUE)
+%head(deermice, 3)
+%head(model.matrix(M.tbh.1, type = "vlm"), 20)
+%summary(M.tbh.1, presid = FALSE)
+%head(depvar(M.tbh.1))  # Response capture history matrix
+%dim(depvar(M.tbh.1))
+
+
+
+
+
+
diff --git a/man/posbernoulli.tb.Rd b/man/posbernoulli.tb.Rd
index 512291e..39099e5 100644
--- a/man/posbernoulli.tb.Rd
+++ b/man/posbernoulli.tb.Rd
@@ -2,73 +2,128 @@
 %\alias{posbernoulli}
 \alias{posbernoulli.tb}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Positive Bernoulli Family Function with Time and Behavioural Effects (experimental) }
+\title{ Positive Bernoulli Family Function with Time and Behavioural Effects }
 \description{
-  Fits a GLM-like model to multiple
-  (currently only two or three)
+  Fits a GLM/GAM-like model to multiple
   Bernoulli responses where
   each row in the capture history matrix response has at least one success
   (capture).
   Sampling occasion effects and behavioural effects are accommodated.
-  However, this function only handles two and three sampling occasions.
 
 
 }
 \usage{
-posbernoulli.tb(link = "logit", parallel.t = FALSE, parallel.b = FALSE,
-                apply.parint = FALSE, imethod = 1, iprob = NULL,
-                dconst = 0.1, dpower = -2)
+posbernoulli.tb(link = "logit",
+                parallel.t = FALSE ~ 1,
+                parallel.b = FALSE ~ 0,
+                drop.b     = FALSE ~ 1,
+                type.fitted = c("likelihood.cond", "mean.uncond"),
+                imethod = 1, iprob = NULL,
+                p.small = 1e-4, no.warning = FALSE,
+                ridge.constant = 0.01, ridge.power = -4)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link, imethod, iprob, parallel.t, parallel.b, apply.parint}{
+  \item{link, imethod, iprob}{
   See \code{\link{CommonVGAMffArguments}} for information.
-  But \code{parallel.t} and \code{parallel.b} must each be
-  logicals only.
+
+  }
+
+
+
+  \item{parallel.t, parallel.b, drop.b}{
+  A logical, or formula with a logical as the response.
+  See \code{\link{CommonVGAMffArguments}} for information.
+  The \code{parallel.}-type arguments
+  specify whether the constraint matrices have a parallelism
+  assumption for the temporal and behavioural effects.
   Argument \code{parallel.t} means parallel with respect to time, and
   matches the same argument name in \code{\link{posbernoulli.t}}.
 
 
-
   Suppose the model is intercept-only.
-  Setting \code{parallel.t = TRUE} results in the \eqn{M_b} model.
-  Setting \code{parallel.b = TRUE} results in the \eqn{M_t} model.
-  Setting \code{parallel.t = TRUE} and
-  setting \code{parallel.b = TRUE} results in the \eqn{M_0} model.
+  Setting \code{parallel.t = FALSE ~ 0} results in the \eqn{M_b} model.
+  Setting \code{drop.b = FALSE ~ 0} results in the \eqn{M_t} model
+  because it drops columns off the constraint matrices corresponding
+  to any behavioural effect.
+  Setting \code{parallel.t = FALSE ~ 0} and
+  setting \code{parallel.b = FALSE ~ 0} results in the \eqn{M_b} model.
+  Setting \code{parallel.t = FALSE ~ 0},
+          \code{parallel.b = FALSE ~ 0} and
+          \code{drop.b = FALSE ~ 0} results in the \eqn{M_0} model.
   Note the default for \code{parallel.t} and \code{parallel.b}
-  (both \code{FALSE})
-  may be unsuitable for most data sets which have a large \eqn{\tau}
-  because of the large number of parameters; it can be too flexible.
-  Note that adding covariates will result in a \eqn{M_{tbh}} model.
+  may be unsuitable for data sets which have a large \eqn{\tau}
+  because of the large number of parameters; it might be too flexible.
+  If it is desired to have the behavioural effect apply to some of
+  the other covariates then set \code{drop.b = TRUE ~ 0}.
+
+
+  The default model has a different intercept for each
+  sampling occasion, a time-parallelism assumption for all other covariates,
+  and a dummy variable representing a
+  single behavioural effect (also in the intercept).
+
+
+  The most flexible model is to set
+  \code{parallel.b = TRUE ~ 0},
+  \code{parallel.t = TRUE ~ 0} and
+  \code{drop.b = TRUE ~ 0}.
+  This means that all possible temporal and behavioural effects are
+  estimated, for the intercepts and other covariates.
+  Such a model is \emph{not} recommended; it will contain a lot of parameters.
 
 
   }
-  \item{dconst, dpower}{
-  Decay constants and power (exponent) for the ridge adjustment
-  for the working weight matrices.
-  At iteration \eqn{t} of the IRLS algorithm
+  \item{type.fitted}{
+  Character, one of the choices for the type of fitted value returned.
+  The default is the first one.
+  Partial matching is okay.
+  For \code{"likelihood.cond"}:
+  the probability defined by the conditional likelihood.
+  For \code{"mean.uncond"}: the unconditional mean, which should
+  agree with \code{\link[base]{colMeans}} applied to the response
+  matrix for intercept-only models.
+
+
+  }
+  \item{ridge.constant, ridge.power}{
+  Determines the ridge parameters at each IRLS iteration.
+  They are the constant and power (exponent) for the ridge adjustment
+  for the working weight matrices (the capture probability block
+  matrix, hence the first \eqn{\tau} diagonal values).
+  At iteration \eqn{a} of the IRLS algorithm
   a positive value is added to the first \eqn{\tau}{tau}
   diagonal elements of the working weight matrices to make
-  them positive-definite. This adjustment is \eqn{K \times t^p}{K * t^p}
-  where \eqn{K} is \code{dconst} and \eqn{p} is \code{dpower}.
+  them positive-definite. This adjustment is the
+  mean of the diagonal elements of \code{wz} multiplied by
+  \eqn{K \times a^p}{K * a^p}
+  where \eqn{K} is \code{ridge.constant} and \eqn{p} is \code{ridge.power}.
   This is always positive but decays to zero as iterations proceed
   (provided \eqn{p} is negative etc.).
 
 
   }
+  \item{p.small, no.warning}{
+  See \code{\link{posbernoulli.t}}.
+
+
+  }
+
+
 }
 \details{
   This model
-  (commonly known as \eqn{M_{tb}} in the capture--recapture literature)
-  operates on a response matrix of 0s and 1s.
+  (commonly known as \eqn{M_{tb}}/\eqn{M_{tbh}} in the capture--recapture literature)
+  operates on a response matrix of 0s and 1s (\eqn{n \times \tau}{n x tau}).
   See \code{\link{posbernoulli.t}}
   for information that is in common.
+  It allows time and behavioural effects to be modelled.
 
 
-  This \pkg{VGAM} family function is \emph{experimental only}.
-  When finished, it should allow time and behavioural effects to be modelled.
-  Evidently, the expected information matrix (EIM) is \emph{not} of
-  full rank, so \code{dconst} and \code{dpower} are used to
+  Evidently,
+  the expected information matrix (EIM) seems \emph{not} to be
+  of full rank (especially in early iterations), so
+  \code{ridge.constant} and \code{ridge.power} are used to
   \emph{try} to fix up the problem.
   The default link functions
   are \eqn{(logit \,p_{c1},\ldots,logit \,p_{c\tau},logit \,p_{r2},\ldots,logit \,p_{r\tau})^T}{
@@ -77,8 +132,9 @@ posbernoulli.tb(link = "logit", parallel.t = FALSE, parallel.b = FALSE,
         the subscript \eqn{r} denotes recapture,
   and it is not possible to recapture the animal at sampling occasion 1.
   Thus \eqn{M = 2\tau - 1}{M=2*tau-1}.
-  The parameters are currently prefixed by \code{cap.prob} and \code{recap.prob}
+  The parameters are currently prefixed by \code{pcapture} and \code{precapture}
   for the capture and recapture probabilities.
+  This \pkg{VGAM} family function may be further modified in the future.
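+
+% An illustrative sketch (not from the upstream examples): with tau = 5
+% sampling occasions an intercept-only fit should have M = 2*5 - 1 = 9
+% linear predictors, and the columns of the coefficient matrix should
+% use the pcapture/precapture prefixes.
+% pdata <- rposbern(n = 100, nTimePts = 5, pvars = 2)
+% fit0 <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, posbernoulli.tb, data = pdata)
+% npred(fit0)                # Should be 2 * 5 - 1 = 9
+% coef(fit0, matrix = TRUE)  # Columns named with pcapture/precapture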
 
 
 % Not surprisingly,
@@ -103,20 +159,19 @@ posbernoulli.tb(link = "logit", parallel.t = FALSE, parallel.b = FALSE,
 \author{ Thomas W. Yee. }
 
 \note{
-  It is a good idea to constrain the probabilities for each sampling
-  occasion to be equal, and also allow the behavioural effect to
-  be modelled using the intercept.
-  See \code{M_tbh.1} below.
+  It is a good idea to apply the parallelism assumption to each sampling
+  occasion except possibly with respect to the intercepts.
+  Also, a simple behavioural effect, such as one modelled through
+  the intercept only, is recommended; if the behavioural effect is
+  not parallel and/or is allowed to apply to other covariates
+  then there will probably be too many parameters, and hence,
+  numerical problems. See \code{M_tbh.1} below.
 
 
-  The current restriction of handling only \eqn{\tau=2}{tau=2} and
-  \eqn{\tau=3}{tau=3} sampling occasions
-  is unfortunate and more work is needed to extend this to four or more.
+%Data-wise, at each sampling occasion, the \eqn{M_{tb}} model requires at least
+%one first capture and at least one noncapture.
 
 
-Data-wise, at each sampling occasion, the \eqn{M_{tb}} model requires at least
-one first capture and at least one noncapture.
-
 % If not all of the \eqn{2^{\tau}-1}{2^(tau) - 1} combinations of
 % the response matrix are not present then it pays to add
 % such rows to the response matrix and assign a small but
@@ -127,38 +182,23 @@ one first capture and at least one noncapture.
 % (1,1) rows present in the response matrix.
 
 
-}
-
-\section{Warning }{
-
-  As this model is likely to be overparameterized, probably this
-  function should not be used (for now?).
-
-
-% From Jakub:
-  Estimation for the population size (and its SE) for the
-  \eqn{M_{tb}} model may be wrong.
-  Models \eqn{M_{tbh}} and \eqn{M_{th}} may be wrong.
-  But models \eqn{M_{bh}}, \eqn{M_{h}}, \eqn{M_{b}},
-  \eqn{M_{t}}, \eqn{M_{0}} seem fine.
-
-
-
-  Inference, especially using standard errors, may be fraught here
-  because the EIM is, strictly speaking, not of full rank.
-  A similar adjustment is made by \code{\link{zipebcom}}.
   It is a good idea to monitor convergence.
-  The \eqn{M_0} model is best fitted with \code{\link{posbernoulli.b}}
-  or \code{\link{posbernoulli.t}} or \code{\link{posbinomial}} because
-  the standard errors are more accurate.
-
+  Simpler models such as the \eqn{M_0}/\eqn{M_h} models
+  are best fitted with \code{\link{posbernoulli.t}} or
+  \code{\link{posbernoulli.b}} or
+  \code{\link{posbinomial}}.
 
 }
 
+
 \seealso{ 
-  \code{\link{posbernoulli.b}} (including \eqn{\widehat{N}}),
+  \code{\link{posbernoulli.b}} (including \code{N.hat}),
   \code{\link{posbernoulli.t}},
-  \code{\link{posbinomial}}.
+  \code{\link{posbinomial}},
+  \code{\link{Huggins89table1}},
+  \code{\link{Huggins89.t1}},
+  \code{\link{deermice}},
+  \code{\link{prinia}}.
 
 
 }
@@ -166,52 +206,83 @@ one first capture and at least one noncapture.
 \examples{
 \dontrun{
 # Example 1: simulated data
-set.seed(123)
-nTimePts <- 2  # Must be 2 or 3 currently (aka tau == # of sampling occasions)
-nnn <- 10000   # Number of animals
+nTimePts <- 5  # (aka tau == # of sampling occasions)
+nnn <- 1000   # Number of animals
 pdata <- rposbern(n = nnn, nTimePts = nTimePts, pvars = 2)
-dim(pdata)
-head(pdata)
+dim(pdata); head(pdata)
 
-clist <- list("(Intercept)" = cbind(1, c(0, 0, 1)),  # Capture effect is last coln
-              x2            = rbind(1, 1, 1))
-M_tbh.1 <- vglm(cbind(y1, y2) ~ x2,
-                constraints = clist, trace = TRUE,
-                posbernoulli.tb, data = pdata)
-summary(M_tbh.1)
-
-coef(M_tbh.1)
+M_tbh.1 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
+                posbernoulli.tb, data = pdata, trace = TRUE)
+coef(M_tbh.1)  # First element is the behavioural effect
 coef(M_tbh.1, matrix = TRUE)
 constraints(M_tbh.1, matrix = TRUE)
-summary(M_tbh.1)  # Standard errors are very approximate
+summary(M_tbh.1, presid = FALSE)  # Standard errors are approximate
 head(fitted(M_tbh.1))
 head(model.matrix(M_tbh.1, type = "vlm"), 21)
 dim(depvar(M_tbh.1))
 
-
-# Example 2: Perom subset data
-Hlist <- list("(Intercept)" = cbind(1, c(0, 0, 0, 1, 1)),
-              sex           = rbind(1, 1, 1, 1, 1),
-              weight        = rbind(1, 1, 1, 1, 1))
-Psubset <- subset(Perom, y1 + y2 + y3 > 0)
-head(Psubset)
-
-fit1 <- vglm(cbind(y1, y2, y3) ~ sex + weight, constraints = Hlist,
-             posbernoulli.tb, data = Psubset, trace = TRUE)
+M_tbh.2 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
+                posbernoulli.tb(parallel.t = FALSE ~ 0),
+                data = pdata, trace = TRUE)
+coef(M_tbh.2)  # First element is the behavioural effect
+coef(M_tbh.2, matrix = TRUE)
+constraints(M_tbh.2, matrix = TRUE)
+summary(M_tbh.2, presid = FALSE)  # Standard errors are approximate
+head(fitted(M_tbh.2))
+head(model.matrix(M_tbh.2, type = "vlm"), 21)
+dim(depvar(M_tbh.2))
+
+# Example 2: deermice subset data
+fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
+             posbernoulli.t, data = deermice, trace = TRUE)
 coef(fit1)
 coef(fit1, matrix = TRUE)
-summary(fit1)  # Standard errors are very approximate
-
-# fit1 is the same as Fit1:
-Fit1 <- vglm(cbind(y1, y2, y3) ~ sex + weight, data = Psubset,
-             posbernoulli.tb(parallel.t = TRUE), trace = TRUE)
-constraints(Fit1)  # Same as Hlist
-
-yyy <- depvar(fit1)
-if (length(table(4 * yyy[, 1] + 2 * yyy[, 2] + 1 * yyy[, 3])) != 2^(ncol(yyy))-1)
-  warning("not every combination is represented by a row in the response matrix")
+constraints(fit1, matrix = TRUE)
+summary(fit1, presid = FALSE)  # Standard errors are approximate
+
+# fit1 is the same as Fit1 (a M_{th} model):
+Fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
+             posbernoulli.tb(drop.b = TRUE ~ sex + weight,
+                             parallel.t = TRUE),  # No parallelism for the intercept
+             data = deermice, trace = TRUE)
+constraints(Fit1)
 }
 }
 \keyword{models}
 \keyword{regression}
 
+
+
+
+%\section{Warning }{
+%  As this model is likely to be overparameterized, probably this
+%  function should not be used (for now?).
+%
+%% From Jakub:
+%  Estimation for the population size (and its SE) for the
+%  \eqn{M_{tb}} and \eqn{M_{tbh}} model may be wrong.
+%  But models
+%  \eqn{M_{0}},
+%  \eqn{M_{h}},
+%  \eqn{M_{b}},
+%  \eqn{M_{bh}},
+%  \eqn{M_{t}},
+%  \eqn{M_{th}}
+%  seem fine.
+%
+%  Inference, especially using standard errors, may be fraught here
+%  because the EIM is, strictly speaking, not of full rank.
+%  A similar adjustment is made by \code{\link{zipebcom}}.
+%  It is a good idea to monitor convergence.
+%  The \eqn{M_0}/\eqn{M_h} models are best fitted with
+%  \code{\link{posbernoulli.t}} or \code{\link{posbinomial}} because
+%  the standard errors are more accurate.
+%
+%
+%}
+
+%yyy <- depvar(fit1)
+%if (length(table(4 * yyy[, 1] + 2 * yyy[, 2] + 1 * yyy[, 3])) != 2^(ncol(yyy)) - 1)
+%  warning("not every combination is represented by a row in the response matrix")
+
+
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index b8eeabc..4465632 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -31,7 +31,11 @@ rposbinom(n, size, prob)
    given in \code{\link{posbinomial}}.
 
   }
-  \item{prob}{probability of success on each trial. }
+  \item{prob}{probability of success on each trial. 
+  Should be in \eqn{(0,1)}.
+
+
+  }
 
 
 % 20120407:
@@ -107,7 +111,7 @@ rposbinom(n, size, prob)
 prob <- 0.2; size <- 10
 table(y <- rposbinom(n = 1000, size, prob))
 mean(y)  # Sample mean
-size * prob / (1-(1-prob)^size)  # Population mean
+size * prob / (1 - (1 - prob)^size)  # Population mean
 
 (ii <- dposbinom(0:size, size, prob))
 cumsum(ii) - pposbinom(0:size, size, prob)  # Should be 0s
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index dbd02a4..3e7eacd 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -7,7 +7,9 @@
 
 }
 \usage{
-posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
+posbinomial(link = "logit", mv = FALSE, parallel = FALSE,
+            omit.constant = FALSE, p.small = 1e-4, no.warning = FALSE,
+            zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -16,6 +18,35 @@ posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
 
 
   }
+  \item{omit.constant}{
+    Logical.
+    If \code{TRUE} then the constant \code{lchoose(size, size * yprop)}
+    is omitted from the \code{loglikelihood} calculation.
+    If the model is to be compared using
+    \code{AIC()} or \code{BIC()}
+    (see \code{\link{AICvlm}} or \code{\link{BICvlm}})
+    to the likes of
+    \code{\link{posbernoulli.tb}} etc. then it is important
+    to set \code{omit.constant = TRUE} because then none of the models
+    will have a normalizing constant in its likelihood function.
+    Hence they become comparable.
+    This is because the \eqn{M_0} Otis et al. (1978) model
+    coincides with \code{posbinomial()}.
+    See below for an example.
+    Also see \code{\link{posbernoulli.t}} regarding estimating the
+    population size (\code{N.hat} and \code{SE.N.hat}) if the
+    number of trials is the same for all observations.
+
+
+
+  }
+  \item{p.small, no.warning}{
+  See \code{\link{posbernoulli.t}}.
+
+
+  }
+
+
 }
 \details{
   The positive binomial distribution is the ordinary binomial distribution
@@ -26,8 +57,10 @@ posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
   fitted values, i.e., the usual mean.
 
 
-  In the capture-recapture literature this model is called
-  the \eqn{M_0}. It arises from a sum of a sequence of
+  In the capture--recapture literature this model is called
+  the \eqn{M_0} if it is an intercept-only model.
+  Otherwise, when there are covariates, it is called the \eqn{M_h}.
+  It arises from a sum of a sequence of
   \eqn{\tau}-Bernoulli random variates subject to at least
   one success (capture).
   Here, each animal has the same probability of capture or
@@ -46,6 +79,12 @@ posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
 }
 \references{
 
+  Otis, D. L. et al. (1978)
+  Statistical inference from capture data on closed animal populations,
+  \emph{Wildlife Monographs},
+  \bold{62}, 3--135.
+
+
 Patil, G. P. (1962)
 Maximum likelihood estimation for
 generalised power series distributions and its application to a
@@ -55,7 +94,7 @@ truncated binomial distribution.
 
 
 Pearson, K. (1913)
-\emph{A monograph on Albinism in Man}.
+\emph{A Monograph on Albinism in Man}.
 Drapers Company Research Memoirs.
 
 
@@ -90,7 +129,8 @@ Drapers Company Research Memoirs.
   \code{\link{posbernoulli.b}},
   \code{\link{posbernoulli.t}},
   \code{\link{posbernoulli.tb}},
-  \code{\link{binomialff}}.
+  \code{\link{binomialff}},
+  \code{\link{AICvlm}}, \code{\link{BICvlm}}.
 
 
 }
@@ -105,14 +145,22 @@ Coef(fit1)  # = MLE of p = 0.3088
 head(fitted(fit1))
 sqrt(vcov(fit1, untransform = TRUE))  # SE = 0.0322
 
-# Fit a M_0 model to the Perom data ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-M_0   <- vglm(cbind(    y1 + y2 + y3 + y4 + y5 + y6,
-                    6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1,
-              data = Perom, posbinomial, trace = TRUE)
-coef(M_0, matrix = TRUE)
-Coef(M_0)
-constraints(M_0, matrix = TRUE)
-summary(M_0)
+# Fit an M_0 model (Otis et al. 1978) to the deermice data ,,,,,,,,,,,,,,,,,,,,,,,
+M.0 <- vglm(cbind(    y1 + y2 + y3 + y4 + y5 + y6,
+                  6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1, trace = TRUE,
+            posbinomial(omit.constant = TRUE), data = deermice)
+coef(M.0, matrix = TRUE)
+Coef(M.0)
+constraints(M.0, matrix = TRUE)
+summary(M.0)
+c(   N.hat = M.0@extra$N.hat,     # Since tau = 6, the number of Bernoulli
+  SE.N.hat = M.0@extra$SE.N.hat)  # trials is the same for each observation
+
+# Compare it to the M_b using AIC and BIC
+M.b <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1, trace = TRUE,
+            posbernoulli.b, data = deermice)
+sort(c(M.0 = AIC(M.0), M.b = AIC(M.b)))  # Okay since omit.constant = TRUE
+sort(c(M.0 = BIC(M.0), M.b = BIC(M.b)))  # Okay since omit.constant = TRUE
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/posgeomUC.Rd b/man/posgeomUC.Rd
index 2aa790a..8cc9948 100644
--- a/man/posgeomUC.Rd
+++ b/man/posgeomUC.Rd
@@ -87,7 +87,7 @@ rposgeom(n, prob)
 \examples{
 prob <- 0.75; y = rposgeom(n = 1000, prob)
 table(y)
-mean(y) # Sample mean
+mean(y)  # Sample mean
 1 / prob  # Population mean
 
 (ii <- dposgeom(0:7, prob))
@@ -95,7 +95,7 @@ cumsum(ii) - pposgeom(0:7, prob)  # Should be 0s
 table(rposgeom(100, prob))
 
 table(qposgeom(runif(1000), prob))
-round(dposgeom(1:10, prob) * 1000) # Should be similar
+round(dposgeom(1:10, prob) * 1000)  # Should be similar
 
 \dontrun{
 x <- 0:5
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 790b98a..7eeb2df 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -104,13 +104,13 @@ for counts with extra zeros.
 \examples{
 munb <- 5; size <- 4; n <- 1000
 table(y <- rposnegbin(n, munb = munb, size = size))
-mean(y) # sample mean
-munb / (1 - (size / (size + munb))^size) # population mean
-munb / pnbinom(0, mu = munb, size = size, lower.tail = FALSE) # same as before
+mean(y)  # sample mean
+munb / (1 - (size / (size + munb))^size)  # population mean
+munb / pnbinom(0, mu = munb, size = size, lower.tail = FALSE)  # same as before
 
 x <- (-1):17
 (ii <- dposnegbin(x, munb = munb, size = size))
-max(abs(cumsum(ii) - pposnegbin(x, munb = munb, size = size))) # Should be 0
+max(abs(cumsum(ii) - pposnegbin(x, munb = munb, size = size)))  # Should be 0
 
 \dontrun{
 x <- 0:10
@@ -125,7 +125,7 @@ barplot(rbind(dposnegbin(x, munb = munb, size = size),
 nn <- 5000
 mytab <- cumsum(table(rposnegbin(nn, munb = munb, size = size))) / nn
 myans <- pposnegbin(sort(as.numeric(names(mytab))), munb = munb, size = size)
-max(abs(mytab - myans)) # Should be 0
+max(abs(mytab - myans))  # Should be 0
 }
 \keyword{distribution}
 
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index a21da75..f0f4ee3 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -108,15 +108,6 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
   179--188.
 
 
-  Fisher, R. A., Corbet, A. S. and Williams, C. B. (1943)
-  The Relation Between the Number of Species and
-  the Number of Individuals in a Random Sample of an Animal
-  Population,
-  \emph{Journal of Animal Ecology},
-  \bold{12},
-  42--58.
-
-
   Williamson, E. and Bretherton, M. H. (1964)
   Tables of the logarithmic series distribution.
   \emph{Annals of Mathematical Statistics},
@@ -139,7 +130,9 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
   \code{\link{zanegbinomial}},
 % \code{\link[MASS]{rnegbin}}.
   \code{\link[stats:NegBinomial]{rnbinom}},
-  \code{\link{CommonVGAMffArguments}}.
+  \code{\link{CommonVGAMffArguments}},
+  \code{\link{corbet}},
+  \code{\link{logff}}.
 
 
 }
@@ -151,7 +144,7 @@ pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x2), size = exp(1))
                           y2 = rposnegbin(nn, munb = exp(1+2*x2), size = exp(3)))
 fit <- vglm(cbind(y1, y2) ~ x2, posnegbinomial, pdata, trace = TRUE)
 coef(fit, matrix = TRUE)
-dim(depvar(fit)) # dim(fit at y) is not as good
+dim(depvar(fit))  # dim(fit at y) is not as good
 
 
 # Another artificial data example
@@ -160,25 +153,23 @@ pdata2 <- transform(pdata2, y3 = rposnegbin(nn, munb = munb, size = size))
 with(pdata2, table(y3))
 fit <- vglm(y3 ~ 1, posnegbinomial, pdata2, trace = TRUE)
 coef(fit, matrix = TRUE)
-with(pdata2, mean(y3)) # Sample mean
-head(with(pdata2, munb/(1-(size/(size+munb))^size)), 1) # Population mean
+with(pdata2, mean(y3))  # Sample mean
+head(with(pdata2, munb/(1-(size/(size+munb))^size)), 1)  # Population mean
 head(fitted(fit), 3)
 head(predict(fit), 3)
 
 
 # Example: Corbet (1943) butterfly Malaya data
-corbet <- data.frame(nindiv = 1:24,
-                     ofreq = c(118, 74, 44, 24, 29, 22, 20, 19, 20, 15, 12,
-                               14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3))
-fit <- vglm(nindiv ~ 1, posnegbinomial, weights = ofreq, data = corbet)
+fit <- vglm(ofreq ~ 1, posnegbinomial, weights = species, data = corbet)
 coef(fit, matrix = TRUE)
 Coef(fit)
 (khat <- Coef(fit)["size"])
-pdf2 <- dposnegbin(x = with(corbet, nindiv), mu = fitted(fit), size = khat)
-print( with(corbet, cbind(nindiv, ofreq, fitted = pdf2*sum(ofreq))), dig = 1)
+pdf2 <- dposnegbin(x = with(corbet, ofreq), mu = fitted(fit), size = khat)
+print( with(corbet, cbind(ofreq, species, fitted = pdf2*sum(species))), digits = 1)
 with(corbet,
-matplot(nindiv, cbind(ofreq, fitted = pdf2*sum(ofreq)), las = 1,
-        type = "b", ylab = "Frequency", col = c("blue", "orange"),
+matplot(ofreq, cbind(species, fitted = pdf2*sum(species)), las = 1,
+        xlab = "Observed frequency (of individual butterflies)",
+        type = "b", ylab = "Number of species", col = c("blue", "orange"),
         main = "blue 1s = observe; orange 2s = fitted"))
 }
 }
@@ -186,4 +177,7 @@ matplot(nindiv, cbind(ofreq, fitted = pdf2*sum(ofreq)), las = 1,
 \keyword{regression}
 
 
-% bigN = with(corbet, sum(ofreq))
+% bigN = with(corbet, sum(species))
+
+
+
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
index ffa64d6..0d20986 100644
--- a/man/posnormUC.Rd
+++ b/man/posnormUC.Rd
@@ -37,7 +37,7 @@ rposnorm(n, mean = 0, sd = 1)
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{posnormal1}}, the \pkg{VGAM} family function
+  See \code{\link{posnormal}}, the \pkg{VGAM} family function
   for estimating the parameters, 
   for the formula of the probability density function and other details.
 
@@ -47,7 +47,7 @@ rposnorm(n, mean = 0, sd = 1)
 
 %}
 \seealso{
-  \code{\link{posnormal1}}.
+  \code{\link{posnormal}}.
 
 }
 \examples{
@@ -63,7 +63,7 @@ Q <- qposnorm(probs, m = m)
 lines(Q, dposnorm(Q, m = m), col = "purple", lty = 3, type = "h")
 lines(Q, pposnorm(Q, m = m), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pposnorm(Q, m = m) - probs)) # Should be 0
+max(abs(pposnorm(Q, m = m) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/posnormal1.Rd b/man/posnormal.Rd
similarity index 90%
rename from man/posnormal1.Rd
rename to man/posnormal.Rd
index 3b3f750..86f3ddf 100644
--- a/man/posnormal1.Rd
+++ b/man/posnormal.Rd
@@ -1,13 +1,13 @@
-\name{posnormal1}
-\alias{posnormal1}
+\name{posnormal}
+\alias{posnormal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Positive Normal Distribution Family Function }
 \description{
   Fits a positive (univariate) normal distribution.
 }
 \usage{
-posnormal1(lmean = "identity", lsd = "loge",
-           imean = NULL, isd = NULL, nsimEIM = 100, zero = NULL)
+posnormal(lmean = "identity", lsd = "loge",
+          imean = NULL, isd = NULL, nsimEIM = 100, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -91,7 +91,7 @@ posnormal1(lmean = "identity", lsd = "loge",
 \author{ Thomas W. Yee }
 \note{
   The response variable for this family function is the same as
-  \code{\link{normal1}} except positive values are required.
+  \code{\link{uninormal}} except positive values are required.
   Reasonably good initial values are needed.
   Fisher scoring is implemented.
 
@@ -107,7 +107,7 @@ posnormal1(lmean = "identity", lsd = "loge",
 
 }
 \seealso{ 
-    \code{\link{normal1}},
+    \code{\link{uninormal}},
     \code{\link{tobit}}.
 
 
@@ -119,10 +119,10 @@ pdata <- transform(pdata, y = rposnorm(n <- 1000, m = m, sd = SD))
 
 \dontrun{with(pdata, hist(y, prob = TRUE, border = "blue",
          main = paste("posnorm(m =", m[1], ", sd =", round(SD[1], 2),")"))) }
-fit <- vglm(y ~ 1, fam = posnormal1, pdata, trace = TRUE)
+fit <- vglm(y ~ 1, fam = posnormal, pdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 (Cfit <- Coef(fit))
-mygrid <- with(pdata, seq(min(y), max(y), len = 200)) # Add the fit to the histogram
+mygrid <- with(pdata, seq(min(y), max(y), len = 200))  # Add the fit to the histogram
 \dontrun{lines(mygrid, dposnorm(mygrid, Cfit[1], Cfit[2]), col = "red")}
 }
 \keyword{models}
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
index ee38af2..3f91325 100644
--- a/man/pospoisUC.Rd
+++ b/man/pospoisUC.Rd
@@ -89,14 +89,14 @@ rpospois(n, lambda)
 lambda <- 2; y = rpospois(n = 1000, lambda)
 table(y)
 mean(y)  # Sample mean
-lambda / (1 - exp(-lambda)) # Population mean
+lambda / (1 - exp(-lambda))  # Population mean
 
 (ii <- dpospois(0:7, lambda))
 cumsum(ii) - ppospois(0:7, lambda)  # Should be 0s
 table(rpospois(100, lambda))
 
 table(qpospois(runif(1000), lambda))
-round(dpospois(1:10, lambda) * 1000) # Should be similar
+round(dpospois(1:10, lambda) * 1000)  # Should be similar
 
 \dontrun{ x <- 0:7
 barplot(rbind(dpospois(x, lambda), dpois(x, lambda)),
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index ee99ba6..bcb3776 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -92,7 +92,7 @@ Coef(fit)
 summary(fit)
 fitted(fit)
 
-pdata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data
+pdata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
 pdata <- transform(pdata, lambda = exp(1 - 2 * x2))
 pdata <- transform(pdata, y1 = rpospois(nn, lambda))
 with(pdata, table(y1))
diff --git a/man/powl.Rd b/man/powerlink.Rd
similarity index 72%
rename from man/powl.Rd
rename to man/powerlink.Rd
index c1542a4..397977b 100644
--- a/man/powl.Rd
+++ b/man/powerlink.Rd
@@ -1,5 +1,5 @@
-\name{powl}
-\alias{powl}
+\name{powerlink}
+\alias{powerlink}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Power Link Function }
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-powl(theta, power = 1, inverse = FALSE, deriv = 0,
-      short = TRUE, tag = FALSE)
+powerlink(theta, power = 1, inverse = FALSE, deriv = 0,
+          short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -42,7 +42,7 @@ powl(theta, power = 1, inverse = FALSE, deriv = 0,
 
 }
 \value{
-  For \code{powl} with \code{deriv = 0}, then \code{theta} raised
+  For \code{powerlink} with \code{deriv = 0}, then \code{theta} raised
   to the power of \code{power}.
   And if \code{inverse = TRUE} then
   \code{theta} raised to the power of \code{1/power}.
@@ -73,20 +73,23 @@ powl(theta, power = 1, inverse = FALSE, deriv = 0,
 \seealso{ 
   \code{\link{Links}},
   \code{\link{loge}}.
+
+
 }
 \examples{
-powl("a", power = 2, short = FALSE, tag = TRUE)
-powl(x <- 1:5)
-powl(x, power = 2)
-max(abs(powl(powl(x, power = 2), power = 2, inverse=TRUE) - x)) # Should be 0
-powl(x <- (-5):5, power = 0.5) # Has NAs
+powerlink("a", power = 2, short = FALSE, tag = TRUE)
+powerlink(x <- 1:5)
+powerlink(x, power = 2)
+max(abs(powerlink(powerlink(x, power = 2),
+                  power = 2, inverse = TRUE) - x))  # Should be 0
+powerlink(x <- (-5):5, power = 0.5)  # Has NAs
 
 # 1/2 = 0.5
 pdata <- data.frame(y = rbeta(n = 1000, shape1 = 2^2, shape2 = 3^2))
-fit <- vglm(y ~ 1, beta.ab(lshape1 = powl(power = 0.5), i1 = 3,
-                           lshape2 = powl(power = 0.5), i2 = 7), pdata)
+fit <- vglm(y ~ 1, beta.ab(lshape1 = powerlink(power = 0.5), i1 = 3,
+                           lshape2 = powerlink(power = 0.5), i2 = 7), pdata)
 t(coef(fit, matrix = TRUE))
-Coef(fit) # Useful for intercept-only models
+Coef(fit)  # Useful for intercept-only models
 vcov(fit, untransform = TRUE)
 }
 \keyword{math}
diff --git a/man/prats.Rd b/man/prats.Rd
new file mode 100644
index 0000000..7b7f2ab
--- /dev/null
+++ b/man/prats.Rd
@@ -0,0 +1,95 @@
+\name{prats}
+\alias{prats}
+\docType{data}
+\title{ Pregnant Rats Toxicological Experiment Data }
+
+\description{
+  Data from a small toxicological experiment.
+  The subjects are fetuses from
+  two randomized groups of pregnant rats,
+  and they were given a placebo or chemical treatment.
+  The number with birth defects was recorded, as well
+  as each litter size.
+
+
+}
+\usage{
+data(prats)
+}
+\format{
+  A data frame with the following variables.
+
+  \describe{
+   
+    \item{treatment}{
+    A \code{0} means control;
+    a \code{1} means the chemical treatment.
+
+
+    }
+    \item{alive, litter.size}{
+    The number of fetuses alive at 21 days, out of
+    the number of fetuses alive at  4 days (the litter size).
+
+
+    }
+
+  }
+}
+\details{
+  The data concern a toxicological experiment where
+  the subjects are fetuses from
+  two randomized groups of 16 pregnant rats each,
+  and they were given a placebo or chemical treatment.
+  The number with birth defects and the litter size were recorded.
+  Half the rats were fed a control diet during pregnancy and
+  lactation, and the diet of the other half was treated with a
+  chemical. For each litter the number of pups alive at 4 days
+  and the number of pups that survived the 21-day lactation period
+  were recorded.
+
+
+}
+\source{
+ 
+  Weil, C. S. (1970)
+  Selection of the valid number of sampling units and a consideration
+  of their combination in toxicological studies involving
+  reproduction, teratogenesis or carcinogenesis.
+  \emph{Food and Cosmetics Toxicology},
+  \bold{8}(2), 177--182.
+
+
+%Food and Cosmetics Toxicology
+%Fd. Cosmet. Toxicol.
+
+
+ 
+}
+\references{
+
+  Williams, D. A. (1975)
+  The analysis of binary responses from toxicological experiments
+  involving reproduction and teratogenicity.
+  \emph{Biometrics},
+  \bold{31}(4), 949--952.
+
+
+}
+\seealso{
+  \code{\link[VGAM]{betabinomial}},
+  \code{\link[VGAM]{betabinomial.ab}}.
+
+
+}
+\examples{
+prats
+colSums(subset(prats, treatment == 0))
+colSums(subset(prats, treatment == 1))
+summary(prats)
+}
+\keyword{datasets}
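+
+% An illustrative sketch (not part of the upstream examples): a
+% beta-binomial fit of pup survival against treatment, along the lines
+% of the \seealso links above.
+% fit1 <- vglm(cbind(alive, litter.size - alive) ~ treatment,
+%              betabinomial, data = prats, trace = TRUE)
+% coef(fit1, matrix = TRUE)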
+
+
+%
+%
diff --git a/man/predictqrrvglm.Rd b/man/predictqrrvglm.Rd
index 684afcd..3ce3e16 100644
--- a/man/predictqrrvglm.Rd
+++ b/man/predictqrrvglm.Rd
@@ -9,9 +9,9 @@
 }
 \usage{
 predictqrrvglm(object, newdata=NULL,
-               type = c("link", "response", "lv", "terms"),
+               type = c("link", "response", "latvar", "terms"),
                se.fit = FALSE, deriv = 0, dispersion = NULL,
-               extra = object at extra, varlvI = FALSE, reference = NULL, ...)
+               extra = object@extra, varI.latvar = FALSE, reference = NULL, ...)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -21,14 +21,18 @@ predictqrrvglm(object, newdata=NULL,
   An optional data frame in which to look for variables with which
   to predict. If omitted, the fitted linear predictors are used.
 
+
   }
   \item{type, se.fit, dispersion, extra}{
   See \code{\link{predictvglm}}.
 
+
   }
   \item{deriv}{ Derivative. Currently only 0 is handled. }
-  \item{varlvI, reference}{
+  \item{varI.latvar, reference}{
   Arguments passed into \code{\link{Coef.qrrvglm}}.
+
+
   }
   \item{\dots}{ Currently undocumented. }
 }
@@ -38,8 +42,10 @@ predictqrrvglm(object, newdata=NULL,
   Currently there are lots of limitations of this function; it is
   unfinished.
 
+
 % and optionally estimates standard errors of those predictions
 
+
 }
 \value{
   See \code{\link{predictvglm}}.
@@ -53,27 +59,30 @@ canonical Gaussian ordination.
 \emph{Ecological Monographs},
 \bold{74}, 685--701.
 
+
 }
 
 \author{ T. W. Yee }
 \note{
   This function is not robust and has not been checked fully.
 
+
 }
 
 \seealso{ 
   \code{\link{cqo}}.
 
+
 }
 
 \examples{
-hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardize the environmental variables
 set.seed(1234)
 # vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
 # vvv                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
 # vvv          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
 # vvv          fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=TRUE)
-# vvv sort(p1 at misc$deviance.Bestof) # A history of all the iterations
+# vvv sort(p1@misc$deviance.Bestof)  # A history of all the iterations
 
 # vvv head(predict(p1))
 
diff --git a/man/predictvglm.Rd b/man/predictvglm.Rd
index a27fda7..75df01b 100644
--- a/man/predictvglm.Rd
+++ b/man/predictvglm.Rd
@@ -154,8 +154,8 @@ class(fit)
 (q0 <- head(predict(fit)))
 (q1 <- predict(fit, newdata = head(pneumo)))
 (q2 <- predict(fit, newdata = head(pneumo)))
-all.equal(q0, q1) # Should be TRUE
-all.equal(q1, q2) # Should be TRUE
+all.equal(q0, q1)  # Should be TRUE
+all.equal(q1, q2)  # Should be TRUE
 
 head(predict(fit))
 head(predict(fit, untransform = TRUE))
@@ -164,9 +164,9 @@ p0 <- head(predict(fit, type = "response"))
 p1 <- head(predict(fit, type = "response", newdata = pneumo))
 p2 <- head(predict(fit, type = "response", newdata = pneumo))
 p3 <- head(fitted(fit))
-all.equal(p0, p1) # Should be TRUE
-all.equal(p1, p2) # Should be TRUE
-all.equal(p2, p3) # Should be TRUE
+all.equal(p0, p1)  # Should be TRUE
+all.equal(p1, p2)  # Should be TRUE
+all.equal(p2, p3)  # Should be TRUE
 
 predict(fit, type = "terms", se = TRUE)
 }
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index af31dce..0af4793 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -20,17 +20,20 @@ prentice74(llocation = "identity", lscale = "loge", lshape = "identity",
   and the shape parameter \eqn{q}, respectively.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{ilocation, iscale}{
   Initial value for \eqn{a} and \eqn{b}, respectively.
   The defaults mean an initial value is determined internally for each.
 
+
   }
   \item{ishape}{
   Initial value for \eqn{q}.
   If failure to converge occurs, try some other value.
   The default means an initial value is determined internally.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
@@ -38,6 +41,7 @@ prentice74(llocation = "identity", lscale = "loge", lshape = "identity",
   The values must be from the set \{1,2,3\}.
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -70,6 +74,7 @@ else \eqn{q < 0} is right skew.
   The object is used by modelling functions such as \code{\link{vglm}},
   and \code{\link{vgam}}.
 
+
 }
 \references{
   Prentice, R. L. (1974)
@@ -111,7 +116,7 @@ pdata <- data.frame(x2 = runif(nn <- 1000))
 pdata <- transform(pdata, loc = -1 + 2*x2, Scale = exp(1))
 pdata <- transform(pdata, y = rlgamma(nn, loc = loc, scale = Scale, k = 1))
 fit <- vglm(y ~ x2, prentice74(zero = 2:3), pdata, trace = TRUE)
-coef(fit, matrix = TRUE) # Note the coefficients for location
+coef(fit, matrix = TRUE)  # Note the coefficients for location
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/prinia.Rd b/man/prinia.Rd
new file mode 100644
index 0000000..8e68b89
--- /dev/null
+++ b/man/prinia.Rd
@@ -0,0 +1,129 @@
+\name{prinia}
+\alias{prinia}
+\docType{data}
+\title{Yellow-bellied Prinia
+
+}
+\description{
+ A data frame of capture--recapture data on the yellow-bellied prinia.
+
+}
+\usage{
+data(prinia)
+}
+
+\format{
+  A data frame with 151 observations on the following 23 variables.
+
+\describe{
+  \item{length}{a numeric vector,
+  the scaled wing length (zero mean and unit variance).
+
+
+  }
+  \item{fat}{a numeric vector, fat index;
+  originally 1 (no fat) to 4 (very fat) but
+  converted to 0 (no fat) versus 1 otherwise.
+
+
+  }
+  \item{cap}{a numeric vector,
+  number of times the bird was captured or recaptured.
+
+
+  }
+  \item{noncap}{a numeric vector,
+  number of times the bird was not captured.
+
+
+  }
+  \item{y1, y2, y3, y4, y5, y6}{
+  a numeric vector of 0s and 1s, for noncapture and capture respectively.
+
+
+  }
+  \item{y7, y8, y9, y10, y11, y12}{
+  same as above.
+
+
+  }
+  \item{y13, y14, y15, y16, y17, y18, y19}{
+  same as above.
+
+
+  }
+}
+
+}
+\details{
+  The yellow-bellied Prinia \emph{Prinia flaviventris}
+  is a common bird species found in Southeast Asia. A
+  capture--recapture experiment was conducted at the Mai Po
+  Nature Reserve in Hong Kong during 1991, where captured
+  individuals had their wing lengths measured and fat index
+  recorded. A total of 19 weekly capture occasions were
+  considered, where 151 distinct birds were captured.
+
+
+  More generally, the prinias are a genus of small
+  insectivorous birds, and are sometimes referred to as
+  \emph{wren-warblers}.  They are a little-known group of the
+  tropical and subtropical Old World, the roughly 30 species
+  being divided fairly equally between Africa and Asia.
+
+
+
+% 20131030; this is old:
+% The example below illustrates the necessity of creating
+% variables \code{y1}, \code{y2}, \ldots in order for
+% \code{\link{posbernoulli.b}},
+% \code{\link{posbernoulli.t}} and
+% \code{\link{posbernoulli.tb}} to work.
+% In contrast, \code{\link{posbinomial}} may have a simple 2-column
+% matrix as the response.
+
+
+% \emph{Prinia inornate} is from the SS paper, not exactly this bird.
+
+
+
+}
+\source{
+
+  Thanks to Paul Yip for permission to make this data available.
+
+
+% Further information is at:
+
+% Huggins, R. M. and Yip, P. S. F. (1997).
+% Statistical analysis of removal experiments with the use of auxillary variables.
+% \emph{Statistica Sinica} \bold{7}, 705--712.
+
+
+  Hwang, W.-H. and Huggins, R. M. (2007)
+  Application of semiparametric regression models in the
+  analysis of capture--recapture experiments.
+  \emph{Australian and New Zealand Journal of Statistics} \bold{49}, 191--202.
+
+
+}
+\examples{
+head(prinia)
+summary(prinia)
+rowSums(prinia[, c("cap", "noncap")])  # 19s
+
+#  Fit a positive-binomial distribution (M.h) to the data:
+fit1 <- vglm(cbind(cap, noncap) ~ length + fat, posbinomial, data = prinia)
+
+#  Fit another positive-binomial distribution (M.h) to the data:
+#  The response input is suitable for posbernoulli.*-type functions.
+fit2 <- vglm(cbind( y1,  y2,  y3,  y4,  y5, y6,  y7,  y8,  y9,
+                   y10, y11, y12, y13, y14, y15, y16, y17, y18, y19) ~
+             length + fat, posbernoulli.b(drop.b = FALSE ~ 0), data = prinia)
+}
+\keyword{datasets}
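+
+% An illustrative sketch (not part of the upstream examples): the
+% population-size estimate that the posbernoulli.b() fit fit2 above
+% should store in its extra slot (see posbernoulli.t for the analogous
+% components).
+% c(   N.hat = fit2@extra$N.hat,
+%   SE.N.hat = fit2@extra$SE.N.hat)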
+
+
+
diff --git a/man/probit.Rd b/man/probit.Rd
index 45cc882..ba5a095 100644
--- a/man/probit.Rd
+++ b/man/probit.Rd
@@ -67,7 +67,7 @@ probit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
   In terms of the threshold approach with cumulative probabilities for
   an ordinal response this link function corresponds to the univariate
-  normal distribution (see \code{\link{normal1}}).
+  normal distribution (see \code{\link{uninormal}}).
 
 
 }
@@ -82,11 +82,11 @@ probit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 \examples{
 p <- seq(0.01, 0.99, by = 0.01)
 probit(p)
-max(abs(probit(probit(p), inverse = TRUE) - p)) # Should be 0
+max(abs(probit(probit(p), inverse = TRUE) - p))  # Should be 0
 
 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-probit(p) # Has NAs
-probit(p, bvalue = .Machine$double.eps) # Has no NAs
+probit(p)  # Has NAs
+probit(p, bvalue = .Machine$double.eps)  # Has no NAs
 
 \dontrun{p <- seq(0.01, 0.99, by = 0.01); par(lwd = (mylwd <- 2))
 plot(p, logit(p), type = "l", col = "limegreen", ylab = "transformation",
diff --git a/man/propodds.Rd b/man/propodds.Rd
index cdeb047..34c060d 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -85,10 +85,10 @@ contains further information and examples.
 # Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
 pneumo <- transform(pneumo, let = log(exposure.time))
 (fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-depvar(fit) # Sample proportions
-weights(fit, type = "prior") # Number of observations
+depvar(fit)  # Sample proportions
+weights(fit, type = "prior")  # Number of observations
 coef(fit, matrix = TRUE)
-constraints(fit) # Constraint matrices
+constraints(fit)  # Constraint matrices
 summary(fit)
 
 # Check that the model is linear in let ----------------------
@@ -100,7 +100,7 @@ fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2), propodds, pneumo)
               cumulative(parallel = FALSE, reverse = TRUE), pneumo))
 pchisq(deviance(fit) - deviance(fit3),
        df = df.residual(fit) - df.residual(fit3), lower.tail = FALSE)
-lrtest(fit3, fit) # Easier
+lrtest(fit3, fit)  # Easier
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/prplot.Rd b/man/prplot.Rd
index c1d4c77..1d843ac 100644
--- a/man/prplot.Rd
+++ b/man/prplot.Rd
@@ -85,7 +85,7 @@ prplot.control(xlab = NULL, ylab = "Probability", main = NULL, xlim = NULL,
 \examples{
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
-M <- npred(fit) # Or fit at misc$M
+M <- npred(fit)  # Or fit@misc$M
 \dontrun{ prplot(fit)
 prplot(fit, lty = 1:M, col = (1:M)+2, rug = TRUE, las = 1,
        ylim = c(0, 1), rlwd = 2) }
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 94e83bb..3364fa8 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -21,7 +21,7 @@ qrrvglm.control(Rank = 1,
                 FastAlgorithm = TRUE,
                 GradientFunction = TRUE,
                 Hstep = 0.001,
-                isdlv = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
+                isd.latvar = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
                 iKvector = 0.1,
                 iShape = 0.1,
                 ITolerances = FALSE,
@@ -32,7 +32,7 @@ qrrvglm.control(Rank = 1,
                 noRRR = ~ 1, Norrr = NA,
                 optim.maxit = 20,
                 Parscale = if(ITolerances) 0.001 else 1.0,
-                SD.Cinit = 0.02,
+                sd.Cinit = 0.02,
                 SmallNo = 5.0e-13, 
                 trace = TRUE,
                 Use.Init.Poisson.QO = TRUE, 
@@ -144,7 +144,7 @@ qrrvglm.control(Rank = 1,
 
 %  Used only if \code{FastAlgorithm} is \code{TRUE}.
   } 
-  \item{isdlv}{
+  \item{isd.latvar}{
    Initial standard deviations for the latent variables (site scores).
    Numeric, positive and of length \eqn{R} (recycled if necessary).
    This argument is used only if \code{ITolerances = TRUE}. Used by
@@ -179,7 +179,7 @@ qrrvglm.control(Rank = 1,
    and scale all numerical variables in the \eqn{x_2} vector}.
    See \bold{Details} for more details.
    The success of \code{ITolerances = TRUE} often
-   depends on suitable values for \code{isdlv} and/or
+   depends on suitable values for \code{isd.latvar} and/or
    \code{MUXfactor}.
 
  }
@@ -210,7 +210,7 @@ qrrvglm.control(Rank = 1,
    values. If the latent variable values are too large then this will
    result in numerical problems. By too large, it is meant that the
   standard deviations of the latent variable values are greater than
-   \code{MUXfactor[r] * isdlv[r]} for \code{r=1:Rank} (this is why
+   \code{MUXfactor[r] * isd.latvar[r]} for \code{r=1:Rank} (this is why
    centering and scaling all the numerical predictor variables in
    \eqn{x_2} is recommended). A value about 3 or 4 is recommended.
    If failure to converge occurs, try a slightly lower value.
@@ -252,7 +252,7 @@ qrrvglm.control(Rank = 1,
    See \bold{Details} for more information.
    It's probably best to leave this argument alone.
   } 
-  \item{SD.Cinit}{ 
+  \item{sd.Cinit}{ 
       Standard deviation of the initial values for the elements
       of \eqn{C}.
       These are normally distributed with mean zero.
@@ -330,12 +330,12 @@ qrrvglm.control(Rank = 1,
    errors and it appears there are more local solutions. To help avoid
    the overflow errors, scaling \eqn{C} by the factor \code{Parscale}
    can help enormously. Even better, scaling \eqn{C} by specifying
-   \code{isdlv} is more understandable to humans. If failure to
+   \code{isd.latvar} is more understandable to humans. If failure to
    converge occurs, try adjusting \code{Parscale}, or better, setting
    \code{EqualTolerances = TRUE} (and hope that the estimated tolerance
    matrix is positive-definite). To fit an equal-tolerances model, it
    is firstly best to try setting \code{ITolerances = TRUE} and varying
-   \code{isdlv} and/or \code{MUXfactor} if it fails to converge.
+   \code{isd.latvar} and/or \code{MUXfactor} if it fails to converge.
    If it still fails to converge after many attempts, try setting
    \code{EqualTolerances = TRUE}, however this will usually be a lot slower
    because it requires a lot more memory.
@@ -351,7 +351,7 @@ qrrvglm.control(Rank = 1,
    effectively the same as the \code{ITolerances = TRUE} model (the two are
    transformations of each other). In general, \code{ITolerances = TRUE}
    is numerically more unstable and presents a more difficult problem
-   to optimize; the arguments \code{isdlv} and/or \code{MUXfactor} often
+   to optimize; the arguments \code{isd.latvar} and/or \code{MUXfactor} often
    must be assigned some good value(s) (possibly found by trial and error)
    in order for convergence to occur. Setting \code{ITolerances = TRUE}
    \emph{forces} a bell-shaped curve or surface onto all the species data,
@@ -429,7 +429,7 @@ Constrained additive ordination.
   hunting spiders data. Because \code{ITolerances = TRUE}, it is a good idea
   to center all the \eqn{x_2} variables first. Upon fitting the model,
   the actual standard deviations of the site scores are computed. Ideally,
-  the \code{isdlv} argument should have had this value for the best
+  the \code{isd.latvar} argument should have had this value for the best
   chances of getting good initial values.  For comparison, the model is
   refitted with that value and it should run faster and more reliably.
 }
@@ -469,24 +469,24 @@ Constrained additive ordination.
 
 \examples{
 \dontrun{ # Poisson CQO with equal tolerances
-set.seed(111) # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+set.seed(111)  # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6])  # Good idea when ITolerances = TRUE
 p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           quasipoissonff, data = hspider, EqualTolerances = TRUE)
-sort(p1 at misc$deviance.Bestof) # A history of all the iterations
+sort(p1@misc$deviance.Bestof)  # A history of all the iterations
 
-(isdlv <- apply(lv(p1), 2, sd)) # Should be approx isdlv
+(isd.latvar <- apply(latvar(p1), 2, sd))  # Should be approx isd.latvar
  
 # Refit the model with better initial values
 set.seed(111)  # This leads to the global solution
 p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, 
                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-          ITolerances = TRUE, isdlv = isdlv, # Note the use of isdlv here
-          quasipoissonff, data = hspider)
-sort(p1 at misc$deviance.Bestof) # A history of all the iterations
+          ITolerances = TRUE, quasipoissonff, data = hspider,
+          isd.latvar = isd.latvar)  # Note the use of isd.latvar here
+sort(p1 at misc$deviance.Bestof)  # A history of all the iterations
 }
 }
 \keyword{models}
@@ -497,13 +497,13 @@ sort(p1 at misc$deviance.Bestof) # A history of all the iterations
 %\dontrun{
 %# 20120221; withdrawn for a while coz it creates a lot of error messages.
 %# Negative binomial CQO; smallest deviance is about 275.389
-%set.seed(1234) # This leads to a reasonable (but not the global) solution?
+%set.seed(1234)  # This leads to a reasonable (but not the global) solution?
 %nb1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, 
 %                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
 %          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-%          ITol = FALSE, EqualTol = TRUE, # A good idea for negbinomial
+%          ITol = FALSE, EqualTol = TRUE,  # A good idea for negbinomial
 %          fam = negbinomial, data = hspider)
-%sort(nb1 at misc$deviance.Bestof) # A history of all the iterations
+%sort(nb1 at misc$deviance.Bestof)  # A history of all the iterations
 %summary(nb1)
 %}
 %\dontrun{ lvplot(nb1, lcol = 1:12, y = TRUE, pcol = 1:12) }
diff --git a/man/qtplot.gumbel.Rd b/man/qtplot.gumbel.Rd
index 70566ba..870bc3f 100644
--- a/man/qtplot.gumbel.Rd
+++ b/man/qtplot.gumbel.Rd
@@ -7,7 +7,7 @@
   Plots quantiles associated with a Gumbel model.
 }
 \usage{
-qtplot.gumbel(object, plot.it = TRUE,
+qtplot.gumbel(object, show.plot = TRUE,
     y.arg = TRUE, spline.fit = FALSE, label = TRUE,
     R = object at misc$R, percentiles = object at misc$percentiles,
     add.arg = FALSE, mpv = object at misc$mpv,
@@ -25,7 +25,7 @@ qtplot.gumbel(object, plot.it = TRUE,
   \code{"gumbel"} or \code{"egumbel"}.
 
   }
-  \item{plot.it}{
+  \item{show.plot}{
   Logical. Plot it? If \code{FALSE} no plot will be done.
 
 
@@ -86,7 +86,7 @@ qtplot.gumbel(object, plot.it = TRUE,
 \value{
   The object with a list called \code{qtplot} in the \code{post}
   slot of \code{object}.
-  (If \code{plot.it = FALSE} then just the list is returned.)
+  (If \code{show.plot = FALSE} then just the list is returned.)
   The list contains components
   \item{fitted.values}{
   The percentiles of the response,
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
index 0009d78..814f242 100644
--- a/man/qtplot.lmscreg.Rd
+++ b/man/qtplot.lmscreg.Rd
@@ -8,7 +8,7 @@
 \usage{
 qtplot.lmscreg(object, newdata = NULL, 
                percentiles = object at misc$percentiles, 
-               plot.it = TRUE, ...)
+               show.plot = TRUE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -24,7 +24,7 @@ qtplot.lmscreg(object, newdata = NULL,
   that specify the percentiles (quantiles). 
   The default are the percentiles used when the model was fitted. 
   }
-  \item{plot.it}{ Logical. Plot it? If \code{FALSE} no plot will
+  \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will
   be done. }
   \item{\dots}{ Graphical parameter that are passed into
   \code{\link{plotqtplot.lmscreg}}.
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
index 26a0453..9228513 100644
--- a/man/quasibinomialff.Rd
+++ b/man/quasibinomialff.Rd
@@ -152,7 +152,7 @@ quasibinomialff()
 quasibinomialff(link = "probit")
 
 # Nonparametric logistic regression
-hunua <- transform(hunua, a.5 = sqrt(altitude)) # Transformation of altitude
+hunua <- transform(hunua, a.5 = sqrt(altitude))  # Transformation of altitude
 fit1 <- vglm(agaaus ~ poly(a.5, 2), quasibinomialff, hunua)
 fit2 <- vgam(agaaus ~ s(a.5, df = 2), quasibinomialff, hunua)
 \dontrun{
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index a6b5cb6..6ebd635 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -121,12 +121,12 @@ quasipoissonff(link = "loge", onedpar = FALSE,
 quasipoissonff()
 
 \dontrun{n <- 200; p <- 5; S <- 5
-mydata <- rcqo(n, p, S, fam = "poisson", EqualTol = FALSE)
+mydata <- rcqo(n, p, S, fam = "poisson", eq.tol = FALSE)
 myform <- attr(mydata, "formula")
 p1 <- cqo(myform, fam = quasipoissonff, EqualTol = FALSE, data = mydata)
-sort(p1 at misc$deviance.Bestof) # A history of all the iterations
+sort(p1 at misc$deviance.Bestof)  # A history of all the iterations
 lvplot(p1, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S)
-summary(p1) # The dispersion parameters are estimated
+summary(p1)  # The dispersion parameters are estimated
 }}
 \keyword{models}
 \keyword{regression}
diff --git a/man/qvar.Rd b/man/qvar.Rd
new file mode 100644
index 0000000..8269f74
--- /dev/null
+++ b/man/qvar.Rd
@@ -0,0 +1,114 @@
+\name{qvar}
+\alias{qvar}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+Quasi-variances Extraction Function
+
+%%  ~~function to do ... ~~
+}
+\description{
+  Takes a \code{\link{rcim}} fit of the appropriate format and
+  returns either the quasi-variances or quasi-standard errors.
+
+
+%%  ~~ A concise (1-5 lines) description of what the function does. ~~
+}
+\usage{
+qvar(object, se = FALSE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{
+  A \code{\link{rcim}} object that has family function
+  \code{\link{uninormal}} with the
+  \code{\link{explink}} link.
+  See below for an example.
+
+
+}
+\item{se}{
+  Logical. If \code{TRUE} then the quasi-standard errors (the square
+  roots of the quasi-variances) are returned, else the quasi-variances.
+
+
+}
+
+\item{\ldots}{
+  Currently unused.
+
+
+}
+}
+\details{
+
+  This function is ad hoc and is simply equivalent to
+  computing the quasi-variances
+  by \code{diag(predict(fit1)[, c(TRUE, FALSE)]) / 2}.
+  This function is for convenience only.
+  Serious users of quasi-variances ought to understand why and how this
+  function works.
+
+
+}
+\value{
+  A vector of quasi-variances  or quasi-standard errors.
+
+  
+}
+%\references{
+%
+%}
+
+\author{
+
+  T. W. Yee.
+
+
+}
+
+%\note{
+%  This is an adaptation of \code{qvcalc()} in \pkg{qvcalc}.
+%
+%
+%}
+
+%\section{Warning }{
+%  N
+%
+%
+%}
+
+
+\seealso{
+  \code{\link{rcim}},
+  \code{\link{uninormal}},
+  \code{\link{explink}},
+  \code{\link{Qvar}},
+  \code{\link[MASS]{ships}}.
+
+
+%% ~~objects to See Also as \code{\link{help}}, ~~~
+}
+\examples{
+data("ships", package = "MASS")
+Shipmodel <- vglm(incidents ~ type + year + period,
+                  quasipoissonff, offset = log(service),
+                  data = ships, subset = (service > 0))
+
+# Easiest form of input
+fit1 <- rcim(Qvar(Shipmodel, "type"), uninormal("explink"), maxit = 99)
+qvar(fit1)              # Quasi-variances
+qvar(fit1, se = TRUE)   # Quasi-standard errors
+
+# Manually compute them:
+(quasiVar <- exp(diag(fitted(fit1))) / 2)                 # Version 1
+(quasiVar <- diag(predict(fit1)[, c(TRUE, FALSE)]) / 2)   # Version 2
+(quasiSE  <- sqrt(quasiVar))
+
+\dontrun{ plotqvar(fit1, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{models}
+\keyword{regression}
+% \code{\link[qvcalc:qvcalc]{qvcalc}} in \pkg{qvcalc}
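A minimal R sketch of how the quasi-standard errors above are typically used, assuming
the \code{fit1} object from the example and the standard quasi-variance property that
the standard error of a pairwise contrast is approximately the square root of the sum
of the two quasi-variances (the level indices 2 and 3 are illustrative only):

  qv <- qvar(fit1)              # Vector of quasi-variances, one per level of 'type'
  sqrt(qv[2] + qv[3])           # Approximate SE of the contrast between levels 2 and 3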
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index 80954e3..8675854 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -20,6 +20,7 @@ cenrayleigh(lscale = "loge", oim = TRUE)
   See \code{\link{Links}} for more choices.
   A log link is the default because \eqn{b} is positive.
 
+
   }
   \item{nrfs}{
   Numeric, of length one, with value in \eqn{[0,1]}.
@@ -28,6 +29,7 @@ cenrayleigh(lscale = "loge", oim = TRUE)
   The default value uses a mixture of the two algorithms, while retaining
   positive-definite working weights.
 
+
   }
   \item{oim.mean}{
   Logical, used only for intercept-only models.
@@ -36,6 +38,7 @@ cenrayleigh(lscale = "loge", oim = TRUE)
   out the working weights.
   \code{FALSE} means use another algorithm.
 
+
   }
   \item{oim}{
   Logical.
@@ -43,10 +46,12 @@ cenrayleigh(lscale = "loge", oim = TRUE)
   \code{TRUE} means the Newton-Raphson algorithm, and 
   \code{FALSE} means Fisher scoring.
 
+
   }
   \item{zero}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -70,7 +75,7 @@ cenrayleigh(lscale = "loge", oim = TRUE)
   in the \code{extra} slot.
 
 
-  Th \pkg{VGAM} family function \code{rayleigh} handles multiple responses.
+  The \pkg{VGAM} family function \code{rayleigh} handles multiple responses.
 
 
 }
@@ -88,22 +93,31 @@ cenrayleigh(lscale = "loge", oim = TRUE)
 
 }
 \references{
-Evans, M., Hastings, N. and Peacock, B. (2000)
+
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
 \author{ T. W. Yee }
 \note{
-A related distribution is the Maxwell distribution.
+  The  \code{\link{poisson.points}} family function is
+  more general so that if \code{ostatistic = 1} and \code{dimension = 2}
+  then it coincides with \code{\link{rayleigh}}.
+  Another related distribution is the Maxwell distribution.
+
+
+
 
 }
 \seealso{
-    \code{\link{Rayleigh}},
-    \code{\link{genrayleigh}},
-    \code{\link{riceff}},
-    \code{\link{maxwell}}.
+  \code{\link{Rayleigh}},
+  \code{\link{genrayleigh}},
+  \code{\link{riceff}},
+  \code{\link{maxwell}},
+  \code{\link{poisson.points}}.
+
 
 }
 \examples{
diff --git a/man/rayleighUC.Rd b/man/rayleighUC.Rd
index 312169a..2ab4882 100644
--- a/man/rayleighUC.Rd
+++ b/man/rayleighUC.Rd
@@ -10,6 +10,7 @@
   generation for the Rayleigh distribution with parameter
   \code{a}.
 
+
 }
 \usage{
 drayleigh(x, scale = 1, log = FALSE)
@@ -41,9 +42,9 @@ rrayleigh(n, scale = 1)
 }
 \references{
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index 5b58355..09ec3b2 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -10,13 +10,15 @@ rcqo(n, p, S, Rank = 1,
      family = c("poisson", "negbinomial", "binomial-poisson",
                 "Binomial-negbinomial", "ordinal-poisson",
                 "Ordinal-negbinomial", "gamma2"),
-     EqualMaxima = FALSE, EqualTolerances = TRUE, ESOptima = FALSE,
-     loabundance = if (EqualMaxima) hiabundance else 10,
-     hiabundance = 100, sdlv = head(1.5/2^(0:3), Rank),
-     sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * ifelse(scalelv, sdlv, 1),
-     sdTolerances = 0.25, Kvector = 1, Shape = 1,
-     sqrt = FALSE, Log = FALSE, rhox = 0.5, breaks = 4,
-     seed = NULL, Crow1positive = TRUE, xmat = NULL, scalelv = TRUE)
+     eq.maxima = FALSE, eq.tolerances = TRUE, es.optima = FALSE,
+     lo.abundance = if (eq.maxima) hi.abundance else 10,
+     hi.abundance = 100, sd.latvar = head(1.5/2^(0:3), Rank),
+     sd.optima = ifelse(es.optima, 1.5/Rank, 1) *
+                       ifelse(scale.latvar, sd.latvar, 1),
+     sd.tolerances = 0.25, Kvector = 1, Shape = 1,
+     sqrt.arg = FALSE, Log = FALSE, rhox = 0.5, breaks = 4,
+     seed = NULL, optima1.arg = NULL, Crow1positive = TRUE,
+     xmat = NULL, scale.latvar = TRUE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -60,19 +62,21 @@ rcqo(n, p, S, Rank = 1,
 %    else zz.
     
   }
-  \item{EqualMaxima}{
+  \item{eq.maxima}{
     Logical. Does each species have the same maxima?
-    See arguments \code{loabundance} and \code{hiabundance}.
-    
+    See arguments \code{lo.abundance} and \code{hi.abundance}.
+
+
   }
-  \item{EqualTolerances}{
+  \item{eq.tolerances}{
     Logical. Does each species have the
     same tolerance? If \code{TRUE} then the common value is 1 along
     every latent variable, i.e., all species' tolerance matrices are the
     order-\eqn{R} identity matrix.
-    
+
+
   }
-  \item{ESOptima}{
+  \item{es.optima}{
     Logical. Do the species have equally spaced optima?
     If \code{TRUE} then the quantity
     \eqn{S^{1/R}}{S^(1/R)} must be an
@@ -81,23 +85,24 @@ rcqo(n, p, S, Rank = 1,
     of optimum values is possible in \eqn{R}-dimensional
     latent variable space
     in order to place the species' optima.
-    Also see the argument \code{sdTolerances}.
+    Also see the argument \code{sd.tolerances}.
+    
     
   }
-  \item{loabundance, hiabundance}{
+  \item{lo.abundance, hi.abundance}{
     Numeric. These are recycled to a vector of length \eqn{S}.
     The species have a maximum
-    between \code{loabundance} and \code{hiabundance}. That is,
+    between \code{lo.abundance} and \code{hi.abundance}. That is,
     at their optimal environment, the mean abundance of each
-    species is between the two componentwise values. If \code{EqualMaxima}
-    is \code{TRUE} then \code{loabundance} and \code{hiabundance}
+    species is between the two componentwise values. If \code{eq.maxima}
+    is \code{TRUE} then \code{lo.abundance} and \code{hi.abundance}
     must have the same values.
-    If \code{EqualMaxima} is \code{FALSE} then the
+    If \code{eq.maxima} is \code{FALSE} then the
     logarithm of the maxima are uniformly distributed between
-    \code{log(loabundance)} and \code{log(hiabundance)}.
+    \code{log(lo.abundance)} and \code{log(hi.abundance)}.
     
   }
-  \item{sdlv}{
+  \item{sd.latvar}{
     Numeric, of length \eqn{R}
     (recycled if necessary). Site scores along
     each latent variable have these standard deviation values.
@@ -107,29 +112,29 @@ rcqo(n, p, S, Rank = 1,
     axis, etc.
         
   }
-  \item{sdOptima}{
+  \item{sd.optima}{
     Numeric, of length \eqn{R} (recycled if necessary).
-    If \code{ESOptima = FALSE} then,
+    If \code{es.optima = FALSE} then,
     for the \eqn{r}th latent variable axis,
     the optima of the species are generated from a
     normal distribution centered about 0.
-    If \code{ESOptima = TRUE} then the \eqn{S} optima
+    If \code{es.optima = TRUE} then the \eqn{S} optima
     are equally spaced about 0 along every latent variable axis.
-    Regardless of the value of \code{ESOptima}, the optima
-    are then scaled to give standard deviation \code{sdOptima[r]}.
+    Regardless of the value of \code{es.optima}, the optima
+    are then scaled to give standard deviation \code{sd.optima[r]}.
     
   }
-  \item{sdTolerances}{
-    Logical. If \code{EqualTolerances = FALSE} then, for the
+  \item{sd.tolerances}{
+    Numeric. If \code{eq.tolerances = FALSE} then, for the
     \eqn{r}th latent variable, the
     species' tolerances are
     chosen from a normal distribution with mean 1 and
     standard deviation
-    \code{sdTolerances[r]}.
+    \code{sd.tolerances[r]}.
     However, the first species \code{y1} has its tolerance matrix
     set equal to the order-\eqn{R} identity matrix.
     All tolerance matrices for all species are diagonal in this function.
-    This argument is ignored if \code{EqualTolerances} is \code{TRUE},
+    This argument is ignored if \code{eq.tolerances} is \code{TRUE},
     otherwise it is recycled to length \eqn{R} if necessary.
 
   }
@@ -151,9 +156,9 @@ rcqo(n, p, S, Rank = 1,
     one, and that \eqn{Var(Y) = \mu^2 / \lambda}{Var(Y) = mu^2 / lambda}.
 
   }
-  \item{sqrt}{
+  \item{sqrt.arg}{
     Logical. Take the square-root of the negative binomial counts?
-    Assigning \code{sqrt = TRUE} when \code{family="negbinomial"} means
+    Assigning \code{sqrt.arg = TRUE} when \code{family="negbinomial"} means
     that the resulting species data can be considered very crudely to be
     approximately Poisson distributed.
     They will not be integers in general but are much easier (less numerical
@@ -177,7 +182,7 @@ rcqo(n, p, S, Rank = 1,
     Note that each environmental variable is normally distributed
     with mean 0. The standard deviation of each environmental variable
     is chosen so that the site scores have the determined standard
-    deviation, as given by argument \code{sdlv}.
+    deviation, as given by argument \code{sd.latvar}.
     
   }
   \item{breaks}{
@@ -195,6 +200,12 @@ rcqo(n, p, S, Rank = 1,
     \code{\link[base:Random]{.Random.seed}} as \code{"seed"} attribute.
 
   }
+  \item{optima1.arg}{
+    If assigned and \code{Rank = 1} then these are the explicit optima.
+    Recycled to length \code{S}.
+
+
+  }
   \item{Crow1positive}{
     See \code{\link{qrrvglm.control}} for details.
     
@@ -205,9 +216,9 @@ rcqo(n, p, S, Rank = 1,
    environmental matrix can be inputted.
     
   }
-  \item{scalelv}{
+  \item{scale.latvar}{
    Logical. If \code{FALSE} the argument
-   \code{sdlv} is ignored and no scaling of the latent variable
+   \code{sd.latvar} is ignored and no scaling of the latent variable
    values is performed. 
     
   }
@@ -220,11 +231,11 @@ rcqo(n, p, S, Rank = 1,
   The species packing model states that species have equal tolerances,
   equal maxima, and optima which are uniformly distributed over
   the latent variable space. This can be achieved by assigning
-  the arguments \code{ESOptima = TRUE}, \code{EqualMaxima = TRUE},
-  \code{EqualTolerances = TRUE}.
+  the arguments \code{es.optima = TRUE}, \code{eq.maxima = TRUE},
+  \code{eq.tolerances = TRUE}.
 
   At present, the Poisson and negative binomial abundances are
-  generated first using \code{loabundance} and \code{hiabundance},
+  generated first using \code{lo.abundance} and \code{hi.abundance},
   and if \code{family} is binomial or ordinal then it is converted into
   these forms.
 
@@ -259,7 +270,7 @@ rcqo(n, p, S, Rank = 1,
     \code{family}.
     
   }
-  \item{"ccoefficients"}{
+  \item{"concoefficients"}{
     The \eqn{p-1} by \eqn{R} matrix of
     constrained coefficients
     (or canonical coefficients).
@@ -275,15 +286,15 @@ rcqo(n, p, S, Rank = 1,
   \item{"logmaxima"}{
     The \eqn{S}-vector of species' maxima, on a log scale.
     These are uniformly distributed between
-    \code{log(loabundance)} and \code{log(hiabundance)}.
+    \code{log(lo.abundance)} and \code{log(hi.abundance)}.
     
 
   }
-  \item{"lv"}{
+  \item{"latvar"}{
     The \eqn{n} by \eqn{R} matrix of site scores.
     Each successive column (latent variable) has
     sample standard deviation
-    equal to successive values of \code{sdlv}.
+    equal to successive values of \code{sd.latvar}.
     
   }
   \item{"eta"}{
@@ -304,8 +315,8 @@ rcqo(n, p, S, Rank = 1,
   }
   Other attributes are \code{"break"},
   \code{"family"}, \code{"Rank"},
-  \code{"loabundance"}, \code{"hiabundance"},
-  \code{"EqualTolerances"}, \code{"EqualMaxima"},
+  \code{"lo.abundance"}, \code{"hi.abundance"},
+  \code{"eq.tolerances"}, \code{"eq.maxima"},
   \code{"seed"} as used.
 
 }
@@ -357,41 +368,45 @@ A theory of gradient analysis.
 \dontrun{
 # Example 1: Species packing model:
 n <- 100; p <- 5; S <- 5
-mydata <- rcqo(n, p, S, ESOpt = TRUE, EqualMax = TRUE)
+mydata <- rcqo(n, p, S, es.opt = TRUE, eq.max = TRUE)
 names(mydata)
 (myform <- attr(mydata, "formula"))
-fit <- cqo(myform, poissonff, mydata, Bestof = 3) # EqualTol = TRUE 
-matplot(attr(mydata, "lv"), mydata[,-(1:(p-1))], col = 1:S)
+fit <- cqo(myform, poissonff, mydata, Bestof = 3)  # eq.tol = TRUE 
+matplot(attr(mydata, "latvar"), mydata[,-(1:(p-1))], col = 1:S)
 persp(fit, col = 1:S, add = TRUE)
-lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) # The same plot as above
+lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S)  # The same plot as above
 
 # Compare the fitted model with the 'truth'
-ccoef(fit) # The fitted model
-attr(mydata, "ccoefficients") # The 'truth'
+concoef(fit)  # The fitted model
+attr(mydata, "concoefficients")  # The 'truth'
 
-c(apply(attr(mydata, "lv"), 2, sd), apply(lv(fit), 2, sd)) # Both values should be approx equal
+c(apply(attr(mydata, "latvar"), 2, sd),
+  apply(latvar(fit), 2, sd))  # Both values should be approx equal
 
 
 # Example 2: negative binomial data fitted using a Poisson model:
 n <- 200; p <- 5; S <- 5
 mydata <- rcqo(n, p, S, fam = "negbin", sqrt = TRUE)
 myform <- attr(mydata, "formula")
-fit <- cqo(myform, fam = poissonff, dat = mydata) # ITol = TRUE,
+fit <- cqo(myform, fam = poissonff, dat = mydata)  # ITol = TRUE,
 lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S)
 # Compare the fitted model with the 'truth'
-ccoef(fit) # The fitted model
-attr(mydata, "ccoefficients") # The 'truth'
+concoef(fit)  # The fitted model
+attr(mydata, "concoefficients")  # The 'truth'
 
 
 # Example 3: gamma2 data fitted using a Gaussian model:
 n <- 200; p <- 5; S <- 3
 mydata <- rcqo(n, p, S, fam = "gamma2", Log = TRUE)
-fit <- cqo(attr(mydata, "formula"), fam = gaussianff, dat = mydata) # ITol = TRUE,
-matplot(attr(mydata, "lv"), exp(mydata[,-(1:(p-1))]), col = 1:S) # 'raw' data
-lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) # Fitted model to transformed data
+fit <- cqo(attr(mydata, "formula"),
+           fam = gaussianff, data = mydata)  # ITol = TRUE,
+matplot(attr(mydata, "latvar"),
+        exp(mydata[, -(1:(p-1))]), col = 1:S)  # 'raw' data
+# Fitted model to transformed data:
+lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S)
 # Compare the fitted model with the 'truth'
-ccoef(fit) # The fitted model
-attr(mydata, "ccoefficients") # The 'truth'
+concoef(fit)  # The fitted model
+attr(mydata, "concoefficients")  # The 'truth'
 }
 }
 \keyword{distribution}
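A brief R sketch using the renamed arguments shown in the usage above; the particular
values (seed, sizes, and \code{sd.latvar = 0.75}) are illustrative assumptions. It
simulates species-packing data and checks that the site scores have roughly the
requested standard deviation:

  library(VGAM)
  set.seed(123)
  mydata <- rcqo(n = 100, p = 5, S = 5, Rank = 1, es.optima = TRUE,
                 eq.maxima = TRUE, eq.tolerances = TRUE, sd.latvar = 0.75)
  apply(attr(mydata, "latvar"), 2, sd)  # Should be close to 0.75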
diff --git a/man/recexp1.Rd b/man/recexp1.Rd
index c2bc948..fdacdf3 100644
--- a/man/recexp1.Rd
+++ b/man/recexp1.Rd
@@ -59,7 +59,7 @@ recexp1(lrate = "loge", irate = NULL, imethod = 1)
 }
 \examples{
 rawy <- rexp(n <- 10000, rate = exp(1))
-y <- unique(cummax(rawy)) # Keep only the records
+y <- unique(cummax(rawy))  # Keep only the records
 
 length(y) / y[length(y)]   # MLE of rate
 
@@ -71,8 +71,8 @@ Coef(fit)
 \keyword{regression}
 
 %# Keep only the records
-%delete = c(FALSE, rep(TRUE, len=n-1))
-%for(i in 2:length(rawy))
+%delete = c(FALSE, rep(TRUE, len = n-1))
+%for (i in 2:length(rawy))
 %    if (rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
 %(y = rawy[!delete])
 
diff --git a/man/reciprocal.Rd b/man/reciprocal.Rd
index 9f925c7..060e377 100644
--- a/man/reciprocal.Rd
+++ b/man/reciprocal.Rd
@@ -1,18 +1,19 @@
 \name{reciprocal}
 \alias{reciprocal}
-\alias{nreciprocal}
+\alias{negreciprocal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Reciprocal link function }
 \description{
   Computes the reciprocal transformation, including its inverse and the
   first two derivatives.
 
+
 }
 \usage{
- reciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
-            short = TRUE, tag = FALSE)
-nreciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
-            short = TRUE, tag = FALSE)
+   reciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
+              short = TRUE, tag = FALSE)
+negreciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
+              short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -40,7 +41,7 @@ nreciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
   \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
 
 
-  The \code{nreciprocal} link function computes the negative reciprocal,
+  The \code{negreciprocal} link function computes the negative reciprocal,
   i.e., \eqn{-1/ \theta}{-1/theta}.
 
 
@@ -77,19 +78,19 @@ close to 0.
 
 \seealso{ 
     \code{\link{identity}},
-    \code{\link{powl}}.
+    \code{\link{powerlink}}.
 
 
 }
 \examples{
- reciprocal(1:5)
- reciprocal(1:5, inverse = TRUE, deriv = 2)
-nreciprocal(1:5)
-nreciprocal(1:5, inverse = TRUE, deriv = 2)
+   reciprocal(1:5)
+   reciprocal(1:5, inverse = TRUE, deriv = 2)
+negreciprocal(1:5)
+negreciprocal(1:5, inverse = TRUE, deriv = 2)
 
 x <- (-3):3
-reciprocal(x) # Has Inf
-reciprocal(x, bvalue = .Machine$double.eps) # Has no Inf
+reciprocal(x)  # Has Inf
+reciprocal(x, bvalue = .Machine$double.eps)  # Has no Inf
 }
 \keyword{math}
 \keyword{models}
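A small round-trip check, assuming the usual link-function semantics of the usage
above: the reciprocal link is its own inverse, so applying it twice (or requesting
\code{inverse = TRUE}) recovers the original values.

  theta <- c(0.5, 1, 2, 4)
  all.equal(reciprocal(reciprocal(theta)), theta)                  # TRUE
  all.equal(reciprocal(theta, inverse = TRUE), reciprocal(theta))  # TRUE; self-inverse
  all.equal(negreciprocal(negreciprocal(theta)), theta)            # TRUE as well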
diff --git a/man/recnormal1.Rd b/man/recnormal.Rd
similarity index 88%
rename from man/recnormal1.Rd
rename to man/recnormal.Rd
index b38d578..82c02dd 100644
--- a/man/recnormal1.Rd
+++ b/man/recnormal.Rd
@@ -1,5 +1,5 @@
-\name{recnormal1}
-\alias{recnormal1}
+\name{recnormal}
+\alias{recnormal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Upper Record Values from a Univariate Normal Distribution }
 \description{
@@ -9,8 +9,8 @@
 
 }
 \usage{
-recnormal1(lmean = "identity", lsd = "loge",
-           imean = NULL, isd = NULL, imethod = 1, zero = NULL)
+recnormal(lmean = "identity", lsd = "loge",
+          imean = NULL, isd = NULL, imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -79,8 +79,8 @@ recnormal1(lmean = "identity", lsd = "loge",
 }
 
 \seealso{
-    \code{\link{normal1}},
-    \code{\link{dcennormal1}}.
+    \code{\link{uninormal}},
+    \code{\link{double.cennormal}}.
 
 
 }
@@ -91,7 +91,7 @@ Rdata <- data.frame(rawy = c(mymean, rnorm(nn, me = mymean, sd = exp(3))))
 # Keep only observations that are records:
 rdata <- data.frame(y = unique(cummax(with(Rdata, rawy))))
 
-fit <- vglm(y ~ 1, recnormal1, rdata, trace = TRUE, maxit = 200)
+fit <- vglm(y ~ 1, recnormal, rdata, trace = TRUE, maxit = 200)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
@@ -101,7 +101,7 @@ summary(fit)
 
 %# Keep only observations that are records
 %delete = c(FALSE, rep(TRUE, len = n))
-%for(i in 2:length(rawy))
-%    if (rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
+%for (i in 2:length(rawy))
+%  if (rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
 %(y = rawy[!delete])
 
diff --git a/man/rhobit.Rd b/man/rhobit.Rd
index ea68df7..6ad52e1 100644
--- a/man/rhobit.Rd
+++ b/man/rhobit.Rd
@@ -102,9 +102,9 @@ abline(v = 0, h = 0, lty = 2)
 }
 
 x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01))
-rhobit(x) # Has NAs
+rhobit(x)  # Has NAs
 rhobit(x, bminvalue = -1 + .Machine$double.eps,
-          bmaxvalue =  1 - .Machine$double.eps) # Has no NAs
+          bmaxvalue =  1 - .Machine$double.eps)  # Has no NAs
 }
 \keyword{math}
 \keyword{models}
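A quick round-trip check, assuming the usual link-function semantics: applying
\code{rhobit} and then its inverse should recover the original correlation values.

  rho <- seq(-0.9, 0.9, by = 0.3)
  all.equal(rhobit(rhobit(rho), inverse = TRUE), rho)  # Should be TRUE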
diff --git a/man/riceUC.Rd b/man/riceUC.Rd
index 0af13b6..a38d6eb 100644
--- a/man/riceUC.Rd
+++ b/man/riceUC.Rd
@@ -12,12 +12,15 @@
   Rician distribution.
 
 }
+
 \usage{
 drice(x, vee, sigma, log = FALSE)
-%price(q, vee, sigma)
-%qrice(p, vee, sigma)
 rrice(n, vee, sigma)
 }
+%price(q, vee, sigma)
+%qrice(p, vee, sigma)
+
+
 \arguments{
   \item{x}{vector of quantiles.}
 % \item{p}{vector of probabilities.}
@@ -38,6 +41,7 @@ rrice(n, vee, sigma)
 % \code{qrice} gives the quantile function, and
   \code{rrice} generates random deviates.
 
+
 }
 \author{ T. W. Yee }
 \details{
@@ -45,6 +49,7 @@ rrice(n, vee, sigma)
   for estimating the two parameters,
   for the formula of the probability density function and other details.
 
+
 }
 %\section{Warning }{
 %
@@ -52,14 +57,15 @@ rrice(n, vee, sigma)
 \seealso{
   \code{\link{riceff}}.
 
+
 }
 \examples{
-\dontrun{
-x <- seq(0.01, 7, len = 201)
+\dontrun{ x <- seq(0.01, 7, len = 201)
 plot(x, drice(x, vee = 0, sigma = 1), type = "n", las = 1, ylab = "",
      main = "Density of Rice distribution for various values of v")
-sigma <- 1; vee <- c(0,0.5,1,2,4)
-for(ii in 1:length(vee)) lines(x, drice(x, vee[ii], sigma), col = ii)
+sigma <- 1; vee <- c(0, 0.5, 1, 2, 4)
+for (ii in 1:length(vee))
+  lines(x, drice(x, vee[ii], sigma), col = ii)
 legend(x = 5, y = 0.6, legend = as.character(vee),
        col = 1:length(vee), lty = 1)
 }
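A quick sanity check, assuming the \code{drice()} arguments shown in the usage above
(the parameter values are arbitrary): the Rice density should integrate to one over
the positive half-line.

  integrate(drice, lower = 0, upper = Inf, vee = 1, sigma = 1)$value  # Approximately 1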
diff --git a/man/riceff.Rd b/man/riceff.Rd
index 822bb9d..59df95b 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -6,6 +6,7 @@
   Estimates the two parameters of a Rice distribution
   by maximum likelihood estimation.
 
+
 }
 \usage{
 riceff(lvee = "loge", lsigma = "loge",
@@ -17,6 +18,7 @@ riceff(lvee = "loge", lsigma = "loge",
   Link functions for the \eqn{v} and \eqn{\sigma}{sigma} parameters.
   See \code{\link{Links}} for more choices and for general information.
 
+
   }
   \item{ivee, isigma}{
   Optional initial values for the parameters.
@@ -24,10 +26,12 @@ riceff(lvee = "loge", lsigma = "loge",
   If convergence failure occurs (this \pkg{VGAM} family function seems
   to require good initial values) try using these arguments.
 
+
   }
   \item{nsimEIM, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
@@ -50,6 +54,7 @@ riceff(lvee = "loge", lsigma = "loge",
   \eqn{z=-v^2/(2 \sigma^2)}{z=-v^2/(2*sigma^2)}.
   Simulated Fisher scoring is implemented.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -65,6 +70,7 @@ Mathematical Analysis of Random Noise.
 \emph{Bell System Technical Journal},
 \bold{24}, 46--156.
 
+
 }
 \author{ T. W. Yee }
 \note{
@@ -86,10 +92,10 @@ Mathematical Analysis of Random Noise.
 
 }
 \examples{
-\dontrun{ vee <- exp(2); sigma <- exp(1);
-y <- rrice(n <- 1000, vee, sigma)
-fit <- vglm(y ~ 1, riceff, trace = TRUE, crit = "c")
-c(mean(y), fitted(fit)[1])
+\dontrun{ vee <- exp(2); sigma <- exp(1)
+rdata <- data.frame(y = rrice(n <- 1000, vee, sigma))
+fit <- vglm(y ~ 1, riceff, data = rdata, trace = TRUE, crit = "coef")
+c(with(rdata, mean(y)), fitted(fit)[1])
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/rig.Rd b/man/rigff.Rd
similarity index 78%
rename from man/rig.Rd
rename to man/rigff.Rd
index 542b66e..31439b0 100644
--- a/man/rig.Rd
+++ b/man/rigff.Rd
@@ -1,5 +1,5 @@
-\name{rig}
-\alias{rig}
+\name{rigff}
+\alias{rigff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Reciprocal Inverse Gaussian distribution }
 \description{
@@ -8,7 +8,7 @@
 
 }
 \usage{
-rig(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
+rigff(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -16,16 +16,19 @@ rig(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
   Link functions  for \code{mu} and \code{lambda}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imu, ilambda}{
   Initial values for \code{mu} and \code{lambda}.
   A \code{NULL} means a value is computed internally.
 
+
   }
 }
 \details{
   See Jorgensen (1997) for details.
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -35,24 +38,28 @@ rig(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
 
 }
 \references{
+
 Jorgensen, B. (1997)
 \emph{The Theory of Dispersion Models}.
 London: Chapman & Hall
 
+
 }
 \author{ T. W. Yee }
 \note{ 
   This distribution is potentially useful for dispersion modelling.
 
+
 }
 \seealso{
   \code{\link{simplex}}.
 
+
 }
 \examples{
-rdata <- data.frame(y = rchisq(n = 100, df = 14)) # Not 'proper' data!!
-fit <- vglm(y ~ 1, rig, rdata, trace = TRUE)
-fit <- vglm(y ~ 1, rig, rdata, trace = TRUE, eps = 1e-9, crit = "coef")
+rdata <- data.frame(y = rchisq(n = 100, df = 14))  # Not 'proper' data!!
+fit <- vglm(y ~ 1, rigff, rdata, trace = TRUE)
+fit <- vglm(y ~ 1, rigff, rdata, trace = TRUE, eps = 1e-9, crit = "coef")
 summary(fit)
 }
 \keyword{models}
diff --git a/man/rlplot.egev.Rd b/man/rlplot.egev.Rd
index 6bc976b..e73640a 100644
--- a/man/rlplot.egev.Rd
+++ b/man/rlplot.egev.Rd
@@ -8,7 +8,7 @@
 
 }
 \usage{
-rlplot.egev(object, plot.it = TRUE,
+rlplot.egev(object, show.plot = TRUE,
     probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
     add.arg = FALSE, xlab = "Return Period", ylab = "Return Level",
     main = "Return Level Plot",
@@ -26,7 +26,7 @@ rlplot.egev(object, plot.it = TRUE,
     \code{"gev"} or \code{"egev"}.
 
   }
-  \item{plot.it}{
+  \item{show.plot}{
   Logical. Plot it? If \code{FALSE} no plot will be done.
 
   }
@@ -147,8 +147,8 @@ par(mfrow = c(1, 2))
 rlplot(fit) -> i1
 rlplot(fit2, pcol = "darkorange", lcol = "blue", log = FALSE,
        scol = "darkgreen", slty = "dashed", las = 1) -> i2
-range(i2 at post$rlplot$upper - i1 at post$rlplot$upper) # Should be near 0
-range(i2 at post$rlplot$lower - i1 at post$rlplot$lower) # Should be near 0
+range(i2 at post$rlplot$upper - i1 at post$rlplot$upper)  # Should be near 0
+range(i2 at post$rlplot$lower - i1 at post$rlplot$lower)  # Should be near 0
 }
 }
 \keyword{graphs}
diff --git a/man/rrar.Rd b/man/rrar.Rd
index ff65b84..83ae723 100644
--- a/man/rrar.Rd
+++ b/man/rrar.Rd
@@ -90,24 +90,24 @@ time series.
 \dontrun{
 year <- seq(1961 + 1/12, 1972 + 10/12, by = 1/12)
 par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
-for(ii in 1:4) {
+for (ii in 1:4) {
   plot(year, grain.us[, ii], main = names(grain.us)[ii], las = 1,
        type = "l", xlab = "", ylab = "", col = "blue")
   points(year, grain.us[, ii], pch = "*", col = "blue")
 }
-apply(grain.us, 2, mean) # mu vector
-cgrain <- scale(grain.us, scale = FALSE) # Center the time series only
+apply(grain.us, 2, mean)  # mu vector
+cgrain <- scale(grain.us, scale = FALSE)  # Center the time series only
 fit <- vglm(cgrain ~ 1, rrar(Ranks = c(4, 1)), trace = TRUE)
 summary(fit)
 
-print(fit at misc$Ak1, dig = 2)
-print(fit at misc$Cmatrices, dig = 3)
-print(fit at misc$Dmatrices, dig = 3)
-print(fit at misc$omegahat, dig = 3)
-print(fit at misc$Phimatrices, dig = 2)
+print(fit at misc$Ak1, digits = 2)
+print(fit at misc$Cmatrices, digits = 3)
+print(fit at misc$Dmatrices, digits = 3)
+print(fit at misc$omegahat, digits = 3)
+print(fit at misc$Phimatrices, digits = 2)
 
 par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(4, 1))
-for(ii in 1:4) {
+for (ii in 1:4) {
   plot(year, fit at misc$Z[, ii], main = paste("Z", ii, sep = ""),
        type = "l", xlab = "", ylab = "", las = 1, col = "blue")
   points(year, fit at misc$Z[, ii], pch = "*", col = "blue")
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
index 987d567..64121a8 100644
--- a/man/rrvglm-class.Rd
+++ b/man/rrvglm-class.Rd
@@ -147,7 +147,7 @@ Objects can be created by calls to \code{\link{rrvglm}}.
   from class \code{ "vlm"}.
   The \emph{working} residuals at the final IRLS iteration.
   }
-  \item{\code{rss}:}{
+  \item{\code{res.ss}:}{
   Object of class \code{"numeric"},
   from class \code{ "vlm"}.
   Residual sum of squares at the final IRLS iteration with
@@ -260,7 +260,7 @@ Vector generalized additive models.
 \examples{
 \dontrun{ # Rank-1 stereotype model of Anderson (1984)
 pneumo <- transform(pneumo, let = log(exposure.time),
-                            x3  = runif(nrow(pneumo))) # x3 is unrelated
+                            x3  = runif(nrow(pneumo)))  # x3 is unrelated
 fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
               multinomial, pneumo, Rank = 1)
 Coef(fit)
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 9482aa3..4ee419a 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -145,7 +145,7 @@ Regression and ordered categorical variables.
 \bold{46}, 1--30.
 
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -203,10 +203,8 @@ Regression and ordered categorical variables.
 
 \seealso{
     \code{\link{rrvglm.control}},
-%   \code{\link{qrrvglm.control}},
     \code{\link{lvplot.rrvglm}}
     (same as \code{\link{biplot.rrvglm}}),
-%   \code{\link{vcovqrrvglm}},
     \code{\link{rrvglm-class}},
     \code{\link{grc}},
     \code{\link{cqo}},
@@ -219,7 +217,7 @@ Regression and ordered categorical variables.
     \code{\link{negbinomial}}
     \code{\link{zipoisson}}
     and \code{\link{zinegbinomial}}.
-    (see Yee (2012) and \pkg{COZIGAM}).
+    (see Yee (2014) and \pkg{COZIGAM}).
     Methods functions include
     \code{\link{Coef.rrvglm}},
     \code{summary.rrvglm},
@@ -227,6 +225,13 @@ Regression and ordered categorical variables.
     Data include
     \code{\link{crashi}}.
 
+
+%   \code{\link{qrrvglm.control}},
+%   \code{\link{vcovqrrvglm}},
+
+
+
+
 }
 
 \examples{
@@ -248,21 +253,21 @@ beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "log(mu)"]
 beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "log(size)"]
 (delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat))
 (delta2.hat <- 2 - a21.hat)
-# exp(a21.hat * predict(rrnb2)[1,1] - predict(rrnb2)[1,2]) # delta1.hat
+# exp(a21.hat * predict(rrnb2)[1,1] - predict(rrnb2)[1,2])  # delta1.hat
 summary(rrnb2)
 
 # Obtain a 95 percent confidence interval for delta2:
-se.a21.hat <- sqrt(vcov(rrnb2)["I(lv.mat)", "I(lv.mat)"])
+se.a21.hat <- sqrt(vcov(rrnb2)["I(latvar.mat)", "I(latvar.mat)"])
 ci.a21 <- a21.hat +  c(-1, 1) * 1.96 * se.a21.hat
 (ci.delta2 <- 2 - rev(ci.a21))  # The 95 percent confidence interval
 
 Confint.rrnb(rrnb2)  # Quick way to get it
 
 # Plot the abundances and fitted values against the latent variable
-plot(y2 ~ lv(rrnb2), data = mydata, col = "blue",
+plot(y2 ~ latvar(rrnb2), data = mydata, col = "blue",
      xlab = "Latent variable", las = 1) 
-ooo <- order(lv(rrnb2))
-lines(fitted(rrnb2)[ooo] ~ lv(rrnb2)[ooo], col = "red")
+ooo <- order(latvar(rrnb2))
+lines(fitted(rrnb2)[ooo] ~ latvar(rrnb2)[ooo], col = "red")
 
 # Example 2: stereotype model (reduced-rank multinomial logit model)
 data(car.all)
@@ -283,7 +288,7 @@ fit <- rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
 fit at misc$deviance  # A history of the fits
 Coef(fit)
 biplot(fit, chull = TRUE, scores = TRUE, clty = 2, Ccex = 2,
-       ccol = "blue", scol = "red", Ccol = "darkgreen", Clwd = 2,
+       ccol = "blue", scol = "orange", Ccol = "darkgreen", Clwd = 2,
        main = "1=Germany, 2=Japan, 3=Korea, 4=USA")
 }
 }
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index b60344d..9c3bcc3 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -9,15 +9,18 @@
 }
 \usage{
 rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
-    Corner = TRUE, Uncorrelated.lv = FALSE, Wmat = NULL, Svd.arg = FALSE, 
-    Index.corner = if (length(szero)) 
-    head((1:1000)[-szero], Rank) else 1:Rank,
+    Corner = TRUE, Uncorrelated.latvar = FALSE,
+    Wmat = NULL, Svd.arg = FALSE,
+    Index.corner = if (length(str0)) 
+    head((1:1000)[-str0], Rank) else 1:Rank,
     Ainit = NULL, Alpha = 0.5, Bestof = 1, Cinit = NULL,
     Etamat.colmax = 10,
-    SD.Ainit = 0.02, SD.Cinit = 0.02, szero = NULL,
+    sd.Ainit = 0.02, sd.Cinit = 0.02, str0 = NULL,
     noRRR = ~1, Norrr = NA,
+    noWarning = FALSE,
     trace = FALSE, Use.Init.Poisson.QO = FALSE, 
-    checkwz = TRUE, wzepsilon = .Machine$double.eps^0.75, ...)
+    checkwz = TRUE, Check.rank = TRUE,
+    wzepsilon = .Machine$double.eps^0.75, ...)
 }
 %- maybe also `usage' for other objects documented here.
 \arguments{
@@ -46,7 +49,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
 
 
   }
-\item{Uncorrelated.lv}{
+\item{Uncorrelated.latvar}{
   Logical indicating whether uncorrelated latent variables are to be used.
  This normalization forces the variance-covariance
   matrix of the latent variables to be \code{diag(Rank)}, i.e., unit
@@ -114,17 +117,18 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
 %   RR-VGLM is to be fitted. If \code{TRUE}, an object of class
 %   \code{"qrrvglm"} will be returned, otherwise \code{"rrvglm"}.
 % }
-  \item{szero}{
+  \item{str0}{
   Integer vector specifying which rows
   of the estimated constraint matrices (\bold{A}) are
   to be all zeros.
   These are called \emph{structural zeros}.
   Must not have any common value with \code{Index.corner}, and
   be a subset of the vector \code{1:M}.
+  The default, \code{str0 = NULL}, means no structural zero rows at all.
 
 
   }
-  \item{SD.Ainit, SD.Cinit}{
+  \item{sd.Ainit, sd.Cinit}{
       Standard deviation of the initial values for the elements
       of \bold{A} and \bold{C}.
       These are normally distributed with mean zero.  
@@ -180,6 +184,12 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
     sufficiently positive, i.e., greater than \code{wzepsilon}. If not,
     any values less than \code{wzepsilon} are replaced with this value.
 
+
+  }
+  \item{noWarning, Check.rank}{
+    Same as \code{\link{vglm.control}}.
+
+
   }
   \item{wzepsilon}{
   Small positive number used to test whether the diagonals of the working
@@ -190,6 +200,8 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
     Variables in \dots are passed into
     \code{\link{vglm.control}}. If the derivative algorithm is used, then
     \dots are also passed into \code{\link{rrvglm.optim.control}}.
+
+
   }
   In the above, \eqn{R} is the \code{Rank} and 
   \eqn{M} is the number of linear predictors.
@@ -248,13 +260,14 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
   \code{\link{vglm.control}},
   \code{\link{cqo}}.
 
+
 }
 
 \examples{
 \dontrun{
 set.seed(111)
 pneumo <- transform(pneumo, let = log(exposure.time),
-                            x3 = runif(nrow(pneumo))) # x3 is random noise
+                            x3 = runif(nrow(pneumo)))  # x3 is random noise
 fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
               multinomial, pneumo, Rank = 1, Index.corner = 2)
 constraints(fit)
diff --git a/man/rrvglm.optim.control.Rd b/man/rrvglm.optim.control.Rd
index 0b629d8..e3f5c40 100644
--- a/man/rrvglm.optim.control.Rd
+++ b/man/rrvglm.optim.control.Rd
@@ -24,18 +24,24 @@ rrvglm.optim.control(Fnscale = 1, Maxit = 100,
   \item{Abstol}{ Passed into \code{optim} as \code{abstol}. }
   \item{Reltol}{ Passed into \code{optim} as \code{reltol}. }
   \item{\dots}{ Ignored. }
+
 }
 \details{
 See \code{\link[stats]{optim}} for more details. 
+
+
 }
 \value{
   A list with components equal to the arguments.
+
+
 }
 %\references{ ~put references to the literature/web site here ~ }
 \author{ Thomas W. Yee }
 \note{
 The transition between optimization methods may be unstable, so users
 may have to vary the value of \code{Switch.optimizer}.
+
  
 Practical experience with \code{Switch.optimizer} shows that setting
 it to too large a value may lead to a local solution, whereas setting
@@ -44,12 +50,14 @@ if BFGS kicks in too late when the Nelder-Mead algorithm is starting to
 converge to a local solution, then switching to BFGS will not be sufficient
 to bypass convergence to that local solution.
 
+
 }
 
 \seealso{
   \code{\link{rrvglm.control}},
   \code{\link[stats]{optim}}.
 
+
 }
 %\examples{
 %}
diff --git a/man/seq2binomial.Rd b/man/seq2binomial.Rd
index 99760a4..771f8b3 100644
--- a/man/seq2binomial.Rd
+++ b/man/seq2binomial.Rd
@@ -10,23 +10,27 @@
 \usage{
 seq2binomial(lprob1 = "logit", lprob2 = "logit",
              iprob1 = NULL,    iprob2 = NULL,
-             parallel = FALSE, apply.parint = TRUE, zero = NULL)
+             parallel = FALSE, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
+%  apply.parint = TRUE,
 \arguments{
   \item{lprob1, lprob2}{ 
   Parameter link functions applied to the two probabilities,
   called \eqn{p} and \eqn{q} below.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{iprob1, iprob2}{ 
   Optional initial value for the first and second probabilities respectively.
   A \code{NULL} means a value is obtained in the \code{initialize} slot.
 
+
   }
-  \item{parallel, apply.parint, zero}{
+  \item{parallel, zero}{
   Details at \code{\link{Links}}.
+  If \code{parallel = TRUE} then the constraint also applies to the intercept.
 
 
   }
@@ -101,17 +105,22 @@ sdata <- data.frame(mvector = round(rnorm(nn <- 100, m = 10, sd = 2)),
                     x2 = runif(nn))
 sdata <- transform(sdata, prob1 = logit(+2 - x2, inverse = TRUE),
                           prob2 = logit(-2 + x2, inverse = TRUE))
-sdata <- transform(sdata, successes1 = rbinom(nn, size = mvector, prob = prob1))
+sdata <- transform(sdata, successes1 = rbinom(nn, size = mvector,    prob = prob1))
 sdata <- transform(sdata, successes2 = rbinom(nn, size = successes1, prob = prob2))
 sdata <- transform(sdata, y1 = successes1 / mvector)
 sdata <- transform(sdata, y2 = successes2 / successes1)
-fit <- vglm(cbind(y1, y2) ~ x2, seq2binomial,  weight = mvector,
+fit <- vglm(cbind(y1, y2) ~ x2, seq2binomial, weight = mvector,
             data = sdata, trace = TRUE)
 coef(fit)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
 head(depvar(fit))
 head(weights(fit, type = "prior"))  # Same as with(sdata, mvector)
+# Number of first successes:
+head(depvar(fit)[, 1] * c(weights(fit, type = "prior")))
+# Number of second successes:
+head(depvar(fit)[, 2] * c(weights(fit, type = "prior")) * 
+                          depvar(fit)[, 1])
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/simplex.Rd b/man/simplex.Rd
index 8f9df48..8ff504c 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -20,15 +20,18 @@ simplex(lmu = "logit", lsigma = "loge",
   Link function for \code{mu} and \code{sigma}. 
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imu, isigma}{
   Optional initial values for \code{mu} and \code{sigma}.
   A \code{NULL} means a value is obtained internally.
 
+
   }
   \item{imethod, shrinkage.init, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
 }
 \details{
diff --git a/man/simplexUC.Rd b/man/simplexUC.Rd
index a517042..4557206 100644
--- a/man/simplexUC.Rd
+++ b/man/simplexUC.Rd
@@ -1,7 +1,7 @@
 \name{Simplex }
 \alias{dsimplex}
-\alias{psimplex}
-\alias{qsimplex}
+%\alias{psimplex}
+%\alias{qsimplex}
 \alias{rsimplex}
 \title{ Simplex Distribution }
 \description{
@@ -19,12 +19,19 @@ rsimplex(n, mu = 0.5, dispersion = 1)
   Vector of quantiles.
   The support of the distribution is the interval \eqn{(0,1)}.
 
+
   }
-  \item{mu, dispersion}{Mean and dispersion parameters.
+  \item{mu, dispersion}{
+  Mean and dispersion parameters.
   The former lies in the interval \eqn{(0,1)} and the latter is positive.
 
+
+  }
+  \item{n, log}{
+  Same usage as \code{\link[stats:Uniform]{runif}}.
+
+
   }
-  \item{n, log}{Same usage as \code{\link[stats:Uniform]{runif}}.}
 }
 \details{
 
@@ -40,6 +47,7 @@ rsimplex(n, mu = 0.5, dispersion = 1)
   \code{dsimplex(x)} gives the density function,
   \code{rsimplex(n)} gives \eqn{n} random variates.
 
+
 }
 % \references{ 
 %
@@ -53,11 +61,11 @@ rsimplex(n, mu = 0.5, dispersion = 1)
 }
 
 \examples{
-sigma <- c(4, 2, 1) # Dispersion parameter
+sigma <- c(4, 2, 1)  # Dispersion parameter
 mymu  <- c(0.1, 0.5, 0.7); xxx <- seq(0, 1, len = 501)
-\dontrun{ par(mfrow = c(3, 3)) # Figure 2.1 of Song (2007)
-for(iii in 1:3)
-  for(jjj in 1:3) {
+\dontrun{ par(mfrow = c(3, 3))  # Figure 2.1 of Song (2007)
+for (iii in 1:3)
+  for (jjj in 1:3) {
     plot(xxx, dsimplex(xxx, mymu[jjj], sigma[iii]),
          type = "l", col = "blue", xlab = "", ylab = "", main =
          paste("mu = ", mymu[jjj], ", sigma = ", sigma[iii], sep = "")) } }
diff --git a/man/skellam.Rd b/man/skellam.Rd
index 7e37b2d..20b2829 100644
--- a/man/skellam.Rd
+++ b/man/skellam.Rd
@@ -17,6 +17,7 @@ skellam(lmu1 = "loge", lmu2 = "loge", imu1 = NULL, imu2 = NULL,
   Link functions for the \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} parameters.
   See \code{\link{Links}} for more choices and for general information.
 
+
   }
   \item{imu1, imu2}{
   Optional initial values for the parameters.
@@ -24,25 +25,28 @@ skellam(lmu1 = "loge", lmu2 = "loge", imu1 = NULL, imu2 = NULL,
   If convergence failure occurs (this \pkg{VGAM} family function seems
   to require good initial values) try using these arguments.
 
+
   }
   \item{nsimEIM, parallel, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
   In particular, setting \code{parallel=TRUE} will constrain the
   two means to be equal.
 
+
   }
 }
 \details{
   The Skellam distribution models the difference between two
-  independent Poisson distributions.
+  independent Poisson distributions
+  (with means \eqn{\mu_{j}}{mu_j}, say).
   It has density function
   \deqn{f(y;\mu_1,\mu_2) =
   \left( \frac{ \mu_1 }{\mu_2} \right)^{y/2} \,
-  \exp(-\mu_1-\mu_2 ) \, I_y( 2 \sqrt{ \mu_1 \mu_2})
+  \exp(-\mu_1-\mu_2 ) \, I_{|y|}( 2 \sqrt{ \mu_1 \mu_2})
   }{%
 f(y;mu1,mu2) =
   ( \mu1 / mu_2 )^(y/2) *
-  exp(-mu1-mu2 ) * I_y( 2 * sqrt(mu1*mu2)) 
+  exp(-mu1-mu2 ) * I_(|y|)( 2 * sqrt(mu1*mu2)) 
   }
   where \eqn{y} is an integer,
   \eqn{\mu_1 > 0}{mu1 > 0},
@@ -51,7 +55,8 @@ f(y;mu1,mu2) =
   first kind with order \eqn{v}.
 
 
-  The mean is \eqn{\mu_1 - \mu_2}{mu1 - mu2} (returned as the fitted values)
+  The mean is \eqn{\mu_1 - \mu_2}{mu1 - mu2}
+  (returned as the fitted values),
   and the variance is \eqn{\mu_1 + \mu_2}{mu1 + mu2}.
   Simulated Fisher scoring is implemented.
 
@@ -73,6 +78,7 @@ f(y;mu1,mu2) =
 }
 
 \references{
+
 Skellam, J. G. (1946)
 The frequency distribution of the difference between 
 two Poisson variates belonging to different populations.
@@ -99,9 +105,9 @@ two Poisson variates belonging to different populations.
 \examples{
 \dontrun{
 sdata <- data.frame(x2 = runif(nn <- 1000))
-sdata <- transform(sdata, mu1 = exp(1+x2), mu2 = exp(1+x2))
+sdata <- transform(sdata, mu1 = exp(1 + x2), mu2 = exp(1 + x2))
 sdata <- transform(sdata, y = rskellam(nn, mu1, mu2))
-fit1 <- vglm(y ~ x2, skellam, sdata, trace = TRUE, crit = "c")
+fit1 <- vglm(y ~ x2, skellam, sdata, trace = TRUE, crit = "coef")
 fit2 <- vglm(y ~ x2, skellam(parallel = TRUE), sdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 coef(fit2, matrix = TRUE)
@@ -109,6 +115,7 @@ summary(fit1)
 # Likelihood ratio test for equal means:
 pchisq(2 * (logLik(fit1) - logLik(fit2)),
        df = fit2 at df.residual - fit1 at df.residual, lower.tail = FALSE)
+lrtest(fit1, fit2)  # Alternative
 }
 }
 \keyword{models}
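A quick simulation check of the stated moments, assuming \code{rskellam()} as
documented in the companion file below: the sample mean should be near mu1 - mu2 and
the sample variance near mu1 + mu2.

  set.seed(1)
  mu1 <- 3; mu2 <- 1.5
  y <- rskellam(1e5, mu1, mu2)
  c(mean(y), mu1 - mu2)  # Both approximately 1.5
  c(var(y),  mu1 + mu2)  # Both approximately 4.5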
diff --git a/man/skellamUC.Rd b/man/skellamUC.Rd
index b5e73db..8030416 100644
--- a/man/skellamUC.Rd
+++ b/man/skellamUC.Rd
@@ -14,17 +14,28 @@
 }
 \usage{
 dskellam(x, mu1, mu2, log = FALSE)
-%pskellam(q, mu1, mu2)
-%qskellam(p, mu1, mu2)
 rskellam(n, mu1, mu2)
 }
+%pskellam(q, mu1, mu2)
+%qskellam(p, mu1, mu2)
 \arguments{
   \item{x}{vector of quantiles.}
 % \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-    Must be a positive integer of length 1.}
-  \item{mu1, mu2}{ See \code{\link{skellam}}}.
-  \item{log}{ Logical; if TRUE, the logarithm is returned. }
+  Same as \code{\link[stats:Uniform]{runif}}.
+
+
+  }
+  \item{mu1, mu2}{
+  See \code{\link{skellam}}.
+
+
+  }
+  \item{log}{
+  Logical; if TRUE, the logarithm is returned.
+
+
+  }
 }
 \value{
   \code{dskellam} gives the density, and
@@ -32,6 +43,7 @@ rskellam(n, mu1, mu2)
 % \code{qskellam} gives the quantile function, and
   \code{rskellam} generates random deviates.
 
+
 }
 %\author{ T. W. Yee }
 \details{
@@ -58,7 +70,7 @@ rskellam(n, mu1, mu2)
 \dontrun{ mu1 <- 1; mu2 <- 2; x <- (-7):7
 plot(x, dskellam(x, mu1, mu2), type = "h", las = 1, col = "blue",
      main = paste("Density of Skellam distribution with mu1 = ", mu1,
-                " and mu2 = ", mu2, sep = "")) }
+                  " and mu2 = ", mu2, sep = "")) }
 }
 \keyword{distribution}
 
diff --git a/man/snormUC.Rd b/man/skewnormUC.Rd
similarity index 63%
rename from man/snormUC.Rd
rename to man/skewnormUC.Rd
index 0b8e5fb..88f9fd0 100644
--- a/man/snormUC.Rd
+++ b/man/skewnormUC.Rd
@@ -1,24 +1,25 @@
-\name{snorm}
-\alias{snorm}
-\alias{dsnorm}
-%\alias{psnorm}
-%\alias{qsnorm}
-\alias{rsnorm}
+\name{skewnorm}
+\alias{skewnorm}
+\alias{dskewnorm}
+%\alias{pskewnorm}
+%\alias{qskewnorm}
+\alias{rskewnorm}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Skew-Normal Distribution }
 \description{
   Density and 
 % , distribution function, quantile function and
   random generation
-  for the skew-normal distribution.
+  for the univariate skew-normal distribution.
+
 
 }
 \usage{
-dsnorm(x, location = 0, scale = 1, shape = 0, log = FALSE)
-%psnorm(q, lambda)
-%qsnorm(p, lambda)
-rsnorm(n, location = 0, scale = 1, shape = 0)
+dskewnorm(x, location = 0, scale = 1, shape = 0, log = FALSE)
+rskewnorm(n, location = 0, scale = 1, shape = 0)
 }
+%pskewnorm(q, lambda)
+%qskewnorm(p, lambda)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{x}{vector of quantiles.}
@@ -32,43 +33,50 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
   \item{location}{
   The location parameter \eqn{\xi}{xi}. A vector. 
 
+
   }
   \item{scale}{
   The scale parameter \eqn{\omega}{w}. A positive vector.
 
+
   }
   \item{shape}{
   The shape parameter. It is called \eqn{\alpha}{alpha} in
-  \code{\link{skewnormal1}}.
+  \code{\link{skewnormal}}.
+
 
   }
   \item{log}{
   Logical.
   If \code{log=TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
 \details{
-  See \code{\link{skewnormal1}}, which currently only estimates the shape
+  See \code{\link{skewnormal}}, which currently only estimates the shape
   parameter.
   More generally here, \eqn{Z = \xi + \omega Y}{Z = xi + w * Y} where
   \eqn{Y} has a standard skew-normal distribution
-  (see \code{\link{skewnormal1}}),
+  (see \code{\link{skewnormal}}),
   \eqn{\xi}{xi} is the location parameter and
   \eqn{\omega}{w} is the scale parameter.
 
+
 }
 \value{
-  \code{dsnorm} gives the density,
-% \code{psnorm} gives the distribution function,
-% \code{qsnorm} gives the quantile function, and
-  \code{rsnorm} generates random deviates.
+  \code{dskewnorm} gives the density,
+% \code{pskewnorm} gives the distribution function,
+% \code{qskewnorm} gives the quantile function, and
+  \code{rskewnorm} generates random deviates.
+
 
 }
 \references{ 
     \url{http://tango.stat.unipd.it/SN}.
 
+
 }
 
 \author{ T. W. Yee }
@@ -76,22 +84,22 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
   The default values of all three parameters correspond to the
   skew-normal being the standard normal distribution.
 
+
 }
 
 \seealso{ 
-  \code{\link{skewnormal1}}.
+  \code{\link{skewnormal}}.
 
 
 }
 \examples{
-\dontrun{ N <- 200 # grid resolution
-shape <- 7
-x <- seq(-4, 4, len = N)
-plot(x, dsnorm(x, shape = shape), type = "l", col = "blue", las = 1,
+\dontrun{ N <- 200  # Grid resolution
+shape <- 7; x <- seq(-4, 4, len = N)
+plot(x, dskewnorm(x, shape = shape), type = "l", col = "blue", las = 1,
      ylab = "", lty = 1, lwd = 2)
 abline(v = 0, h = 0, col = "grey")
 lines(x, dnorm(x), col = "orange", lty = 2, lwd = 2)
-legend("topleft", leg = c(paste("Blue = dsnorm(x, ", shape,")", sep = ""),
+legend("topleft", leg = c(paste("Blue = dskewnorm(x, ", shape,")", sep = ""),
        "Orange = standard normal density"), lty = 1:2, lwd = 2,
        col = c("blue", "orange")) }
 }
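A short check of the note above, assuming the \code{dskewnorm()} defaults shown in the
usage: with \code{shape = 0} the skew-normal density reduces to the standard normal.

  x <- seq(-3, 3, by = 0.5)
  all.equal(dskewnorm(x), dnorm(x))  # Should be TRUE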
diff --git a/man/skewnormal1.Rd b/man/skewnormal.Rd
similarity index 69%
rename from man/skewnormal1.Rd
rename to man/skewnormal.Rd
index de92d9a..a3fcb02 100644
--- a/man/skewnormal1.Rd
+++ b/man/skewnormal.Rd
@@ -1,14 +1,15 @@
-\name{skewnormal1}
-\alias{skewnormal1}
+\name{skewnormal}
+\alias{skewnormal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Univariate Skew-Normal Distribution Family Function }
 \description{
   Maximum likelihood estimation of the shape parameter of a univariate
   skew-normal distribution.
 
+
 }
 \usage{
-skewnormal1(lshape = "identity", ishape = NULL, nsimEIM = NULL)
+skewnormal(lshape = "identity", ishape = NULL, nsimEIM = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -16,6 +17,7 @@ skewnormal1(lshape = "identity", ishape = NULL, nsimEIM = NULL)
   See \code{\link{Links}} and
   \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -55,17 +57,16 @@ skewnormal1(lshape = "identity", ishape = NULL, nsimEIM = NULL)
 }
 \references{
 
-Azzalini, A. A. (1985)
-A class of distributions which include the normal.
-\emph{Scandinavian Journal of Statistics},
-\bold{12}, 171--178.
+  Azzalini, A. A. (1985)
+  A class of distributions which include the normal.
+  \emph{Scandinavian Journal of Statistics},
+  \bold{12}, 171--178.
 
 
-Azzalini, A. and Capitanio, A. (1999)
-Statistical applications of the multivariate skew-normal
-distribution.
-\emph{Journal of the Royal Statistical Society, Series B, Methodological},
-\bold{61}, 579--602.
+  Azzalini, A. and Capitanio, A. (1999)
+  Statistical applications of the multivariate skew-normal distribution.
+  \emph{Journal of the Royal Statistical Society, Series B, Methodological},
+  \bold{61}, 579--602.
 
 
 }
@@ -89,27 +90,27 @@ distribution.
 
 }  
 \seealso{
-  \code{\link{snorm}},
-  \code{\link{normal1}},
-  \code{\link{fnormal1}}.
+  \code{\link{skewnorm}},
+  \code{\link{uninormal}},
+  \code{\link{foldnormal}}.
 
 
 }
 
 \examples{
-sdata <- data.frame(y = rsnorm(nn <- 1000, shape = 5))
-fit <- vglm(y ~ 1, skewnormal1, sdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-head(fitted(fit), 1)
-with(sdata, mean(y))
-\dontrun{ with(sdata, hist(y, prob = TRUE))
-x <- with(sdata, seq(min(y), max(y), len = 200))
-with(sdata, lines(x, dsnorm(x, shape = Coef(fit)), col = "blue")) }
+sdata <- data.frame(y1 = rskewnorm(nn <- 1000, shape = 5))
+fit1 <- vglm(y1 ~ 1, skewnormal, sdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+head(fitted(fit1), 1)
+with(sdata, mean(y1))
+\dontrun{ with(sdata, hist(y1, prob = TRUE))
+x <- with(sdata, seq(min(y1), max(y1), len = 200))
+with(sdata, lines(x, dskewnorm(x, shape = Coef(fit1)), col = "blue")) }
 
 sdata <- data.frame(x2 = runif(nn))
-sdata <- transform(sdata, y = rsnorm(nn, shape = 1 + 2*x2))
-fit <- vglm(y ~ x2, skewnormal1, sdata, trace = TRUE, crit = "coef")
-summary(fit)
+sdata <- transform(sdata, y2 = rskewnorm(nn, shape = 1 + 2*x2))
+fit2 <- vglm(y2 ~ x2, skewnormal, sdata, trace = TRUE, crit = "coef")
+summary(fit2)
 }
 \keyword{models}
 \keyword{regression}
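
If the fitted values are the mean of the distribution (as is usual for VGAM
family functions), a quick check against the closed-form skew-normal mean
E(Y) = sqrt(2/pi) * alpha / sqrt(1 + alpha^2) is possible; a sketch with
illustrative values, assuming that standard formula:

  library(VGAM)
  set.seed(2)
  sdat <- data.frame(y = rskewnorm(2000, shape = 5))
  fit  <- vglm(y ~ 1, skewnormal, data = sdat)
  alpha.hat <- Coef(fit)
  c(head(fitted(fit), 1),
    sqrt(2/pi) * alpha.hat / sqrt(1 + alpha.hat^2))  # fitted value vs theoretical mean
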
diff --git a/man/studentt.Rd b/man/studentt.Rd
index 4466b72..1a9074c 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -5,11 +5,12 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Student t Distribution }
 \description{
-  Estimation of parameters in a Student t distribution.
+  Estimating the parameters of a Student t distribution.
+
 
 }
 \usage{
-studentt(ldf = "loglog", idf = NULL, tol1 = 0.1, imethod = 1)
+ studentt(ldf = "loglog", idf = NULL, tol1 = 0.1, imethod = 1)
 studentt2(df = Inf, llocation = "identity", lscale = "loge",
           ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
 studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
@@ -26,12 +27,14 @@ studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
   A \code{\link{loglog}} link keeps the degrees of freedom greater
   than unity; see below.
 
+
   }
   \item{ilocation, iscale, idf}{
   Optional initial values.
   If given, the values must be in range.
   The default is to compute an initial value internally.
 
+
   }
 
   \item{tol1}{
@@ -39,6 +42,7 @@ studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
   initial value is 1.
   Best to leave this argument alone.
 
+
   }
 
   \item{df}{
@@ -46,10 +50,12 @@ studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
   It may be of length equal to the number of columns of a response
   matrix.
 
+
   }
   \item{imethod, zero}{
   See \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -78,9 +84,10 @@ studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
   Let \eqn{Y = (T - \mu) / \sigma}{Y = (T -  mu) /  sigma} where
   \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are the location
   and scale parameters respectively.
-  Then \code{studentt3} estimates the location, scale and degrees of freedom parameters.
-  And \code{studentt2} estimates the location, scale parameters for a user-specified
-  degrees of freedom, \code{df}.
+  Then \code{studentt3} estimates the location, scale and
+  degrees of freedom parameters.
+  And \code{studentt2} estimates the location and scale parameters
+  for user-specified degrees of freedom, \code{df}.
   And \code{studentt} estimates the degrees of freedom parameter only.
   The fitted values are the location parameters.
   By default the linear/additive predictors are
@@ -118,7 +125,7 @@ application to financial econometrics.
 
 \author{ T. W. Yee }
 \note{
-  \code{studentt3} and \code{studentt2} can handle multiple responses.
+  \code{studentt3()} and \code{studentt2()} can handle multiple responses.
 
 
   Practical experience has shown reasonably good initial values are
@@ -132,17 +139,17 @@ application to financial econometrics.
   A standard normal distribution corresponds to a \emph{t} distribution
   with infinite degrees of freedom. Consequently, if the data is close
   to normal, there may be convergence problems; best to use
-  \code{\link{normal1}} instead.
+  \code{\link{uninormal}} instead.
 
 
 }
 \seealso{
-    \code{\link{normal1}},
-    \code{\link{cauchy1}},
-    \code{\link{logistic}},
-    \code{\link{huber2}},
-    \code{\link{koenker}},
-    \code{\link[stats]{TDist}}.
+  \code{\link{uninormal}},
+  \code{\link{cauchy1}},
+  \code{\link{logistic}},
+  \code{\link{huber2}},
+  \code{\link{koenker}},
+  \code{\link[stats]{TDist}}.
 
 
 }
@@ -159,6 +166,8 @@ coef(fit2, matrix = TRUE)
 \keyword{models}
 \keyword{regression}
 
-%Evans, M., Hastings, N. and Peacock, B. (2000)
+%Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 %\emph{Statistical Distributions},
-%New York: Wiley-Interscience, Third edition.
+%Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
+
+
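To make the note about the df = Inf limit concrete, here is a small sketch
(simulated data; it assumes only the studentt2() and uninormal() argument
lists shown in this commit) fitting a fixed-df Student t alongside a normal:

  library(VGAM)
  set.seed(3)
  tdat  <- data.frame(y = 1 + 2 * rt(500, df = 4))      # heavy-tailed data
  fit.t <- vglm(y ~ 1, studentt2(df = 4), data = tdat)  # location & scale, df fixed
  fit.n <- vglm(y ~ 1, uninormal, data = tdat)          # the df = Inf limit
  coef(fit.t, matrix = TRUE)
  coef(fit.n, matrix = TRUE)  # compare the location and scale/sd columns
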
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 4c420bf..bd9e9e1 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -16,6 +16,7 @@ tikuv(d, lmean = "identity", lsigma = "loge",
   The \eqn{d} parameter. It must be a single numeric value less than 2.
   Then \eqn{h = 2-d>0} is another parameter.
 
+
   }
   \item{lmean, lsigma}{
   Link functions for the mean and standard
@@ -24,6 +25,7 @@ tikuv(d, lmean = "identity", lsigma = "loge",
   They are \eqn{\mu}{mu} and \eqn{\sigma}{sigma} respectively.
   See \code{\link{Links}} for more choices.
 
+
   }
 
 
@@ -39,6 +41,7 @@ tikuv(d, lmean = "identity", lsigma = "loge",
   Optional initial value for \eqn{\sigma}{sigma}.
   A \code{NULL} means a value is computed internally.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
@@ -49,6 +52,7 @@ tikuv(d, lmean = "identity", lsigma = "loge",
   a linear combination of the explanatory variables.
   For many data sets having \code{zero = 2} is a good idea.
 
+
   }
 }
 \details{
@@ -114,7 +118,7 @@ tikuv(d, lmean = "identity", lsigma = "loge",
 }
 \seealso{ 
   \code{\link{dtikuv}},
-  \code{\link{normal1}}.
+  \code{\link{uninormal}}.
 
 
 }
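
A short sketch of the family function in use, with hypothetical parameter
values; it assumes the tikuv() arguments documented above and the rtikuv()
arguments shown in the next file:

  library(VGAM)
  set.seed(4)
  tkdat <- data.frame(y = rtikuv(1000, d = 1, mean = 2, sigma = 3))
  fit <- vglm(y ~ 1, tikuv(d = 1), data = tkdat, trace = TRUE)
  coef(fit, matrix = TRUE)
  Coef(fit)  # estimates of mu and sigma on the original scale
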
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
index 96164c7..59bd0c0 100644
--- a/man/tikuvUC.Rd
+++ b/man/tikuvUC.Rd
@@ -29,6 +29,7 @@ rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
   For \code{rtikuv}, arguments \code{mean} and \code{sigma} must be of
   length 1.
 
+
   }
   \item{Smallno}{
   Numeric, a small value used by the rejection method for determining
@@ -36,15 +37,18 @@ rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
   That is, \code{ptikuv(L) < Smallno} and \code{ptikuv(U) > 1-Smallno}
   where \code{L} and \code{U} are the lower and upper limits respectively.
 
+
   }
   \item{\ldots}{
   Arguments that can be passed into \code{\link[stats]{uniroot}}.
 
+
   }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -90,7 +94,7 @@ legend("topleft", col = c("orange","blue","green"), lty = rep(1, len = 3),
        legend = paste("d =", c(-10, -1, 1))) }
 
 probs <- seq(0.1, 0.9, by = 0.1)
-ptikuv(qtikuv(p = probs, d =  1), d = 1) - probs # Should be all 0
+ptikuv(qtikuv(p = probs, d =  1), d = 1) - probs  # Should be all 0
 }
 \keyword{distribution}
 
diff --git a/man/tobit.Rd b/man/tobit.Rd
index a977e6f..fb64dc8 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -146,24 +146,24 @@ tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
 
 
   If there is no censoring then
-  \code{\link{normal1}} is recommended instead. Any value of the
+  \code{\link{uninormal}} is recommended instead. Any value of the
   response less than \code{Lower} or greater than \code{Upper} will
   be assigned the value \code{Lower} or \code{Upper} respectively,
   and a warning will be issued.
   The fitted object has components \code{censoredL} and \code{censoredU}
   in the \code{extra} slot which specify whether observations
   are censored in that direction.
-  The function \code{\link{cennormal1}} is an alternative
+  The function \code{\link{cennormal}} is an alternative
   to \code{tobit()}.
 
 
 }
 \seealso{
   \code{\link{rtobit}},
-  \code{\link{cennormal1}},
-  \code{\link{normal1}},
-  \code{\link{dcennormal1}},
-  \code{\link{posnormal1}},
+  \code{\link{cennormal}},
+  \code{\link{uninormal}},
+  \code{\link{double.cennormal}},
+  \code{\link{posnormal}},
   \code{\link[stats:Normal]{rnorm}}.
 
 
@@ -173,31 +173,31 @@ tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
 # Here, fit1 is a standard Tobit model and fit2 is a nonstandard Tobit model
 tdata <- data.frame(x2 = seq(-1, 1, length = (nn <- 100)))
 set.seed(1)
-Lower <- 1; Upper = 4 # For the nonstandard Tobit model
+Lower <- 1; Upper <- 4  # For the nonstandard Tobit model
 tdata <- transform(tdata,
-  Lower.vec = rnorm(nn, Lower, 0.5),
-  Upper.vec = rnorm(nn, Upper, 0.5))
+                   Lower.vec = rnorm(nn, Lower, 0.5),
+                   Upper.vec = rnorm(nn, Upper, 0.5))
 meanfun1 <- function(x) 0 + 2*x
 meanfun2 <- function(x) 2 + 2*x
 meanfun3 <- function(x) 2 + 2*x
 meanfun4 <- function(x) 3 + 2*x
 tdata <- transform(tdata,
-  y1 = rtobit(nn, mean = meanfun1(x2)), # Standard Tobit model 
+  y1 = rtobit(nn, mean = meanfun1(x2)),  # Standard Tobit model 
   y2 = rtobit(nn, mean = meanfun2(x2), Lower = Lower, Upper = Upper),
   y3 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec),
   y4 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec))
-with(tdata, table(y1 == 0)) # How many censored values?
-with(tdata, table(y2 == Lower | y2 == Upper)) # How many censored values?
+with(tdata, table(y1 == 0))  # How many censored values?
+with(tdata, table(y2 == Lower | y2 == Upper))  # How many censored values?
 with(tdata, table(attr(y2, "cenL")))
 with(tdata, table(attr(y2, "cenU")))
 
 fit1 <- vglm(y1 ~ x2, tobit, tdata, trace = TRUE,
-            crit = "coeff") # crit = "coeff" is recommended
+             crit = "coeff")  # crit = "coeff" is recommended
 coef(fit1, matrix = TRUE)
 summary(fit1)
 
 fit2 <- vglm(y2 ~ x2, tobit(Lower = Lower, Upper = Upper, type.f = "cens"),
-            tdata, crit = "coeff", trace = TRUE) # ditto
+            tdata, crit = "coeff", trace = TRUE)  # ditto
 table(fit2@extra$censoredL)
 table(fit2@extra$censoredU)
 coef(fit2, matrix = TRUE)
@@ -205,7 +205,7 @@ coef(fit2, matrix = TRUE)
 fit3 <- vglm(y3 ~ x2,
             tobit(Lower = with(tdata, Lower.vec),
                   Upper = with(tdata, Upper.vec), type.f = "cens"),
-            tdata, crit = "coeff", trace = TRUE) # ditto
+            tdata, crit = "coeff", trace = TRUE)  # ditto
 table(fit3@extra$censoredL)
 table(fit3@extra$censoredU)
 coef(fit3, matrix = TRUE)
@@ -214,11 +214,11 @@ coef(fit3, matrix = TRUE)
 fit4 <- vglm(cbind(y3, y4) ~ x2,
             tobit(Lower = rep(with(tdata, Lower.vec), each = 2),
                   Upper = rep(with(tdata, Upper.vec), each = 2)),
-            tdata, crit = "coeff", trace = TRUE) # ditto
-head(fit4@extra$censoredL) # A matrix
-head(fit4@extra$censoredU) # A matrix
-head(fit4@misc$Lower)      # A matrix
-head(fit4@misc$Upper)      # A matrix
+            tdata, crit = "coeff", trace = TRUE)  # ditto
+head(fit4@extra$censoredL)  # A matrix
+head(fit4@extra$censoredU)  # A matrix
+head(fit4@misc$Lower)       # A matrix
+head(fit4@misc$Upper)       # A matrix
 coef(fit4, matrix = TRUE)
 }
 
@@ -235,7 +235,7 @@ legend(-1.0, 2.5, c("Truth", "Estimate", "Naive"),
 lines(meanfun1(x2) ~ x2, tdata, col = "purple", lwd = 2)
 lines(fitted(fit1) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y1 ~ x2, tdata)) ~ x2, tdata, col = "black",
-      lty = 2, lwd = 2) # This is simplest but wrong!
+      lty = 2, lwd = 2)  # This is simplest but wrong!
 
 # Plot fit2
 plot(y2 ~ x2, tdata, las = 1, main = "Tobit model",
@@ -250,7 +250,7 @@ legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
 lines(meanfun2(x2) ~ x2, tdata, col = "purple", lwd = 2)
 lines(fitted(fit2) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y2 ~ x2, tdata)) ~ x2, tdata, col = "black",
-      lty = 2, lwd = 2) # This is simplest but wrong!
+      lty = 2, lwd = 2)  # This is simplest but wrong!
 
 # Plot fit3
 plot(y3 ~ x2, tdata, las = 1,
@@ -266,7 +266,7 @@ legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
 lines(meanfun3(x2) ~ x2, tdata, col = "purple", lwd = 2)
 lines(fitted(fit3) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
-      lty = 2, lwd = 2) # This is simplest but wrong!
+      lty = 2, lwd = 2)  # This is simplest but wrong!
 
 # Plot fit4
 plot(y3 ~ x2, tdata, las = 1,
@@ -282,7 +282,7 @@ legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
 lines(meanfun3(x2) ~ x2, tdata, col = "purple", lwd = 2)
 lines(fitted(fit4)[, 1] ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
-      lty = 2, lwd = 2) # This is simplest but wrong!
+      lty = 2, lwd = 2)  # This is simplest but wrong!
 }
 }
 \keyword{models}
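
The censoredL and censoredU components mentioned above can be inspected
directly; a minimal sketch with simulated data (a standard Tobit, so the
default Lower = 0 applies; illustrative values only):

  library(VGAM)
  set.seed(5)
  tdat <- data.frame(x2 = runif(200))
  tdat <- transform(tdat, y = rtobit(200, mean = 1 + 2 * x2))  # left-censored at 0
  fit  <- vglm(y ~ x2, tobit, data = tdat)
  mean(fit@extra$censoredL)  # proportion censored at the lower limit
  coef(fit, matrix = TRUE)
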
diff --git a/man/tobitUC.Rd b/man/tobitUC.Rd
index 30ac150..16fc305 100644
--- a/man/tobitUC.Rd
+++ b/man/tobitUC.Rd
@@ -78,9 +78,9 @@ lines(Q, ptobit(Q, m = m, Lower = Lower, Upper = Upper),
 lines(Q, dtobit(Q, m = m, Lower = Lower, Upper = Upper),
       col = "darkgreen", lty = "dashed", type = "h")
 abline(h = probs, col = "purple", lty = "dashed")
-max(abs(ptobit(Q, m = m, Lower = Lower, Upper = Upper) - probs)) # Should be 0
+max(abs(ptobit(Q, m = m, Lower = Lower, Upper = Upper) - probs))  # Should be 0
 
-endpts <- c(Lower, Upper) # Endpoints have a spike
+endpts <- c(Lower, Upper)  # Endpoints have a spike
 lines(endpts, dtobit(endpts, m = m, Lower = Lower, Upper = Upper),
       col = "blue", lwd = 2, type = "h")
 }
diff --git a/man/toxop.Rd b/man/toxop.Rd
index 74dd173..1ed56fb 100644
--- a/man/toxop.Rd
+++ b/man/toxop.Rd
@@ -4,6 +4,7 @@
 \title{ Toxoplasmosis Data }
 \description{
   Toxoplasmosis data in 34 cities in El Salvador.
+
 }
 \usage{data(toxop)}
 \format{
@@ -34,7 +35,7 @@
 }
 
 \seealso{
-    \code{\link[VGAM]{dexpbinomial}}.
+    \code{\link[VGAM]{double.expbinomial}}.
 
 }
 
diff --git a/man/triangle.Rd b/man/triangle.Rd
index 6e9e90e..fd82608 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -9,13 +9,15 @@
 }
 \usage{
 triangle(lower = 0, upper = 1,
-         link = elogit(min = lower, max = upper), itheta = NULL)
+         link = elogit(min = 0, max = 1), itheta = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lower, upper}{lower and upper limits of the distribution.
      Must be finite.
      Called \eqn{A} and \eqn{B} respectively below.
+
+
    }
 
   \item{link}{
@@ -24,26 +26,30 @@ triangle(lower = 0, upper = 1,
   See \code{\link{Links}} for more choices.
   The default constrains the estimate to lie in the interval.
 
+
   }
   \item{itheta}{
   Optional initial value for the parameter.
   The default is to compute the value internally.
 
+
   }
 }
 \details{
   The triangle distribution
   has a probability density function that consists of two lines
-  joined at \eqn{\theta}{theta}. The lines intersect the
+  joined at \eqn{\theta}{theta}, which is the location of the mode.
+  The lines intersect the
   \eqn{y = 0} axis at \eqn{A} and \eqn{B}.
   Here, Fisher scoring is used.
 
 
+
   On fitting, the \code{extra} slot has components called \code{lower}
   and \code{upper} which contain the values of the above arguments
   (recycled to the right length).
   The fitted values are the mean of the distribution, which is
-  a little messy to write.
+  \eqn{(A + B + \theta)/3}{(A + B + theta)/3}.
 
 
 }
@@ -54,30 +60,75 @@ triangle(lower = 0, upper = 1,
 
 
 }
-%\references{ 
-%}
+\references{ 
+
+
+Kotz, S. and van Dorp, J. R. (2004)
+\emph{Beyond Beta: Other Continuous Families of Distributions
+with Bounded Support and Applications}.
+Chapter 1.
+World Scientific: Singapore.
+
+
+
+}
 \author{ T. W. Yee }
+\section{Warning}{
+  The MLE regularity conditions do not seem to hold for this
+  distribution so that misleading inferences may result, e.g., in
+  the \code{summary} and \code{vcov} of the object.
+  Additionally, convergence to the MLE often appears to fail.
+
+
+}
+
 \note{
   The response must contain values in \eqn{(A, B)}.
   For most data sets (especially small ones) it is very common for
   half-stepping to occur.
 
 
+% 20130603
+  Arguments \code{lower}, \code{upper} and \code{link} must match.
+  For example, setting
+  \code{lower = 0.2} and \code{upper = 4} and
+  \code{link = elogit(min = 0.2, max = 4.1)} will result in an error.
+  Ideally \code{link = elogit(min = lower, max = upper)}
+  ought to work but it does not (yet)!
+  Minimal error checking is done for this deficiency.
+
+
+
 }
 \seealso{
-     \code{\link{Triangle}}.
+  \code{\link{Triangle}}.
 
 
 }
 \examples{
-tdata <- data.frame(y  = rtriangle(n <- 3000, theta = 3/4))
+# Example 1
+tdata <- data.frame(y = rtriangle(n <- 3000, theta = 3/4))
 fit <- vglm(y ~ 1, triangle(link = "identity"), tdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fit@extra$lower)
 head(fitted(fit))
 with(tdata, mean(y))
+
+# Example 2; Kotz and van Dorp (2004), p.14
+rdata <- data.frame(y = c(0.1, 0.25, 0.3, 0.4, 0.45, 0.6, 0.75, 0.8))
+fit <- vglm(y ~ 1, triangle(link = "identity"), rdata, trace = TRUE,
+            crit = "coef", maxit = 1000)
+Coef(fit)  # The MLE is the 3rd order statistic, which is 0.3.
+fit <- vglm(y ~ 1, triangle(link = "identity"), rdata, trace = TRUE,
+            crit = "coef", maxit = 1001)
+Coef(fit)  # The MLE is the 3rd order statistic, which is 0.3.
 }
 \keyword{models}
 \keyword{regression}
 
+
+
+% 20130603: yettodo: fix up so ideally
+%        link = elogit(min = lower, max = upper), itheta = NULL)
+% works.
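
A quick numerical check of the mean (A + B + theta)/3 stated in the details,
using simulated deviates (illustrative values):

  library(VGAM)
  set.seed(6)
  A <- 0; B <- 1; theta <- 3/4
  y <- rtriangle(10000, theta = theta, lower = A, upper = B)
  c(mean(y), (A + B + theta) / 3)  # sample mean vs closed-form mean
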
diff --git a/man/triangleUC.Rd b/man/triangleUC.Rd
index ba227ac..2cb4e75 100644
--- a/man/triangleUC.Rd
+++ b/man/triangleUC.Rd
@@ -9,6 +9,7 @@
   Density, distribution function, quantile function and random
   generation for the Triangle distribution with parameter
   \code{theta}.
+
 }
 \usage{
 dtriangle(x, theta, lower = 0, upper = 1, log = FALSE)
@@ -20,16 +21,19 @@ rtriangle(n, theta, lower = 0, upper = 1)
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-    Must be a positive integer of length 1.}
+    Same as \code{\link[stats]{runif}}.
+
+   }
   \item{theta}{the theta parameter which lies between \code{lower}
      and \code{upper}. }
   \item{lower, upper}{lower and upper limits of the distribution.
      Must be finite.
-   }
+  }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
+
   }
 
 }
@@ -39,6 +43,7 @@ rtriangle(n, theta, lower = 0, upper = 1)
   \code{qtriangle} gives the quantile function, and
   \code{rtriangle} generates random deviates.
 
+
 }
 %\references{
 %
@@ -52,7 +57,7 @@ rtriangle(n, theta, lower = 0, upper = 1)
 
 }
 %\note{
-%  
+%
 %}
 \seealso{
   \code{\link{triangle}}.
@@ -70,7 +75,7 @@ lines(x, ptriangle(x, theta = theta), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qtriangle(probs, theta = theta)
 lines(Q, dtriangle(Q, theta = theta), col = "purple", lty = 3, type = "h")
-ptriangle(Q, theta = theta) - probs # Should be all zero
+ptriangle(Q, theta = theta) - probs  # Should be all zero
 abline(h = probs, col = "purple", lty = 3) }
 }
 \keyword{distribution}
diff --git a/man/trplot.Rd b/man/trplot.Rd
index 6eab4cb..aa5dbe3 100644
--- a/man/trplot.Rd
+++ b/man/trplot.Rd
@@ -65,7 +65,7 @@ quadratic ordination.
 
 \examples{
 \dontrun{ set.seed(123)
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardized environmental vars
 p1cqo <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                   Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
                   Trocterr, Zoraspin) ~
@@ -75,7 +75,7 @@ p1cqo <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
 nos <- ncol(depvar(p1cqo))
 clr <- 1:nos # OR (1:(nos+1))[-7]  to omit yellow
 
-trplot(p1cqo, whichSpecies = 1:3, log = "xy",
+trplot(p1cqo, which.species = 1:3, log = "xy",
        col = c("blue", "orange", "green"), lwd = 2, label = TRUE) -> ii
 legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "),
        lwd = 2, lty = 1, col = c("blue", "orange", "green"))
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index 9ba1e90..11c5458 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -11,7 +11,7 @@ It is only applicable for rank-1 models with argument
 
 }
 \usage{
-trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
+trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
                label.sites = FALSE, sitenames = rownames(object@y), 
                axes.equal = TRUE, cex = par()$cex, 
                col = 1:(nos * (nos - 1)/2), log = "", 
@@ -24,7 +24,7 @@ trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{object}{ Object of class \code{"qrrvglm"}, i.e., a CQO object. }
-  \item{whichSpecies}{ Integer or character vector specifying the
+  \item{which.species}{ Integer or character vector specifying the
   species to be plotted. If integer, these are the columns of the
   response matrix. If character, these must match exactly with the
   species' names. 
@@ -34,7 +34,7 @@ trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
   }
   \item{add}{ Logical. Add to an existing plot? If \code{FALSE} (default),
   a new plot is made. }
-  \item{plot.it}{ Logical. Plot it? }
+  \item{show.plot}{ Logical. Plot it? }
   \item{label.sites}{ Logical. If \code{TRUE}, the points on the
   curves/trajectories are labelled with the \code{sitenames}. }
   \item{sitenames}{ Character vector. The names of the sites. }
@@ -118,7 +118,7 @@ trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
 }
 \details{
  A trajectory plot plots the fitted values of a `second' species
- against a `first' species. The argument \code{whichSpecies} must
+ against a `first' species. The argument \code{which.species} must
  therefore contain at least two species. By default, all of the
  species that were fitted in \code{object} are plotted. 
  With more than a few species
@@ -182,19 +182,19 @@ quadratic ordination.
 
 }
 
-\examples{\dontrun{ set.seed(111) # This leads to the global solution
-# hspider[,1:6] <- scale(hspider[,1:6]) # Standardize the environmental variables
+\examples{\dontrun{ set.seed(111)  # This leads to the global solution
+# hspider[,1:6] <- scale(hspider[,1:6])  # Standardize the environmental variables
 p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
                 Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           poissonff, data = hspider, trace = FALSE)
 
-trplot(p1, whichSpecies = 1:3, log = "xy", type = "b", lty = 1,
+trplot(p1, which.species = 1:3, log = "xy", type = "b", lty = 1,
        main = "Trajectory plot of three hunting spiders species",
        col = c("blue","red","green"), lwd = 2, label = TRUE) -> ii
 legend(0.00005, 0.3, lwd = 2, lty = 1, col = c("blue", "red", "green"),
        with(ii, paste(species.names[,1], species.names[,2], sep = " and ")))
-abline(a = 0, b = 1, lty = "dashed", col = "grey") # Useful reference line
+abline(a = 0, b = 1, lty = "dashed", col = "grey")  # Useful reference line
 }
 }
 \keyword{models}
diff --git a/man/tparetoUC.Rd b/man/truncparetoUC.Rd
similarity index 55%
rename from man/tparetoUC.Rd
rename to man/truncparetoUC.Rd
index ebb4e39..fd06ba4 100644
--- a/man/tparetoUC.Rd
+++ b/man/truncparetoUC.Rd
@@ -1,9 +1,9 @@
-\name{Tpareto}
-\alias{Tpareto}
-\alias{dtpareto}
-\alias{ptpareto}
-\alias{qtpareto}
-\alias{rtpareto}
+\name{Truncpareto}
+\alias{Truncpareto}
+\alias{dtruncpareto}
+\alias{ptruncpareto}
+\alias{qtruncpareto}
+\alias{rtruncpareto}
 \title{The Truncated Pareto Distribution}
 \description{
   Density, distribution function, quantile function and random generation
@@ -13,30 +13,36 @@
 
 }
 \usage{
-dtpareto(x, lower, upper, shape, log = FALSE)
-ptpareto(q, lower, upper, shape)
-qtpareto(p, lower, upper, shape)
-rtpareto(n, lower, upper, shape)
+dtruncpareto(x, lower, upper, shape, log = FALSE)
+ptruncpareto(q, lower, upper, shape)
+qtruncpareto(p, lower, upper, shape)
+rtruncpareto(n, lower, upper, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n, log}{Same meaning as \code{\link[stats:Uniform]{runif}}. }
+  \item{n, log}{Same meaning as \code{\link[stats:Uniform]{runif}}.
+
+
+  }
   \item{lower, upper, shape}{
   the lower, upper and shape (\eqn{k}) parameters.
   If necessary, values are recycled.
 
+
   }
 
 }
 \value{
-  \code{dtpareto} gives the density,
-  \code{ptpareto} gives the distribution function,
-  \code{qtpareto} gives the quantile function, and
-  \code{rtpareto} generates random deviates.
+  \code{dtruncpareto} gives the density,
+  \code{ptruncpareto} gives the distribution function,
+  \code{qtruncpareto} gives the quantile function, and
+  \code{rtruncpareto} generates random deviates.
+
 
 }
 \references{
+
   Aban, I. B., Meerschaert, M. M. and Panorska, A. K. (2006)
   Parameter estimation for the truncated Pareto distribution,
   \emph{Journal of the American Statistical Association},
@@ -44,10 +50,12 @@ rtpareto(n, lower, upper, shape)
   270--277.
 
 
+
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{tpareto1}}, the \pkg{VGAM} family function
+
+  See \code{\link{truncpareto}}, the \pkg{VGAM} family function
   for estimating the parameter \eqn{k} by maximum likelihood estimation,
   for the formula of the probability density function and the
   range restrictions imposed on the parameters.
@@ -59,28 +67,28 @@ rtpareto(n, lower, upper, shape)
 %%  The truncated Pareto distribution is 
 %%}
 \seealso{
-  \code{\link{tpareto1}}.
+  \code{\link{truncpareto}}.
 
 
 }
 \examples{ lower <- 3; upper <- 8; kay <- exp(0.5)
 \dontrun{ xx <- seq(lower - 0.5, upper + 0.5, len = 401)
-plot(xx, dtpareto(xx, low = lower, upp = upper, shape = kay),
+plot(xx, dtruncpareto(xx, low = lower, upp = upper, shape = kay),
      main = "Truncated Pareto density split into 10 equal areas",
      type = "l", ylim = 0:1, xlab = "x")
 abline(h = 0, col = "blue", lty = 2)
-qq <- qtpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper,
-               shape = kay)
-lines(qq, dtpareto(qq, low = lower, upp = upper, shape = kay),
+qq <- qtruncpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper,
+                   shape = kay)
+lines(qq, dtruncpareto(qq, low = lower, upp = upper, shape = kay),
       col = "purple", lty = 3, type = "h")
-lines(xx, ptpareto(xx, low = lower, upp = upper, shape = kay),
+lines(xx, ptruncpareto(xx, low = lower, upp = upper, shape = kay),
       col = "orange") }
 pp <- seq(0.1, 0.9, by = 0.1)
-qq <- qtpareto(pp, low = lower, upp = upper, shape = kay)
+qq <- qtruncpareto(pp, low = lower, upp = upper, shape = kay)
 
-ptpareto(qq, low = lower, upp = upper, shape = kay)
-qtpareto(ptpareto(qq, low = lower, upp = upper, shape = kay),
-         low = lower, upp = upper, shape = kay) - qq # Should be all 0
+ptruncpareto(qq, low = lower, upp = upper, shape = kay)
+qtruncpareto(ptruncpareto(qq, low = lower, upp = upper, shape = kay),
+         low = lower, upp = upper, shape = kay) - qq  # Should be all 0
 }
 \keyword{distribution}
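
As a cross-check of ptruncpareto(), the usual closed-form cdf of the
truncated Pareto (as in Aban et al. 2006) can be evaluated directly;
a sketch, assuming that parameterisation:

  library(VGAM)
  lower <- 3; upper <- 8; kay <- exp(0.5)
  x <- c(4, 5, 6, 7)
  F.closed <- (1 - (lower / x)^kay) / (1 - (lower / upper)^kay)
  F.closed - ptruncpareto(x, lower = lower, upper = upper, shape = kay)  # ~ 0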
 
diff --git a/man/truncweibull.Rd b/man/truncweibull.Rd
index a111b99..775247c 100644
--- a/man/truncweibull.Rd
+++ b/man/truncweibull.Rd
@@ -127,7 +127,7 @@ truncweibull(lower.limit = 1e-5,
 nn <- 5000; prop.lost <- 0.40   # Proportion lost to truncation
 wdata <- data.frame(x2 = runif(nn))  # Complete Weibull data
 wdata <- transform(wdata,
-                   Betaa = exp(1)) # > 2 is okay (satisfies regularity conds)
+                   Betaa = exp(1))  # > 2 is okay (satisfies regularity conds)
 wdata <- transform(wdata, Alpha = exp(0.5 - 1 * x2))
 wdata <- transform(wdata, Shape = Betaa,
 #                         aaa   = Betaa,
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index f23c3b7..c692548 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -4,6 +4,18 @@
 %\alias{ccoef-method}
 %
 %
+% 20131104
+\alias{family.name,ANY-method}
+\alias{family.name,vlm-method}
+\alias{family.name,vglmff-method}
+% 20130903
+\alias{BIC,ANY-method}
+\alias{BIC,vlm-method}
+\alias{BIC,vglm-method}
+\alias{BIC,vgam-method}
+\alias{BIC,rrvglm-method}
+\alias{BIC,qrrvglm-method}
+\alias{BIC,cao-method}
 %
 % 20121105
 \alias{Rank,qrrvglm-method}
@@ -29,6 +41,7 @@
 %
 % 20120112
 \alias{AIC,ANY-method}
+\alias{AICc,ANY-method}
 \alias{coef,ANY-method}
 \alias{logLik,ANY-method}
 \alias{plot,ANY-method}
@@ -37,7 +50,7 @@
 \alias{plot,qrrvglm,ANY-method}
 \alias{plot,rcim,ANY-method}
 \alias{plot,rcim0,ANY-method}
-\alias{plot,uqo,ANY-method}
+%\alias{plot,uqo,ANY-method}
 \alias{plot,vgam,ANY-method}
 \alias{plot,vglm,ANY-method}
 \alias{plot,vlm,ANY-method}
@@ -52,34 +65,42 @@
 \alias{AIC,vgam-method}
 \alias{AIC,rrvglm-method}
 \alias{AIC,qrrvglm-method}
+\alias{AIC,cao-method}
+\alias{AICc,vlm-method}
+\alias{AICc,vglm-method}
+%\alias{AICc,vgam-method}
+%\alias{AICc,rrvglm-method}
+%\alias{AICc,qrrvglm-method}
 \alias{attrassign,lm-method}
 \alias{calibrate,qrrvglm-method}
 \alias{calibrate,cao-method}
-\alias{calibrate,uqo-method}
+%\alias{calibrate,uqo-method}
 \alias{cdf,vglm-method}
 \alias{cdf,vgam-method}
 \alias{coefficients,cao-method}
 \alias{coefficients,vlm-method}
 \alias{coefficients,vglm-method}
 \alias{coefficients,qrrvglm-method}
-\alias{coefficients,uqo-method}
+%\alias{coefficients,uqo-method}
 \alias{coefficients,vsmooth.spline-method}
 \alias{coefficients,vsmooth.spline.fit-method}
 \alias{coefficients,summary.vglm-method}
+\alias{coefficients,summary.rrvglm-method}
 \alias{Coefficients,vlm-method}
 \alias{coef,cao-method}
 \alias{coef,vlm-method}
 \alias{coef,vglm-method}
 \alias{coef,qrrvglm-method}
-\alias{coef,uqo-method}
+%\alias{coef,uqo-method}
 \alias{coef,vsmooth.spline-method}
 \alias{coef,vsmooth.spline.fit-method}
 \alias{coef,summary.vglm-method}
+\alias{coef,summary.rrvglm-method}
 \alias{Coef,cao-method}
 \alias{Coef,vlm-method}
 \alias{Coef,qrrvglm-method}
 \alias{Coef,rrvglm-method}
-\alias{Coef,uqo-method}
+%\alias{Coef,uqo-method}
 \alias{constraints,vlm-method}
 \alias{deplot,vglm-method}
 \alias{deplot,vgam-method}
@@ -95,18 +116,18 @@
 \alias{deviance,qrrvglm-method}
 \alias{deviance,vlm-method}
 \alias{deviance,vglm-method}
-\alias{deviance,uqo-method}
+%\alias{deviance,uqo-method}
 \alias{df.residual,vlm-method}
 \alias{effects,vlm-method}
 \alias{fitted.values,qrrvglm-method}
 \alias{fitted.values,vlm-method}
 \alias{fitted.values,vglm-method}
-\alias{fitted.values,uqo-method}
+%\alias{fitted.values,uqo-method}
 \alias{fitted.values,vsmooth.spline-method}
 \alias{fitted,qrrvglm-method}
 \alias{fitted,vlm-method}
 \alias{fitted,vglm-method}
-\alias{fitted,uqo-method}
+%\alias{fitted,uqo-method}
 \alias{fitted,vsmooth.spline-method}
 %
 %
@@ -185,7 +206,7 @@
 \alias{predictors,vglm-method}
 \alias{rlplot,vglm-method}
 \alias{terms,vlm-method}
-\alias{is.bell,uqo-method}
+%\alias{is.bell,uqo-method}
 \alias{is.bell,qrrvglm-method}
 \alias{is.bell,rrvglm-method}
 \alias{is.bell,vlm-method}
@@ -195,29 +216,38 @@
 \alias{logLik,summary.vglm-method}
 \alias{logLik,vglm-method}
 \alias{logLik,vgam-method}
+\alias{logLik,qrrvglm-method}
+\alias{logLik,cao-method}
+%
 \alias{lvplot,cao-method}
 \alias{lvplot,qrrvglm-method}
 \alias{lvplot,rrvglm-method}
-\alias{lvplot,uqo-method}
-\alias{lv,cao-method}
-\alias{lv,Coef.cao-method}
+%\alias{lvplot,uqo-method}
+%
 \alias{lv,rrvglm-method}
 \alias{lv,qrrvglm-method}
+\alias{lv,cao-method}
 \alias{lv,Coef.rrvglm-method}
 \alias{lv,Coef.qrrvglm-method}
-\alias{lv,uqo-method}
+\alias{lv,Coef.cao-method}
+% \alias{lv,uqo-method} defunct
+%\alias{latvar,uqo-method}
+\alias{latvar,cao-method}
 \alias{latvar,Coef.qrrvglm-method}
 \alias{latvar,Coef.rrvglm-method}
-\alias{latvar,qrrvglm-method}
 \alias{latvar,rrvglm-method}
+\alias{latvar,qrrvglm-method}
+%
 \alias{Max,qrrvglm-method}
 \alias{Max,Coef.qrrvglm-method}
-\alias{Max,uqo-method}
+%\alias{Max,uqo-method}
+\alias{Max,cao-method}
 \alias{meplot,numeric-method}
 \alias{meplot,vlm-method}
 %\alias{model.matrix,ANY-method}
 \alias{model.matrix,qrrvglm-method}
 \alias{model.matrix,vlm-method}
+\alias{model.matrix,vgam-method}
 \alias{nobs,ANY-method}
 \alias{nobs,vlm-method}
 \alias{npred,ANY-method}
@@ -236,17 +266,18 @@
 \alias{nvar,rcim-method}
 \alias{Opt,qrrvglm-method}
 \alias{Opt,Coef.qrrvglm-method}
-\alias{Opt,uqo-method}
+%\alias{Opt,uqo-method}
+\alias{Opt,cao-method}
 \alias{persp,cao-method}
 \alias{persp,qrrvglm-method}
-\alias{persp,uqo-method}
+%\alias{persp,uqo-method}
 \alias{predict,cao-method}
 \alias{predict,qrrvglm-method}
 \alias{predict,vgam-method}
 \alias{predict,vglm-method}
 \alias{predict,rrvglm-method}
 \alias{predict,vlm-method}
-\alias{predict,uqo-method}
+%\alias{predict,uqo-method}
 \alias{predict,vsmooth.spline-method}
 \alias{predict,vsmooth.spline.fit-method}
 %
@@ -279,9 +310,9 @@
 \alias{print,summary.vgam-method}
 \alias{print,summary.vglm-method}
 \alias{print,summary.vlm-method}
-\alias{print,uqo-method}
-\alias{print,Coef.uqo-method}
-\alias{print,summary.uqo-method}
+%\alias{print,uqo-method}
+%\alias{print,Coef.uqo-method}
+%\alias{print,summary.uqo-method}
 \alias{print,vsmooth.spline-method}
 \alias{print,cao-method}
 \alias{qtplot,vglm-method}
@@ -290,13 +321,13 @@
 \alias{residuals,vlm-method}
 \alias{residuals,vglm-method}
 \alias{residuals,vgam-method}
-\alias{residuals,uqo-method}
+%\alias{residuals,uqo-method}
 \alias{residuals,vsmooth.spline-method}
 \alias{resid,qrrvglm-method}
 \alias{resid,vlm-method}
 \alias{resid,vglm-method}
 \alias{resid,vgam-method}
-\alias{resid,uqo-method}
+%\alias{resid,uqo-method}
 \alias{resid,vsmooth.spline-method}
 \alias{show,Coef.cao-method}
 \alias{show,summary.cao-method}
@@ -313,9 +344,9 @@
 \alias{show,summary.vgam-method}
 \alias{show,summary.vglm-method}
 \alias{show,summary.vlm-method}
-\alias{show,uqo-method}
-\alias{show,Coef.uqo-method}
-\alias{show,summary.uqo-method}
+%\alias{show,uqo-method}
+%\alias{show,Coef.uqo-method}
+%\alias{show,summary.uqo-method}
 \alias{show,vsmooth.spline-method}
 \alias{show,cao-method}
 \alias{summary,grc-method}
@@ -327,14 +358,14 @@
 \alias{summary,vgam-method}
 \alias{summary,vglm-method}
 \alias{summary,vlm-method}
-\alias{summary,uqo-method}
+%\alias{summary,uqo-method}
 \alias{Tol,cao-method}
 \alias{Tol,qrrvglm-method}
 \alias{Tol,Coef.qrrvglm-method}
-\alias{Tol,uqo-method}
-\alias{Tol,Coef.uqo-method}
+%\alias{Tol,uqo-method}
+%\alias{Tol,Coef.uqo-method}
 \alias{trplot,qrrvglm-method}
-\alias{trplot,uqo-method}
+%\alias{trplot,uqo-method}
 \alias{trplot,cao-method}
 \alias{vcov,rrvglm-method}
 \alias{vcov,qrrvglm-method}
diff --git a/man/normal1.Rd b/man/uninormal.Rd
similarity index 62%
rename from man/normal1.Rd
rename to man/uninormal.Rd
index 26b6513..ec1b7a1 100644
--- a/man/normal1.Rd
+++ b/man/uninormal.Rd
@@ -1,4 +1,5 @@
-\name{normal1}
+\name{uninormal}
+\alias{uninormal}
 \alias{normal1}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Univariate Normal Distribution }
@@ -6,13 +7,15 @@
   Maximum likelihood estimation of the two parameters of a univariate
   normal distribution.
 
+
 }
 \usage{
-normal1(lmean = "identity", lsd = "loge", lvar = "loge",
-        var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
-        apply.parint = FALSE, smallno = 1e-05, zero = -2)
+uninormal(lmean = "identity", lsd = "loge", lvar = "loge",
+          var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
+          smallno = 1e-05, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
+%         apply.parint = FALSE,
 \arguments{
   \item{lmean, lsd, lvar}{
   Link functions applied to the mean and standard deviation/variance.
@@ -49,11 +52,11 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
 
 
   }
-  \item{imethod, parallel, isd, apply.parint, zero}{
+  \item{imethod, parallel, isd, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
   If \code{lmean = loge} then try \code{imethod = 2}.
-  Argument \code{apply.parint} refers to whether the parallelism
-  constraint is applied to the intercept too.
+  If \code{parallel = TRUE} then the parallelism constraint
+  is not applied to the intercept.
 
 
   }
@@ -78,14 +81,21 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
 }
 \references{
 
-  Evans, M., Hastings, N. and Peacock, B. (2000)
-  \emph{Statistical Distributions},
-  New York: Wiley-Interscience, Third edition.
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
+\emph{Statistical Distributions},
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
 
 \author{ T. W. Yee }
+\section{Warning}{
+  \code{uninormal()} is the new name;
+  \code{normal1()} is old and will be decommissioned soon.
+
+
+}
+
 \note{
   Yet to do: allow an argument such as \code{eq.sd} that enables
   the standard deviations to be the same.
@@ -94,15 +104,15 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
 }
 \seealso{
     \code{\link{gaussianff}},
-    \code{\link{posnormal1}},
-    \code{\link{mix2normal1}},
-%   \code{\link{normal1sum1}},
+    \code{\link{posnormal}},
+    \code{\link{mix2normal}},
+    \code{\link{normal.vcm}},
     \code{\link{Qvar}},
     \code{\link{tobit}},
-    \code{\link{cennormal1}},
-    \code{\link{fnormal1}},
-    \code{\link{skewnormal1}},
-    \code{\link{dcennormal1}},
+    \code{\link{cennormal}},
+    \code{\link{foldnormal}},
+    \code{\link{skewnormal}},
+    \code{\link{double.cennormal}},
     \code{\link{SUR}},
     \code{\link{huber2}},
     \code{\link{studentt}},
@@ -114,25 +124,25 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
 \examples{
 ndata <- data.frame(x2 = rnorm(nn <- 200))
 ndata <- transform(ndata,
-                   y1 = rnorm(nn, mean = 1-3*x2, sd = exp(1+0.2*x2)),
-                   y2 = rnorm(nn, mean = 1+2*x2, sd = exp(1+  2*x2)^0.5),
-                   y3 = rnorm(nn, mean = 1+2*x2, sd = exp(1+  2*x2)^0.5))
-fit1 <- vglm(y1 ~ x2, normal1(zero = NULL), ndata, trace = TRUE)
+                   y1  = rnorm(nn, m = 1 - 3*x2, sd = exp(1 + 0.2*x2)),
+                   y2a = rnorm(nn, m = 1 + 2*x2, sd = exp(1 + 2.0*x2)^0.5),
+                   y2b = rnorm(nn, m = 1 + 2*x2, sd = exp(1 + 2.0*x2)^0.5))
+fit1 <- vglm(y1 ~ x2, uninormal(zero = NULL), ndata, trace = TRUE)
 coef(fit1, matrix = TRUE)
-fit2 <- vglm(cbind(y2, y3) ~ x2, data = ndata, trace = TRUE,
-             normal1(var = TRUE, parallel = TRUE,
-                     apply.parint = TRUE, zero = NULL))
+fit2 <- vglm(cbind(y2a, y2b) ~ x2, data = ndata, trace = TRUE,
+             uninormal(var = TRUE, parallel = TRUE ~ x2,
+                       zero = NULL))
 coef(fit2, matrix = TRUE)
 
 # Generate data from N(mu = theta = 10, sigma = theta) and estimate theta.
 theta <- 10
-ndata <- data.frame(y = rnorm(100, m = theta, sd = theta))
-fit3 <- vglm(y ~ 1, normal1(lsd = "identity"), ndata,
+ndata <- data.frame(y3 = rnorm(100, m = theta, sd = theta))
+fit3a <- vglm(y3 ~ 1, uninormal(lsd = "identity"), ndata,
              constraints = list("(Intercept)" = rbind(1, 1)))
-fit4 <- vglm(y ~ 1, normal1(lsd = "identity", parallel = TRUE,
-                            apply.parint = TRUE, zero = NULL), ndata)
-coef(fit3, matrix = TRUE)
-coef(fit4, matrix = TRUE) # Same as fit3
+fit3b <- vglm(y3 ~ 1, uninormal(lsd = "identity", parallel = TRUE ~ 1,
+                                zero = NULL), ndata)
+coef(fit3a, matrix = TRUE)
+coef(fit3b, matrix = TRUE)  # Same as fit3a
 }
 \keyword{models}
 \keyword{regression}
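
A short sketch contrasting the lsd and lvar parameterisations (the var.arg
argument) of uninormal(), with illustrative data:

  library(VGAM)
  set.seed(7)
  ndat <- data.frame(y = rnorm(500, mean = 10, sd = 2))
  fit.sd  <- vglm(y ~ 1, uninormal, data = ndat)                  # 2nd predictor: loge(sd)
  fit.var <- vglm(y ~ 1, uninormal(var.arg = TRUE), data = ndat)  # 2nd predictor: loge(var)
  coef(fit.sd,  matrix = TRUE)
  coef(fit.var, matrix = TRUE)
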
diff --git a/man/uqo.Rd b/man/uqo.Rd
deleted file mode 100644
index 7d8084f..0000000
--- a/man/uqo.Rd
+++ /dev/null
@@ -1,322 +0,0 @@
-\name{uqo}
-\alias{uqo}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Fitting Unconstrained Quadratic Ordination (UQO)}
-\description{
-  An \emph{unconstrained quadratic ordination} (UQO)
-  (equivalently, noncanonical Gaussian ordination) model
-  is fitted using the 
-  \emph{quadratic unconstrained vector generalized linear model}
-  (QU-VGLM) framework.
-  In this documentation, \eqn{M} is the number of linear predictors
-  or species.
-
-}
-\usage{
-uqo(formula, family, data = list(), weights = NULL, subset = NULL,
-    na.action = na.fail, etastart = NULL, mustart = NULL,
-    coefstart = NULL, control = uqo.control(...), offset = NULL,
-    method = "uqo.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
-    contrasts = NULL, constraints = NULL, extra = NULL,
-    qr.arg = FALSE, ...)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{formula}{ a symbolic description of the model to be fit.
-    Since there is no \eqn{x_2} vector by definition, the RHS of
-    the formula has all terms belonging to the \eqn{x_1} vector.
-
-
-  }
-  \item{family}{ a function of class \code{"vglmff"} describing
-    what statistical model is to be fitted. Currently two families
-    are supported: Poisson and binomial.
-
-
-  }
-  \item{data}{ an optional data frame containing the variables
-    in the model. By default the variables are taken from
-    \code{environment(formula)}, typically the environment from
-    which \code{uqo} is called.
-
-
- }
-  \item{weights}{ an optional vector or matrix of (prior) weights 
-    to be used in the fitting process.
-    This argument should not be used.
-
-
-}
-  \item{subset}{ an optional logical vector specifying a subset of
-          observations to 
-          be used in the fitting process.
-
-
-    }
-    \item{na.action}{
-      a function which indicates what should happen when
-      the data contain \code{NA}s. 
-      The default is set by the \code{na.action} setting
-      of \code{\link[base]{options}}, and is \code{na.fail}
-      if that is unset.
-      The ``factory-fresh'' default is \code{na.omit}.
-
-
-    }
-  \item{etastart}{ starting values for the linear predictors.
-    It is a \eqn{M}-column matrix. If \eqn{M = 1} then it may be a vector.
-
-
-    }
-  \item{mustart}{ starting values for the 
-    fitted values. It can be a vector or a matrix. 
-    Some family functions do not make use of this argument.
-
-
-  }
-  \item{coefstart}{ starting values for the
-    coefficient vector.
-
-
-  }
-  \item{control}{ a list of parameters for controlling the fitting process. 
-          See \code{\link{uqo.control}} for details.
-
-
- }
-  \item{offset}{ a vector or \eqn{M}-column matrix of offset values.
-   This argument should not be used.
-
-
- }
-  \item{method}{
-    the method to be used in fitting the model.
-    The default (and presently only) method \code{uqo.fit}
-    uses iteratively reweighted least squares (IRLS).
-
-
-    }
-  \item{model}{ a logical value indicating whether the
-    \emph{model frame}
-    should be assigned in the \code{model} slot.
-
-  }
-
-  \item{x.arg, y.arg}{ logical values indicating whether
-    the model matrix and response matrix used in the fitting
-    process should be assigned in the \code{x} and \code{y} slots.
-    Note the model matrix is the LM model matrix.
-
-
-    }
-  \item{contrasts}{ an optional list. See the \code{contrasts.arg}
-    of \code{\link{model.matrix.default}}.
-
-
-  }
-
-  \item{constraints}{ an optional list  of constraint matrices.
-    This argument should not be used.
-
-
-    }
-  \item{extra}{ an optional list with any extra information that  
-    might be needed by the family function. 
-
-
-    }
-  \item{qr.arg}{ logical value indicating whether
-    the slot \code{qr}, which returns the QR decomposition of the
-    VLM model matrix, is returned on the object.
-    This argument should not be set \code{TRUE}.
-
-
-    }
-  \item{\dots}{ further arguments passed into \code{\link{uqo.control}}.
-
-
-  }
-
-}
-
-\details{
-  \emph{Unconstrained quadratic ordination} models fit symmetric bell-shaped
-  response curves/surfaces to response data, but the latent variables
-  are largely free parameters and are not constrained to be linear
-  combinations of the environmental variables.  This poses a
-  difficult optimization problem.  The current algorithm is very simple
-  and will often fail (even for \code{Rank = 1}) but hopefully this will
-  be improved in the future.
-
-
-  The central formula is given by
-  \deqn{\eta = B_1^T x_1 + A \nu +
-               \sum_{m = 1}^M (\nu^T D_m \nu) e_m}{%
-         eta = B_1^T x_1 + A nu +
-         sum_{m = 1}^M (nu^T D_m nu) e_m}
-  where \eqn{x_1}{x_1} is a vector (usually just a 1 for an intercept),
-  \eqn{\nu}{nu} is a \eqn{R}-vector of latent variables, \eqn{e_m} is
-  a vector of 0s but with a 1 in the \eqn{m}th position.
-  The \eqn{\eta}{eta} are a vector of linear/additive predictors,
-  e.g., the \eqn{m}th element is \eqn{\eta_m = \log(E[Y_m])}{eta_m =
-  log(E[Y_m])} for the \eqn{m}th species.  The matrices \eqn{B_1},
-  \eqn{A}, and \eqn{D_m} are estimated from the data, i.e.,
-  contain the regression coefficients. Also, \eqn{\nu}{nu} is
-  estimated.
-  The tolerance matrices satisfy \eqn{T_s = -\frac12 D_s^{-1}}{T_s =
-  -(0.5 D_s^(-1)}.  Many important UQO details are directly related to
-  arguments in \code{\link{uqo.control}};
-  see also \code{\link{cqo}} and \code{\link{qrrvglm.control}}.
-
-
-Currently, only Poisson and binomial \pkg{VGAM} family functions are
-implemented for this function, and dispersion parameters for these are
-assumed known.  Thus the Poisson is catered for by
-\code{\link{poissonff}}, and the binomial by \code{\link{binomialff}}.
-Those beginning with \code{"quasi"} have dispersion parameters that are
-estimated for each species, hence will give an error message here.
-
-
-}
-\value{
-  An object of class \code{"uqo"}
-  (this may change to \code{"quvglm"} in the future).
-
-
-}
-\references{
-
-
-Yee, T. W. (2004)
-A new technique for maximum-likelihood
-canonical Gaussian ordination.
-\emph{Ecological Monographs},
-\bold{74}, 685--701.
-
-
-%Yee, T. W. (2005)
-%On constrained and unconstrained quadratic ordination.
-%\emph{Manuscript in preparation}.
-
-
-Yee, T. W. (2006)
-Constrained additive ordination.
-\emph{Ecology}, \bold{87}, 203--213.
-
-
-}
-\author{Thomas W. Yee} 
-
-\note{
-
-  The site scores are centered.
-  When \eqn{R>1}, they are uncorrelated and should be unique up
-  to a rotation.
-
-
-The argument \code{Bestof} in \code{\link{uqo.control}} controls
-the number of models fitted (each uses different starting values) to
-the data. This argument is important because convergence may be to a
-\emph{local} solution rather than the \emph{global} solution. Using more
-starting values increases the chances of finding the global solution.
-Local solutions arise because the optimization problem is highly
-nonlinear.
-
-
-In the example below, a CQO model is fitted and used for providing
-initial values for a UQO model.
-
-
-}
-\section{Warning }{
-
-  Local solutions are not uncommon when fitting UQO models.  To increase
-  the chances of obtaining the global solution, set
-  \code{ITolerances = TRUE} or \code{EqualTolerances = TRUE} and increase
-  the value of the argument \code{Bestof} in \code{\link{uqo.control}}.
-  For reproducibility of the results, it pays to set a different random
-  number seed before calling \code{uqo} (the function
-  \code{\link[base:Random]{set.seed}} does this).
-
-
-The function \code{uqo} is very sensitive to initial values, and there
-is a lot of room for improvement here.
-
-
-UQO is computationally expensive.  It pays to keep the rank to no more
-than 2, and 1 is much preferred over 2.
-The data needs to conform closely to the statistical model.
-
-
-Currently there is a bug with the argument \code{Crow1positive}
-in \code{\link{uqo.control}}. This argument might be interpreted
-as controlling the sign of the first site score, but currently
-this is not done.
-
-
-}
-
-\seealso{
-  \code{\link{uqo.control}},
-  \code{\link{cqo}},
-  \code{\link{qrrvglm.control}},
-  \code{\link{rcqo}},
-% \code{\link{cao}},
-\code{\link{poissonff}},
-\code{\link{binomialff}},
-  \code{Coef.uqo},
-  \code{lvplot.uqo},
-  \code{persp.uqo},
-  \code{trplot.uqo},
-  \code{vcov.uqo},
-  \code{\link[base:Random]{set.seed}},
-  \code{\link{hspider}}.
-
-
-}
-\examples{ \dontrun{ set.seed(123) # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
-p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
-                Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
-                Trocterr, Zoraspin) ~
-          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-          ITolerances = TRUE, fam = poissonff, data = hspider, 
-          Crow1positive = TRUE, Bestof=3, trace = FALSE)
-if (deviance(p1) > 1589.0) stop("suboptimal fit obtained")
-
-set.seed(111)
-up1 <- uqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
-                Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
-                Trocterr, Zoraspin) ~ 1,
-          family = poissonff, data = hspider,
-          ITolerances = TRUE,
-          Crow1positive = TRUE, lvstart = -lv(p1))
-if (deviance(up1) > 1310.0) stop("suboptimal fit obtained")
-
-nos <- ncol(depvar(up1))  # Number of species
-clr <- (1:(nos+1))[-7] # Omit yellow
-lvplot(up1, las = 1, y = TRUE, pch = 1:nos, scol = clr, lcol = clr, 
-       pcol = clr, llty = 1:nos, llwd = 2)
-legend(x = 2, y = 135, colnames(up1@y), col = clr, lty = 1:nos,
-       lwd = 2, merge = FALSE, ncol = 1, x.inter = 4.0, bty = "l", cex = 0.9)
-
-# Compare the site scores between the two models
-plot(lv(p1), lv(up1), xlim = c(-3, 4), ylim = c(-3, 4), las = 1)
-abline(a = 0, b = -1, lty = 2, col = "blue", xpd = FALSE)
-cor(lv(p1, ITol = TRUE), lv(up1))
-
-# Another comparison between the constrained and unconstrained models
-# The signs are not right so they are similar when reflected about 0 
-par(mfrow = c(2, 1))
-persp(up1, main = "Red/Blue are the constrained/unconstrained models",
-      label = TRUE, col = "blue", las = 1)
-persp(p1, add = FALSE, col = "red")
-pchisq(deviance(p1) - deviance(up1), df = 52-30, lower.tail = FALSE)
-}}
-\keyword{models}
-\keyword{regression}
-
-% 6/10/06; when the bug is fixed:
-%persp(p1, add = TRUE, col = "red")
-
-
diff --git a/man/uqo.control.Rd b/man/uqo.control.Rd
deleted file mode 100644
index 127d2e1..0000000
--- a/man/uqo.control.Rd
+++ /dev/null
@@ -1,303 +0,0 @@
-\name{uqo.control}
-\alias{uqo.control}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Control Function for UQO models }
-\description{
-  Algorithmic constants and parameters for an
-  unconstrained quadratic ordination (UQO) model, by fitting a
-  \emph{quadratic unconstrained vector generalized additive model}
-  (QU-VGLM), are set using this function.
-  It is the control function of \code{\link{uqo}}.
-  
-}
-\usage{
-uqo.control(Rank=1, Bestof = if (length(lvstart) &&
-            !jitter.sitescores) 1 else 10, CA1 = FALSE, Crow1positive
-            = TRUE, epsilon = 1.0e-07, EqualTolerances = ITolerances,
-            Etamat.colmax = 10, GradientFunction=TRUE, Hstep = 0.001,
-            isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
-            ITolerances = FALSE, lvstart = NULL, jitter.sitescores
-            = FALSE, maxitl = 40, Maxit.optim = 250, MUXfactor =
-            rep(3, length=Rank), optim.maxit = 20, nRmax = 250,
-            SD.sitescores = 1.0, SmallNo = 5.0e-13, trace = TRUE,
-            Use.Init.Poisson.QO=TRUE, ...)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{Rank}{ The numerical rank \eqn{R} of the model,
-    i.e., the number of latent variables or ordination axes.
-    Currently only \eqn{R=1} is recommended.
-
-
-  }
-  \item{Bestof}{ Integer. The best of \code{Bestof} models fitted is
-    returned. This argument helps guard against local solutions by
-    (hopefully) finding the global solution from many fits.
-    The argument has value 1 if an initial value for the site scores is
-    inputted using \code{lvstart}.
-
-
-  }
-  \item{CA1}{ 
-    Logical. If \code{TRUE} the site scores from a correspondence analysis
-    (CA) are computed and used on the first axis as initial values.
-    Both \code{CA1} and \code{Use.Init.Poisson.QO} cannot both be
-    \code{TRUE}.
-
-
-  }
-  \item{Crow1positive}{ 
-    Logical vector of length \code{Rank} (recycled if necessary):
-    are the elements of the first row of the latent variable matrix
-    \eqn{\nu}{nu} positive?
-      For example, if \code{Rank} is 2, then specifying
-      \code{Crow1positive=c(FALSE, TRUE)} will force the first
-      site score's first element to be negative, and the first site
-      score's second element to be positive.  Note that there is no
-      \eqn{C} matrix with UQO, but the argument's name comes from
-      \code{\link{qrrvglm.control}} and is left unchanged for convenience.
-
-
-  }
-    \item{epsilon}{
-      Positive numeric. Used to test for convergence for GLMs fitted
-      in FORTRAN.  Larger values mean a loosening of the convergence
-      criterion.
-
-
-    }
-    \item{EqualTolerances}{
-      Logical indicating whether each (quadratic) predictor will have
-      equal tolerances. Setting \code{EqualTolerances=TRUE} can
-      help avoid numerical problems, especially with binary data.
-      Note that the estimated (common) tolerance matrix may or may not be
-      positive-definite. If it is, then it can be scaled to the \eqn{R}
-      x \eqn{R} identity matrix.  Setting \code{ITolerances=TRUE} will
-      fit a common \eqn{R} x \eqn{R} identity matrix as the tolerance
-      matrix to the data, but this is model-driven rather than being
-      data-driven because it \emph{forces} bell-shaped curves/surfaces
-      onto the data.  If the estimated (common) tolerance matrix happens
-      to be positive-definite, then this model is essentially equivalent
-      to the model with \code{ITolerances=TRUE}.
-      See \bold{Details} in \code{\link{cqo}} and \code{\link{qrrvglm.control}}
-      for more details.
-
-
-    }
-  \item{Etamat.colmax}{
-    Positive integer, no smaller than \code{Rank}.  Controls the amount
-    of memory used by \code{.Init.Poisson.QO()}.  It is the maximum
-    number of columns allowed for the pseudo-response and its weights.
-    In general, the larger the value, the better the initial value.
-    Used only if \code{Use.Init.Poisson.QO=TRUE}.
-
-
-  }
-
-  \item{GradientFunction}{ 
-   Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr} is
-   used or not, i.e., to compute gradient values.  The default value is
-   usually faster on most problems.
-
-
-  }
-  \item{Hstep}{ 
-   Positive value. Used as the step size in the finite difference
-   approximation to the derivatives by \code{\link[stats]{optim}}.
-
-
-  }
-  \item{isdlv}{
-   Initial standard deviations for the latent variables (site scores).
-   Numeric, positive and of length \eqn{R} (recycled if necessary).
-   This argument is used only if \code{ITolerances=TRUE}.  Used by
-   \code{.Init.Poisson.QO()} to obtain initial values for the constrained
-   coefficients \eqn{C} adjusted to a reasonable value. It adjusts the
-   spread of the site scores relative to a common species tolerance of 1
-   for each ordination axis.  A value between 0.5 and 10 is recommended;
-   a value such as 10 means that the range of the environmental space is
-   very large relative to the niche width of the species.  The successive
-   values should decrease because the first ordination axis should have
-   the most spread of site scores, followed by the second ordination
-   axis, etc.
-
-
- }
-  \item{ITolerances}{
-   Logical. If \code{TRUE} then the (common) tolerance matrix is
-   the \eqn{R} x \eqn{R} identity matrix by definition.  Note that
-   \code{ITolerances=TRUE} implies \code{EqualTolerances=TRUE}, but
-   not vice versa.  Internally, the quadratic terms will be treated
-   as offsets (in GLM jargon) and so the models can potentially be
-   fitted very efficiently. 
-   See \bold{Details} in \code{\link{cqo}} and \code{\link{qrrvglm.control}}
-   for more details.  The success of \code{ITolerances=TRUE} often depends
-   on suitable values for \code{isdlv} and/or \code{MUXfactor}.
-
- }
- \item{lvstart}{ 
-   Optional matrix of initial values of the site scores. If given, the
-   matrix must be \eqn{n} by \eqn{R}, where \eqn{n} is the number
-   of sites and \eqn{R} is the rank.  This argument overrides the
-   arguments \code{Use.Init.Poisson.QO} and \code{CA1}.
-   Good possibilities for \code{lvstart} are the site scores from a
-   constrained ordination, e.g., from \code{\link{cqo}}.
-
-
-  }
-  \item{jitter.sitescores}{ Logical.
-   If \code{TRUE} the initial values for the site scores are jittered
-   to add a random element to the starting values.
-
-
-  }
-
-  \item{maxitl}{ 
-    Positive integer.  Number of iterations allowed for the IRLS algorithm
-    implemented in the compiled code.
-
-
-    }
-  \item{Maxit.optim}{ 
-    Positive integer.  Number of iterations given to the function
-    \code{\link[stats]{optim}} at each of the \code{optim.maxit}
-    iterations.
-
-  }
-  \item{MUXfactor}{
-   Multiplication factor for detecting large offset values.  Numeric,
-   positive and of length \eqn{R} (recycled if necessary).  This argument
-   is used only if \code{ITolerances=TRUE}. Offsets are \eqn{-0.5}
-   multiplied by the sum of the squares of all \eqn{R} latent variable
-   values. If the latent variable values are too large then this will
-   result in numerical problems. By too large, it is meant that the
-   standard deviations of the latent variable values are greater than
-   \code{MUXfactor[r] * isdlv[r]} for \code{r=1:Rank} (this is why
-   centering and scaling all the numerical predictor variables in
-   \eqn{x_2} is recommended).  A value about 3 or 4 is recommended.
-   If failure to converge occurs, try a slightly lower value.
-
-
-}
-  \item{optim.maxit}{ 
-    Positive integer.  Number of times \code{\link[stats]{optim}}
-    is invoked.
-
-
-%   At iteration \code{i}, the \code{i}th value of \code{Maxit.optim}
-%   is fed into \code{\link[stats]{optim}}.
-
-
-  }
-  \item{nRmax}{ 
-    Positive integer.  If the number of parameters making up the latent
-    variable values (\eqn{n} multiplied by \eqn{R}) is greater than this
-    argument then a conjugate-gradients algorithm is used, otherwise a
-    quasi-Newton algorithm is used by \code{\link[stats]{optim}}. The
-    conjugate-gradients method is more suitable when the number of
-    parameters is large because it requires less memory.
-
-
-    }
-  \item{SD.sitescores}{ Numeric. Standard deviation of the
-    initial values of the site scores, which are generated from
-    a normal distribution.
-
-
-    }
-% \item{Dzero}{ Integer vector specifying which squared terms
-%     are to be zeroed. These linear predictors will correspond to
-%     a RR-VGLM.
-%     The values must be elements from the set \{1,2,\ldots,\eqn{M}\}.
-%     Used only if \code{Quadratic=TRUE} and \code{FastAlgorithm=FALSE}.
-% }
-  \item{SmallNo}{ Positive numeric between \code{.Machine$double.eps} and
-      \code{0.0001}.
-      Used to avoid under- or over-flow in the IRLS algorithm.
-
-
-  }
-  \item{trace}{ Logical indicating if output should be produced for
-    each iteration.
-
-
-  }
-%  \item{Kinit}{ Initial values for the index parameters \code{k} in the
-%   negative binomial distribution (one per species).
-%   In general, a smaller number is preferred over a larger number.
-%   The vector is recycled to the number of responses (species).
-% }
-
-  \item{Use.Init.Poisson.QO}{
-    Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO()} is
-    used to obtain initial values for the site scores.  If \code{FALSE}
-    then random numbers are used instead.  \code{CA1} and
-    \code{Use.Init.Poisson.QO} cannot both be \code{TRUE}.
-
-
-  }
-  \item{\dots}{ Ignored at present. }
-}
-\details{
-   The algorithm currently used by \code{\link{uqo}} is unsophisticated
-   and fails often. Improvements will hopefully be made soon.
-
-
-   See \code{\link{cqo}} and \code{\link{qrrvglm.control}} for more details
-   that are equally pertinent to UQO.
-
-
-% zz site scores are centered. Possibly uncorrelated too?
-
-   To reduce the number of parameters being estimated, setting
-   \code{ITolerances = TRUE} or \code{EqualTolerances = TRUE} is advised.
-
-
-}
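A minimal sketch of that advice (hypothetical settings; uqo() and uqo.control() are assumed to be available, as in versions of VGAM that still ship them):

  ctrl <- uqo.control(ITolerances = TRUE, trace = TRUE)  # identity tolerance matrix, verbose fitting
  names(ctrl)  # a list of control settings, as described under Value below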
-\value{
-  A list with the components corresponding to its arguments, after
-  some basic error checking.
-
-
-}
-\references{
-
-
-Yee, T. W. (2006)
-Constrained additive ordination.
-\emph{Ecology}, \bold{87}, 203--213.
-
-
-%Yee, T. W. (2012)
-%On constrained and unconstrained quadratic ordination.
-%\emph{Manuscript in preparation}.
-
-
-}
-\author{T. W. Yee}
-\note{
-  This is a difficult optimization problem, and the current
-  algorithm needs to be improved.
-
-
-}
-\seealso{
-  \code{\link{uqo}}.
-
-
-}
-
-\section{Warning }{
-  This function is currently very sensitive to initial values. Setting
-  \code{Bestof} to some reasonably large integer is recommended.
-
-
-}
-
-\examples{
-uqo.control()
-}
-\keyword{models}
-\keyword{regression}
-
diff --git a/man/venice.Rd b/man/venice.Rd
index 8c0c0ee..23be5e5 100644
--- a/man/venice.Rd
+++ b/man/venice.Rd
@@ -138,7 +138,7 @@ qtplot(fit1, mpv = TRUE, lcol = c(1, 2, 5), tcol = c(1, 2, 5),
 plot(sealevel ~ Year, data = venice90, type = "h", col = "blue")
 summary(venice90)
 dim(venice90)
-round(100 * nrow(venice90) / ((2009 - 1940 + 1) * 365.26 * 24), dig = 3)
+round(100 * nrow(venice90) / ((2009 - 1940 + 1) * 365.26 * 24), digits = 3)
 }
 }
 \keyword{datasets}
diff --git a/man/vgam-class.Rd b/man/vgam-class.Rd
index ce769b1..e083016 100644
--- a/man/vgam-class.Rd
+++ b/man/vgam-class.Rd
@@ -132,7 +132,7 @@ Numerical rank of the fitted model.
  from class \code{ "vlm"}.
 The \emph{working} residuals at the final IRLS iteration.
  }
-    \item{\code{rss}:}{Object of class \code{"numeric"},
+    \item{\code{res.ss}:}{Object of class \code{"numeric"},
  from class \code{ "vlm"}.
 Residual sum of squares at the final IRLS iteration with
 the adjusted dependent vectors and weight matrices.
@@ -248,7 +248,7 @@ Vector generalized additive models.
 # Fit a nonparametric proportional odds model
 pneumo <- transform(pneumo, let = log(exposure.time))
 vgam(cbind(normal, mild, severe) ~ s(let),
-     cumulative(parallel = TRUE), pneumo)
+     cumulative(parallel = TRUE), data = pneumo)
 }
 \keyword{classes}
 \keyword{models}
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 9e10fce..02d0ea3 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -15,7 +15,7 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
      coefstart = NULL, control = vgam.control(...), offset = NULL, 
      method = "vgam.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, 
      contrasts = NULL, constraints = NULL, 
-     extra = list(), qr.arg = FALSE, smart = TRUE, ...)
+     extra = list(), form2 = NULL, qr.arg = FALSE, smart = TRUE, ...)
 }
 %- maybe also `usage' for other objects documented here.
 \arguments{
@@ -80,7 +80,7 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
 
 
   }
-  \item{contrasts, extra, qr.arg, smart}{
+  \item{contrasts, extra, form2, qr.arg, smart}{
   Same as for \code{\link{vglm}}.
 
 
@@ -208,6 +208,11 @@ The \code{VGAM} Package.
 
 %~Make other sections like WARNING with \section{WARNING }{....} ~
 \section{WARNING}{
+  Currently \code{vgam} can only handle constraint matrices \code{cmat},
+  say, such that \code{crossprod(cmat)} is diagonal.
+  This is a bug that I will try to fix up soon.
+
+
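As a rough illustration of that restriction (a hypothetical constraint matrix, not package code), the condition can be checked directly:

  cmat <- cbind(c(1, 1, 1), c(1, 0, -1))   # hypothetical constraint matrix with orthogonal columns
  cp <- crossprod(cmat)
  all(cp[row(cp) != col(cp)] == 0)         # TRUE here, so this cmat meets the stated condition
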
   See warnings in \code{\link{vglm.control}}.
 
 
diff --git a/man/vglm-class.Rd b/man/vglm-class.Rd
index 4625e5f..b50d27f 100644
--- a/man/vglm-class.Rd
+++ b/man/vglm-class.Rd
@@ -120,7 +120,7 @@ Numerical rank of the fitted model.
  from class \code{ "vlm"}.
 The \emph{working} residuals at the final IRLS iteration.
  }
-    \item{\code{rss}:}{Object of class \code{"numeric"},
+    \item{\code{res.ss}:}{Object of class \code{"numeric"},
  from class \code{ "vlm"}.
 Residual sum of squares at the final IRLS iteration with
 the adjusted dependent vectors and weight matrices.
diff --git a/man/vglm.Rd b/man/vglm.Rd
index 1353841..fb4dedd 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -292,7 +292,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
   \item{R}{the \bold{R} matrix in the QR decomposition used in the fitting.}
   \item{rank}{numerical rank of the fitted model.}
   \item{residuals}{the \emph{working} residuals at the final IRLS iteration.}
-  \item{rss}{residual sum of squares at the final IRLS iteration with
+  \item{res.ss}{residual sum of squares at the final IRLS iteration with
   the adjusted dependent vectors and weight matrices.}
   \item{smart.prediction}{
   a list of data-dependent parameters (if any)
@@ -435,14 +435,14 @@ vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
 fit3 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
 coef(fit3, matrix = TRUE) 
 constraints(fit3)
-model.matrix(fit3, type = "lm") # LM model matrix
-model.matrix(fit3)              # Larger VGLM (or VLM) model matrix
+model.matrix(fit3, type = "lm")  # LM model matrix
+model.matrix(fit3)               # Larger VGLM (or VLM) model matrix
 
 
 # Example 4. Bivariate logistic model 
 fit4 <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
 coef(fit4, matrix = TRUE)
-depvar(fit4) # Response are proportions
+depvar(fit4)  # Response are proportions
 weights(fit4, type = "prior")
 
 
@@ -451,12 +451,12 @@ weights(fit4, type = "prior")
 nn <- 1000
 eyesdat <- round(data.frame(lop = runif(nn),
                             rop = runif(nn),
-                             op = runif(nn)), dig = 2)
+                             op = runif(nn)), digits = 2)
 eyesdat <- transform(eyesdat, eta1 = -1 + 2 * lop,
                               eta2 = -1 + 2 * lop)
 eyesdat <- transform(eyesdat,
-           leye = rbinom(nn, size = 1, prob = logit(eta1, inv = TRUE)),
-           reye = rbinom(nn, size = 1, prob = logit(eta2, inv = TRUE)))
+           leye = rbinom(nn, size = 1, prob = logit(eta1, inverse = TRUE)),
+           reye = rbinom(nn, size = 1, prob = logit(eta2, inverse = TRUE)))
 head(eyesdat)
 fit5 <- vglm(cbind(leye, reye) ~ op,
              binom2.or(exchangeable = TRUE, zero = 3),
@@ -498,7 +498,7 @@ constraints(fit5)
 %coef(fit6, matrix = TRUE)
 %head(predict(fit6))
 %\dontrun{
-%plotvgam(fit6, se = TRUE) # Wrong since it plots against op, not lop.
+%plotvgam(fit6, se = TRUE)  # Wrong since it plots against op, not lop.
 %}
 %
 %
@@ -510,7 +510,7 @@ constraints(fit5)
 %                   X2=runif(n), Z2=runif(n))
 %mydat <- round(mydat, dig=2)
 %fit7 <- vglm(ymat ~ X2 + Z2, data=mydat, crit="c",
-%           fam = dirichlet(parallel = TRUE), # Intercept is also parallel.
+%           fam = dirichlet(parallel = TRUE),  # Intercept is also parallel.
 %           xij = list(Z2 ~ z1 + z2 + z3 + z4,
 %                      X2 ~ x1 + x2 + x3 + x4),
 %           form2 =  ~ Z2 + z1 + z2 + z3 + z4 +
@@ -519,7 +519,7 @@ constraints(fit5)
 %head(model.matrix(fit7, type="vlm"))  # Big VLM model matrix
 %coef(fit7)
 %coef(fit7, matrix = TRUE)
-%max(abs(predict(fit7)-predict(fit7, new=mydat))) # Predicts correctly
+%max(abs(predict(fit7)-predict(fit7, new=mydat)))  # Predicts correctly
 %summary(fit7)
 
 
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index 4adb313..b0831b3 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -241,24 +241,24 @@ ymat <- rdiric(n <- 1000, shape = rep(exp(2), len = 4))
 mydat <- data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n), x4 = runif(n),
                     z1 = runif(n), z2 = runif(n), z3 = runif(n), z4 = runif(n))
 mydat <- transform(mydat, X = x1, Z = z1)
-mydat <- round(mydat, dig = 2)
+mydat <- round(mydat, digits = 2)
 fit2 <- vglm(ymat ~ X + Z,
              dirichlet(parallel = TRUE), data = mydat, trace = TRUE,
              xij = list(Z ~ z1 + z2 + z3 + z4,
                         X ~ x1 + x2 + x3 + x4),
              form2 = ~  Z + z1 + z2 + z3 + z4 +
                         X + x1 + x2 + x3 + x4)
-head(model.matrix(fit2, type =  "lm")) # LM model matrix
-head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix
+head(model.matrix(fit2, type =  "lm"))  # LM model matrix
+head(model.matrix(fit2, type = "vlm"))  # Big VLM model matrix
 coef(fit2)
 coef(fit2, matrix = TRUE)
-max(abs(predict(fit2)-predict(fit2, new = mydat))) # Predicts correctly
+max(abs(predict(fit2)-predict(fit2, new = mydat)))  # Predicts correctly
 summary(fit2)
 \dontrun{
-# plotvgam(fit2, se = TRUE, xlab = "x1", which.term = 1) # Bug!
-# plotvgam(fit2, se = TRUE, xlab = "z1", which.term = 2) # Bug!
-plotvgam(fit2, xlab = "x1") # Correct
-plotvgam(fit2, xlab = "z1") # Correct
+# plotvgam(fit2, se = TRUE, xlab = "x1", which.term = 1)  # Bug!
+# plotvgam(fit2, se = TRUE, xlab = "z1", which.term = 2)  # Bug!
+plotvgam(fit2, xlab = "x1")  # Correct
+plotvgam(fit2, xlab = "z1")  # Correct
 }
 
 
@@ -266,10 +266,10 @@ plotvgam(fit2, xlab = "z1") # Correct
 set.seed(123)
 coalminers <- transform(coalminers,
                         Age = (age - 42) / 5,
-                        dum1 = round(runif(nrow(coalminers)), dig = 2),
-                        dum2 = round(runif(nrow(coalminers)), dig = 2),
-                        dum3 = round(runif(nrow(coalminers)), dig = 2),
-                        dumm = round(runif(nrow(coalminers)), dig = 2))
+                        dum1 = round(runif(nrow(coalminers)), digits = 2),
+                        dum2 = round(runif(nrow(coalminers)), digits = 2),
+                        dum3 = round(runif(nrow(coalminers)), digits = 2),
+                        dumm = round(runif(nrow(coalminers)), digits = 2))
 BS <- function(x, ..., df = 3) bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
 NS <- function(x, ..., df = 3) ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
 
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index d61db46..54dcc04 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -89,9 +89,9 @@ vonmises(llocation = elogit(min = 0, max = 2 * pi), lscale = "loge",
 }
 \references{ 
 
-Evans, M., Hastings, N. and Peacock, B. (2000)
+Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
-New York: Wiley-Interscience, Third edition.
+Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
@@ -125,12 +125,12 @@ New York: Wiley-Interscience, Third edition.
 }
 \examples{
 vdata <- data.frame(x2 = runif(nn <- 1000))
-vdata <- transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2))) # Bad data!!
+vdata <- transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2)))  # Bad data!!
 fit <- vglm(y  ~ x2, vonmises(zero = 2), vdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
-with(vdata, range(y)) # Original data
-range(depvar(fit))    # Processed data is in [0,2*pi)
+with(vdata, range(y))  # Original data
+range(depvar(fit))     # Processed data is in [0,2*pi)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/vsmooth.spline.Rd b/man/vsmooth.spline.Rd
index 00a0b38..9380a58 100644
--- a/man/vsmooth.spline.Rd
+++ b/man/vsmooth.spline.Rd
@@ -178,6 +178,15 @@ Heidelberg: Physica-Verlag.
 
 }
 
+%~Make other sections like WARNING with \section{WARNING }{....} ~
+\section{WARNING}{
+  See \code{\link{vgam}} for information about an important bug.
+
+
+}
+
+
+
 \seealso{
 \code{vsmooth.spline-class},
 \code{plot.vsmooth.spline},
@@ -209,11 +218,11 @@ mycols <- c("orange", "blue", "orange")
 \dontrun{ plot(fit2, lcol = mycols, pcol = mycols, las = 1) }
 
 p <- predict(fit, x = model.matrix(fit, type = "lm"), deriv = 0)
-max(abs(depvar(fit) - with(p, y))) # Should be 0; and fit at y is not good
+max(abs(depvar(fit) - with(p, y)))  # Should be 0; and fit at y is not good
 
 par(mfrow = c(3, 1))
 ux <- seq(1, 8, len = 100)
-for(dd in 1:3) {
+for (dd in 1:3) {
   pp <- predict(fit, x = ux, deriv = dd)
 \dontrun{with(pp, matplot(x, y, type = "l", main = paste("deriv =", dd),
                           lwd = 2, ylab = "", cex.axis = 1.5,
diff --git a/man/wald.Rd b/man/waldff.Rd
similarity index 86%
rename from man/wald.Rd
rename to man/waldff.Rd
index 1ccc35a..d07f8b8 100644
--- a/man/wald.Rd
+++ b/man/waldff.Rd
@@ -1,14 +1,15 @@
-\name{wald}
-\alias{wald}
+\name{waldff}
+\alias{waldff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Wald Distribution Family Function }
 \description{
 Estimates the parameter of the standard Wald distribution
 by maximum likelihood estimation.
 
+
 }
 \usage{
-wald(link.lambda = "loge", init.lambda = NULL)
+waldff(link.lambda = "loge", init.lambda = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -16,11 +17,13 @@ wald(link.lambda = "loge", init.lambda = NULL)
   Parameter link function for the \eqn{\lambda}{lambda} parameter. 
   See \code{\link{Links}} for more choices and general information.
 
+
   }
   \item{init.lambda}{
   Initial value for the \eqn{\lambda}{lambda} parameter.
   The default means an initial value is chosen internally.
 
+
   }
 }
 \details{
@@ -48,6 +51,7 @@ wald(link.lambda = "loge", init.lambda = NULL)
 
 }
 \references{ 
+
 Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
 \emph{Continuous Univariate Distributions},
 2nd edition,
@@ -66,13 +70,14 @@ New York: Wiley.
 
 
 \seealso{ 
-  \code{\link{inv.gaussianff}}.
+  \code{\link{inv.gaussianff}},
+  \code{\link{rinv.gaussian}}.
 
 
 }
 \examples{
-wdata <- data.frame(y = rgamma(n = 1000, shape = 1)) # Not inverse Gaussian!!
-fit <- vglm(y ~ 1, wald(init = 0.2), wdata, trace = TRUE)
+wdata <- data.frame(y = rinv.gaussian(n = 1000, mu =  1, lambda = exp(1)))
+fit <- vglm(y ~ 1, waldff(init = 0.2), wdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/weibull.Rd b/man/weibull.Rd
index b4fe17c..9a60082 100644
--- a/man/weibull.Rd
+++ b/man/weibull.Rd
@@ -214,7 +214,7 @@ Concerns about Maximum Likelihood Estimation for
 
 }
 \examples{
-wdata <- data.frame(x2 = runif(nn <- 1000)) # Complete data
+wdata <- data.frame(x2 = runif(nn <- 1000))  # Complete data
 wdata <- transform(wdata,
             y1 = rweibull(nn, shape = exp(1 + x2), scale = exp(-2)),
             y2 = rweibull(nn, shape = exp(2 - x2), scale = exp( 1)))
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index 53b5fa8..0542b1a 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -120,9 +120,9 @@ weightsvglm(object, type = c("prior", "working"),
 \examples{
 pneumo <- transform(pneumo, let = log(exposure.time))
 (fit <- vglm(cbind(normal, mild, severe) ~ let,
-            cumulative(parallel = TRUE, reverse = TRUE), pneumo))
-depvar(fit) # These are sample proportions 
-weights(fit, type = "prior", matrix = FALSE) # Number of observations
+             cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+depvar(fit)  # These are sample proportions 
+weights(fit, type = "prior", matrix = FALSE)  # Number of observations
 
 # Look at the working residuals
 nn <- nrow(model.matrix(fit, type = "lm"))
@@ -132,11 +132,11 @@ temp <- weights(fit, type = "working", deriv = TRUE)
 wz <- m2adefault(temp$weights, M = M)  # In array format
 wzinv <- array(apply(wz, 3, solve), c(M, M, nn))
 wresid <- matrix(NA, nn, M)  # Working residuals 
-for(ii in 1:nn)
+for (ii in 1:nn)
   wresid[ii,] <- wzinv[, , ii, drop = TRUE] \%*\% temp$deriv[ii, ]
-max(abs(c(resid(fit, type = "work")) - c(wresid))) # Should be 0
+max(abs(c(resid(fit, type = "work")) - c(wresid)))  # Should be 0
 
-(zedd <- predict(fit) + wresid) # Adjusted dependent vector
+(zedd <- predict(fit) + wresid)  # Adjusted dependent vector
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/wffc.P2star.Rd b/man/wffc.P2star.Rd
deleted file mode 100644
index 1d49982..0000000
--- a/man/wffc.P2star.Rd
+++ /dev/null
@@ -1,102 +0,0 @@
-\name{wffc.points}
-\alias{wffc.P1}
-\alias{wffc.P1star}
-\alias{wffc.P2}
-\alias{wffc.P2star}
-\alias{wffc.P3}
-\alias{wffc.P3star}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Point System for the 2008 World Fly Fishing Championships }
-\description{
-  Point system for the 2008 World Fly Fishing Championships:
-  current and some proposals.
-
-}
-\usage{
-wffc.P1(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-wffc.P2(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-wffc.P3(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-wffc.P1star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-wffc.P2star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-wffc.P3star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{length}{ Length of the fish, in meters. Numeric vector. }
-  \item{c1}{ Points added to each eligible fish. }
-  \item{min.eligible}{ The 2008 WFFC regulations stipulated that the
-  smallest eligible fish was 0.180 m, which is 180 mm. }
-  \item{ppm}{ Points per meter of length of the fish. }
-
-}
-\details{
-  The official website contains a document with the official rules and
-  regulations of the competition.
-  The function \code{wffc.P1()} implements the current WFFC point system,
-  and is `discrete' in that fish lengths are rounded up to the nearest
-  centimeter (provided the length is greater than or equal to \code{min.eligible} m).
-  \code{wffc.P1star()} is a `continuous' version of it.
-
-
-  The function \code{wffc.P2()} is a new proposal which
-  rewards catching bigger fish.
-  It is based on a quadratic polynomial.
-  \code{wffc.P2star()} is a `continuous' version of it.
-
-
-  The function \code{wffc.P3()} is another new proposal which
-  rewards catching bigger fish.
-  Named a \emph{cumulative linear proposal},
-  it adds \code{ppm} to each multiple of \code{min.eligible} of length.
-  One lot of \code{c1} is also added for each eligible fish.
-  \code{wffc.P3star()} is a `continuous' version of \code{wffc.P3()}.
-
-}
-\value{
-  A vector with the number of points.
-
-}
-
-\references{
-%  \url{http://www.2008worldflyfishingchamps.com}
-%  was the official 2008 website.
-%  \url{http://www.http://san2010.pl}
-%  was the official 2010 website.
-
-
-  Yee, T. W. (2013)
-  On strategies and issues raised by an analysis of
-  the 2008 World Fly Fishing Championships data.
-  \emph{In preparation}.
-
-
-
-}
-
-\author{ T. W. Yee. }
-\note{
-  \code{wffc.P2} and \code{wffc.P2star} may change in the future,
-  as well as possibly
-  \code{wffc.P3} and \code{wffc.P3star}.
-
-}
-\seealso{ \code{\link[VGAM]{wffc}}. }
-\examples{
-\dontrun{ fishlength <- seq(0.0, 0.72, by = 0.001)
-plot(fishlength, wffc.P2star(fishlength), type = "l", col = "blue",
-     las = 1, lty = "dashed", lwd = 2, cex.main = 0.8,
-     xlab = "Fish length (m)", ylab = "Competition points",
-     main = "Current (red) and proposed (blue and green) WFFC point system")
-lines(fishlength, wffc.P1star(fishlength), type = "l", col = "red", lwd = 2)
-lines(fishlength, wffc.P3star(fishlength), type = "l", col = "darkgreen",
-      lwd = 2, lty = "dashed")
-abline(v = (1:4) * 0.18, lty = "dotted")
-abline(h = (1:9) * wffc.P1star(0.18), lty = "dotted") }
-
-# Successive slopes:
-(wffc.P1star((2:8)*0.18) - wffc.P1star((1:7)*0.18)) / (0.18 * 2000)
-(wffc.P3star((2:8)*0.18) - wffc.P3star((1:7)*0.18)) / (0.18 * 2000)
-}
-% Add one or more standard keywords, see file 'KEYWORDS' in the
-% R documentation directory.
-\keyword{ models }
diff --git a/man/wffc.Rd b/man/wffc.Rd
deleted file mode 100644
index f0083ab..0000000
--- a/man/wffc.Rd
+++ /dev/null
@@ -1,214 +0,0 @@
-\name{wffc}
-\alias{wffc}
-\docType{data}
-\title{ 2008 World Fly Fishing Championships Data}
-\description{
-  Capture records of the 2008 FIPS-MOUCHE
-  World Fly Fishing Championships held in Rotorua, New Zealand during
-  22--30 March 2008.
-
-}
-\usage{data(wffc)}
-\format{
-  A data frame with 4267 observations on the following 8 variables.
-  Each row is a recorded capture.
-  \describe{
-    \item{\code{length}}{a numeric vector; length of fish in mm.}
-    \item{\code{water}}{a factor with levels \code{Waihou},
-    \code{Waimakariri}, \code{Whanganui}, \code{Otamangakau}, \code{Rotoaira}.
-    These are known as Sectors IV, V, I, II, III respectively, and
-    are also represented by the variable \code{sector}.
-    }
-    \item{\code{session}}{a numeric vector; a value from the set 1,2,\ldots,6.
-    These are time ordered, and there were two sessions per competition day.}
-    \item{\code{sector}}{a numeric vector; a value from the set 1,2,\ldots,5.}
-    \item{\code{beatboat}}{a numeric vector; beat or boat number,
-    a value from the set 1,2,\ldots,19.}
-    \item{\code{comid}}{a numeric vector; the competitor's ID number.
-    Uniquely identifies each competitor.
-    These ID numbers actually correspond to their rankings
-    in the individual level. }
-    \item{\code{iname}}{a character vector; the individual competitor's name. }
-    \item{\code{country}}{a character vector;
-    what country the competitors represented.
-The countries represented were
-Australia (AUS),
-Canada (CAN),
-Croatia (CRO),
-Czech Republic (CZE),
-England (ENG),
-Finland (FIN),
-France (FRA),
-Holland (NED),
-Ireland (IRE),
-Italy (ITA),
-Japan (JPN),
-Malta (MAL),
-New Zealand (NZL),
-Poland (POL),
-Portugal (POR),
-South Africa (RSA),
-Slovakia (SVK),
-USA (USA),
-Wales (WAL).
-  }
-  }
-
-}
-\details{
-  Details may be found in Yee (2010) and Yee (2013).
-  Here is a brief summary.
-  The three competition days were 28--30 March.
-  Each session was fixed at 9.00am--12.00pm and 2.30--5.30pm daily.
-  One of the sessions was a rest session.
-  Each of 19 teams had 5 members, called A, B, C, D and E
-  (there was a composite team, actually).
-  The scoring system allocated 100 points to each eligible fish
-  (minimum length was 18 cm)
-  and 20 points for each cm of its length
-  (rounded up to the nearest centimeter).
-  Thus a 181mm or 190mm fish was worth 480 points.
-  Each river was divided into 19 contiguous downstream beats labelled
-  1,2,\ldots,19.
-  Each lake was fished by 9 boats, each with two competitors
-  except for one boat which only had one.
-  Each competitor was randomly assigned to a beat/boat.
-
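A rough transcription of that rule (my own sketch, not the package's wffc.P1()), with length in mm as in this data frame:

  points1 <- function(length.mm)
    ifelse(length.mm >= 180, 100 + 20 * ceiling(length.mm / 10), 0)
  points1(c(181, 190))  # both score 480, matching the worked example above
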
-
-Competitors were ranked according to their placings at each sector-session
-combination, and then these placings were summed. Those with the minimum
-total placings were the winners, thus it was not necessarily those who had
-the maximum points who won. For example, in Session 1 at the Waihou River,
-each of the 19 competitors was ranked 1 (best) to 19 (worst) according
-to the point system. This is the ``placing'' for that session. These
-placings were added up over the 5 sessions to give the ``total placings''.
-
-
-All sectors have naturally wild Rainbow trout (\emph{Oncorhynchus mykiss})
-while Lake Otamangakau and the Whanganui River also hold Brown trout
-(\emph{Salmo trutta}). Only these two species were targeted.
-The species was not recorded electronically, however a post-analysis
-of the paper score sheets from the two lakes showed that, approximately,
-less than 5 percent were Brown trout.
-It may be safely assumed that all the Waihou and Waimakariri
-fish were Rainbow trout.
-The gender of the fish was also not recorded electronically, and
-anyway, distinguishing between male and female was very difficult
-for small fish.
-
-
-Although species and gender data were supposed to have been
-collected at the time of capture, the quality of these variables
-is rather poor and, furthermore, they were not recorded electronically.
-
-
-% 11 out of (11 + 210) were brown trout, in Otamangakau.
-%  52 were NAs.
-%
-%  3 out of ( 3 + 179) were brown trout, in Rotoaira.
-%  19 were NAs.
-
-
-  Note that some fish may have been caught more than once, hence
-  these data do not represent individual fish but rather recorded captures.
-
-
-  Note also that a few internal discrepancies may be found within
-  and between the data frames
-  \code{\link[VGAM]{wffc}},
-  \code{\link[VGAM]{wffc.nc}},
-  \code{\link[VGAM]{wffc.indiv}},
-  \code{\link[VGAM]{wffc.teams}}.
-  This is due to various reasons, such as
-  competitors being replaced by reserves when sick,
-  fish that were included or excluded upon the local judge's decision,
-  competitors who fished two hours instead of three by mistake, etc.
-  The data has already been cleaned of errors and internal inconsistencies
-  but a few may remain.
-
-
-}
-
-\seealso{
-  \code{\link[VGAM]{wffc.indiv}},
-  \code{\link[VGAM]{wffc.teams}},
-  \code{\link[VGAM]{wffc.nc}},
-  \code{\link[VGAM]{wffc.P1}}.
-
-}
-\source{
-  This data frame was adapted from the WFFC's spreadsheet.
-  Special thanks goes to 
-  Paul Dewar,
-  Jill Mandeno,
-  Ilkka Pirinen,
-  and the other members of the Organising Committee of the 28th FIPS-Mouche
-  World Fly Fishing Championships for access to the data.
-  The assistance and feedback of Colin Shepherd is gratefully
-  acknowledged.
-
-
-}
-\references{
-%  \url{http://www.2008worldflyfishingchamps.com}
-%  is the official website.
-
-
-  Yee, T. W. (2010)
-  VGLMs and VGAMs: an overview for applications in fisheries research.
-  \emph{Fisheries Research},
-  \bold{101}, 116--126.
-
-
-  Yee, T. W. (2013)
-  On strategies and issues raised by an analysis of
-  the 2008 World Fly Fishing Championships data.
-  \emph{In preparation}.
-  
-
-}
-\examples{
-summary(wffc)
-with(wffc, table(water, session))
-
-# Obtain some simple plots
-waihou <- subset(wffc, water == "Waihou")
-waimak <- subset(wffc, water == "Waimakariri")
-whang  <- subset(wffc, water == "Whanganui")
-otam   <- subset(wffc, water == "Otamangakau")
-roto   <- subset(wffc, water == "Rotoaira")
-minlength <- min(wffc[,"length"])
-maxlength <- max(wffc[,"length"])
-nwater <- c("Waihou" = nrow(waihou), "Waimakariri" = nrow(waimak),
-            "Whanganui" = nrow(whang), "Otamangakau" = nrow(otam),
-            "Rotoaira" = nrow(roto))
-\dontrun{
-par(mfrow = c(2,3), las = 1)
-# Overall distribution of length
-with(wffc, boxplot(length/10 ~ water, ylim = c(minlength, maxlength)/10,
-                   border = "blue", main = "Length (cm)", cex.axis = 0.5))
-
-# Overall distribution of LOG length
-with(wffc, boxplot(length/10 ~ water, ylim = c(minlength, maxlength)/10,
-                   border = "blue", log = "y", cex.axis = 0.5,
-                   main = "Length (cm) on a log scale"))
-
-# Overall distribution of number of captures
-pie(nwater, border = "blue", main = "Proportion of captures",
-    labels = names(nwater), density = 10, col = 1:length(nwater),
-    angle = 85+30* 1:length(nwater))
-
-# Overall distribution of number of captures
-with(wffc, barplot(nwater, main = "Number of captures", cex.names = 0.5,
-                   col = "lightblue"))
-
-# Overall distribution of proportion of number of captures
-with(wffc, barplot(nwater / sum(nwater), cex.names = 0.5, col = "lightblue",
-                   main = "Proportion of captures"))
-# An interesting lake
-with(roto, hist(length/10, xlab = "Fish length (cm)", col = "lightblue",
-                breaks = seq(18, 70, by = 3), prob = TRUE, ylim = c(0, 0.08),
-                border = "blue", ylab = "", main = "Lake Rotoaira", lwd = 2))
-}
-}
-\keyword{datasets}
diff --git a/man/wffc.indiv.Rd b/man/wffc.indiv.Rd
deleted file mode 100644
index dbd0958..0000000
--- a/man/wffc.indiv.Rd
+++ /dev/null
@@ -1,50 +0,0 @@
-\name{wffc.indiv}
-\alias{wffc.indiv}
-\docType{data}
-\title{ 2008 World Fly Fishing Championships (Individual results) Data}
-\description{
-  Individual competitors' results of the 2008 FIPS-MOUCHE
-  World Fly Fishing Championships held in Rotorua, New Zealand during
-  22--30 March 2008.
-
-}
-\usage{data(wffc.indiv)}
-\format{
-  A data frame with 99 observations on the following 8 variables.
-  Some of these variables are described in \code{\link[VGAM]{wffc}}.
-  \describe{
-    \item{\code{totalPlacings}}{a numeric vector; these are the summed
-    placings over the 5 sessions.}
-    \item{\code{points}}{a numeric vector.}
-    \item{\code{noofcaptures}}{a numeric vector.}
-    \item{\code{longest}}{a numeric vector.}
-    \item{\code{individual}}{a numeric vector; did the competitor
-    fish in a team or as an individual?
-   (one team was made of composite countries due to low numbers).}
-    \item{\code{country}}{a character vector.}
-    \item{\code{iname}}{a character vector.}
-    \item{\code{comid}}{a numeric vector.}
-  }
-}
-\details{
-  This data frame gives the individual results of the competition.
-  See also \code{\link[VGAM]{wffc}} and \code{\link[VGAM]{wffc.teams}} for more
-  details and links.
-
-
-}
-%\source{
-%  \url{http://www.2008worldflyfishingchamps.com/}.
-%}
-\references{
-  Yee, T. W. (2010)
-  VGLMs and VGAMs: an overview for applications in fisheries research.
-  \emph{Fisheries Research},
-  \bold{101}, 116--126.
-
-
-}
-\examples{
-summary(wffc.indiv)
-}
-\keyword{datasets}
diff --git a/man/wffc.nc.Rd b/man/wffc.nc.Rd
deleted file mode 100644
index 8c9895c..0000000
--- a/man/wffc.nc.Rd
+++ /dev/null
@@ -1,61 +0,0 @@
-\name{wffc.nc}
-\alias{wffc.nc}
-\docType{data}
-\title{ 2008 World Fly Fishing Championships (Number of captures) Data}
-\description{
-  Number of captures in the 2008 FIPS-MOUCHE
-  World Fly Fishing Championships held in Rotorua, New Zealand during
-  22--30 March 2008.
-
-}
-\usage{data(wffc.nc)}
-\format{
-  A data frame with 475 observations on the following 7 variables.
-  Most of these variables are described in \code{\link[VGAM]{wffc}}.
-  Each row is sorted by sector, session and beat.
-  \describe{
-    \item{\code{sector}}{a numeric vector.}
-    \item{\code{session}}{a numeric vector.}
-    \item{\code{beatboat}}{a numeric vector.}
-    \item{\code{numbers}}{a numeric vector.}
-    \item{\code{comid}}{a numeric vector.}
-    \item{\code{iname}}{a character vector.}
-    \item{\code{country}}{a character vector.}
-  }
-}
-\details{
-  This data frame was obtained by processing \code{\link[VGAM]{wffc}}.
-  The key variable is \code{numbers}, which is
-  sector-session-beat specific.
-
-
-  Note that some fish may have been caught more than once, hence
-  these data do not represent individual fish.
-
-
-}
-%\source{
-%  \url{http://www.2008worldflyfishingchamps.com/}.
-%}
-\references{
-  Yee, T. W. (2010)
-  VGLMs and VGAMs: an overview for applications in fisheries research.
-  \emph{Fisheries Research},
-  \bold{101}, 116--126.
-
-
-}
-
-\seealso{
-\code{\link[VGAM]{DeLury}}.
-
-}
-
-\examples{
-xtabs( ~ sector + session, wffc.nc)
-}
-\keyword{datasets}
-
-% with(wffc.nc, table(sector, session))
-
-
diff --git a/man/wffc.teams.Rd b/man/wffc.teams.Rd
deleted file mode 100644
index 50c15d0..0000000
--- a/man/wffc.teams.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-\name{wffc.teams}
-\alias{wffc.teams}
-\docType{data}
-\title{ 2008 World Fly Fishing Championships (Team results) Data}
-\description{
-  Team results of the 2008 FIPS-MOUCHE
-  World Fly Fishing Championships held in Rotorua, New Zealand during
-  22--30 March 2008.
-
-}
-\usage{data(wffc.teams)}
-\format{
-  A data frame with 18 observations on the following 5 variables.
-  Some of these variables are described in \code{\link[VGAM]{wffc}}.
-  \describe{
-    \item{\code{country}}{a character vector.}
-    \item{\code{totalPlacings}}{a numeric vector; these are the summed
-    placings over the 5 sessions and 5 team members. }
-    \item{\code{points}}{a numeric vector; see \code{\link[VGAM]{wffc}}.}
-    \item{\code{noofcaptures}}{a numeric vector.}
-    \item{\code{longestfish}}{a numeric vector.}
-  }
-}
-\details{
-  This data frame gives the team results of the competition.
-  See also \code{\link[VGAM]{wffc}} and \code{\link[VGAM]{wffc.indiv}} for more
-  details and links.
-
-
-}
-%\source{
-%  \url{http://www.2008worldflyfishingchamps.com/}.
-%}
-%\references{
-%
-%}
-\examples{
-wffc.teams
-}
-\keyword{datasets}
diff --git a/man/yeo.johnson.Rd b/man/yeo.johnson.Rd
index d49cec4..879319e 100644
--- a/man/yeo.johnson.Rd
+++ b/man/yeo.johnson.Rd
@@ -72,11 +72,11 @@ Quantile regression via vector generalized additive models.
 }
 \examples{
 y <- seq(-4, 4, len = (nn <- 200))
-ltry <- c(0, 0.5, 1, 1.5, 2) # Try these values of lambda
+ltry <- c(0, 0.5, 1, 1.5, 2)  # Try these values of lambda
 lltry <- length(ltry)
 psi <- matrix(as.numeric(NA), nn, lltry)
-for(ii in 1:lltry)
-  psi[,ii] <- yeo.johnson(y, lambda = ltry[ii])
+for (ii in 1:lltry)
+  psi[, ii] <- yeo.johnson(y, lambda = ltry[ii])
 
 \dontrun{
 matplot(y, psi, type = "l", ylim = c(-4, 4), lwd = 2, lty = 1:lltry,
diff --git a/man/yip88.Rd b/man/yip88.Rd
index ca52e03..e012d94 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -115,7 +115,7 @@ model.
 }
 
 \examples{
-phi <- 0.35; lambda <- 2 # Generate some artificial data
+phi <- 0.35; lambda <- 2  # Generate some artificial data
 y <- rzipois(n <- 1000, lambda, phi)
 table(y)
 
@@ -125,14 +125,14 @@ fit2 <- vglm(y ~ 1, yip88, trace = TRUE, crit = "coef")
 (true.mean <- (1-phi) * lambda)
 mean(y) 
 head(fitted(fit1))
-fit1@misc$pstr0 # The estimate of phi
+fit1@misc$pstr0  # The estimate of phi
 
 # Compare the ZIP with the positive Poisson distribution 
 pp <- vglm(y ~ 1, pospoisson, subset = y > 0, crit = "c")
 coef(pp)
 Coef(pp)
-coef(fit1) - coef(pp)           # Same 
-head(fitted(fit1) - fitted(pp)) # Different 
+coef(fit1) - coef(pp)            # Same 
+head(fitted(fit1) - fitted(pp))  # Different 
 
 # Another example (Angers and Biswas, 2003) ---------------------
 abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
@@ -140,11 +140,11 @@ abdata <- subset(abdata, w > 0)
 
 yy <- with(abdata, rep(y, w))
 fit3 <- vglm(yy ~ 1, yip88(n = length(yy)), subset = yy > 0)
-fit3@misc$pstr0 # Estimate of phi (they get 0.5154 with SE 0.0707)
+fit3@misc$pstr0  # Estimate of phi (they get 0.5154 with SE 0.0707)
 coef(fit3, matrix = TRUE)
-Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520)
+Coef(fit3)  # Estimate of lambda (they get 0.6997 with SE 0.1520)
 head(fitted(fit3))
-mean(yy) # Compare this with fitted(fit3)
+mean(yy)  # Compare this with fitted(fit3)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/zabinomUC.Rd b/man/zabinomUC.Rd
index 0bcf65c..7b9e571 100644
--- a/man/zabinomUC.Rd
+++ b/man/zabinomUC.Rd
@@ -51,7 +51,7 @@ rzabinom(n, size, prob, pobs0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
index d893524..1eb190c 100644
--- a/man/zabinomial.Rd
+++ b/man/zabinomial.Rd
@@ -1,5 +1,6 @@
 \name{zabinomial}
 \alias{zabinomial}
+\alias{zabinomialff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Altered Binomial Distribution }
 \description{
@@ -9,9 +10,12 @@
 
 }
 \usage{
-zabinomial(lprob = "logit", lpobs0 = "logit",
-           iprob = NULL,    ipobs0 = NULL,
-           imethod = 1, zero = 2)
+zabinomial(lpobs0 = "logit", lprob = "logit",
+           type.fitted = c("mean", "pobs0"),
+           ipobs0 = NULL, iprob = NULL, imethod = 1, zero = NULL)
+zabinomialff(lprob = "logit", lonempobs0 = "logit",
+             type.fitted = c("mean", "pobs0", "onempobs0"),
+             iprob = NULL, ionempobs0 = NULL, imethod = 1, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -26,15 +30,33 @@ zabinomial(lprob = "logit", lpobs0 = "logit",
     See \code{\link{Links}} for more choices.
 
   }
+
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
   \item{iprob, ipobs0}{ 
   See
   \code{\link{CommonVGAMffArguments}}.
 
+
   }
+  \item{lonempobs0, ionempobs0}{
+  Corresponding argument  for the other parameterization.
+  See details below.
+
+
+  }
+
+
   \item{imethod, zero}{
   See
   \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 
@@ -53,8 +75,27 @@ zabinomial(lprob = "logit", lpobs0 = "logit",
 
 
   The input is currently a vector or one-column matrix.
-  Dy default, the two linear/additive
-  predictors are \eqn{(\log(p), logit(p_0))^T}{(log(prob), logit(pobs0))^T}.
+  By default, the two linear/additive
+  predictors for \code{zabinomial()}
+  are \eqn{(logit(p_0), \log(p))^T}{(logit(pobs0), log(prob))^T}.
+
+
+  The \pkg{VGAM} family function \code{zabinomialff()} has a few
+  changes compared to \code{zabinomial()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        binomial probability comes first;
+  (ii)  argument \code{onempobs0} is now 1 minus the probability of an observed 0,
+        i.e., the probability of the positive binomial distribution,
+        i.e., \code{onempobs0} is \code{1-pobs0};
+  (iii) argument \code{zero} has a new default so that \code{onempobs0}
+        is intercept-only.
+  Now \code{zabinomialff()} is generally recommended over
+  \code{zabinomial()}.
+  Both functions implement Fisher scoring and neither can handle
+  multiple responses.
+
+
 
 
 }
@@ -66,10 +107,12 @@ zabinomial(lprob = "logit", lpobs0 = "logit",
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted}, returns
-  the mean \eqn{\mu}{mu} which is given by 
+  the mean \eqn{\mu}{mu} (default) which is given by 
   \deqn{\mu = (1-p_0) \mu_{b} / [1 - (1 - \mu_{b})^N]}{%
          mu = (1-pobs0) * mub / [1 - (1 - mub)^N]}
   where \eqn{\mu_{b}}{mub} is the usual binomial mean.
+  If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
+
 
 }
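A quick numeric sketch of that formula (made-up values, not package code):

  pobs0 <- 0.2; mub <- 0.3; N <- 10        # assumed values; mub is the usual binomial mean
  (1 - pobs0) * mub / (1 - (1 - mub)^N)    # the fitted mean described above
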
 %\references{
@@ -92,7 +135,7 @@ zabinomial(lprob = "logit", lpobs0 = "logit",
   It is a conditional model, not a mixture model.
 
 
-  This family function effectively combines
+  These family functions effectively combine
   \code{\link{posbinomial}} and \code{\link{binomialff}} into
   one family function.
 
@@ -106,20 +149,19 @@ zabinomial(lprob = "logit", lpobs0 = "logit",
   \code{\link[stats:Binomial]{dbinom}},
   \code{\link{CommonVGAMffArguments}}.
 
+
 }
 
 \examples{
 zdata <- data.frame(x2 = runif(nn <- 1000))
-zdata <- transform(zdata,
-                   size  = 10,
-                   prob  = logit(-2 + 3*x2, inverse = TRUE),
-                   pobs0 = logit(-1 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, size  = 10,
+                          prob  = logit(-2 + 3*x2, inverse = TRUE),
+                          pobs0 = logit(-1 + 2*x2, inverse = TRUE))
 zdata <- transform(zdata,
                    y1 = rzabinom(nn, size = size, prob = prob, pobs0 = pobs0))
 with(zdata, table(y1))
 
-fit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL),
-            zdata, trace = TRUE)
+fit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL), zdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
 head(predict(fit))
diff --git a/man/zageomUC.Rd b/man/zageomUC.Rd
index 9f02f39..b47bf98 100644
--- a/man/zageomUC.Rd
+++ b/man/zageomUC.Rd
@@ -51,7 +51,7 @@ rzageom(n, prob, pobs0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
index f795625..497961f 100644
--- a/man/zageometric.Rd
+++ b/man/zageometric.Rd
@@ -1,5 +1,6 @@
 \name{zageometric}
 \alias{zageometric}
+\alias{zageometricff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Altered Geometric Distribution }
 \description{
@@ -9,8 +10,12 @@
 
 }
 \usage{
-zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
-            ipobs0 = NULL,    iprob = NULL, zero = NULL)
+zageometric(lpobs0 = "logit", lprob = "logit",
+            type.fitted = c("mean", "pobs0", "onempobs0"),
+            imethod = 1, ipobs0 = NULL, iprob = NULL, zero = NULL)
+zageometricff(lprob = "logit", lonempobs0 = "logit",
+              type.fitted = c("mean", "pobs0", "onempobs0"),
+              imethod = 1, iprob = NULL, ionempobs0 = NULL, zero = -2)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -29,6 +34,14 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
 
   }
 
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
+
 % \item{epobs0, eprob}{
 % List. Extra argument for the respective links.
 % See \code{earg} in \code{\link{Links}} for general information.
@@ -41,6 +54,15 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
     For multi-column responses, these are recycled sideways.
 
   }
+
+  \item{lonempobs0, ionempobs0}{
+  Corresponding argument  for the other parameterization.
+  See details below.
+
+
+  }
+
+
   \item{zero, imethod}{
   See
   \code{\link{CommonVGAMffArguments}}.
@@ -62,8 +84,26 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
 
 
   The input can be a matrix (multiple responses).
-  By default, the two linear/additive
-  predictors are \eqn{(\log(\phi), logit(p))^T}{(log(phi), logit(prob))^T}.
+  By default, the two linear/additive predictors
+  of \code{zageometric} 
+  are \eqn{(logit(\phi), logit(p))^T}{(logit(phi), logit(prob))^T}.
+
+
+  The \pkg{VGAM} family function \code{zageometricff()} has a few
+  changes compared to \code{zageometric()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        geometric probability comes first;
+  (ii)  argument \code{onempobs0} is now 1 minus the probability of an observed 0,
+        i.e., the probability of the positive geometric distribution,
+        i.e., \code{onempobs0} is \code{1-pobs0};
+  (iii) argument \code{zero} has a new default so that \code{pobs0}
+        is intercept-only.
+  Now \code{zageometricff()} is generally recommended over
+  \code{zageometric()}.
+  Both functions implement Fisher scoring and can handle
+  multiple responses.
+
 
 
 }
@@ -75,9 +115,12 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted}, returns
-  the mean \eqn{\mu}{mu} which is given by 
+  the mean \eqn{\mu}{mu} (default) which is given by 
   \deqn{\mu = (1-\phi) / p.}{%
          mu = (1- phi) / p.}
+  If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
+
+
 
 }
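The same kind of numeric sketch for the zero-altered geometric mean (made-up values):

  phi <- 0.2; prob <- 0.4   # assumed values: phi = P(Y = 0), prob = geometric probability
  (1 - phi) / prob          # the fitted mean described above
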
 %\references{
@@ -117,7 +160,6 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
 
 \seealso{
   \code{\link{dzageom}},
-% \code{\link{posgeometric}},
   \code{\link{geometric}},
   \code{\link{zigeometric}},
   \code{\link[stats:Geometric]{dgeom}},
@@ -125,18 +167,18 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
 
 
 }
+% \code{\link{posgeometric}},
+
 
 \examples{
 zdata <- data.frame(x2 = runif(nn <- 1000))
-zdata <- transform(zdata,
-                   pobs0 = logit(-1 + 2*x2, inverse = TRUE),
-                   prob  = logit(-2 + 3*x2, inverse = TRUE))
-zdata <- transform(zdata,
-                   y1 = rzageom(nn, prob = prob, pobs0 = pobs0),
-                   y2 = rzageom(nn, prob = prob, pobs0 = pobs0))
+zdata <- transform(zdata, pobs0 = logit(-1 + 2*x2, inverse = TRUE),
+                          prob  = logit(-2 + 3*x2, inverse = TRUE))
+zdata <- transform(zdata, y1 = rzageom(nn, prob = prob, pobs0 = pobs0),
+                          y2 = rzageom(nn, prob = prob, pobs0 = pobs0))
 with(zdata, table(y1))
 
-fit <- vglm(cbind(y1, y2) ~ x2, zageometric, zdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, zageometric, data = zdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
 head(predict(fit))
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index 25f1b03..ef24c66 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -51,7 +51,7 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
@@ -65,14 +65,14 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
 }
 \examples{
 munb <- 3; size <- 4; pobs0 <- 0.3; x <- (-1):7
-dzanegbin(x,    munb = munb, size = size, pobs0 = pobs0)
+dzanegbin(x, munb = munb, size = size, pobs0 = pobs0)
 table(rzanegbin(100, munb = munb, size = size, pobs0 = pobs0))
 
 \dontrun{ x <- 0:10
 barplot(rbind(dzanegbin(x, munb = munb, size = size, pobs0 = pobs0),
                 dnbinom(x, mu   = munb, size = size)),
-        beside = TRUE, col = c("blue","green"), cex.main = 0.7, las = 1,
-        ylab = "Probability",names.arg = as.character(x),
+        beside = TRUE, col = c("blue", "green"), cex.main = 0.7, las = 1,
+        ylab = "Probability", names.arg = as.character(x),
         main = paste("ZANB(munb = ", munb, ", size = ", size,",
                      pobs0 = ", pobs0, 
                    ") [blue] vs",  " NB(mu = ", munb, ", size = ", size,
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 8b1fd1a..37d78dd 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -1,5 +1,6 @@
 \name{zanegbinomial}
 \alias{zanegbinomial}
+\alias{zanegbinomialff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Altered Negative Binomial Distribution }
 \description{
@@ -10,9 +11,13 @@
 }
 \usage{
 zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
-              ipobs0 = NULL,                    isize = NULL,
-              zero = c(-1, -3), imethod = 1,
+              type.fitted = c("mean", "pobs0"),
+              ipobs0 = NULL, isize = NULL, zero = -3, imethod = 1,
               nsimEIM = 250, shrinkage.init = 0.95)
+zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
+                type.fitted = c("mean", "pobs0", "onempobs0"),
+                isize = NULL, ionempobs0 = NULL, zero = c(-2, -3),
+                imethod = 1, nsimEIM = 250, shrinkage.init = 0.95)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -35,6 +40,24 @@ zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
 
   }
 
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
+
+  \item{lonempobs0, ionempobs0}{
+  Corresponding argument  for the other parameterization.
+  See details below.
+
+
+  }
+
+
+
+
 % \item{epobs0, emunb, esize}{
 % List. Extra argument for the respective links.
 % See \code{earg} in \code{\link{Links}} for general information.
@@ -43,25 +66,30 @@ zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
 
   \item{ipobs0, isize}{ 
     Optional initial values for \eqn{p_0}{pobs0} and \code{k}.
-    If given, it is okay to give one value
+    If given then it is okay to give one value
     for each response/species by inputting a vector whose length
     is the number of columns of the response matrix.
 
+
   }
   \item{zero}{ 
-    Integer valued vector, may be assigned, e.g., \eqn{-3} or \eqn{3} if
-    the probability of an observed value is to be modelled with the
-    covariates.
+%   Integer valued vector, may be assigned, e.g., \eqn{-3} or \eqn{3} if
+%   the probability of an observed value is to be modelled with the
+%   covariates.
     Specifies which of the three linear predictors are
-    modelled as an intercept only. By default, the \code{k} and \eqn{p_0}{pobs0}
-    parameters for each response are modelled as
-    single unknown numbers that are estimated.
+    modelled as an intercept only.
+%   By default, the \code{k} and \eqn{p_0}{pobs0}
+%   parameters for each response are modelled as
+%   single unknown numbers that are estimated.
     All parameters can be modelled as a
-    function of the explanatory variables by setting \code{zero = NULL}.
-    A negative value means that the value is recycled, so setting \eqn{-3}
-    means all \code{k} are intercept-only.
+    function of the explanatory variables by setting \code{zero = NULL}
+    (not recommended).
+    A negative value means that the value is recycled, e.g.,
+    setting \eqn{-3} means all \code{k} are intercept-only
+    for \code{zanegbinomial}.
     See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
   \item{nsimEIM, imethod}{
   See \code{\link{CommonVGAMffArguments}}.
@@ -88,10 +116,28 @@ zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
 
 
   For one response/species, by default, the three linear/additive
-  predictors are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(pobs0),
+  predictors
+  for \code{zanegbinomial()}
+  are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(pobs0),
   log(munb), log(k))^T}.  This vector is recycled for multiple species.
 
 
+  The \pkg{VGAM} family function \code{zanegbinomialff()} has a few
+  changes compared to \code{zanegbinomial()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        negative binomial mean comes first;
+  (ii)  argument \code{onempobs0} is now 1 minus the probability of an observed 0,
+        i.e., the probability of the positive negative binomial distribution,
+        i.e., \code{onempobs0} is \code{1-pobs0};
+  (iii) argument \code{zero} has a new default so that \code{pobs0}
+        is intercept-only.
+  Now \code{zanegbinomialff()} is generally recommended over
+  \code{zanegbinomial()}.
+  Both functions implement Fisher scoring and can handle
+  multiple responses.
+
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -101,12 +147,16 @@ zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted}, returns
-  the mean \eqn{\mu}{mu} which is given by 
+  the mean \eqn{\mu}{mu} (default) which is given by 
   \deqn{\mu = (1-p_0) \mu_{nb} / [1 - (k/(k+\mu_{nb}))^k].}{%
          mu = (1-pobs0) * munb / [1 - (k/(k+munb))^k].}
+  If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
+
+
 
 }
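A numeric sketch of the ZANB mean above (made-up values):

  pobs0 <- 0.2; munb <- 3; k <- 4                  # assumed values
  (1 - pobs0) * munb / (1 - (k / (k + munb))^k)    # the fitted mean described above
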
 \references{
+
 Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
 D. B. (1996)
 Modelling the abundances of rare species: statistical models
@@ -116,6 +166,11 @@ for counts with extra zeros.
 297--308.
 
 
+  Yee, T. W. (2014)
+  Reduced-rank vector generalized linear models with two linear predictors.
+  \emph{Computational Statistics and Data Analysis}.
+
+
 }
 \section{Warning }{
   Convergence for this \pkg{VGAM} family function seems to depend quite
@@ -178,7 +233,7 @@ zdata <- transform(zdata,
 with(zdata, table(y1))
 with(zdata, table(y2))
 
-fit <- vglm(cbind(y1, y2) ~ x2, zanegbinomial, zdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, zanegbinomial, data = zdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
 head(predict(fit))
diff --git a/man/zapoisUC.Rd b/man/zapoisUC.Rd
index 1c12c03..54b26bf 100644
--- a/man/zapoisUC.Rd
+++ b/man/zapoisUC.Rd
@@ -44,7 +44,7 @@ rzapois(n, lambda, pobs0 = 0)
   \code{rzapois} generates random deviates.
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
@@ -52,16 +52,18 @@ rzapois(n, lambda, pobs0 = 0)
 }
 
 \seealso{ 
-    \code{\link{zapoisson}}.
+    \code{\link{zapoisson}},
+    \code{\link{dzipois}}.
+
 
 }
 \examples{
 lambda <- 3; pobs0 <- 0.2; x <- (-1):7
 (ii <- dzapois(x, lambda, pobs0))
-max(abs(cumsum(ii) - pzapois(x, lambda, pobs0))) # Should be 0
+max(abs(cumsum(ii) - pzapois(x, lambda, pobs0)))  # Should be 0
 table(rzapois(100, lambda, pobs0))
 table(qzapois(runif(100), lambda, pobs0))
-round(dzapois(0:10, lambda, pobs0) * 100) # Should be similar
+round(dzapois(0:10, lambda, pobs0) * 100)  # Should be similar
 
 \dontrun{ x <- 0:10
 barplot(rbind(dzapois(x, lambda, pobs0), dpois(x, lambda)),
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index d1d19f5..3253f1d 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -1,5 +1,6 @@
 \name{zapoisson}
 \alias{zapoisson}
+\alias{zapoissonff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Altered Poisson Distribution }
 \description{
@@ -9,7 +10,10 @@
 
 }
 \usage{
-zapoisson(lpobs0 = "logit", llambda = "loge", zero = NULL)
+zapoisson(lpobs0 = "logit", llambda = "loge",
+          type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL)
+zapoissonff(llambda = "loge", lonempobs0 = "logit",
+            type.fitted = c("mean", "pobs0", "onempobs0"), zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -24,6 +28,20 @@ zapoisson(lpobs0 = "logit", llambda = "loge", zero = NULL)
 
   }
 
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
+  \item{lonempobs0}{
+  Corresponding argument  for the other parameterization.
+  See details below.
+
+
+  }
+
 
 % \item{epobs0, elambda}{
 %         epobs0 = list(), elambda = list(),
@@ -63,9 +81,25 @@ zapoisson(lpobs0 = "logit", llambda = "loge", zero = NULL)
 
 
   For one response/species, by default, the two linear/additive
-  predictors are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(pobs0),
+  predictors for \code{zapoisson()}
+  are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(pobs0),
   log(lambda))^T}.
-  Fisher scoring is implemented.
+
+
+  The \pkg{VGAM} family function \code{zapoissonff()} has a few
+  changes compared to \code{zapoisson()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        Poisson mean comes first;
+  (ii)  argument \code{onempobs0} is now 1 minus the probability of an observed 0,
+        i.e., the probability of the positive Poisson distribution,
+        i.e., \code{onempobs0} is \code{1-pobs0};
+  (iii) argument \code{zero} has a new default so that the \code{onempobs0}
+        is intercept-only by default.
+  Now \code{zapoissonff()} is generally recommended over
+  \code{zapoisson()}.
+  Both functions implement Fisher scoring and can handle
+  multiple responses.
 
 
 }
@@ -77,13 +111,16 @@ zapoisson(lpobs0 = "logit", llambda = "loge", zero = NULL)
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted},
-  returns the mean \eqn{\mu}{mu} which is given by 
+  returns the mean \eqn{\mu}{mu} (default) which is given by 
   \deqn{\mu = (1-p_0)  \lambda / [1 - \exp(-\lambda)].}{%
          mu = (1-pobs0) * lambda / [1 - exp(-lambda)].}
+  If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
+
 
 
 }
 \references{
+
 Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and
 Lindenmayer, D. B. (1996)
 Modelling the abundances of rare species: statistical models
@@ -99,6 +136,12 @@ A Bayesian analysis of zero-inflated generalized Poisson model.
 \bold{42}, 37--46.
 
 
+  Yee, T. W. (2014)
+  Reduced-rank vector generalized linear models with two linear predictors.
+  \emph{Computational Statistics and Data Analysis}.
+
+
+
 Documentation accompanying the \pkg{VGAM} package at
 \url{http://www.stat.auckland.ac.nz/~yee}
 contains further information and examples.
@@ -156,14 +199,14 @@ contains further information and examples.
 }
 
 \examples{
-zapdata <- data.frame(x2 = runif(nn <- 1000))
-zapdata <- transform(zapdata, pobs0  = logit( -1 + 1*x2, inverse = TRUE),
-                              lambda = loge(-0.5 + 2*x2, inverse = TRUE))
-zapdata <- transform(zapdata, y = rzapois(nn, lambda, pobs0 = pobs0))
-
-with(zapdata, table(y))
-fit <- vglm(y ~ x2, zapoisson, zapdata, trace = TRUE)
-fit <- vglm(y ~ x2, zapoisson, zapdata, trace = TRUE, crit = "coef")
+zdata <- data.frame(x2 = runif(nn <- 1000))
+zdata <- transform(zdata, pobs0  = logit( -1 + 1*x2, inverse = TRUE),
+                          lambda = loge(-0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y = rzapois(nn, lambda, pobs0 = pobs0))
+
+with(zdata, table(y))
+fit <- vglm(y ~ x2, zapoisson, data = zdata, trace = TRUE)
+fit <- vglm(y ~ x2, zapoisson, data = zdata, trace = TRUE, crit = "coef")
 head(fitted(fit))
 head(predict(fit))
 head(predict(fit, untransform = TRUE))
@@ -178,9 +221,9 @@ abdata <- subset(abdata, w > 0)
 yy <- with(abdata, rep(y, w))
 fit3 <- vglm(yy ~ 1, zapoisson, trace = TRUE, crit = "coef")
 coef(fit3, matrix = TRUE)
-Coef(fit3) # Estimate lambda (they get 0.6997 with SE 0.1520)
+Coef(fit3)  # Estimate lambda (they get 0.6997 with SE 0.1520)
 head(fitted(fit3), 1)
-mean(yy) # compare this with fitted(fit3)
+mean(yy)  # Compare this with fitted(fit3)
 }
 \keyword{models}
 \keyword{regression}
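
A small hedged check of the documented mean formula, using the intercept-only
fit3 above; it assumes Coef(fit3) returns the estimates of pobs0 and lambda,
in that order, on the natural parameter scale:

  pobs0.hat  <- Coef(fit3)[1]
  lambda.hat <- Coef(fit3)[2]
  (1 - pobs0.hat) * lambda.hat / (1 - exp(-lambda.hat))  # Cf. head(fitted(fit3), 1)
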
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
index bd0d549..de5d05e 100644
--- a/man/zetaff.Rd
+++ b/man/zetaff.Rd
@@ -92,9 +92,9 @@ Boca Raton: Chapman & Hall/CRC Press.
 
 }
 \examples{
-zdata <- data.frame(y = 1:5, w =  c(63, 14, 5, 1, 2)) # Knight, p.304
+zdata <- data.frame(y = 1:5, w =  c(63, 14, 5, 1, 2))  # Knight, p.304
 fit <- vglm(y ~ 1, zetaff, zdata, trace = TRUE, weight = w, crit = "coef")
-(phat <- Coef(fit)) # 1.682557
+(phat <- Coef(fit))  # 1.682557
 with(zdata, cbind(round(dzeta(y, phat) * sum(w), 1), w))
 
 with(zdata, weighted.mean(y, w))
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
index 4bdabe0..5de6b76 100644
--- a/man/zibinomUC.Rd
+++ b/man/zibinomUC.Rd
@@ -56,7 +56,7 @@ rzibinom(n, size, prob, pstr0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
   The argument \code{pstr0} is recycled to the required length,
   and must have values which lie in the interval \eqn{[0,1]}.
@@ -80,11 +80,11 @@ rzibinom(n, size, prob, pstr0 = 0)
 \examples{
 prob <- 0.2; size <- 10; pstr0 <- 0.5
 (ii <- dzibinom(0:size, size, prob, pstr0 = pstr0))
-max(abs(cumsum(ii) - pzibinom(0:size, size, prob, pstr0 = pstr0))) # Should be 0
+max(abs(cumsum(ii) - pzibinom(0:size, size, prob, pstr0 = pstr0)))  # Should be 0
 table(rzibinom(100, size, prob, pstr0 = pstr0))
 
 table(qzibinom(runif(100), size, prob, pstr0 = pstr0))
-round(dzibinom(0:10, size, prob, pstr0 = pstr0) * 100) # Should be similar
+round(dzibinom(0:10, size, prob, pstr0 = pstr0) * 100)  # Should be similar
 
 \dontrun{ x <- 0:size
 barplot(rbind(dzibinom(x, size, prob, pstr0 = pstr0),
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 4236bf6..673b417 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -1,5 +1,6 @@
 \name{zibinomial}
 \alias{zibinomial}
+\alias{zibinomialff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Inflated Binomial Distribution Family Function }
 \description{
@@ -9,11 +10,15 @@
 }
 \usage{
 zibinomial(lpstr0 = "logit", lprob = "logit",
-           ipstr0 = NULL, zero = 1, mv = FALSE, imethod = 1)
+           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           ipstr0 = NULL, zero = NULL, mv = FALSE, imethod = 1)
+zibinomialff(lprob = "logit", lonempstr0 = "logit",
+             type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+             ionempstr0 = NULL, zero = 2, mv = FALSE, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpstr0, lprob}{ 
+  \item{lpstr0, lprob}{
   Link functions for the parameter \eqn{\phi}{phi}
   and the usual binomial probability \eqn{\mu}{prob} parameter.
   See \code{\link{Links}} for more choices.
@@ -27,12 +32,27 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
 % See \code{earg} in \code{\link{Links}} for general information.
 % }
 
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}}.
+
+
+  }
+
   \item{ipstr0}{
   Optional initial values for \eqn{\phi}{phi}, whose values must lie
   between 0 and 1. The default is to compute an initial value internally.
   If a vector then recyling is used.
 
   }
+
+  \item{lonempstr0, ionempstr0}{
+  Corresponding arguments for the other parameterization.
+  See details below.
+
+
+  }
+
+
 % \item{zero}{ 
 % An integer specifying which linear/additive predictor is modelled
 % as intercepts only.  If given, the value must be either 1 or 2,
@@ -50,11 +70,13 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
   }
   \item{zero, imethod}{ 
   See \code{\link{CommonVGAMffArguments}} for information.
+  Argument \code{zero} has changed its default value since version 0.9-2.
+
 
   }
 }
 \details{
-  This function uses Fisher scoring and is based on
+  These functions are based on
   \deqn{P(Y=0) =  \phi + (1-\phi) (1-\mu)^N,}{%
         P(Y=0) =   phi + (1- phi) * (1-prob)^N,}
   for \eqn{y=0}, and 
@@ -66,11 +88,31 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
   The parameter \eqn{\phi}{phi} is the probability of a structural zero,
   and it satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
   The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) \mu}{E(Y) = (1-phi) * prob}
-  and these are returned as the fitted values.
+  and these are returned as the fitted values
+  by default.
   By default, the two linear/additive predictors
+  for \code{zibinomial()}
   are \eqn{(logit(\phi), logit(\mu))^T}{(logit(phi), logit(prob))^T}.
 
 
+
+  The \pkg{VGAM} family function \code{zibinomialff()} has a few
+  changes compared to \code{zibinomial()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        binomial probability comes first;
+  (ii)  argument \code{onempstr0} is now 1 minus
+        the probability of a structural zero, i.e.,
+        the probability of the parent (binomial) component,
+        i.e., \code{onempstr0} is \code{1-pstr0};
+  (iii) argument \code{zero} has a new default so that the \code{onempstr0}
+        is intercept-only by default.
+  Now \code{zibinomialff()} is generally recommended over
+  \code{zibinomial()}.
+  Both functions implement Fisher scoring.
+
+
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -79,9 +121,17 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
 
 
 }
-%\references{
+\references{
 
-%}
+
+Welsh, A. H., Lindenmayer, D. B. and Donnelly, C. F. (2013)
+Fitting and interpreting occupancy models.
+\emph{PLOS One},
+\bold{8},
+1--21.
+
+
+}
 
 \author{ T. W. Yee }
 \note{
@@ -130,7 +180,9 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
 \section{Warning }{
   Numerical problems can occur.
   Half-stepping is not uncommon.
-  If failure to converge occurs, make use of the argument \code{ipstr0}.
+  If failure to converge occurs, make use of the argument \code{ipstr0}
+  or \code{ionempstr0},
+  or \code{imethod}.
 
 
 } 
@@ -146,22 +198,23 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
 \examples{
 size <- 10  # Number of trials; N in the notation above
 nn <- 200
-zibdata <- data.frame(pstr0 = logit( 0, inverse = TRUE), # 0.50
-                      mubin = logit(-1, inverse = TRUE), # Mean of usual binomial
-                      sv    = rep(size, length = nn))
-zibdata <- transform(zibdata, 
-                     y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
-with(zibdata, table(y))
-fit <- vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE)
-fit <- vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE, stepsize = 0.5)
+zdata <- data.frame(pstr0 = logit( 0, inverse = TRUE),  # 0.50
+                    mubin = logit(-1, inverse = TRUE),  # Mean of usual binomial
+                    sv    = rep(size, length = nn))
+zdata <- transform(zdata, 
+                   y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
+with(zdata, table(y))
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, zdata, trace = TRUE)
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, zdata, trace = TRUE, stepsize = 0.5)
 
 coef(fit, matrix = TRUE)
 Coef(fit)  # Useful for intercept-only models
-fit@misc$pobs0  # Estimate of P(Y = 0)
+fitted(fit, type = "pobs0")  # Estimate of P(Y = 0)
 head(fitted(fit))
-with(zibdata, mean(y))  # Compare this with fitted(fit)
+with(zdata, mean(y))  # Compare this with fitted(fit)
 summary(fit)
 }
 \keyword{models}
 \keyword{regression}
 
+% fit@misc$pobs0  # Estimate of P(Y = 0)
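
A hedged numerical check of P(Y = 0) = phi + (1 - phi) * (1 - prob)^N for the
intercept-only zibinomialff() fit above; it assumes Coef(fit) returns the
estimates of prob and onempstr0, in that order, on the natural scale:

  prob.hat  <- Coef(fit)[1]
  pstr0.hat <- 1 - Coef(fit)[2]
  pstr0.hat + (1 - pstr0.hat) * (1 - prob.hat)^size  # Cf. fitted(fit, type = "pobs0")[1]
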
diff --git a/man/zigeomUC.Rd b/man/zigeomUC.Rd
index cb49cfd..9a8dbba 100644
--- a/man/zigeomUC.Rd
+++ b/man/zigeomUC.Rd
@@ -51,7 +51,7 @@ rzigeom(n, prob, pstr0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
     The argument \code{pstr0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
@@ -74,7 +74,7 @@ rzigeom(n, prob, pstr0 = 0)
 \examples{
 prob <- 0.5; pstr0 <- 0.2; x <- (-1):20
 (ii <- dzigeom(x, prob, pstr0))
-max(abs(cumsum(ii) - pzigeom(x, prob, pstr0))) # Should be 0
+max(abs(cumsum(ii) - pzigeom(x, prob, pstr0)))  # Should be 0
 table(rzigeom(1000, prob, pstr0))
 
 \dontrun{ x <- 0:10
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index 4909689..5cbf1c1 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -1,5 +1,6 @@
 \name{zigeometric}
 \alias{zigeometric}
+\alias{zigeometricff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Inflated Geometric Distribution Family Function }
 \description{
@@ -8,17 +9,24 @@
 
 }
 \usage{
-zigeometric(lprob = "logit", lpstr0  = "logit",
-            iprob = NULL, ipstr0  = NULL,
-            imethod = 1, bias.red = 0.5, zero = 2)
+zigeometric(lpstr0  = "logit", lprob = "logit",
+            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+            ipstr0  = NULL, iprob = NULL,
+            imethod = 1, bias.red = 0.5, zero = NULL)
+zigeometricff(lprob = "logit", lonempstr0 = "logit",
+              type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+              iprob = NULL, ionempstr0 = NULL,
+              imethod = 1, bias.red = 0.5, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lprob, lpstr0}{ 
-  Link functions for the parameters \eqn{p}{prob} (\code{prob})
-  and \eqn{\phi}{phi}.
-  The usual geometric probability parameter is the former.
-  The probability of a structural zero is the latter.
+  \item{lpstr0, lprob}{ 
+  Link functions for the parameters
+  \eqn{\phi}{phi}
+  and
+  \eqn{p}{prob} (\code{prob}).
+  The usual geometric probability parameter is the latter.
+  The probability of a structural zero is the former.
   See \code{\link{Links}} for more choices.
   For the zero-\emph{deflated} model see below.
 
@@ -29,22 +37,39 @@ zigeometric(lprob = "logit", lpstr0  = "logit",
 % See \code{earg} in \code{\link{Links}} for general information.
 % }
 
+
+  \item{lonempstr0, ionempstr0}{
+  Corresponding arguments for the other parameterization.
+  See details below.
+
+
+  }
+
   \item{bias.red}{
   A constant used in the initialization process of \code{pstr0}.
   It should lie between 0 and 1, with 1 having no effect.
 
+
   }
-  \item{iprob, ipstr0}{
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+  \item{ipstr0, iprob}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
+
   }
   \item{zero, imethod}{ 
   See \code{\link{CommonVGAMffArguments}} for information.
 
+
   }
 }
 \details{
-  This function uses Fisher scoring and is based on
+  Function \code{zigeometric()} is based on
   \deqn{P(Y=0) =  \phi + (1-\phi) p,}{%
         P(Y=0) =  phi + (1-phi) * prob,}
   for \eqn{y=0}, and 
@@ -53,15 +78,34 @@ zigeometric(lprob = "logit", lpstr0  = "logit",
   for \eqn{y=1,2,\ldots}.
   The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 <
   phi < 1}.  The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) p / (1-p)}{E(Y)
-  = (1-phi) * prob / (1-prob)} and these are returned as the fitted values.
-  By default, the two linear/additive predictors are \eqn{(logit(p),
-  logit(\phi))^T}{(logit(prob), logit(phi))^T}.
+  = (1-phi) * prob / (1-prob)} and these are returned as the fitted values
+  by default.
+  By default, the two linear/additive predictors
+  are \eqn{(logit(\phi), logit(p))^T}{(logit(phi), logit(prob))^T}.
   Multiple responses are handled.
 
 
 % 20130316:
   Estimated probabilities of a structural zero and an
-  observed zero are returned as in \code{\link{zipoisson}}.
+  observed zero can be returned, as in \code{\link{zipoisson}};
+  see \code{\link{fittedvlm}} for information.
+
+
+  The \pkg{VGAM} family function \code{zigeometricff()} has a few
+  changes compared to \code{zigeometric()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        geometric probability comes first;
+  (ii)  argument \code{onempstr0} is now 1 minus
+        the probability of a structural zero, i.e.,
+        the probability of the parent (geometric) component,
+        i.e., \code{onempstr0} is \code{1-pstr0};
+  (iii) argument \code{zero} has a new default so that the \code{onempstr0}
+        is intercept-only  by default.
+  Now \code{zigeometricff()} is generally recommended over
+  \code{zigeometric()}.
+  Both functions implement Fisher scoring and can handle
+  multiple responses.
 
 
 }
@@ -124,10 +168,11 @@ with(gdata, table(y2))
 with(gdata, table(y3))
 head(gdata)
 
-fit1 <- vglm(y1 ~ x2 + x3 + x4, zigeometric, gdata, trace = TRUE)
+fit1 <- vglm(y1 ~ x2 + x3 + x4, zigeometric(zero = 1), data = gdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
+head(fitted(fit1, type = "pstr0"))
 
-fit2 <- vglm(cbind(y2, y3) ~ 1, zigeometric, gdata, trace = TRUE)
+fit2 <- vglm(cbind(y2, y3) ~ 1, zigeometric(zero = 1), data = gdata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 summary(fit2)
 }
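
Because the documented mean is E(Y) = (1 - phi) * prob / (1 - prob), the odds
prob/(1 - prob) can be recovered from two fitted types; a hedged sketch for
fit1 above (it assumes both fitted types are available for this family):

  head(fitted(fit1, type = "mean") / fitted(fit1, type = "onempstr0"))  # prob/(1 - prob)
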
diff --git a/man/zinegbinUC.Rd b/man/zinegbinUC.Rd
index 1c3e37a..470f71e 100644
--- a/man/zinegbinUC.Rd
+++ b/man/zinegbinUC.Rd
@@ -63,7 +63,7 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
   The argument \code{pstr0} is recycled to the required
   length, and must have values which lie in the interval
@@ -89,11 +89,11 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
 \examples{
 munb <- 3; pstr0 <- 0.2; size <- k <- 10; x <- 0:10
 (ii <- dzinegbin(x, pstr0 = pstr0, mu = munb, size = k))
-max(abs(cumsum(ii) - pzinegbin(x, pstr0 = pstr0, mu = munb, size = k))) # 0
+max(abs(cumsum(ii) - pzinegbin(x, pstr0 = pstr0, mu = munb, size = k)))  # 0
 table(rzinegbin(100, pstr0 = pstr0, mu = munb, size = k))
 
 table(qzinegbin(runif(1000), pstr0 = pstr0, mu = munb, size = k))
-round(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k) * 1000) # Should be similar
+round(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k) * 1000)  # Should be similar
 
 \dontrun{barplot(rbind(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k),
                 dnbinom(x, mu = munb, size = k)), las = 1,
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 30dee03..05df6ff 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -1,5 +1,6 @@
 \name{zinegbinomial}
 \alias{zinegbinomial}
+\alias{zinegbinomialff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Inflated Negative Binomial Distribution Family Function }
 \description{
@@ -9,8 +10,13 @@
 }
 \usage{
 zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
-              ipstr0 = NULL, isize = NULL, zero = c(-1, -3),
+              type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+              ipstr0 = NULL, isize = NULL, zero = -3,
               imethod = 1, shrinkage.init = 0.95, nsimEIM = 250)
+zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
+                type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+                isize = NULL, ionempstr0 = NULL, zero = c(-2, -3),
+                imethod = 1, shrinkage.init = 0.95, nsimEIM = 250)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -29,12 +35,28 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
 % See \code{earg} in \code{\link{Links}} for general information.
 % }
 
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}} 
+  and \code{\link{fittedvlm}} for more information.
+
+
+  }
+
   \item{ipstr0, isize}{
   Optional initial values for \eqn{\phi}{pstr0} and \eqn{k}{k}.
   The default is to compute an initial value internally for both.
   If a vector then recycling is used.
 
   }
+
+  \item{lonempstr0, ionempstr0}{
+  Corresponding arguments for the other parameterization.
+  See details below.
+
+
+  }
+
+
   \item{imethod}{
   An integer with value \code{1} or \code{2} or \code{3} which
   specifies the initialization method for the mean parameter.
@@ -57,7 +79,7 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
   }
 }
 \details{
-  This function uses simulation and Fisher scoring, and is based on
+  These functions are based on
   \deqn{P(Y=0) =  \phi + (1-\phi) (k/(k+\mu))^k,}{%
         P(Y=0) =  \phi + (1-\phi) * (k/(k+\mu))^k,}
   and for \eqn{y=1,2,\ldots},
@@ -65,8 +87,10 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
         P(Y=y) =  (1-\phi) * dnbinom(y, \mu, k).}
   The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
   The mean of \eqn{Y} is \eqn{(1-\phi) \mu}{(1-phi)*munb}
-  (returned as the fitted values).  By default, the three linear/additive
-  predictors are \eqn{(logit(\phi), \log(\mu), \log(k))^T}{(logit(phi),
+  (returned as the fitted values).
+  By default, the three linear/additive predictors
+  for \code{zinegbinomial()}
+  are \eqn{(logit(\phi), \log(\mu), \log(k))^T}{(logit(phi),
   log(munb), log(k))^T}.
   See \code{\link{negbinomial}}, another \pkg{VGAM} family function,
   for the formula of the probability density function and other details
@@ -78,6 +102,23 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
   with length equal to the number of responses.
 
 
+  The \pkg{VGAM} family function \code{zinegbinomialff()} has a few
+  changes compared to \code{zinegbinomial()}.
+  These are:
+  (i)   the order of the linear/additive predictors is switched so the
+        NB mean comes first;
+  (ii)  \code{onempstr0} is now 1 minus the probability of a structural 0,
+        i.e., the probability of the parent (NB) component,
+        i.e., \code{onempstr0} is \code{1-pstr0};
+  (iii) argument \code{zero} has a new default so that the \code{onempstr0}
+        is intercept-only by default.
+  Now \code{zinegbinomialff()} is generally recommended over
+  \code{zinegbinomial()}.
+  Both functions implement Fisher scoring and can handle
+  multiple responses.
+
+
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -100,7 +141,8 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
 
 % 20130316: adding this:
   Estimated probabilities of a structural zero and an 
-  observed zero are returned, as in \code{\link{zipoisson}}.
+  observed zero can be returned, as in \code{\link{zipoisson}};
+  see \code{\link{fittedvlm}} for more information.
 
 
 
@@ -126,7 +168,8 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
   zero is actually less than, not more than, the nominal
   probability of zero.
   Half-stepping is not uncommon.
-  If failure to converge occurs, try using combinations of
+  If failure to converge occurs, try using combinations of arguments
+  \code{stepsize} (in \code{\link{vglm.control}}),
   \code{imethod},
   \code{shrinkage.init},
   \code{ipstr0},
@@ -134,6 +177,10 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
   \code{zero} if there are explanatory variables.
 
 
+  An infinite loop might occur if some of the fitted values
+  (the means) are too close to 0.
+
+
   This \pkg{VGAM} family function is computationally expensive
   and usually runs slowly;
   setting \code{trace = TRUE} is useful for monitoring convergence.
@@ -159,7 +206,7 @@ ndata <- transform(ndata,
                    y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0),
                    y2 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0))
 with(ndata, table(y1)["0"] / sum(table(y1)))
-fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), ndata)
+fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), data = ndata)
 coef(fit, matrix = TRUE)
 summary(fit)
 head(cbind(fitted(fit), with(ndata, (1 - pstr0) * munb)))
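
A hedged follow-up for the fit above: since E(Y) = (1 - pstr0) * munb, the
type = "pstr0" fitted values should roughly track the simulated pstr0 (this
assumes that fitted type is available here, as for the other zero-inflated
families in this release):

  head(cbind(fitted(fit, type = "pstr0"), with(ndata, pstr0)))  # Estimated vs simulated
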
@@ -176,8 +223,8 @@ ndata <- transform(ndata, pstr0  = logit(-1.5 + 0.5 * eta1, inverse = TRUE),
 ndata <- transform(ndata,
                    y1 = rzinegbin(nn, pstr0 = pstr0, mu = munb, size = size))
 with(ndata, table(y1)["0"] / sum(table(y1)))
-rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), ndata,
-                 Index.corner = 2, szero = 3, trace = TRUE)
+rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), data = ndata,
+                 Index.corner = 2, str0 = 3, trace = TRUE)
 coef(rrzinb, matrix = TRUE)
 Coef(rrzinb)
 }
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index 0ab6ecc..ad9dc7b 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -183,7 +183,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
 
 
 }
-%\author{ Thomas W. Yee }
+%\author{ T. W. Yee }
 \note{
   The \code{"12"} in the argument names reminds the user of the
   exchangeability assumption.
@@ -208,8 +208,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
   \code{\link{binom2.or}},
   \code{\link{zipoisson}},
   \code{\link{cloglog}},
-  \code{\link{CommonVGAMffArguments}},
-  \code{\link{posbernoulli.tb}}.
+  \code{\link{CommonVGAMffArguments}}.
 
 
 }
diff --git a/man/zipf.Rd b/man/zipf.Rd
index aa01ce5..35bb7f5 100644
--- a/man/zipf.Rd
+++ b/man/zipf.Rd
@@ -18,17 +18,20 @@ zipf(N = NULL, link = "loge", init.s = NULL)
   If \code{N = Inf} and \eqn{s>1} then this is the zeta
   distribution (use \code{\link{zetaff}} instead).
 
+
   }
   \item{link}{
   Parameter link function applied to the (positive) parameter \eqn{s}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{init.s}{
   Optional initial value for the parameter \eqn{s}.
   The default is to choose an initial value internally.
   If convergence failure occurs, use this argument to input a value.
 
+
   }
 }
 \details{
@@ -70,6 +73,7 @@ zipf(N = NULL, link = "loge", init.s = NULL)
   3rd edition,
   Hoboken, New Jersey, USA: Wiley.
 
+
 }
 \author{ T. W. Yee }
 \note{
@@ -82,6 +86,7 @@ zipf(N = NULL, link = "loge", init.s = NULL)
   \code{\link{dzipf}},
   \code{\link{zetaff}}.
 
+
 }
 \examples{
 zdata <- data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
index ca311f3..a985feb 100644
--- a/man/zipfUC.Rd
+++ b/man/zipfUC.Rd
@@ -57,8 +57,8 @@ proby <- dzipf(y, N = N, s = s)
 \dontrun{ plot(proby ~ y, type = "h", col = "blue", ylab = "Probability",
      ylim = c(0, 0.2), main = paste("Zipf(N = ",N,", s = ",s,")", sep = ""),
      lwd = 2, las = 1) }
-sum(proby) # Should be 1
-max(abs(cumsum(proby) - pzipf(y, N = N, s = s))) # Should be 0
+sum(proby)  # Should be 1
+max(abs(cumsum(proby) - pzipf(y, N = N, s = s)))  # Should be 0
 }
 \keyword{distribution}
 
diff --git a/man/zipoisUC.Rd b/man/zipoisUC.Rd
index ee6b1c6..62275b8 100644
--- a/man/zipoisUC.Rd
+++ b/man/zipoisUC.Rd
@@ -52,7 +52,7 @@ rzipois(n, lambda, pstr0 = 0)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ T. W. Yee }
 \note{ 
   The argument \code{pstr0} is recycled to the required length, and
   must have values which lie in the interval \eqn{[0,1]}.
@@ -81,22 +81,22 @@ rzipois(n, lambda, pstr0 = 0)
 \examples{
 lambda <- 3; pstr0 <- 0.2; x <- (-1):7
 (ii <- dzipois(x, lambda, pstr0 = pstr0))
-max(abs(cumsum(ii) - pzipois(x, lambda, pstr0 = pstr0))) # Should be 0
+max(abs(cumsum(ii) - pzipois(x, lambda, pstr0 = pstr0)))  # Should be 0
 table(rzipois(100, lambda, pstr0 = pstr0))
 
 table(qzipois(runif(100), lambda, pstr0))
-round(dzipois(0:10, lambda, pstr0 = pstr0) * 100) # Should be similar
+round(dzipois(0:10, lambda, pstr0 = pstr0) * 100)  # Should be similar
 
 \dontrun{ x <- 0:10
-par(mfrow = c(2, 1)) # Zero-inflated Poisson
+par(mfrow = c(2, 1))  # Zero-inflated Poisson
 barplot(rbind(dzipois(x, lambda, pstr0 = pstr0), dpois(x, lambda)),
-        beside = TRUE, col = c("blue","orange"),
+        beside = TRUE, col = c("blue", "orange"),
         main = paste("ZIP(", lambda, ", pstr0 = ", pstr0, ") (blue) vs",
                      " Poisson(", lambda, ") (orange)", sep = ""),
         names.arg = as.character(x))
 
-deflat_limit <- -1 / expm1(lambda) # Zero-deflated Poisson
-newpstr0 <- round(deflat_limit / 1.5, 3)
+deflat.limit <- -1 / expm1(lambda)  # Zero-deflated Poisson
+newpstr0 <- round(deflat.limit / 1.5, 3)
 barplot(rbind(dzipois(x, lambda, pstr0 = newpstr0),
                 dpois(x, lambda)),
         beside = TRUE, col = c("blue","orange"),
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 516068e..8bf3df0 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -9,12 +9,14 @@
 
 }
 \usage{
-zipoissonff(llambda = "loge", lprobp = "logit",
-            ilambda = NULL,   iprobp = NULL,
-            imethod = 1, shrinkage.init = 0.8, zero = -2)
 zipoisson(lpstr0 = "logit", llambda = "loge",
-          ipstr0 = NULL,    ilambda = NULL,
+          type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+          ipstr0 = NULL, ilambda = NULL,
           imethod = 1, shrinkage.init = 0.8, zero = NULL)
+zipoissonff(llambda = "loge", lonempstr0 = "logit",
+            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+            ilambda = NULL,   ionempstr0 = NULL,
+            imethod = 1, shrinkage.init = 0.8, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -36,11 +38,25 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
   The defaults are to compute an initial value internally for each.
   If a vector then recycling is used.
 
+
   }
-  \item{lprobp, iprobp}{
+  \item{lonempstr0, ionempstr0}{
   Corresponding arguments for the other parameterization.
   See details below.
 
+
+  }
+
+  \item{type.fitted}{
+  Character. The type of fitted value to be returned.
+  The first choice (the expected value) is the default.
+  The other choices are the estimated probability of an observed 0,
+  the estimated probability of a structural 0,
+  and one minus the estimated probability of a structural 0.
+  See \code{\link{CommonVGAMffArguments}} 
+  and \code{\link{fittedvlm}} for more information.
+
+
   }
 
   \item{imethod}{
@@ -74,7 +90,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
   }
 }
 \details{
-  This model is a mixture of a Poisson distribution and the value 0;
+  These models are a mixture of a Poisson distribution and the value 0;
   it has value 0 with probability \eqn{\phi}{phi} else is
   Poisson(\eqn{\lambda}{lambda}) distributed.
   Thus there are two sources for zero values, and \eqn{\phi}{phi}
@@ -88,11 +104,13 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
   Here, the parameter \eqn{\phi}{phi} satisfies
   \eqn{0 < \phi < 1}{0 < phi < 1}.
   The mean of \eqn{Y} is \eqn{(1-\phi) \lambda}{(1-phi)*lambda} and these
-  are returned as the fitted values.
+  are returned as the fitted values,
+  by default.
   The variance of \eqn{Y} is \eqn{(1-\phi) \lambda (1 + \phi \lambda)}{
   (1-phi)*lambda*(1 + phi lambda)}.
-  By default, the two linear/additive predictors are
-  \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi), log(lambda))^T}.
+  By default, the two linear/additive predictors
+  of \code{zipoisson()}
+  are \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi), log(lambda))^T}.
 
 
   The \pkg{VGAM} family function \code{zipoissonff()} has a few
@@ -100,11 +118,11 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
   These are:
   (i)   the order of the linear/additive predictors is switched so the
         Poisson mean comes first;
-  (ii)  \code{probp} is now the probability of the Poisson component,
-        i.e., \code{probp} is \code{1-pstr0};
-  (iii) it can handle multiple responses;
-  (iv)  argument \code{zero} has a new default so that the \code{probp}
-        is an intercept-only  by default.
+  (ii)  \code{onempstr0} is now 1 minus the probability of a structural 0,
+        i.e., the probability of the parent (Poisson) component,
+        i.e., \code{onempstr0} is \code{1-pstr0};
+  (iii) argument \code{zero} has a new default so that the \code{onempstr0}
+        is intercept-only  by default.
   Now \code{zipoissonff()} is generally recommended over
   \code{zipoisson()} (and definitely recommended over \code{\link{yip88}}).
   Both functions implement Fisher scoring and can handle
@@ -140,7 +158,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
   Cambridge University Press: Cambridge.
 
 
-  Yee, T. W. (2013)
+  Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
   \emph{Computational Statistics and Data Analysis}.
 
@@ -148,11 +166,11 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
 }
 \author{ T. W. Yee }
 \note{
-  The \code{misc} slot has a component called
-  \code{pobs0} which is the estimate of \eqn{P(Y = 0)}.
-  Note that \eqn{P(Y = 0)} is not the parameter \eqn{\phi}{phi}.
-  The estimated probability of a structural 0 is returned in
-  the \code{misc} slot with component name \code{pstr0}.
+% The \code{misc} slot has a component called
+% \code{pobs0} which is the estimate of \eqn{P(Y = 0)}.
+% Note that \eqn{P(Y = 0)} is not the parameter \eqn{\phi}{phi}.
+% The estimated probability of a structural 0 is returned in
+% the \code{misc} slot with component name \code{pstr0}.
 
 
   Although the functions in \code{\link{Zipois}}
@@ -214,8 +232,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
 }
 \examples{
 # Example 1: simulated ZIP data
-set.seed(123)
-zdata <- data.frame(x2 = runif(nn <- 2000))
+zdata <- data.frame(x2 = runif(nn <- 1000))
 zdata <- transform(zdata, pstr01  = logit(-0.5 + 1*x2, inverse = TRUE),
                           pstr02  = logit( 0.5 - 1*x2, inverse = TRUE),
                           Ps01    = logit(-0.5       , inverse = TRUE),
@@ -225,62 +242,58 @@ zdata <- transform(zdata, pstr01  = logit(-0.5 + 1*x2, inverse = TRUE),
 zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01),
                           y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02))
 
-with(zdata, table(y1)) # Eyeball the data
+with(zdata, table(y1))  # Eyeball the data
 with(zdata, table(y2))
-fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), zdata, crit = "coef")
-fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), zdata, crit = "coef")
-coef(fit1, matrix = TRUE) # These should agree with the above values
-coef(fit2, matrix = TRUE) # These should agree with the above values
+fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
+fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
+coef(fit1, matrix = TRUE)  # These should agree with the above values
+coef(fit2, matrix = TRUE)  # These should agree with the above values
 
 # Fit both responses simultaneously, using a different parameterization:
-fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, zdata, crit = "coef")
-coef(fit12, matrix = TRUE) # These should agree with the above values
+fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, data = zdata, crit = "coef")
+coef(fit12, matrix = TRUE)  # These should agree with the above values
 
 # For the first observation compute the probability that y1 is
 # due to a structural zero.
-head(zdata, 1)
-pfit1 <- predict(fit1, zdata[1, ])
-pstr0 <- logit(pfit1[1], inverse = TRUE)
-lambda <- loge(pfit1[2], inverse = TRUE)
-(prob.struc.0 <- pstr0 / dzipois(x = 0, lambda = lambda, pstr0 = pstr0))
+(fitted(fit1, type = "pstr0") / fitted(fit1, type = "pobs0"))[1]
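
A related hedged consistency check for fit1, using the standard ZIP identity
P(Y = 0) = phi + (1 - phi) * exp(-lambda) and recovering lambda as
mean / onempstr0 (it assumes all four fitted types are available):

  lam1 <- fitted(fit1, type = "mean") / fitted(fit1, type = "onempstr0")  # lambda.hat
  head(fitted(fit1, type = "pstr0") +
       fitted(fit1, type = "onempstr0") * exp(-lam1) -
       fitted(fit1, type = "pobs0"))  # Should be essentially zero
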
 
 
 # Example 2: McKendrick (1926). Data from 223 Indian village households
-cholera <- data.frame(ncases = 0:4, # Number of cholera cases,
-                      wfreq  = c(168, 32, 16, 6, 1)) # Frequencies
+cholera <- data.frame(ncases = 0:4,  # Number of cholera cases,
+                      wfreq  = c(168, 32, 16, 6, 1))  # Frequencies
 fit <- vglm(ncases ~ 1, zipoisson, wei = wfreq, cholera, trace = TRUE)
 coef(fit, matrix = TRUE)
 with(cholera, cbind(actual = wfreq,
                     fitted = round(dzipois(ncases, lambda = Coef(fit)[2],
                                            pstr0 = Coef(fit)[1]) *
-                                   sum(wfreq), dig = 2)))
+                                   sum(wfreq), digits = 2)))
 
 # Example 3: data from Angers and Biswas (2003)
 abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
 abdata <- subset(abdata, w > 0)
 fit <- vglm(y ~ 1, zipoisson(lpstr0 = probit, ipstr0 = 0.8),
             abdata, weight = w, trace = TRUE)
-fit@misc$pobs0  # Estimate of P(Y = 0)
+fitted(fit, type = "pobs0")  # Estimate of P(Y = 0)
 coef(fit, matrix = TRUE)
 Coef(fit)  # Estimate of pstr0 and lambda
 fitted(fit)
-with(abdata, weighted.mean(y, w)) # Compare this with fitted(fit)
+with(abdata, weighted.mean(y, w))  # Compare this with fitted(fit)
 summary(fit)
 
 # Example 4: zero-deflated model for intercept-only data
-zdata <- transform(zdata, lambda3 = loge( 0.0       , inverse = TRUE))
-zdata <- transform(zdata, deflat_limit = -1 / expm1(lambda3)) # Boundary
+zdata <- transform(zdata, lambda3 = loge(0.0, inverse = TRUE))
+zdata <- transform(zdata, deflat.limit = -1 / expm1(lambda3))  # Boundary
 # The 'pstr0' parameter is negative and in parameter space:
-zdata <- transform(zdata, usepstr0 = deflat_limit / 1.5)
+zdata <- transform(zdata, usepstr0 = deflat.limit / 1.5)
 zdata <- transform(zdata, y3 = rzipois(nn, lambda3, pstr0 = usepstr0))
 head(zdata)
-with(zdata, table(y3)) # A lot of deflation
+with(zdata, table(y3))  # A lot of deflation
 fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = identity),
-             zdata, trace = TRUE, crit = "coef")
+             data = zdata, trace = TRUE, crit = "coef")
 coef(fit3, matrix = TRUE)
 # Check how accurate it was:
-zdata[1, 'usepstr0'] # Answer
-coef(fit3)[1]        # Estimate
+zdata[1, "usepstr0"]  # Answer
+coef(fit3)[1]         # Estimate
 Coef(fit3)
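
For reference, the deflation boundary used in Example 4 can be checked by
hand: lambda3 = exp(0) = 1, so

  -1 / expm1(1)        # deflat.limit, approximately -0.5820
  -1 / expm1(1) / 1.5  # usepstr0, approximately -0.3880
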
 
 # Example 5: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
@@ -295,8 +308,8 @@ summary(rrzip)
 \keyword{models}
 \keyword{regression}
 
-% Yee, T. W. (2012)
-% An alternative to quasi-Poisson vs. negative binomial
-% regression: the reduced-rank negative binomial model.
-% \emph{In preparation}.
-
+%# head(zdata, 1); pfit1 <- predict(fit1, zdata[1, ]);
+%# lambda <- loge(pfit1[2], inverse = TRUE)
+%# lambda <- (fitted(fit1, type = "mean") / fitted(fit1, type = "onempstr0"))[1]
+%# (prob.struc.0 <- pstr0 / dzipois(x = 0, lambda = lambda, pstr0 = pstr0))
+% fit@misc$pobs0  # Estimate of P(Y = 0)
diff --git a/src/caqo3.c b/src/caqo3.c
index 1f5ccec..a41c98a 100644
--- a/src/caqo3.c
+++ b/src/caqo3.c
@@ -18,6 +18,11 @@
 
 
 
+
+
+
+
+
 #include<math.h>
 #include<stdio.h>
 #include<stdlib.h>
@@ -91,7 +96,8 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
                  int jnxpuym2[], int hnpt1zym[],
                  int iz2nbfjc[],
                  double ifys6woa[], double rpyis2kc[], double gkdx5jals[],
-                 int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]);
+                 int nbzjkpi3[], int lindex[],
+                 int acpios9q[], int jwbkl9fp[]);
 void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
                 double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[],
                 double t8hwvalr[], double ghz9vuba[], double rbne6ouj[],
@@ -125,7 +131,8 @@ void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
                   int iz2nbfjc[],
                   double ifys6woa[],
                   double rpyis2kc[], double gkdx5jals[],
-                  int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]);
+                  int nbzjkpi3[], int lindex[],
+                  int acpios9q[], int jwbkl9fp[]);
 
 double fvlmz9iyC_tldz5ion(double xx);
 void fvlmz9iyC_qpsedg8x(int tgiyxdw1[], int dufozmt7[], int *wy1vqfzu);
@@ -143,7 +150,8 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
        int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[],
        double ifys6woa[],
        double rpyis2kc[], double gkdx5jals[],
-       int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]);
+       int nbzjkpi3[], int lindex[],  // 20130525; lindex added
+       int acpios9q[], int jwbkl9fp[]);
 
 
 
@@ -163,15 +171,15 @@ void yiumjq3nn2howibc2a(double *objzgdk0, double *i9mwnvqt, double *lfu2qhid) {
 
 
   if (1.0e0 - *objzgdk0 >= 1.0e0) {
-      *lfu2qhid = -8.12589e0 / (3.0 * sqrt(*i9mwnvqt));
+    *lfu2qhid = -8.12589e0 / (3.0 * sqrt(*i9mwnvqt));
   } else
   if (1.0e0 - *objzgdk0 <= 0.0e0) {
-      *lfu2qhid =  8.12589e0 / (3.0 * sqrt(*i9mwnvqt));
+    *lfu2qhid =  8.12589e0 / (3.0 * sqrt(*i9mwnvqt));
   } else {
-      pq0hfucn = 1.0e0 - *objzgdk0;
-      yiumjq3npnm1or(&pq0hfucn, &xd4mybgj);
-      xd4mybgj  /=  3.0e0 * sqrt(*i9mwnvqt);
-      *lfu2qhid = -3.0e0 * log(1.0e0 + xd4mybgj);
+    pq0hfucn = 1.0e0 - *objzgdk0;
+    yiumjq3npnm1or(&pq0hfucn, &xd4mybgj);
+    xd4mybgj  /=  3.0e0 * sqrt(*i9mwnvqt);
+    *lfu2qhid = -3.0e0 * log(1.0e0 + xd4mybgj);
   }
 }
 
@@ -180,15 +188,15 @@ void yiumjq3nbewf1pzv9(double *objzgdk0, double *lfu2qhid) {
 
 
   if (*objzgdk0 <= 2.0e-200) {
-      *lfu2qhid = -460.0e0;
+    *lfu2qhid = -460.0e0;
   } else
   if (*objzgdk0 <= 1.0e-14) {
-      *lfu2qhid = log( *objzgdk0 );
+    *lfu2qhid = log( *objzgdk0 );
   } else
   if (1.0e0 - *objzgdk0 <= 0.0e0) {
-      *lfu2qhid = 3.542106e0;
+    *lfu2qhid = 3.542106e0;
   } else {
-      *lfu2qhid = log(-log(1.0e0 - *objzgdk0));
+    *lfu2qhid = log(-log(1.0e0 - *objzgdk0));
   }
 }
 
@@ -196,7 +204,7 @@ void yiumjq3nbewf1pzv9(double *objzgdk0, double *lfu2qhid) {
 void yiumjq3ng2vwexyk9(double *objzgdk0, double *lfu2qhid) {
 
   if (*objzgdk0 <= 2.0e-200) {
-      *lfu2qhid = -460.0e0;
+    *lfu2qhid = -460.0e0;
   } else
   if (*objzgdk0 <= 1.0e-14) {
     *lfu2qhid = log( *objzgdk0 );
@@ -222,103 +230,103 @@ void yiumjq3npkc4ejib(double w8znmyce[], double zshtfg8c[], double m0ibglfx[],
          *fpdlcqk9m0ibglfx, *fpdlcqk9vm4xjosb;
 
   if (*vtsou9pz == 1) {
-      if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-          sedf7mxb = 2 * *hj3ftvzu - 1;
-
-          if (*br5ovgcj != 2 * *ftnjamu2)  //Rprinf
-              Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_pkc4ejib\n");
-          fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx  = 0.0;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+    if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+      sedf7mxb = 2 * *hj3ftvzu - 1;
 
-          fpdlcqk9zshtfg8c = zshtfg8c;
-          for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
-              fpdlcqk9w8znmyce = w8znmyce + 0 + (gp1jxzuh-1) * *br5ovgcj;
-              fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c;
-                   fpdlcqk9w8znmyce++;
-                   fpdlcqk9m0ibglfx += *wy1vqfzu;
-              }
-              fpdlcqk9zshtfg8c++;
-          }
+      if (*br5ovgcj != 2 * *ftnjamu2)  //Rprintf
+        Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_pkc4ejib\n");
+      fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
+      for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx  = 0.0;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
+      }
+
+      fpdlcqk9zshtfg8c = zshtfg8c;
+      for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
+        fpdlcqk9w8znmyce = w8znmyce + 0 + (gp1jxzuh-1) * *br5ovgcj;
+        fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c;
+           fpdlcqk9w8znmyce++;
+           fpdlcqk9m0ibglfx += *wy1vqfzu;
+        }
+        fpdlcqk9zshtfg8c++;
+      }
 
-          sedf7mxb = 2 * *hj3ftvzu;
+      sedf7mxb = 2 * *hj3ftvzu;
 
-          fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx  = 0.0;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+      fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
+      for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx  = 0.0;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
+      }
 
-          fpdlcqk9zshtfg8c = zshtfg8c;
-          for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
-              fpdlcqk9w8znmyce = w8znmyce + 1 + (gp1jxzuh-1) * *br5ovgcj;
-              fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c;
-                   fpdlcqk9w8znmyce++;
-                   fpdlcqk9m0ibglfx += *wy1vqfzu;
-              }
-              fpdlcqk9zshtfg8c++;
-          }
+      fpdlcqk9zshtfg8c = zshtfg8c;
+      for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
+        fpdlcqk9w8znmyce = w8znmyce + 1 + (gp1jxzuh-1) * *br5ovgcj;
+        fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c;
+           fpdlcqk9w8znmyce++;
+           fpdlcqk9m0ibglfx += *wy1vqfzu;
+        }
+        fpdlcqk9zshtfg8c++;
+      }
 
 
-      } else {
+    } else {
 
-          fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1;
-          for (ayfnwr1v = 0; ayfnwr1v < *br5ovgcj; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx  = 0.0;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+      fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1;
+      for (ayfnwr1v = 0; ayfnwr1v < *br5ovgcj; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx  = 0.0;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
+      }
 
-          fpdlcqk9zshtfg8c = zshtfg8c;
-          fpdlcqk9w8znmyce  = w8znmyce; // +     (gp1jxzuh-1) * *br5ovgcj;
-          for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
-              fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1;
-              for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++) {
-                  *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c;
-                   fpdlcqk9m0ibglfx += *wy1vqfzu;
-              }
-              fpdlcqk9zshtfg8c++;
-          }
+      fpdlcqk9zshtfg8c = zshtfg8c;
+      fpdlcqk9w8znmyce  = w8znmyce; // +     (gp1jxzuh-1) * *br5ovgcj;
+      for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
+        fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1;
+        for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++) {
+          *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c;
+           fpdlcqk9m0ibglfx += *wy1vqfzu;
+        }
+        fpdlcqk9zshtfg8c++;
       }
+    }
   } else {
-      if (*br5ovgcj != *wy1vqfzu * *ftnjamu2)  //Rprinf
-          Rprintf("Error: *br5ovgcj != *wy1vqfzu * *ftnjamu2 in C_pkc4ejib\n");
-      fpdlcqk9m0ibglfx  = m0ibglfx;
-      fpdlcqk9f9piukdx = w8znmyce;
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              *fpdlcqk9m0ibglfx = 0.0e0;
-              fpdlcqk9zshtfg8c = zshtfg8c;
-              fpdlcqk9w8znmyce  = fpdlcqk9f9piukdx++;
-              for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
-                  *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce * *fpdlcqk9zshtfg8c++;
-                  fpdlcqk9w8znmyce  += *br5ovgcj;
-              }
-              fpdlcqk9m0ibglfx++;
-          }
-      }
+    if (*br5ovgcj != *wy1vqfzu * *ftnjamu2)  //Rprintf
+      Rprintf("Error: *br5ovgcj != *wy1vqfzu * *ftnjamu2 in C_pkc4ejib\n");
+    fpdlcqk9m0ibglfx  = m0ibglfx;
+    fpdlcqk9f9piukdx = w8znmyce;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        *fpdlcqk9m0ibglfx = 0.0e0;
+        fpdlcqk9zshtfg8c = zshtfg8c;
+        fpdlcqk9w8znmyce  = fpdlcqk9f9piukdx++;
+        for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) {
+          *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce * *fpdlcqk9zshtfg8c++;
+          fpdlcqk9w8znmyce  += *br5ovgcj;
+        }
+        fpdlcqk9m0ibglfx++;
+      }
+    }
   }
 
   fpdlcqk9vm4xjosb = vm4xjosb;
   if (*unhycz0e == 1) {
-      if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-          fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu - 2;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
-      } else {
-          fpdlcqk9m0ibglfx = m0ibglfx +     *hj3ftvzu - 1;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+    if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+      fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu - 2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
       }
+    } else {
+      fpdlcqk9m0ibglfx = m0ibglfx +     *hj3ftvzu - 1;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
+      }
+    }
   }
 }
 
@@ -332,86 +340,91 @@ void yiumjq3nnipyajc1(double m0ibglfx[], double t8hwvalr[], int *ftnjamu2, int *
 
 
   if (*hj3ftvzu == 0) {
-      fpdlcqk9t8hwvalr  = t8hwvalr;
-      fpdlcqk9m0ibglfx = m0ibglfx;
-      if (*qfx3vhct == 1) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-              for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-                  tmpwk = exp(*fpdlcqk9m0ibglfx++);
-                  *fpdlcqk9t8hwvalr++ = tmpwk / (1.0 + tmpwk);
-              }
-      }
-      if (*qfx3vhct == 2) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-              for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
-                  *fpdlcqk9t8hwvalr++ = exp(*fpdlcqk9m0ibglfx++);
-      }
-      if (*qfx3vhct == 4) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-              for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
-                  *fpdlcqk9t8hwvalr++ = 1.0e0 - exp(-exp(*fpdlcqk9m0ibglfx++));
-      }
-      if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-          if (2 * *afpc0kns != *wy1vqfzu) { //Rprintf
-              Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          } //Rprintf
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-              for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                  *fpdlcqk9t8hwvalr++ = exp(*fpdlcqk9m0ibglfx++);
-                   fpdlcqk9m0ibglfx++;
-              }
-      }
-      if (*qfx3vhct == 8) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-              for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
-                  *fpdlcqk9t8hwvalr++ = *fpdlcqk9m0ibglfx++;
-      }
+    fpdlcqk9t8hwvalr  = t8hwvalr;
+    fpdlcqk9m0ibglfx = m0ibglfx;
+    if (*qfx3vhct == 1) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+        for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+          tmpwk = exp(*fpdlcqk9m0ibglfx++);
+          *fpdlcqk9t8hwvalr++ = tmpwk / (1.0 + tmpwk);
+        }
+    }
+    if (*qfx3vhct == 2) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+        for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
+          *fpdlcqk9t8hwvalr++ = exp(*fpdlcqk9m0ibglfx++);
+    }
+    if (*qfx3vhct == 4) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+        for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
+          *fpdlcqk9t8hwvalr++ = 1.0e0 - exp(-exp(*fpdlcqk9m0ibglfx++));
+    }
+    if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+      if (2 * *afpc0kns != *wy1vqfzu) { //Rprintf
+        Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      } //Rprintf
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+        for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+            *fpdlcqk9t8hwvalr++ = exp(*fpdlcqk9m0ibglfx++);
+             fpdlcqk9m0ibglfx++;
+        }
+    }
+    if (*qfx3vhct == 8) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+        for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
+          *fpdlcqk9t8hwvalr++ = *fpdlcqk9m0ibglfx++;
+    }
   } else {
-      fpdlcqk9t8hwvalr  =  t8hwvalr + *hj3ftvzu-1;
-      fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1;
-      if (*qfx3vhct == 1) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              tmpwk = exp(*fpdlcqk9m0ibglfx);
-              *fpdlcqk9t8hwvalr   = tmpwk / (1.0 + tmpwk);
-               fpdlcqk9t8hwvalr  += *afpc0kns;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
-      }
-      if (*qfx3vhct == 2) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9t8hwvalr   = exp(*fpdlcqk9m0ibglfx);
-               fpdlcqk9t8hwvalr  += *afpc0kns;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+    fpdlcqk9t8hwvalr  =  t8hwvalr + *hj3ftvzu-1;
+    fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1;
+    if (*qfx3vhct == 1) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        tmpwk = exp(*fpdlcqk9m0ibglfx);
+        *fpdlcqk9t8hwvalr   = tmpwk / (1.0 + tmpwk);
+         fpdlcqk9t8hwvalr  += *afpc0kns;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
       }
-      if (*qfx3vhct == 4) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9t8hwvalr   = 1.0e0 - exp(-exp(*fpdlcqk9m0ibglfx));
-               fpdlcqk9t8hwvalr  += *afpc0kns;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+    }
+    if (*qfx3vhct == 2) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9t8hwvalr   = exp(*fpdlcqk9m0ibglfx);
+         fpdlcqk9t8hwvalr  += *afpc0kns;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
       }
-      if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-          fpdlcqk9t8hwvalr  =  t8hwvalr +     *hj3ftvzu-1;
-          fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu-2;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9t8hwvalr   = exp(*fpdlcqk9m0ibglfx);
-               fpdlcqk9t8hwvalr  += *afpc0kns;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+    }
+    if (*qfx3vhct == 4) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9t8hwvalr   = 1.0e0 - exp(-exp(*fpdlcqk9m0ibglfx));
+         fpdlcqk9t8hwvalr  += *afpc0kns;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
+      }
+    }
+    if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+      fpdlcqk9t8hwvalr  =  t8hwvalr +     *hj3ftvzu-1;
+      fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu-2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9t8hwvalr   = exp(*fpdlcqk9m0ibglfx);
+         fpdlcqk9t8hwvalr  += *afpc0kns;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
       }
-      if (*qfx3vhct == 8) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9t8hwvalr   = *fpdlcqk9m0ibglfx;
-               fpdlcqk9t8hwvalr  += *afpc0kns;
-               fpdlcqk9m0ibglfx += *wy1vqfzu;
-          }
+    }
+    if (*qfx3vhct == 8) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9t8hwvalr   = *fpdlcqk9m0ibglfx;
+         fpdlcqk9t8hwvalr  += *afpc0kns;
+         fpdlcqk9m0ibglfx += *wy1vqfzu;
       }
+    }
   }
 }
 
@@ -433,168 +446,169 @@ void yiumjq3nshjlwft5(int *qfx3vhct, double tlgduey8[], double ufgqj9ck[],
 
 
   if (*hj3ftvzu == 0) {
-      fpdlcqk9tlgduey8 = tlgduey8;
+    fpdlcqk9tlgduey8 = tlgduey8;
 
-      if (*qfx3vhct == 1 || *qfx3vhct == 4) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n");
-          fpdlcqk9tlgduey8 = tlgduey8;
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { // yyy
-              fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
-              fpdlcqk9ufgqj9ck = ufgqj9ck;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { // bbb
-                  ivqk2ywz = *fpdlcqk9tlgduey8 > 0.0 ? *fpdlcqk9tlgduey8*log(*fpdlcqk9tlgduey8) :0.0;
-                  if (*fpdlcqk9tlgduey8 < 1.0e0)
-                    ivqk2ywz += (1.0e0 - *fpdlcqk9tlgduey8) * log(1.0e0 - *fpdlcqk9tlgduey8);
-                  xd4mybgj = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr);
-                  if (xd4mybgj < *dn3iasxug) {
-                    smmu  = *fpdlcqk9t8hwvalr;
-                    qvd7yktm = *fpdlcqk9tlgduey8 *
-                            ((smmu < *dn3iasxug) ? *vsoihn1r : log(smmu));
-                    afwp5imx = 1.0e0 - smmu;
-                    qvd7yktm += (afwp5imx < *dn3iasxug ? *vsoihn1r : log(afwp5imx))*
-                             (1.0 - *fpdlcqk9tlgduey8);
-                  } else {
-                      qvd7yktm =     *fpdlcqk9tlgduey8  * log(      *fpdlcqk9t8hwvalr) +
-                           (1.0 - *fpdlcqk9tlgduey8) * log(1.0 - *fpdlcqk9t8hwvalr);
-                  }
-                  lfu2qhid += *fpdlcqk9ufgqj9ck++ * (ivqk2ywz - qvd7yktm);
-                  fpdlcqk9t8hwvalr += *afpc0kns;
-                  fpdlcqk9tlgduey8++;
-              } // bbb
-              jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
-              prev_lfu2qhid = lfu2qhid;
-          } // yyy
-      }
-      if (*qfx3vhct == 2) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n");
-          fpdlcqk9tlgduey8 = tlgduey8;
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
-              fpdlcqk9ufgqj9ck = ufgqj9ck;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ?  *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8 +
-                          *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr) :
-                          *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8;
-                  lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
-                  fpdlcqk9t8hwvalr += *afpc0kns;
-                  fpdlcqk9tlgduey8++;
-              }
-              jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
-              prev_lfu2qhid = lfu2qhid;
+    if (*qfx3vhct == 1 || *qfx3vhct == 4) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n");
+      fpdlcqk9tlgduey8 = tlgduey8;
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { // yyy
+        fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
+        fpdlcqk9ufgqj9ck = ufgqj9ck;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { // bbb
+          ivqk2ywz = *fpdlcqk9tlgduey8 > 0.0 ? *fpdlcqk9tlgduey8*log(*fpdlcqk9tlgduey8) :0.0;
+          if (*fpdlcqk9tlgduey8 < 1.0e0)
+            ivqk2ywz += (1.0e0 - *fpdlcqk9tlgduey8) * log(1.0e0 - *fpdlcqk9tlgduey8);
+          xd4mybgj = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr);
+          if (xd4mybgj < *dn3iasxug) {
+            smmu  = *fpdlcqk9t8hwvalr;
+            qvd7yktm = *fpdlcqk9tlgduey8 *
+                    ((smmu < *dn3iasxug) ? *vsoihn1r : log(smmu));
+            afwp5imx = 1.0e0 - smmu;
+            qvd7yktm += (afwp5imx < *dn3iasxug ? *vsoihn1r : log(afwp5imx))*
+                     (1.0 - *fpdlcqk9tlgduey8);
+          } else {
+            qvd7yktm =     *fpdlcqk9tlgduey8  * log(      *fpdlcqk9t8hwvalr) +
+                 (1.0 - *fpdlcqk9tlgduey8) * log(1.0 - *fpdlcqk9t8hwvalr);
           }
+          lfu2qhid += *fpdlcqk9ufgqj9ck++ * (ivqk2ywz - qvd7yktm);
+          fpdlcqk9t8hwvalr += *afpc0kns;
+          fpdlcqk9tlgduey8++;
+        } // bbb
+        jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
+        prev_lfu2qhid = lfu2qhid;
+      } // yyy
+    }
+    if (*qfx3vhct == 2) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n");
+      fpdlcqk9tlgduey8 = tlgduey8;
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
+        fpdlcqk9ufgqj9ck = ufgqj9ck;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ?  *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8 +
+                  *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr) :
+                  *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8;
+          lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
+          fpdlcqk9t8hwvalr += *afpc0kns;
+          fpdlcqk9tlgduey8++;
+        }
+        jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
+        prev_lfu2qhid = lfu2qhid;
       }
-      if (*qfx3vhct == 5) {
-          fpdlcqk9tlgduey8 = tlgduey8;
-          if (2 * *afpc0kns != *wy1vqfzu) { //Rprintf
-              Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
-          } //Rprintf
-          for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-              fpdlcqk9m0ibglfx = m0ibglfx + 2*yq6lorbx-1;
-              fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
-              fpdlcqk9ufgqj9ck = ufgqj9ck;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  jtnbu2hz = exp(*fpdlcqk9m0ibglfx);
-                  uqnkc6zg = fvlmz9iyC_tldz5ion(jtnbu2hz);
-                  xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ?  (jtnbu2hz - 1.0e0) *
-                          log(*fpdlcqk9tlgduey8) + (log(jtnbu2hz) -
-                              *fpdlcqk9tlgduey8  / *fpdlcqk9t8hwvalr -
-                          log(*fpdlcqk9t8hwvalr)) * jtnbu2hz - uqnkc6zg :
-                         -1000.0e0;
-                  xd4mybgj   = -xd4mybgj;
-                  lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
-                  fpdlcqk9m0ibglfx += *wy1vqfzu;
-                  fpdlcqk9t8hwvalr  += *afpc0kns;
-                  fpdlcqk9tlgduey8++;
-              }
-              jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
-              prev_lfu2qhid = lfu2qhid;
+    }
+    if (*qfx3vhct == 5) {
+      fpdlcqk9tlgduey8 = tlgduey8;
+      if (2 * *afpc0kns != *wy1vqfzu) { //Rprintf
+        Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_nipyajc1\n");
+      } //Rprintf
+      for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+        fpdlcqk9m0ibglfx = m0ibglfx + 2*yq6lorbx-1;
+        fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
+        fpdlcqk9ufgqj9ck = ufgqj9ck;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            jtnbu2hz = exp(*fpdlcqk9m0ibglfx);
+            uqnkc6zg = fvlmz9iyC_tldz5ion(jtnbu2hz);
+              xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ?  (jtnbu2hz - 1.0e0) *
+                      log(*fpdlcqk9tlgduey8) + (log(jtnbu2hz) -
+                          *fpdlcqk9tlgduey8  / *fpdlcqk9t8hwvalr -
+                      log(*fpdlcqk9t8hwvalr)) * jtnbu2hz - uqnkc6zg :
+                     -1000.0e0;
+              xd4mybgj   = -xd4mybgj;
+              lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
+              fpdlcqk9m0ibglfx += *wy1vqfzu;
+              fpdlcqk9t8hwvalr  += *afpc0kns;
+              fpdlcqk9tlgduey8++;
           }
-      }
-      if (*qfx3vhct == 3) {
-          if (*dqk5muto == 0) {
-              anopu9vi = 34.0e0;
-              for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] >  anopu9vi) {
-                          hdqsx7bk = exp(anopu9vi);
-                          lbgwvp3q = 1;
-                      } else
-                      if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] < -anopu9vi) {
-                          hdqsx7bk = exp(-anopu9vi);
-                          lbgwvp3q = 1;
-                      } else {
-                          hdqsx7bk = exp(m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1) *
-                                       *wy1vqfzu]);
-                          lbgwvp3q = 0;
-                      }
-                      xd4mybgj = (tlgduey8[ayfnwr1v-1+ (yq6lorbx-1)* *ftnjamu2] < 1.0e0) ?
-                       1.0e0 : tlgduey8[ayfnwr1v-1+ (yq6lorbx-1)* *ftnjamu2];
-                       lfu2qhid += ufgqj9ck[ayfnwr1v-1] *
-                                 (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] *
-                       log(xd4mybgj/t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]) +
-                                 (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] +
-                                     hdqsx7bk) *
-                            log((t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns ] +
-                                     hdqsx7bk) / (hdqsx7bk +
-                                  tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2])));
-                  }
-                  jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
-                  prev_lfu2qhid = lfu2qhid;
-              }
-          } else {
-              anopu9vi = 34.0e0;
-              for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] >  anopu9vi) {
-                          hdqsx7bk = exp(anopu9vi);
-                          lbgwvp3q = 1;
-                      } else
-                      if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] < -anopu9vi) {
-                          hdqsx7bk = exp(-anopu9vi);
-                          lbgwvp3q = 1;
-                      } else {
-                        hdqsx7bk = exp(m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu]);
-                        lbgwvp3q = 0;
-                      }
-                      if (lbgwvp3q) {
-                          uqnkc6zg = hofjnx2e = 0.0e0;
-                      } else {
-                          uqnkc6zg = fvlmz9iyC_tldz5ion(hdqsx7bk +
-                                  tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]);
-                          hofjnx2e = fvlmz9iyC_tldz5ion(hdqsx7bk);
-                      }
-                      txlvcey5 = fvlmz9iyC_tldz5ion(1.0e0 +
-                              tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]);
-                      xd4mybgj = hdqsx7bk * log(hdqsx7bk / (hdqsx7bk +
-                              t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns])) +
-                              uqnkc6zg - hofjnx2e - txlvcey5;
-                      if (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] > 0.0e0) {
-                 xd4mybgj += tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] *
-                     log(t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]
-             / (hdqsx7bk + t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]));
-                      }
-                      lfu2qhid += ufgqj9ck[ayfnwr1v-1] * xd4mybgj;
-                  }
-                  jxacz5qu[yq6lorbx] = 2.0 * (-0.5 * lfu2qhid + 0.5 * prev_lfu2qhid);
-                  prev_lfu2qhid = lfu2qhid;
-              }
-              lfu2qhid *= (-0.5);
+          jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
+          prev_lfu2qhid = lfu2qhid;
+        }
+    }
+    if (*qfx3vhct == 3) {
+      if (*dqk5muto == 0) {
+        anopu9vi = 34.0e0;
+        for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] >  anopu9vi) {
+              hdqsx7bk = exp(anopu9vi);
+              lbgwvp3q = 1;
+            } else
+            if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] < -anopu9vi) {
+              hdqsx7bk = exp(-anopu9vi);
+              lbgwvp3q = 1;
+            } else {
+              hdqsx7bk = exp(m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu]);
+              lbgwvp3q = 0;
+            }
+            xd4mybgj = (tlgduey8[ayfnwr1v-1+ (yq6lorbx-1)* *ftnjamu2] < 1.0e0) ?
+             1.0e0 : tlgduey8[ayfnwr1v-1+ (yq6lorbx-1)* *ftnjamu2];
+            lfu2qhid += ufgqj9ck[ayfnwr1v-1] *
+                      (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] *
+            log(xd4mybgj/t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]) +
+                      (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] +
+                          hdqsx7bk) *
+                 log((t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns ] +
+                          hdqsx7bk) / (hdqsx7bk +
+                       tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2])));
           }
-      }
-      if (*qfx3vhct == 8) {
-          fpdlcqk9tlgduey8 = tlgduey8;
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
-              fpdlcqk9ufgqj9ck = ufgqj9ck;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  xd4mybgj       = *fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr;
-                  lfu2qhid     += *fpdlcqk9ufgqj9ck++ * pow(xd4mybgj, (double) 2.0);
-                  fpdlcqk9t8hwvalr += *afpc0kns;
-              }
               jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
               prev_lfu2qhid = lfu2qhid;
+            }
+      } else {
+        anopu9vi = 34.0e0;
+        for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] >  anopu9vi) {
+              hdqsx7bk = exp(anopu9vi);
+              lbgwvp3q = 1;
+            } else
+            if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] < -anopu9vi) {
+              hdqsx7bk = exp(-anopu9vi);
+              lbgwvp3q = 1;
+            } else {
+              hdqsx7bk = exp(m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu]);
+              lbgwvp3q = 0;
+            }
+            if (lbgwvp3q) {
+                  uqnkc6zg = hofjnx2e = 0.0e0;
+              } else {
+                  uqnkc6zg = fvlmz9iyC_tldz5ion(hdqsx7bk +
+                          tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]);
+                  hofjnx2e = fvlmz9iyC_tldz5ion(hdqsx7bk);
+              }
+              txlvcey5 = fvlmz9iyC_tldz5ion(1.0e0 +
+                      tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]);
+              xd4mybgj = hdqsx7bk * log(hdqsx7bk / (hdqsx7bk +
+                      t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns])) +
+                      uqnkc6zg - hofjnx2e - txlvcey5;
+              if (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] > 0.0e0) {
+                xd4mybgj += tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] *
+                         log(t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]
+                 / (hdqsx7bk + t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]));
+              }
+              lfu2qhid += ufgqj9ck[ayfnwr1v-1] * xd4mybgj;
           }
+          jxacz5qu[yq6lorbx] = 2.0 * (-0.5 * lfu2qhid + 0.5 * prev_lfu2qhid);
+          prev_lfu2qhid = lfu2qhid;
+        }
+        lfu2qhid *= (-0.5);
       }
+    }
+    if (*qfx3vhct == 8) {
+      fpdlcqk9tlgduey8 = tlgduey8;
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1;
+        fpdlcqk9ufgqj9ck = ufgqj9ck;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          xd4mybgj       = *fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr;
+          lfu2qhid     += *fpdlcqk9ufgqj9ck++ * pow(xd4mybgj, (double) 2.0);
+          fpdlcqk9t8hwvalr += *afpc0kns;
+        }
+        jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid);
+        prev_lfu2qhid = lfu2qhid;
+      }
+    }
   } else {
      fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
      fpdlcqk9t8hwvalr = t8hwvalr + *hj3ftvzu-1;
@@ -622,88 +636,89 @@ void yiumjq3nshjlwft5(int *qfx3vhct, double tlgduey8[], double ufgqj9ck[],
               fpdlcqk9tlgduey8++;
           }
       }
-      if (*qfx3vhct == 2) {
-          if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n");
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              xd4mybgj = *fpdlcqk9tlgduey8 > 0.0e0 ?  *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8 +
-                      *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr) :
-                      *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8;
-              lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
-              fpdlcqk9t8hwvalr += *afpc0kns;
-              fpdlcqk9tlgduey8++;
-          }
-      }
-      if (*qfx3vhct == 5) {
-              fpdlcqk9tlgduey8   =   tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
-              fpdlcqk9t8hwvalr  =  t8hwvalr +  *hj3ftvzu-1;
-              fpdlcqk9ufgqj9ck  =  ufgqj9ck;
-              fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu-1;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  jtnbu2hz = exp(*fpdlcqk9m0ibglfx);
-                  uqnkc6zg = fvlmz9iyC_tldz5ion(jtnbu2hz);
-                  xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ? (jtnbu2hz - 1.0e0) *
-                           log(*fpdlcqk9tlgduey8) + jtnbu2hz * (log(jtnbu2hz) -
-                           *fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr - log(*fpdlcqk9t8hwvalr)) -
-                           uqnkc6zg : -1000.0e0;
-                  xd4mybgj   = -xd4mybgj;
-                  lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
-                  fpdlcqk9t8hwvalr  += *afpc0kns;
-                  fpdlcqk9m0ibglfx += *wy1vqfzu;
-                  fpdlcqk9tlgduey8++;
-              }
-      }
-      if (*qfx3vhct == 3) {
-          if (*dqk5muto == 0) {
-              anopu9vi = 34.0e0;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  if (m0ibglfx[2 * *hj3ftvzu -1 + (ayfnwr1v-1) * *wy1vqfzu] >  anopu9vi) {
-                      hdqsx7bk = exp(anopu9vi);
-                      lbgwvp3q = 1;
-                  } else
-                  if (m0ibglfx[2 * *hj3ftvzu -1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) {
-                      hdqsx7bk = exp(-anopu9vi);
-                      lbgwvp3q = 1;
-                  } else {
-                      hdqsx7bk = exp(m0ibglfx[2* *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
-                      lbgwvp3q = 0;
-                  }
-                  xd4mybgj =  (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] < 1.0e0) ?
-                    1.0e0 : tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2];
-                  lfu2qhid +=        ufgqj9ck[ayfnwr1v-1] *
-                                   (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] *
-                         log(xd4mybgj/t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]) +
-                         (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk) *
-                    log((t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns] + hdqsx7bk)
-                / (hdqsx7bk+tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2])));
-              }
-          } else {
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  hdqsx7bk = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
-                  uqnkc6zg = fvlmz9iyC_tldz5ion(hdqsx7bk + tlgduey8[ayfnwr1v-1 +
-                                        (*hj3ftvzu-1) * *ftnjamu2]);
-                  hofjnx2e = fvlmz9iyC_tldz5ion(hdqsx7bk);
-                  txlvcey5 = fvlmz9iyC_tldz5ion(1.0e0 + tlgduey8[ayfnwr1v-1 +
-                                        (*hj3ftvzu-1) * *ftnjamu2]);
-                  xd4mybgj = hdqsx7bk * log(hdqsx7bk / (hdqsx7bk + t8hwvalr[*hj3ftvzu-1 +
-                          (ayfnwr1v-1) * *afpc0kns])) + uqnkc6zg - hofjnx2e - txlvcey5;
-
-                  if (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] > 0.0e0) {
-                      xd4mybgj += tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] *
-                          log(t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]
-                  / (hdqsx7bk + t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]));
-                  }
-                  lfu2qhid += ufgqj9ck[ayfnwr1v-1] * xd4mybgj;
-              }
-              lfu2qhid *= (-0.5e0);
-          }
-      }
-      if (*qfx3vhct == 8) {
+    if (*qfx3vhct == 2) {
+      if (*afpc0kns != *wy1vqfzu)
+        Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n");
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        xd4mybgj = *fpdlcqk9tlgduey8 > 0.0e0 ?  *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8 +
+                *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr) :
+                *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8;
+        lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
+        fpdlcqk9t8hwvalr += *afpc0kns;
+        fpdlcqk9tlgduey8++;
+      }
+    }
+    if (*qfx3vhct == 5) {
+      fpdlcqk9tlgduey8   =   tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
+      fpdlcqk9t8hwvalr  =  t8hwvalr +  *hj3ftvzu-1;
+      fpdlcqk9ufgqj9ck  =  ufgqj9ck;
+      fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu-1;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        jtnbu2hz = exp(*fpdlcqk9m0ibglfx);
+        uqnkc6zg = fvlmz9iyC_tldz5ion(jtnbu2hz);
+        xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ? (jtnbu2hz - 1.0e0) *
+                 log(*fpdlcqk9tlgduey8) + jtnbu2hz * (log(jtnbu2hz) -
+                 *fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr - log(*fpdlcqk9t8hwvalr)) -
+                 uqnkc6zg : -1000.0e0;
+        xd4mybgj   = -xd4mybgj;
+        lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj;
+        fpdlcqk9t8hwvalr  += *afpc0kns;
+        fpdlcqk9m0ibglfx += *wy1vqfzu;
+        fpdlcqk9tlgduey8++;
+      }
+    }
+    if (*qfx3vhct == 3) {
+      if (*dqk5muto == 0) {
+          anopu9vi = 34.0e0;
           for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              lfu2qhid += *fpdlcqk9ufgqj9ck++ *
-                        pow(*fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr, (double) 2.0);
-              fpdlcqk9t8hwvalr += *afpc0kns;
-          }
+            if (m0ibglfx[2 * *hj3ftvzu -1 + (ayfnwr1v-1) * *wy1vqfzu] >  anopu9vi) {
+              hdqsx7bk = exp(anopu9vi);
+              lbgwvp3q = 1;
+            } else
+            if (m0ibglfx[2 * *hj3ftvzu -1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) {
+              hdqsx7bk = exp(-anopu9vi);
+              lbgwvp3q = 1;
+            } else {
+                hdqsx7bk = exp(m0ibglfx[2* *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
+                lbgwvp3q = 0;
+            }
+            xd4mybgj =  (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] < 1.0e0) ?
+              1.0e0 : tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2];
+            lfu2qhid +=        ufgqj9ck[ayfnwr1v-1] *
+                             (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] *
+                   log(xd4mybgj/t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]) +
+                   (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk) *
+              log((t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns] + hdqsx7bk)
+          / (hdqsx7bk+tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2])));
+        }
+    } else {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        hdqsx7bk = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
+        uqnkc6zg = fvlmz9iyC_tldz5ion(hdqsx7bk + tlgduey8[ayfnwr1v-1 +
+                              (*hj3ftvzu-1) * *ftnjamu2]);
+        hofjnx2e = fvlmz9iyC_tldz5ion(hdqsx7bk);
+        txlvcey5 = fvlmz9iyC_tldz5ion(1.0e0 + tlgduey8[ayfnwr1v-1 +
+                              (*hj3ftvzu-1) * *ftnjamu2]);
+        xd4mybgj = hdqsx7bk * log(hdqsx7bk / (hdqsx7bk + t8hwvalr[*hj3ftvzu-1 +
+                (ayfnwr1v-1) * *afpc0kns])) + uqnkc6zg - hofjnx2e - txlvcey5;
+
+        if (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] > 0.0e0) {
+            xd4mybgj += tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] *
+                log(t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]
+        / (hdqsx7bk + t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]));
+        }
+        lfu2qhid += ufgqj9ck[ayfnwr1v-1] * xd4mybgj;
+      }
+        lfu2qhid *= (-0.5e0);
+      }
+    }
+    if (*qfx3vhct == 8) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        lfu2qhid += *fpdlcqk9ufgqj9ck++ *
+                  pow(*fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr, (double) 2.0);
+        fpdlcqk9t8hwvalr += *afpc0kns;
       }
+    }
   }
   *jxacz5qu = 2.0e0 * lfu2qhid;
 }
@@ -720,30 +735,31 @@ void yiumjq3nflncwkfq76(double lncwkfq7[], double w8znmyce[], int *ftnjamu2,
   fpdlcqk9lncwkfq7  =  lncwkfq7;
 
   if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9w8znmyce++ = 1.0e0;
+      *fpdlcqk9w8znmyce++ = 0.0e0;
+    }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9w8znmyce++ = 0.0e0;
+      *fpdlcqk9w8znmyce++ = 1.0e0;
+    }
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
       for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9w8znmyce++ = 1.0e0;
-          *fpdlcqk9w8znmyce++ = 0.0e0;
-      }
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9w8znmyce++ = 0.0e0;
-          *fpdlcqk9w8znmyce++ = 1.0e0;
-      }
-      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
-              *fpdlcqk9w8znmyce++ = 0.0e0;
-          }
+        *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
+        *fpdlcqk9w8znmyce++ = 0.0e0;
       }
+    }
   } else {
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9w8znmyce++ = 1.0e0;
+    }
+    if (*br5ovgcj != *ftnjamu2)
+      Rprintf("Error: *br5ovgcj != *ftnjamu2 in C_flncwkfq76\n");
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
       for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9w8znmyce++ = 1.0e0;
-      }
-      if (*br5ovgcj != *ftnjamu2) Rprintf("Error: *br5ovgcj != *ftnjamu2 in C_flncwkfq76\n");
-      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
-          }
+        *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
       }
+    }
   }
 }
 
@@ -769,109 +785,109 @@ void yiumjq3nflncwkfq71(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int
   fpdlcqk9lncwkfq72 =  lncwkfq7;
 
   if (*qfx3vhct == 3 || *qfx3vhct == 5) { // ggg
-      if (*br5ovgcj != 2 * *ftnjamu2)  //Rprintf
-          Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_flncwkfq71\n");
-      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
-              *fpdlcqk9w8znmyce++ = 0.0e0;
-          }
+    if (*br5ovgcj != 2 * *ftnjamu2)  //Rprintf
+      Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_flncwkfq71\n");
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
+        *fpdlcqk9w8znmyce++ = 0.0e0;
       }
+    }
 
-      if (*unhycz0e == 0) {
-          for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
-              fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
-              fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
-                  *fpdlcqk9w8znmyce++ = 0.0e0;
-              }
-          }
-      } else {
-          fpdlcqk9vm4xjosb = vm4xjosb;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++)
-              *fpdlcqk9vm4xjosb++ = 0.0;
-
-          fpdlcqk9lncwkfq7 = lncwkfq7;
-          for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-              fpdlcqk9vm4xjosb  = vm4xjosb;
-              for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0);
-                   fpdlcqk9vm4xjosb++;
-              }
-          }
-
-          fpdlcqk9vm4xjosb = vm4xjosb;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9vm4xjosb   *= (-0.50e0);
-               fpdlcqk9vm4xjosb++;
-          }
+  if (*unhycz0e == 0) {
+    for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
+      fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
+      fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
+        *fpdlcqk9w8znmyce++ = 0.0e0;
       }
+    }
+  } else {
+    fpdlcqk9vm4xjosb = vm4xjosb;
+    for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++)
+      *fpdlcqk9vm4xjosb++ = 0.0;
+
+    fpdlcqk9lncwkfq7 = lncwkfq7;
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+      fpdlcqk9vm4xjosb  = vm4xjosb;
+      for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0);
+         fpdlcqk9vm4xjosb++;
+      }
+    }
+
+    fpdlcqk9vm4xjosb = vm4xjosb;
+    for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9vm4xjosb   *= (-0.50e0);
+       fpdlcqk9vm4xjosb++;
+    }
+  }
 
   } else { // ggg and hhh
-      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
-          }
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++;
       }
+    }
 
-      if (*unhycz0e == 0) {
-          for (i0spbklx  = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
-              fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
-              fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
-              }
-          }
-      } else {
-          fpdlcqk9vm4xjosb = vm4xjosb;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++)
-              *fpdlcqk9vm4xjosb++ = 0.0;
-
-          fpdlcqk9lncwkfq7 = lncwkfq7;
-          for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-              fpdlcqk9vm4xjosb  = vm4xjosb;
-              for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0);
-                   fpdlcqk9vm4xjosb++;
-              }
-          }
-
-          fpdlcqk9vm4xjosb = vm4xjosb;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9vm4xjosb   *= (-0.50e0);
-               fpdlcqk9vm4xjosb++;
-          }
+  if (*unhycz0e == 0) {
+    for (i0spbklx  = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
+      fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
+      fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
       }
+    }
+  } else {
+    fpdlcqk9vm4xjosb = vm4xjosb;
+    for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++)
+      *fpdlcqk9vm4xjosb++ = 0.0;
+
+    fpdlcqk9lncwkfq7 = lncwkfq7;
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+      fpdlcqk9vm4xjosb  = vm4xjosb;
+      for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0);
+         fpdlcqk9vm4xjosb++;
+      }
+    }
+
+    fpdlcqk9vm4xjosb = vm4xjosb;
+    for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9vm4xjosb   *= (-0.50e0);
+       fpdlcqk9vm4xjosb++;
+    }
+    }
   } // hhh
 
   if (*yru9olks > 0) {
-      if (*qfx3vhct == 3 || *qfx3vhct == 5) { // kkk
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9w8znmyce++ = 1.0e0;
-              *fpdlcqk9w8znmyce++ = 0.0e0;
-          }
+    if (*qfx3vhct == 3 || *qfx3vhct == 5) { // kkk
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9w8znmyce++ = 1.0e0;
+        *fpdlcqk9w8znmyce++ = 0.0e0;
+      }
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9w8znmyce++ = 0.0e0;
+        *fpdlcqk9w8znmyce++ = 1.0e0;
+      }
+      if (*yru9olks > 1) {
+        fpdlcqk9kifxa0he  = kifxa0he; //  + (i0spbklx-1) * *ftnjamu2;
+        for (i0spbklx = 2; i0spbklx <= *yru9olks; i0spbklx++) {
           for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9w8znmyce++ = 0.0e0;
-              *fpdlcqk9w8znmyce++ = 1.0e0;
-          }
-          if (*yru9olks > 1) {
-              fpdlcqk9kifxa0he  = kifxa0he; //  + (i0spbklx-1) * *ftnjamu2;
-              for (i0spbklx = 2; i0spbklx <= *yru9olks; i0spbklx++) {
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      *fpdlcqk9w8znmyce++ = *fpdlcqk9kifxa0he++;
-                      *fpdlcqk9w8znmyce++ = 0.0e0;
-                  }
-              }
-          }
-      } else { // kkk and iii
-          fpdlcqk9kifxa0he  = kifxa0he; //   + (i0spbklx-1) * *ftnjamu2;
-          for (i0spbklx = 1; i0spbklx <= *yru9olks; i0spbklx++) {
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9w8znmyce++ = *fpdlcqk9kifxa0he++;
-              }
+            *fpdlcqk9w8znmyce++ = *fpdlcqk9kifxa0he++;
+            *fpdlcqk9w8znmyce++ = 0.0e0;
           }
-      } // iii
+        }
+      }
+    } else { // kkk and iii
+      fpdlcqk9kifxa0he  = kifxa0he; //   + (i0spbklx-1) * *ftnjamu2;
+      for (i0spbklx = 1; i0spbklx <= *yru9olks; i0spbklx++) {
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9w8znmyce++ = *fpdlcqk9kifxa0he++;
+        }
+      }
+    } // iii
   } // if (*yru9olks > 0)
   Free(wkumc9idtgiyxdw1);    Free(wkumc9iddufozmt7);
 }
@@ -900,126 +916,126 @@ void yiumjq3nflncwkfq72(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int
   fpdlcqk9lncwkfq7  =  lncwkfq7;
 
   for (gp1jxzuh = 1; gp1jxzuh <= *eu3oxvyb; gp1jxzuh++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++)
-          *fpdlcqk9w8znmyce++ = 0.0e0;
+    for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++)
+      *fpdlcqk9w8znmyce++ = 0.0e0;
   }
   fpdlcqk9w8znmyce = w8znmyce;
 
   if (*qfx3vhct == 3 || *qfx3vhct == 5) {
 
-      if (*br5ovgcj != 2 * *ftnjamu2)  //Rprintf
-          Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_flncwkfq72\n");
-      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-          fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb      * *br5ovgcj;
-          fpdlcqk9lncwkfq7  =  lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                  *fpdlcqk9w8znmyce = *fpdlcqk9lncwkfq7;
-                  fpdlcqk9w8znmyce += 2 + *br5ovgcj;
-              }
-              fpdlcqk9lncwkfq7++;
-              fpdlcqk9w8znmyce -= *afpc0kns * *br5ovgcj;  // fixed at 20100406
-          }
-          sedf7mxb += *afpc0kns;
-      }
+    if (*br5ovgcj != 2 * *ftnjamu2)  //Rprintf
+      Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_flncwkfq72\n");
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+      fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb      * *br5ovgcj;
+      fpdlcqk9lncwkfq7  =  lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+            *fpdlcqk9w8znmyce = *fpdlcqk9lncwkfq7;
+            fpdlcqk9w8znmyce += 2 + *br5ovgcj;
+        }
+        fpdlcqk9lncwkfq7++;
+        fpdlcqk9w8znmyce -= *afpc0kns * *br5ovgcj;  // fixed at 20100406
+      }
+      sedf7mxb += *afpc0kns;
+    }
   } else {
-      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-          fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb      * *br5ovgcj;
-          fpdlcqk9lncwkfq7  =  lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-                  *fpdlcqk9w8znmyce++  = *fpdlcqk9lncwkfq7;
-                   fpdlcqk9w8znmyce   += *br5ovgcj;
-              }
-              fpdlcqk9lncwkfq7++;
-              fpdlcqk9w8znmyce -= *wy1vqfzu * *br5ovgcj;  // fixed at 20100406
-          }
-          sedf7mxb += *wy1vqfzu;
-      }
+    for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+      fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb      * *br5ovgcj;
+      fpdlcqk9lncwkfq7  =  lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+          *fpdlcqk9w8znmyce++  = *fpdlcqk9lncwkfq7;
+           fpdlcqk9w8znmyce   += *br5ovgcj;
+        }
+        fpdlcqk9lncwkfq7++;
+        fpdlcqk9w8znmyce -= *wy1vqfzu * *br5ovgcj;  // fixed at 20100406
+      }
+      sedf7mxb += *wy1vqfzu;
+    }
   }
 
   if (*fmzq7aob == 0) {
-      if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-          for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
-              fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
-              fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
-              fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb                     * *br5ovgcj;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
-                  for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                      *fpdlcqk9w8znmyce  = uqnkc6zg;
-                       fpdlcqk9w8znmyce += 2 + *br5ovgcj;
-                  }
-                  fpdlcqk9w8znmyce -= *afpc0kns * *br5ovgcj;  // fixed at 20100406
-              }
-              sedf7mxb += *afpc0kns;
-          }
-      } else {
-          for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
-              fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
-              fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
-              fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb                     * *br5ovgcj;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
-                  for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-                      *fpdlcqk9w8znmyce++  = uqnkc6zg;
-                       fpdlcqk9w8znmyce   += *br5ovgcj;
-                  }
-                  fpdlcqk9w8znmyce -= *wy1vqfzu * *br5ovgcj;  // fixed at 20100406
-              }
-              sedf7mxb += *wy1vqfzu;
+    if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+      for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
+        fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
+        fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
+        fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb                     * *br5ovgcj;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
+          for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+            *fpdlcqk9w8znmyce  = uqnkc6zg;
+             fpdlcqk9w8znmyce += 2 + *br5ovgcj;
+          }
+          fpdlcqk9w8znmyce -= *afpc0kns * *br5ovgcj;  // fixed at 20100406
+        }
+        sedf7mxb += *afpc0kns;
+      }
+    } else {
+      for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
+        fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
+        fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
+        fpdlcqk9w8znmyce = w8znmyce +  sedf7mxb                     * *br5ovgcj;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
+          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+            *fpdlcqk9w8znmyce++  = uqnkc6zg;
+             fpdlcqk9w8znmyce   += *br5ovgcj;
           }
+          fpdlcqk9w8znmyce -= *wy1vqfzu * *br5ovgcj;  // fixed at 20100406
+        }
+        sedf7mxb += *wy1vqfzu;
       }
+    }
   } else {
-      if (*unhycz0e == 1) {
-
-          fpdlcqk9vm4xjosb = vm4xjosb;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++)
-              *fpdlcqk9vm4xjosb++ = 0.0;
-
-          fpdlcqk9lncwkfq7 = lncwkfq7;
-          for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
-              fpdlcqk9vm4xjosb  = vm4xjosb;
-              for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0);
-                   fpdlcqk9vm4xjosb++;
-              }
-          }
+    if (*unhycz0e == 1) {
+
+      fpdlcqk9vm4xjosb = vm4xjosb;
+      for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++)
+        *fpdlcqk9vm4xjosb++ = 0.0;
+
+      fpdlcqk9lncwkfq7 = lncwkfq7;
+      for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) {
+        fpdlcqk9vm4xjosb  = vm4xjosb;
+        for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0);
+           fpdlcqk9vm4xjosb++;
+        }
+      }
 
-          fpdlcqk9vm4xjosb = vm4xjosb;
-          for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9vm4xjosb   *= (-0.50e0);
-               fpdlcqk9vm4xjosb++;
+      fpdlcqk9vm4xjosb = vm4xjosb;
+      for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9vm4xjosb   *= (-0.50e0);
+         fpdlcqk9vm4xjosb++;
+      }
+    } else {
+      if (*qfx3vhct == 3 || *qfx3vhct == 5) {
+        for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
+          fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
+          fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
+          fpdlcqk9w8znmyce = w8znmyce + (sedf7mxb+i0spbklx-1) * *br5ovgcj;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
+            for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+              *fpdlcqk9w8znmyce++ = uqnkc6zg;
+               fpdlcqk9w8znmyce++;
+            }
           }
+        }
+        sedf7mxb += hyqwtp6i;
       } else {
-          if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-              for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
-                  fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
-                  fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
-                  fpdlcqk9w8znmyce = w8znmyce + (sedf7mxb+i0spbklx-1) * *br5ovgcj;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
-                      for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                          *fpdlcqk9w8znmyce++ = uqnkc6zg;
-                           fpdlcqk9w8znmyce++;
-                      }
-                  }
-              }
-              sedf7mxb += hyqwtp6i;
-          } else {
-              for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
-                  fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
-                  fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
-                  fpdlcqk9w8znmyce = w8znmyce + (sedf7mxb+i0spbklx-1) * *br5ovgcj;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
-                      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
-                          *fpdlcqk9w8znmyce++ = uqnkc6zg;
-                  }
-              }
-              sedf7mxb += hyqwtp6i;
+        for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) {
+          fpdlcqk9lncwkfq71 =  lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2;
+          fpdlcqk9lncwkfq72 =  lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2;
+          fpdlcqk9w8znmyce = w8znmyce + (sedf7mxb+i0spbklx-1) * *br5ovgcj;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++;
+            for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++)
+              *fpdlcqk9w8znmyce++ = uqnkc6zg;
           }
+        }
+        sedf7mxb += hyqwtp6i;
       }
+    }
   }
   Free(wkumc9idtgiyxdw1);     Free(wkumc9iddufozmt7);
 }
@@ -1047,120 +1063,121 @@ void yiumjq3nietam6(double tlgduey8[], double m0ibglfx[], double y7sdgtqi[],
   fpdlcqk9tlgduey8    =   tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
   fpdlcqk9ufgqj9ck   =  ufgqj9ck;
   if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-      fpdlcqk9m0ibglfx1 = m0ibglfx + 2 * *hj3ftvzu-1;
-      fpdlcqk9m0ibglfx2 = m0ibglfx + 2 * *hj3ftvzu-2;
-  } else
-      fpdlcqk9m0ibglfx  = m0ibglfx +     *hj3ftvzu-1;
+    fpdlcqk9m0ibglfx1 = m0ibglfx + 2 * *hj3ftvzu-1;
+    fpdlcqk9m0ibglfx2 = m0ibglfx + 2 * *hj3ftvzu-2;
+  } else {
+    fpdlcqk9m0ibglfx  = m0ibglfx +     *hj3ftvzu-1;
+  }
 
   if (*qfx3vhct == 1 || *qfx3vhct == 4 ||
       *qfx3vhct == 3 || *qfx3vhct == 5) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          msrdjh5f  += *fpdlcqk9ufgqj9ck;
-          vogkfwt8 += *fpdlcqk9tlgduey8++ * *fpdlcqk9ufgqj9ck++;
-      }
-      gyuq8dex = vogkfwt8 / msrdjh5f;
-      fpdlcqk9tlgduey8    =   tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      msrdjh5f  += *fpdlcqk9ufgqj9ck;
+      vogkfwt8 += *fpdlcqk9tlgduey8++ * *fpdlcqk9ufgqj9ck++;
+    }
+    gyuq8dex = vogkfwt8 / msrdjh5f;
+    fpdlcqk9tlgduey8    =   tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
   }
   if (*qfx3vhct == 1) {
-      yiumjq3ng2vwexyk9(&gyuq8dex, &g2vwexykp);
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9m0ibglfx  = g2vwexykp;
-           fpdlcqk9m0ibglfx += *wy1vqfzu;
-      }
+    yiumjq3ng2vwexyk9(&gyuq8dex, &g2vwexykp);
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9m0ibglfx  = g2vwexykp;
+       fpdlcqk9m0ibglfx += *wy1vqfzu;
+    }
   }
   if (*qfx3vhct == 2) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9m0ibglfx  = log(*fpdlcqk9tlgduey8++ + myoffset);
-           fpdlcqk9m0ibglfx += *wy1vqfzu;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9m0ibglfx  = log(*fpdlcqk9tlgduey8++ + myoffset);
+       fpdlcqk9m0ibglfx += *wy1vqfzu;
+    }
   }
   if (*qfx3vhct == 4) {
-      yiumjq3nbewf1pzv9(&gyuq8dex, &qa8ltuhj);
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9m0ibglfx  = qa8ltuhj;
-           fpdlcqk9m0ibglfx += *wy1vqfzu;
-      }
+    yiumjq3nbewf1pzv9(&gyuq8dex, &qa8ltuhj);
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9m0ibglfx  = qa8ltuhj;
+       fpdlcqk9m0ibglfx += *wy1vqfzu;
+    }
   }
   if (*qfx3vhct == 5) {
-      if (*wr0lbopv == 1 || *wr0lbopv == 2) {
-          kwvo4ury = *wr0lbopv == 1 ? log(gyuq8dex + myoffset) :
-                                     log((6.0 / 8.0) * gyuq8dex);
-          cpz4fgkx = log(y7sdgtqi[3 + *afpc0kns + *hj3ftvzu -1] + myoffset);
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx2  = kwvo4ury;
-              *fpdlcqk9m0ibglfx1  = cpz4fgkx;
-               fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-               fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-          }
-      } else {
-          cpz4fgkx = log(y7sdgtqi[3 + *afpc0kns + *hj3ftvzu -1] + myoffset);
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx2  = log(*fpdlcqk9tlgduey8++ + myoffset);
-              *fpdlcqk9m0ibglfx1  = cpz4fgkx;
-               fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-               fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-          }
+    if (*wr0lbopv == 1 || *wr0lbopv == 2) {
+      kwvo4ury = *wr0lbopv == 1 ? log(gyuq8dex + myoffset) :
+                                 log((6.0 / 8.0) * gyuq8dex);
+      cpz4fgkx = log(y7sdgtqi[3 + *afpc0kns + *hj3ftvzu -1] + myoffset);
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx2  = kwvo4ury;
+        *fpdlcqk9m0ibglfx1  = cpz4fgkx;
+         fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+         fpdlcqk9m0ibglfx2 += *wy1vqfzu;
       }
+    } else {
+      cpz4fgkx = log(y7sdgtqi[3 + *afpc0kns + *hj3ftvzu -1] + myoffset);
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx2  = log(*fpdlcqk9tlgduey8++ + myoffset);
+        *fpdlcqk9m0ibglfx1  = cpz4fgkx;
+         fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+         fpdlcqk9m0ibglfx2 += *wy1vqfzu;
+      }
+    }
   }
   if (*qfx3vhct == 3) {
-      if (*wr0lbopv == 1) {
-          kwvo4ury = log(gyuq8dex + myoffset);
-          cpz4fgkx = log(y7sdgtqi[3 + *hj3ftvzu -1] + myoffset);
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx2  = kwvo4ury;
-              *fpdlcqk9m0ibglfx1  = cpz4fgkx;
-               fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-               fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-          }
-      } else if (*wr0lbopv == 2) {
-          kwvo4ury = log(gyuq8dex + myoffset);
-          khl0iysgk   = y7sdgtqi[3 + *hj3ftvzu -1];
-          cpz4fgkx = log(khl0iysgk);
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              tad5vhsu = *fpdlcqk9tlgduey8 - gyuq8dex;
-              *fpdlcqk9m0ibglfx2  = (tad5vhsu < 3.0 * gyuq8dex) ? kwvo4ury :
-                               log(sqrt(*fpdlcqk9tlgduey8));
-              *fpdlcqk9m0ibglfx1  = cpz4fgkx;
-               fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-               fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-               fpdlcqk9tlgduey8++;
-          }
-      } else if (*wr0lbopv == 3) {
-          kwvo4ury = log(gyuq8dex + myoffset);
-          khl0iysgk = y7sdgtqi[3 + *hj3ftvzu -1];
-          cpz4fgkx = log(khl0iysgk);
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              tad5vhsu = *fpdlcqk9tlgduey8 - gyuq8dex;
-              if (tad5vhsu > gyuq8dex) {
-                  *fpdlcqk9m0ibglfx2 = log(0.5 * (*fpdlcqk9tlgduey8 + gyuq8dex));
-                  *fpdlcqk9m0ibglfx1 = log(khl0iysgk / (tad5vhsu / gyuq8dex));
-              } else
-              if (*fpdlcqk9tlgduey8 < (gyuq8dex / 4.0)) {
-                  *fpdlcqk9m0ibglfx2 = log(gyuq8dex / 4.0);
-                  *fpdlcqk9m0ibglfx1 = cpz4fgkx;
-              } else {
-                  *fpdlcqk9m0ibglfx2 = kwvo4ury;
-                  *fpdlcqk9m0ibglfx1 = cpz4fgkx;
-              }
-               fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-               fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-               fpdlcqk9tlgduey8++;
-          }
-      } else {
-          cpz4fgkx = log(y7sdgtqi[3 + *hj3ftvzu - 1]);
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9m0ibglfx2  = log(*fpdlcqk9tlgduey8++ + myoffset);
-              *fpdlcqk9m0ibglfx1  = cpz4fgkx;
-               fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-               fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-          }
+    if (*wr0lbopv == 1) {
+      kwvo4ury = log(gyuq8dex + myoffset);
+      cpz4fgkx = log(y7sdgtqi[3 + *hj3ftvzu -1] + myoffset);
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx2  = kwvo4ury;
+        *fpdlcqk9m0ibglfx1  = cpz4fgkx;
+         fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+         fpdlcqk9m0ibglfx2 += *wy1vqfzu;
+      }
+    } else if (*wr0lbopv == 2) {
+      kwvo4ury = log(gyuq8dex + myoffset);
+      khl0iysgk   = y7sdgtqi[3 + *hj3ftvzu -1];
+      cpz4fgkx = log(khl0iysgk);
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        tad5vhsu = *fpdlcqk9tlgduey8 - gyuq8dex;
+        *fpdlcqk9m0ibglfx2  = (tad5vhsu < 3.0 * gyuq8dex) ? kwvo4ury :
+                         log(sqrt(*fpdlcqk9tlgduey8));
+        *fpdlcqk9m0ibglfx1  = cpz4fgkx;
+         fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+         fpdlcqk9m0ibglfx2 += *wy1vqfzu;
+         fpdlcqk9tlgduey8++;
+      }
+    } else if (*wr0lbopv == 3) {
+      kwvo4ury = log(gyuq8dex + myoffset);
+      khl0iysgk = y7sdgtqi[3 + *hj3ftvzu -1];
+      cpz4fgkx = log(khl0iysgk);
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        tad5vhsu = *fpdlcqk9tlgduey8 - gyuq8dex;
+        if (tad5vhsu > gyuq8dex) {
+          *fpdlcqk9m0ibglfx2 = log(0.5 * (*fpdlcqk9tlgduey8 + gyuq8dex));
+          *fpdlcqk9m0ibglfx1 = log(khl0iysgk / (tad5vhsu / gyuq8dex));
+        } else
+        if (*fpdlcqk9tlgduey8 < (gyuq8dex / 4.0)) {
+          *fpdlcqk9m0ibglfx2 = log(gyuq8dex / 4.0);
+          *fpdlcqk9m0ibglfx1 = cpz4fgkx;
+        } else {
+          *fpdlcqk9m0ibglfx2 = kwvo4ury;
+          *fpdlcqk9m0ibglfx1 = cpz4fgkx;
+        }
+        fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+        fpdlcqk9m0ibglfx2 += *wy1vqfzu;
+        fpdlcqk9tlgduey8++;
+      }
+    } else {
+      cpz4fgkx = log(y7sdgtqi[3 + *hj3ftvzu - 1]);
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9m0ibglfx2  = log(*fpdlcqk9tlgduey8++ + myoffset);
+        *fpdlcqk9m0ibglfx1  = cpz4fgkx;
+         fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+         fpdlcqk9m0ibglfx2 += *wy1vqfzu;
       }
+    }
   }
   if (*qfx3vhct == 8) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9m0ibglfx  = *fpdlcqk9tlgduey8++;
-           fpdlcqk9m0ibglfx += *wy1vqfzu;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9m0ibglfx  = *fpdlcqk9tlgduey8++;
+       fpdlcqk9m0ibglfx += *wy1vqfzu;
+    }
   }
 }
 
@@ -1211,8 +1228,8 @@ void yiumjq3ndlgpwe0c(double tlgduey8[], double ufgqj9ck[], double m0ibglfx[],
 
   fpdlcqk9m0ibglfx  = m0ibglfx  +  *hj3ftvzu-1;
   if (*qfx3vhct == 3 || *qfx3vhct == 5) {
-      fpdlcqk9m0ibglfx1 = m0ibglfx  +  2 * *hj3ftvzu-1;
-      fpdlcqk9m0ibglfx2 = m0ibglfx  +  2 * *hj3ftvzu-2;
+    fpdlcqk9m0ibglfx1 = m0ibglfx  +  2 * *hj3ftvzu-1;
+    fpdlcqk9m0ibglfx2 = m0ibglfx  +  2 * *hj3ftvzu-2;
   }
   fpdlcqk9t8hwvalr   =  t8hwvalr  +  *hj3ftvzu-1;
   fpdlcqk9vm4xjosb    =  vm4xjosb;
@@ -1223,242 +1240,249 @@ void yiumjq3ndlgpwe0c(double tlgduey8[], double ufgqj9ck[], double m0ibglfx[],
   fpdlcqk9ghz9vuba    =   ghz9vuba  + (*hj3ftvzu-1) * *ftnjamu2;
 
   if (*qfx3vhct == 1) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          xd4mybgja = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr);
-          xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck++;
-          if (xd4mybgja < *dn3iasxug) xd4mybgja = *dn3iasxug;
-          if (xd4mybgjb < *dn3iasxug) {
-              xd4mybgjb = *dn3iasxug;
-              *fpdlcqk9wpuarq2m = *uaf2xgqy;
-          } else {
-              *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
-          }
-          *fpdlcqk9rbne6ouj++ = xd4mybgjb;
-          *fpdlcqk9ghz9vuba++  = *fpdlcqk9m0ibglfx +
-                         (*fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr) / xd4mybgja;
-          fpdlcqk9t8hwvalr  += *afpc0kns;
-          fpdlcqk9wpuarq2m   += *npjlv3mr;
-          fpdlcqk9m0ibglfx += *wy1vqfzu;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      xd4mybgja = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr);
+      xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck++;
+      if (xd4mybgja < *dn3iasxug) xd4mybgja = *dn3iasxug;
+      if (xd4mybgjb < *dn3iasxug) {
+        xd4mybgjb = *dn3iasxug;
+        *fpdlcqk9wpuarq2m = *uaf2xgqy;
+      } else {
+        *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
+      }
+      *fpdlcqk9rbne6ouj++ = xd4mybgjb;
+      *fpdlcqk9ghz9vuba++  = *fpdlcqk9m0ibglfx +
+                     (*fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr) / xd4mybgja;
+      fpdlcqk9t8hwvalr  += *afpc0kns;
+      fpdlcqk9wpuarq2m   += *npjlv3mr;
+      fpdlcqk9m0ibglfx += *wy1vqfzu;
+    }
   }
   if (*qfx3vhct == 2) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          xd4mybgja = *fpdlcqk9t8hwvalr;
-          xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck++;
-          if (xd4mybgjb < *dn3iasxug) {
-              xd4mybgjb = *dn3iasxug;
-              *fpdlcqk9wpuarq2m = *uaf2xgqy;
-          } else {
-              *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
-          }
-          *fpdlcqk9rbne6ouj = xd4mybgjb;
-          if (*fpdlcqk9tlgduey8 > 0.0e0) {
-              xd4mybgjc = xd4mybgja;
-              if (xd4mybgjc < *dn3iasxug) xd4mybgjc = *dn3iasxug;
-              *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx + (*fpdlcqk9tlgduey8 - xd4mybgjc) / xd4mybgjc;
-          } else {
-              *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx - 1.0e0;
-          }
-          fpdlcqk9m0ibglfx += *wy1vqfzu;
-          fpdlcqk9t8hwvalr  += *afpc0kns;
-          fpdlcqk9wpuarq2m   += *npjlv3mr;
-          fpdlcqk9rbne6ouj++;
-          fpdlcqk9tlgduey8++;
-          fpdlcqk9ghz9vuba++;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      xd4mybgja = *fpdlcqk9t8hwvalr;
+      xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck++;
+      if (xd4mybgjb < *dn3iasxug) {
+        xd4mybgjb = *dn3iasxug;
+        *fpdlcqk9wpuarq2m = *uaf2xgqy;
+      } else {
+        *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
+      }
+      *fpdlcqk9rbne6ouj = xd4mybgjb;
+      if (*fpdlcqk9tlgduey8 > 0.0e0) {
+        xd4mybgjc = xd4mybgja;
+        if (xd4mybgjc < *dn3iasxug)
+          xd4mybgjc = *dn3iasxug;
+        *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx + (*fpdlcqk9tlgduey8 - xd4mybgjc) / xd4mybgjc;
+      } else {
+        *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx - 1.0e0;
+      }
+      fpdlcqk9m0ibglfx += *wy1vqfzu;
+      fpdlcqk9t8hwvalr  += *afpc0kns;
+      fpdlcqk9wpuarq2m   += *npjlv3mr;
+      fpdlcqk9rbne6ouj++;
+      fpdlcqk9tlgduey8++;
+      fpdlcqk9ghz9vuba++;
+    }
   }
   if (*qfx3vhct == 4) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          if (*fpdlcqk9t8hwvalr < *dn3iasxug || *fpdlcqk9t8hwvalr > 1.0e0 - *dn3iasxug) {
-              xd4mybgja = *dn3iasxug;
-              xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck;
-              if (xd4mybgjb < *dn3iasxug) {
-                  xd4mybgjb = *dn3iasxug;
-                  *fpdlcqk9wpuarq2m = *uaf2xgqy;
-              } else {
-                  *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
-              }
-              *fpdlcqk9rbne6ouj = xd4mybgjb;
-              *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx +
-                           (*fpdlcqk9tlgduey8 - *fpdlcqk9t8hwvalr) / xd4mybgja;
-          } else {
-              xd4mybgja =  -(1.0e0 - *fpdlcqk9t8hwvalr) * log(1.0e0 - *fpdlcqk9t8hwvalr);
-              if (xd4mybgja < *dn3iasxug) {
-                  xd4mybgja = *dn3iasxug;
-              }
-              xd4mybgjb = -xd4mybgja * *fpdlcqk9ufgqj9ck *
-                       log(1.0e0 - *fpdlcqk9t8hwvalr) / *fpdlcqk9t8hwvalr;
-              if (xd4mybgjb < *dn3iasxug) {
-                  xd4mybgjb = *dn3iasxug;
-              }
-              *fpdlcqk9rbne6ouj = xd4mybgjb;
-               *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
-               *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx +
-                           (*fpdlcqk9tlgduey8 - *fpdlcqk9t8hwvalr) / xd4mybgja;
-          }
-          fpdlcqk9m0ibglfx += *wy1vqfzu;
-          fpdlcqk9t8hwvalr  += *afpc0kns;
-          fpdlcqk9wpuarq2m   += *npjlv3mr;
-          fpdlcqk9ufgqj9ck++;
-          fpdlcqk9rbne6ouj++;
-          fpdlcqk9tlgduey8++;
-          fpdlcqk9ghz9vuba++;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      if (*fpdlcqk9t8hwvalr < *dn3iasxug || *fpdlcqk9t8hwvalr > 1.0e0 - *dn3iasxug) {
+        xd4mybgja = *dn3iasxug;
+        xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck;
+        if (xd4mybgjb < *dn3iasxug) {
+          xd4mybgjb = *dn3iasxug;
+          *fpdlcqk9wpuarq2m = *uaf2xgqy;
+        } else {
+          *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
+        }
+        *fpdlcqk9rbne6ouj = xd4mybgjb;
+        *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx +
+                     (*fpdlcqk9tlgduey8 - *fpdlcqk9t8hwvalr) / xd4mybgja;
+      } else {
+        xd4mybgja =  -(1.0e0 - *fpdlcqk9t8hwvalr) * log(1.0e0 - *fpdlcqk9t8hwvalr);
+        if (xd4mybgja < *dn3iasxug) {
+          xd4mybgja = *dn3iasxug;
+        }
+        xd4mybgjb = -xd4mybgja * *fpdlcqk9ufgqj9ck *
+                 log(1.0e0 - *fpdlcqk9t8hwvalr) / *fpdlcqk9t8hwvalr;
+        if (xd4mybgjb < *dn3iasxug) {
+            xd4mybgjb = *dn3iasxug;
+        }
+        *fpdlcqk9rbne6ouj = xd4mybgjb;
+         *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb);
+         *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx +
+                     (*fpdlcqk9tlgduey8 - *fpdlcqk9t8hwvalr) / xd4mybgja;
+      }
+      fpdlcqk9m0ibglfx += *wy1vqfzu;
+      fpdlcqk9t8hwvalr  += *afpc0kns;
+      fpdlcqk9wpuarq2m   += *npjlv3mr;
+      fpdlcqk9ufgqj9ck++;
+      fpdlcqk9rbne6ouj++;
+      fpdlcqk9tlgduey8++;
+      fpdlcqk9ghz9vuba++;
+    }
   }
   if (*qfx3vhct == 5) {
-      fvn3iasxug = 1.0e-20;
-      anopu9vi  = 34.0e0;
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] >  anopu9vi) {
-              jtnbu2hz = exp(anopu9vi);
-              lbgwvp3q = 1;
-          } else
-          if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) {
-              jtnbu2hz = exp(-anopu9vi);
-              lbgwvp3q = 1;
-          } else {
-              jtnbu2hz = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
-              lbgwvp3q = 0;
-          }
-          tyee_C_vdgam1(&jtnbu2hz, &uqnkc6zgd, &okobr6tcex);
-          if (okobr6tcex != 1) {
-            Rprintf("Error 1 in dlgpwe0c okobr6tcex=%d. Ploughing on.\n", okobr6tcex);
-          }
-          xk7dnvei = t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns];
-          if (xk7dnvei < fvn3iasxug) { xk7dnvei = fvn3iasxug; }
-          dldshape = log(tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]) +
-                     log(jtnbu2hz) - log(xk7dnvei) + 1.0e0 - uqnkc6zgd -
-                         tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei;
-
-
-
-          tyee_C_vtgam1(&jtnbu2hz, &uqnkc6zgt, &okobr6tcex);
-          if (okobr6tcex != 1) {
-            Rprintf("Error 2 in dlgpwe0c okobr6tcex=%d. Ploughing on.\n", okobr6tcex);
-          }
-          rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
-          ufgqj9ck[ayfnwr1v-1] * jtnbu2hz;
-          xd4mybgja = jtnbu2hz * uqnkc6zgt - 1.0e0;
-          rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
-          ufgqj9ck[ayfnwr1v-1] * jtnbu2hz * xd4mybgja;
-
-          if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] < *dn3iasxug) {
-              rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = *dn3iasxug;
-              wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
-          } else {
-              wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] =
-                  sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2]);
-          }
-          if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] < *dn3iasxug) {
-              rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = *dn3iasxug;
-              wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
-          } else {
-              wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] =
-              sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2]);
-          }
+    fvn3iasxug = 1.0e-20;
+    anopu9vi  = 34.0e0;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] >  anopu9vi) {
+        jtnbu2hz = exp(anopu9vi);
+        lbgwvp3q = 1;
+      } else
+      if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) {
+        jtnbu2hz = exp(-anopu9vi);
+        lbgwvp3q = 1;
+      } else {
+        jtnbu2hz = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
+        lbgwvp3q = 0;
+      }
+      tyee_C_vdgam1(&jtnbu2hz, &uqnkc6zgd, &okobr6tcex);
+      if (okobr6tcex != 1) {
+        Rprintf("Error 1 in dlgpwe0c okobr6tcex=%d. Ploughing on.\n", okobr6tcex);
+      }
+      xk7dnvei = t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns];
+      if (xk7dnvei < fvn3iasxug) {
+        xk7dnvei = fvn3iasxug;
+      }
+      dldshape = log(tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]) +
+                 log(jtnbu2hz) - log(xk7dnvei) + 1.0e0 - uqnkc6zgd -
+                     tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei;
 
 
-          if (xd4mybgja < fvn3iasxug) { xd4mybgja = fvn3iasxug; }
-          ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
-          m0ibglfx[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *wy1vqfzu] +
-          tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei - 1.0e0;
 
-          ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
-          m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] + dldshape / xd4mybgja;
+      tyee_C_vtgam1(&jtnbu2hz, &uqnkc6zgt, &okobr6tcex);
+      if (okobr6tcex != 1) {
+        Rprintf("Error 2 in dlgpwe0c okobr6tcex=%d. Ploughing on.\n", okobr6tcex);
+      }
+      rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
+      ufgqj9ck[ayfnwr1v-1] * jtnbu2hz;
+      xd4mybgja = jtnbu2hz * uqnkc6zgt - 1.0e0;
+      rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
+      ufgqj9ck[ayfnwr1v-1] * jtnbu2hz * xd4mybgja;
+
+      if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] < *dn3iasxug) {
+        rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = *dn3iasxug;
+        wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
+      } else {
+        wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] =
+            sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2]);
       }
-  }
-  if (*qfx3vhct == 3) {
-      anopu9vi = 34.0e0;
-      fvn3iasxug = 1.0e-20;
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] >  anopu9vi) {
-              hdqsx7bk = exp(anopu9vi);
-              lbgwvp3q = 1;
-          } else
-          if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) {
-              hdqsx7bk = exp(-anopu9vi);
-              lbgwvp3q = 1;
-          } else {
-              hdqsx7bk = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
-              lbgwvp3q = 0;
-          }
 
-          xk7dnvei = t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns];
-          if (xk7dnvei < fvn3iasxug) { xk7dnvei = fvn3iasxug; }
-              tmp1 = tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk;
-              tyee_C_vdgam1(&tmp1, &xd4mybgja, &okobr6tcex);
+      if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] < *dn3iasxug) {
+        rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = *dn3iasxug;
+        wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
+      } else {
+        wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] =
+        sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2]);
+      }
 
-              if (okobr6tcex != 1) {
-                  Rprintf("error in dlgpwe0c okobr6tcex 3: %3d \n", okobr6tcex);
-              }
-              tyee_C_vdgam1(&hdqsx7bk, &xd4mybgjb, &okobr6tcex);
-              if (okobr6tcex != 1) {
-                  Rprintf("error in dlgpwe0c okobr6tcex 4: %3d \n", okobr6tcex);
-              }
-              dldk = xd4mybgja - xd4mybgjb -
-                (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk)
-                / (xk7dnvei + hdqsx7bk) + 1.0 + log(hdqsx7bk / (xk7dnvei + hdqsx7bk));
 
-          dkdeta = hdqsx7bk;
+      if (xd4mybgja < fvn3iasxug) {
+        xd4mybgja = fvn3iasxug;
+      }
+      ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
+      m0ibglfx[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *wy1vqfzu] +
+      tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei - 1.0e0;
 
-          kkmat[0] = hdqsx7bk;
-          nm0eljqk[0] = xk7dnvei;
+      ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
+      m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] + dldshape / xd4mybgja;
+    }
+  }
+  if (*qfx3vhct == 3) {
+    anopu9vi = 34.0e0;
+    fvn3iasxug = 1.0e-20;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] >  anopu9vi) {
+        hdqsx7bk = exp(anopu9vi);
+        lbgwvp3q = 1;
+      } else
+      if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) {
+        hdqsx7bk = exp(-anopu9vi);
+        lbgwvp3q = 1;
+      } else {
+        hdqsx7bk = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]);
+        lbgwvp3q = 0;
+      }
+
+      xk7dnvei = t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns];
+      if (xk7dnvei < fvn3iasxug) { xk7dnvei = fvn3iasxug; }
+      tmp1 = tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk;
+      tyee_C_vdgam1(&tmp1, &xd4mybgja, &okobr6tcex);
+
+      if (okobr6tcex != 1) {
+        Rprintf("error in dlgpwe0c okobr6tcex 3: %3d \n", okobr6tcex);
+      }
+      tyee_C_vdgam1(&hdqsx7bk, &xd4mybgjb, &okobr6tcex);
+      if (okobr6tcex != 1) {
+        Rprintf("error in dlgpwe0c okobr6tcex 4: %3d \n", okobr6tcex);
+      }
+      dldk = xd4mybgja - xd4mybgjb -
+        (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk)
+        / (xk7dnvei + hdqsx7bk) + 1.0 + log(hdqsx7bk / (xk7dnvei + hdqsx7bk));
+
+      dkdeta = hdqsx7bk;
+
+      kkmat[0] = hdqsx7bk;
+      nm0eljqk[0] = xk7dnvei;
           sguwj9ty = 5000;
-          fvlmz9iyC_enbin9(bzmd6ftvmat, kkmat, nm0eljqk,
-                        &n2kersmx, &pqneb2ra, &dvhw1ulq, &pqneb2ra,
-                        &ux3nadiw, rsynp1go, &sguwj9ty);
-          if (dvhw1ulq != 1) {
-              *zjkrtol8 = 5;
-              Rprintf("Error. Exiting enbin9; dvhw1ulq is %d\n", dvhw1ulq);
-              return;
-          }
+      fvlmz9iyC_enbin9(bzmd6ftvmat, kkmat, nm0eljqk,
+                    &n2kersmx, &pqneb2ra, &dvhw1ulq, &pqneb2ra,
+                    &ux3nadiw, rsynp1go, &sguwj9ty);
+      if (dvhw1ulq != 1) {
+          *zjkrtol8 = 5;
+          Rprintf("Error. Exiting enbin9; dvhw1ulq is %d\n", dvhw1ulq);
+          return;
+      }
 
-          ed2ldk2 = -bzmd6ftvmat[0] - 1.0e0 / hdqsx7bk + 1.0e0 / (hdqsx7bk + xk7dnvei);
-          rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
-          ufgqj9ck[ayfnwr1v-1] * xk7dnvei * hdqsx7bk / (xk7dnvei + hdqsx7bk);
-          rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
-          ufgqj9ck[ayfnwr1v-1] * hdqsx7bk *
-                (-bzmd6ftvmat[0] * hdqsx7bk - 1.0e0 + hdqsx7bk / (hdqsx7bk + xk7dnvei));
-
-          if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] < *dn3iasxug) {
-              rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = *dn3iasxug;
-              wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
-          } else
-              wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] =
-              sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2]);
-          if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] < *dn3iasxug) {
-              rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = *dn3iasxug;
-               wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
-          } else {
-                  wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] =
-            sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2]);
-          }
+      ed2ldk2 = -bzmd6ftvmat[0] - 1.0e0 / hdqsx7bk + 1.0e0 / (hdqsx7bk + xk7dnvei);
+      rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
+      ufgqj9ck[ayfnwr1v-1] * xk7dnvei * hdqsx7bk / (xk7dnvei + hdqsx7bk);
+      rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
+      ufgqj9ck[ayfnwr1v-1] * hdqsx7bk *
+            (-bzmd6ftvmat[0] * hdqsx7bk - 1.0e0 + hdqsx7bk / (hdqsx7bk + xk7dnvei));
 
-          ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
-          m0ibglfx[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *wy1vqfzu] +
-          tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei - 1.0e0;
-          ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
-          m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] +
-          dldk / (dkdeta * ed2ldk2);
+      if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] < *dn3iasxug) {
+        rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = *dn3iasxug;
+        wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
+      } else
+        wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] =
+        sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2]);
+
+      if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] < *dn3iasxug) {
+        rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = *dn3iasxug;
+        wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy;
+      } else {
+        wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] =
+        sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2]);
       }
+
+      ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] =
+      m0ibglfx[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *wy1vqfzu] +
+      tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei - 1.0e0;
+      ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] =
+      m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] +
+      dldk / (dkdeta * ed2ldk2);
+    }
   }
   if (*qfx3vhct == 8) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9rbne6ouj  = *fpdlcqk9ufgqj9ck++;
-          *fpdlcqk9wpuarq2m   = sqrt(*fpdlcqk9rbne6ouj);
-          *fpdlcqk9ghz9vuba++ = *fpdlcqk9tlgduey8++;
-           fpdlcqk9wpuarq2m  += *npjlv3mr;
-           fpdlcqk9rbne6ouj++;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9rbne6ouj  = *fpdlcqk9ufgqj9ck++;
+      *fpdlcqk9wpuarq2m   = sqrt(*fpdlcqk9rbne6ouj);
+      *fpdlcqk9ghz9vuba++ = *fpdlcqk9tlgduey8++;
+       fpdlcqk9wpuarq2m  += *npjlv3mr;
+       fpdlcqk9rbne6ouj++;
+    }
   }
 
   if (*unhycz0e == 1) {
-      fpdlcqk9ghz9vuba = ghz9vuba  + ((*qfx3vhct == 3 || *qfx3vhct == 5) ?
-                  (2 * *hj3ftvzu-2) : (*hj3ftvzu-1)) * *ftnjamu2;
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9ghz9vuba -= *fpdlcqk9vm4xjosb++;
-           fpdlcqk9ghz9vuba++;
-      }
+    fpdlcqk9ghz9vuba = ghz9vuba  + ((*qfx3vhct == 3 || *qfx3vhct == 5) ?
+                (2 * *hj3ftvzu-2) : (*hj3ftvzu-1)) * *ftnjamu2;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9ghz9vuba -= *fpdlcqk9vm4xjosb++;
+       fpdlcqk9ghz9vuba++;
+    }
   }
 }
 
@@ -1510,7 +1534,7 @@ void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
   dn3iasxug  = y7sdgtqi[0];
   uaf2xgqy = sqrt(dn3iasxug);
   if (qfx3vhct == 1 || qfx3vhct == 4)
-      vsoihn1r = log(dn3iasxug);
+    vsoihn1r = log(dn3iasxug);
   bh2vgiay   = y7sdgtqi[1];
   rsynp1go = y7sdgtqi[2];
 
@@ -1535,191 +1559,191 @@ void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
   ceqzd1hi653: hmayv1xt2 = 1.0e0;
 
   if (f7svlajr == 0) {
-      for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-          yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2,
-                       wy1vqfzu, afpc0kns, &qfx3vhct, &yq6lorbx, ufgqj9ck, &wr0lbopv);
-      }
+    for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+      yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2,
+                   wy1vqfzu, afpc0kns, &qfx3vhct, &yq6lorbx, ufgqj9ck, &wr0lbopv);
+    }
   } else
   if (f7svlajr == 2) {
-      yiumjq3npkc4ejib(w8znmyce, zshtfg8c, m0ibglfx,
-                   ftnjamu2, wy1vqfzu, br5ovgcj, &xlpjcg3s,
-                   &vtsou9pz, &yu6izdrc, &qfx3vhct, &unhycz0e,
-                   vm4xjosb);
+    yiumjq3npkc4ejib(w8znmyce, zshtfg8c, m0ibglfx,
+                 ftnjamu2, wy1vqfzu, br5ovgcj, &xlpjcg3s,
+                 &vtsou9pz, &yu6izdrc, &qfx3vhct, &unhycz0e,
+                 vm4xjosb);
   }
 
   yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
                 afpc0kns, &qfx3vhct, &yu6izdrc);
 
   if (f7svlajr == 2) {
-      yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                   t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                   &kvowz9ht, m0ibglfx, wkumc9idrpto5qwb, &yu6izdrc,
-                   &dn3iasxug, &vsoihn1r, &pqneb2ra);
+    yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                 t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                 &kvowz9ht, m0ibglfx, wkumc9idrpto5qwb, &yu6izdrc,
+                 &dn3iasxug, &vsoihn1r, &pqneb2ra);
   } else {
-     wkumc9idrpto5qwb[0] = -1.0e0;
+   wkumc9idrpto5qwb[0] = -1.0e0;
   }
 
 
   for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) {
 
-      for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-          yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx,
-                       t8hwvalr, ghz9vuba, rbne6ouj,
-                       wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy,
-                       ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-                       &yq6lorbx, &qfx3vhct, zjkrtol8, &unhycz0e, vm4xjosb);
-      }
+    for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+      yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx,
+                   t8hwvalr, ghz9vuba, rbne6ouj,
+                   wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy,
+                   ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+                   &yq6lorbx, &qfx3vhct, zjkrtol8, &unhycz0e, vm4xjosb);
+    }
 
 
-      fpdlcqk9vc6hatuj = vc6hatuj; fpdlcqk9w8znmyce = w8znmyce;
-      for (yq6lorbx = 1; yq6lorbx <= xlpjcg3s; yq6lorbx++)
-          for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++)
-               *fpdlcqk9vc6hatuj++ = *fpdlcqk9w8znmyce++;
-
-
-
-          if (qfx3vhct == 3 || qfx3vhct == 5) {
-              Rprintf("20100410; Error: this definitely does not work\n");
-              if (2 * *wy1vqfzu * *ftnjamu2 != *br5ovgcj)  //Rprintf
-                  Rprintf("Error: 2 * *wy1vqfzu * *ftnjamu2 != *br5ovgcj in C_cqo_2\n");
-              fpdlcqk9vc6hatuj = vc6hatuj;
-              for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
-                  fpdlcqk9wpuarq2m  = wpuarq2m;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
-                          *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m++;
-                           fpdlcqk9vc6hatuj++;
-                      }
-                  }
-              }
-          } else {
-              if (*wy1vqfzu * *ftnjamu2 != *br5ovgcj)  //Rprintf
-                  Rprintf("Error: *wy1vqfzu * *ftnjamu2 != *br5ovgcj in C_cqo_2\n");
-              fpdlcqk9vc6hatuj = vc6hatuj;
-              for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
-                  fpdlcqk9wpuarq2m = wpuarq2m;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
-                          *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m++;
-                           fpdlcqk9vc6hatuj++;
-                      }
-                  }
-              }
+    fpdlcqk9vc6hatuj = vc6hatuj; fpdlcqk9w8znmyce = w8znmyce;
+    for (yq6lorbx = 1; yq6lorbx <= xlpjcg3s; yq6lorbx++)
+      for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++)
+         *fpdlcqk9vc6hatuj++ = *fpdlcqk9w8znmyce++;
+
+
+
+    if (qfx3vhct == 3 || qfx3vhct == 5) {
+      Rprintf("20100410; Error: this definitely does not work\n");
+      if (2 * *wy1vqfzu * *ftnjamu2 != *br5ovgcj)  //Rprintf
+        Rprintf("Error: 2 * *wy1vqfzu * *ftnjamu2 != *br5ovgcj in C_cqo_2\n");
+      fpdlcqk9vc6hatuj = vc6hatuj;
+      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
+        fpdlcqk9wpuarq2m  = wpuarq2m;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
+            *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m++;
+             fpdlcqk9vc6hatuj++;
+          }
+        }
+      }
+    } else {
+      if (*wy1vqfzu * *ftnjamu2 != *br5ovgcj)  //Rprintf
+        Rprintf("Error: *wy1vqfzu * *ftnjamu2 != *br5ovgcj in C_cqo_2\n");
+      fpdlcqk9vc6hatuj = vc6hatuj;
+      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
+        fpdlcqk9wpuarq2m = wpuarq2m;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
+            *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m++;
+             fpdlcqk9vc6hatuj++;
           }
+        }
+      }
+    }
 
 
-      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++)
-          ges1xpkr[gp1jxzuh-1] = gp1jxzuh;
+    for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++)
+      ges1xpkr[gp1jxzuh-1] = gp1jxzuh;
 
-      F77_CALL(vqrdca)(vc6hatuj, br5ovgcj, br5ovgcj, &xlpjcg3s, fasrkub3, ges1xpkr,
-                       wkumc9idtwk, &qemj9asg, &pvofyg8z);
+    F77_CALL(vqrdca)(vc6hatuj, br5ovgcj, br5ovgcj, &xlpjcg3s, fasrkub3, ges1xpkr,
+                     wkumc9idtwk, &qemj9asg, &pvofyg8z);
 
-      if (qemj9asg != xlpjcg3s) {
-          *zjkrtol8 = 2;
-          Rprintf("Failure or Error in cqo_2: vc6hatuj is not of full xwdf5ltg.\n");
-          Free(wkumc9idrpto5qwb);    Free(wkumc9idtwk);
-          return;
-      }
+    if (qemj9asg != xlpjcg3s) {
+      *zjkrtol8 = 2;
+      Rprintf("Failure or Error in cqo_2: vc6hatuj is not of full xwdf5ltg.\n");
+      Free(wkumc9idrpto5qwb);    Free(wkumc9idtwk);
+      return;
+    }
 
-      if (*npjlv3mr != *wy1vqfzu)  //Rprintf
-          Rprintf("Error: *wy1vqfzu != *npjlv3mr in C_cqo_2\n");
-      qnwamo0e1     = wkumc9idtwk;
-      fpdlcqk9wpuarq2m = wpuarq2m;
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          fpdlcqk9ghz9vuba = ghz9vuba +  ayfnwr1v-1;
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              *qnwamo0e1++    = *fpdlcqk9wpuarq2m++ * *fpdlcqk9ghz9vuba;
-               fpdlcqk9ghz9vuba += *ftnjamu2;
-          }
+    if (*npjlv3mr != *wy1vqfzu)  //Rprintf
+      Rprintf("Error: *wy1vqfzu != *npjlv3mr in C_cqo_2\n");
+    qnwamo0e1     = wkumc9idtwk;
+    fpdlcqk9wpuarq2m = wpuarq2m;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      fpdlcqk9ghz9vuba = ghz9vuba +  ayfnwr1v-1;
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+          *qnwamo0e1++    = *fpdlcqk9wpuarq2m++ * *fpdlcqk9ghz9vuba;
+           fpdlcqk9ghz9vuba += *ftnjamu2;
       }
+    }
 
-      ybnsqgo9 = 101;
+    ybnsqgo9 = 101;
 
-      F77_CALL(vdqrsl)(vc6hatuj, br5ovgcj, br5ovgcj, &qemj9asg, fasrkub3, wkumc9idtwk,
-                       &uylxqtc7, wkumc9idtwk + *wy1vqfzu * *ftnjamu2, zshtfg8c,
-                       &uylxqtc7, m0ibglfx, &ybnsqgo9, &algpft4y);
+    F77_CALL(vdqrsl)(vc6hatuj, br5ovgcj, br5ovgcj, &qemj9asg, fasrkub3, wkumc9idtwk,
+                     &uylxqtc7, wkumc9idtwk + *wy1vqfzu * *ftnjamu2, zshtfg8c,
+                     &uylxqtc7, m0ibglfx, &ybnsqgo9, &algpft4y);
 
 
-      if (*npjlv3mr != *wy1vqfzu)  //Rprintf
-          Rprintf("Error: *wy1vqfzu != *npjlv3mr in C_cqo_2\n");
-      fpdlcqk9m0ibglfx = m0ibglfx;
-      fpdlcqk9wpuarq2m   = wpuarq2m;
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-               *fpdlcqk9m0ibglfx /= *fpdlcqk9wpuarq2m++;
-                fpdlcqk9m0ibglfx++;
+    if (*npjlv3mr != *wy1vqfzu)  //Rprintf
+      Rprintf("Error: *wy1vqfzu != *npjlv3mr in C_cqo_2\n");
+    fpdlcqk9m0ibglfx = m0ibglfx;
+    fpdlcqk9wpuarq2m   = wpuarq2m;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+         *fpdlcqk9m0ibglfx /= *fpdlcqk9wpuarq2m++;
+          fpdlcqk9m0ibglfx++;
           }
-      }
+    }
 
 
-      if (unhycz0e == 1) {
-          if (qfx3vhct == 3 || qfx3vhct == 5) {
+    if (unhycz0e == 1) {
+      if (qfx3vhct == 3 || qfx3vhct == 5) {
 
-              if (2 * *afpc0kns != *wy1vqfzu)  //Rprintf
-                  Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_cqo_2\n");
+        if (2 * *afpc0kns != *wy1vqfzu)  //Rprintf
+          Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_cqo_2\n");
 
-              fpdlcqk9m0ibglfx = m0ibglfx;
-              fpdlcqk9vm4xjosb   = vm4xjosb;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
-                      *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb;
-                       fpdlcqk9m0ibglfx += 2;
-                  }
-                  fpdlcqk9vm4xjosb++;
-              }
-          } else {
-              fpdlcqk9m0ibglfx = m0ibglfx;
-              fpdlcqk9vm4xjosb   = vm4xjosb;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-                      *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb;
-                       fpdlcqk9m0ibglfx++;
-                  }
-                  fpdlcqk9vm4xjosb++;
-              }
+        fpdlcqk9m0ibglfx = m0ibglfx;
+        fpdlcqk9vm4xjosb   = vm4xjosb;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) {
+            *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb;
+             fpdlcqk9m0ibglfx += 2;
+          }
+          fpdlcqk9vm4xjosb++;
+        }
+      } else {
+        fpdlcqk9m0ibglfx = m0ibglfx;
+        fpdlcqk9vm4xjosb   = vm4xjosb;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+            *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb;
+             fpdlcqk9m0ibglfx++;
           }
+          fpdlcqk9vm4xjosb++;
+        }
       }
+    }
 
-      yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
-                     afpc0kns, &qfx3vhct, &yu6izdrc);
+    yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
+                   afpc0kns, &qfx3vhct, &yu6izdrc);
 
-      yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                    t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                    &kvowz9ht, m0ibglfx, tlq9wpes, &yu6izdrc,
-                    &dn3iasxug, &vsoihn1r, &pqneb2ra);
-
-      wiptsjx8 = fabs(*tlq9wpes - *wkumc9idrpto5qwb) / (1.0e0 +
-               fabs(*tlq9wpes));
-      if (wiptsjx8 < bh2vgiay) { // xxx
-          *zjkrtol8 = 0;
-          xui7hqwl[7] = kcm6jfob;
-
-
-          if (qfx3vhct == 3 || qfx3vhct == 5) {
-              yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                           t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                           &kvowz9ht, m0ibglfx, tlq9wpes, &yu6izdrc,
-                           &dn3iasxug, &vsoihn1r, &yu6izdrc);
-          }
-          x1jrewny = 1;
-          goto ceqzd1hi20097;
-      } else { // xxx and
-          *wkumc9idrpto5qwb = *tlq9wpes;
-          x1jrewny = 0;
+    yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                 t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                 &kvowz9ht, m0ibglfx, tlq9wpes, &yu6izdrc,
+                 &dn3iasxug, &vsoihn1r, &pqneb2ra);
+
+    wiptsjx8 = fabs(*tlq9wpes - *wkumc9idrpto5qwb) / (1.0e0 +
+             fabs(*tlq9wpes));
+    if (wiptsjx8 < bh2vgiay) { // xxx
+      *zjkrtol8 = 0;
+      xui7hqwl[7] = kcm6jfob;
+
+
+      if (qfx3vhct == 3 || qfx3vhct == 5) {
+        yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                     t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                     &kvowz9ht, m0ibglfx, tlq9wpes, &yu6izdrc,
+                     &dn3iasxug, &vsoihn1r, &yu6izdrc);
       }
+      x1jrewny = 1;
+      goto ceqzd1hi20097;
+    } else { // xxx and
+      *wkumc9idrpto5qwb = *tlq9wpes;
+      x1jrewny = 0;
+    }
   }
 
   ceqzd1hi20097: hmayv1xt1 = 0.0e0;
 
   if (x1jrewny == 1) {
-      Free(wkumc9idrpto5qwb);    Free(wkumc9idtwk);
-      return;
+    Free(wkumc9idrpto5qwb);    Free(wkumc9idtwk);
+    return;
   }
 
   if (f7svlajr == 1 || f7svlajr == 2) {
-      f7svlajr = 0;
-      xui7hqwl[8] = 1;
-      goto ceqzd1hi653;
+    f7svlajr = 0;
+    xui7hqwl[8] = 1;
+    goto ceqzd1hi653;
   }
 
   *zjkrtol8 = 3;
@@ -1777,14 +1801,14 @@ void cqo_1(double lncwkfq7[], double tlgduey8[],
   vtsou9pz = xui7hqwl[11];
 
   zjkrtol8[0] = -1;
-  for(ayfnwr1v = 1; ayfnwr1v <= *afpc0kns; ayfnwr1v++)
-      zjkrtol8[ayfnwr1v] = 1;
+  for (ayfnwr1v = 1; ayfnwr1v <= *afpc0kns; ayfnwr1v++)
+    zjkrtol8[ayfnwr1v] = 1;
 
   if (vtsou9pz != 1) {
-      Rprintf("Error: vtsou9pz is not unity in cqo_1!\n");
-      *zjkrtol8 = 4;
-      Free(wkumc9idtwk);
-      return;
+    Rprintf("Error: vtsou9pz is not unity in cqo_1!\n");
+    *zjkrtol8 = 4;
+    Free(wkumc9idtwk);
+    return;
   }
   unhycz0e    = xui7hqwl[13];
   yru9olks    = xui7hqwl[15];
@@ -1818,228 +1842,228 @@ void cqo_1(double lncwkfq7[], double tlgduey8[],
   
 
   for (hj3ftvzu = 1; hj3ftvzu <= *afpc0kns; hj3ftvzu++) {
-      ceqzd1hi653: hmayv1xt = 1.0e0;
+    ceqzd1hi653: hmayv1xt = 1.0e0;
 
-      if (f7svlajr == 0) {
-          yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2,
-                       wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu, ufgqj9ck, &wr0lbopv);
+    if (f7svlajr == 0) {
+      yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2,
+                   wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu, ufgqj9ck, &wr0lbopv);
 
-      } else
-      if (f7svlajr == 2) {
-          yiumjq3npkc4ejib(w8znmyce, zshtfg8c + (hj3ftvzu-1) * xlpjcg3s, m0ibglfx,
-                       ftnjamu2, wy1vqfzu, br5ovgcj, &xlpjcg3s,
-                       &vtsou9pz, &hj3ftvzu, &qfx3vhct, &unhycz0e,
-                       vm4xjosb);
-      }
-
-      yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
-                    afpc0kns, &qfx3vhct, &hj3ftvzu);
+    } else
+    if (f7svlajr == 2) {
+      yiumjq3npkc4ejib(w8znmyce, zshtfg8c + (hj3ftvzu-1) * xlpjcg3s, m0ibglfx,
+                   ftnjamu2, wy1vqfzu, br5ovgcj, &xlpjcg3s,
+                   &vtsou9pz, &hj3ftvzu, &qfx3vhct, &unhycz0e,
+                   vm4xjosb);
+    }
 
-      if (f7svlajr == 2) {
-          yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                       t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                       &kvowz9ht, m0ibglfx, &rpto5qwb, &hj3ftvzu,
-                       &dn3iasxug, &vsoihn1r, &pqneb2ra);
-      } else {
-          rpto5qwb = -1.0e0;
-      }
+    yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
+                  afpc0kns, &qfx3vhct, &hj3ftvzu);
 
-      for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) {
+    if (f7svlajr == 2) {
+      yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                   t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                   &kvowz9ht, m0ibglfx, &rpto5qwb, &hj3ftvzu,
+                   &dn3iasxug, &vsoihn1r, &pqneb2ra);
+    } else {
+      rpto5qwb = -1.0e0;
+    }
 
+    for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) {
 
 
-          yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx,
-                       t8hwvalr, ghz9vuba, rbne6ouj,
-                       wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy,
-                       ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-                       &hj3ftvzu, &qfx3vhct, zjkrtol8 + hj3ftvzu, &unhycz0e, vm4xjosb);
 
+      yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx,
+                   t8hwvalr, ghz9vuba, rbne6ouj,
+                   wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy,
+                   ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+                   &hj3ftvzu, &qfx3vhct, zjkrtol8 + hj3ftvzu, &unhycz0e, vm4xjosb);
 
 
-          fpdlcqk9vc6hatuj = vc6hatuj; fpdlcqk9w8znmyce = w8znmyce;
-          for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++)
-              for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++)
-                   *fpdlcqk9vc6hatuj++ = *fpdlcqk9w8znmyce++;
 
-          if (qfx3vhct == 3 || qfx3vhct == 5) {
-              if (2 * *ftnjamu2 != *br5ovgcj)  //Rprintf
-                  Rprintf("Error: 2 * *ftnjamu2 != *br5ovgcj in C_cqo_1\n");
-              fpdlcqk9vc6hatuj = vc6hatuj;
-              for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
-                  fpdlcqk9wpuarq2m2 = wpuarq2m + 2*hj3ftvzu -2;
-                  fpdlcqk9wpuarq2m1 = wpuarq2m + 2*hj3ftvzu -1;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m2;
-                       fpdlcqk9vc6hatuj++;
-                      *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m1;
-                       fpdlcqk9vc6hatuj++;
-                       fpdlcqk9wpuarq2m1 += *npjlv3mr;
-                       fpdlcqk9wpuarq2m2 += *npjlv3mr;
-                  }
-              }
-          } else {
-              if (1 * *ftnjamu2 != *br5ovgcj)  //Rprintf
-                  Rprintf("Error: 1 * *ftnjamu2 != *br5ovgcj in C_cqo_1\n");
-              fpdlcqk9vc6hatuj = vc6hatuj;
-              for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
-                  fpdlcqk9wpuarq2m = wpuarq2m + hj3ftvzu -1;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m;
-                       fpdlcqk9vc6hatuj++;
-                       fpdlcqk9wpuarq2m  += *npjlv3mr;
-                  }
-              }
+      fpdlcqk9vc6hatuj = vc6hatuj; fpdlcqk9w8znmyce = w8znmyce;
+      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++)
+        for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++)
+           *fpdlcqk9vc6hatuj++ = *fpdlcqk9w8znmyce++;
+
+      if (qfx3vhct == 3 || qfx3vhct == 5) {
+        if (2 * *ftnjamu2 != *br5ovgcj)  //Rprintf
+          Rprintf("Error: 2 * *ftnjamu2 != *br5ovgcj in C_cqo_1\n");
+        fpdlcqk9vc6hatuj = vc6hatuj;
+        for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
+          fpdlcqk9wpuarq2m2 = wpuarq2m + 2*hj3ftvzu -2;
+          fpdlcqk9wpuarq2m1 = wpuarq2m + 2*hj3ftvzu -1;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m2;
+             fpdlcqk9vc6hatuj++;
+            *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m1;
+             fpdlcqk9vc6hatuj++;
+             fpdlcqk9wpuarq2m1 += *npjlv3mr;
+             fpdlcqk9wpuarq2m2 += *npjlv3mr;
+          }
+        }
+      } else {
+        if (1 * *ftnjamu2 != *br5ovgcj)  //Rprintf
+          Rprintf("Error: 1 * *ftnjamu2 != *br5ovgcj in C_cqo_1\n");
+        fpdlcqk9vc6hatuj = vc6hatuj;
+        for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
+          fpdlcqk9wpuarq2m = wpuarq2m + hj3ftvzu -1;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m;
+             fpdlcqk9vc6hatuj++;
+             fpdlcqk9wpuarq2m  += *npjlv3mr;
           }
+        }
+      }
 
-          for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++)
-              ges1xpkr[gp1jxzuh-1] = gp1jxzuh;
+      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++)
+        ges1xpkr[gp1jxzuh-1] = gp1jxzuh;
 
 
-          F77_CALL(vqrdca)(vc6hatuj, br5ovgcj, br5ovgcj, &xlpjcg3s, fasrkub3, ges1xpkr,
-                           wkumc9idtwk, &qemj9asg, &pvofyg8z);
+      F77_CALL(vqrdca)(vc6hatuj, br5ovgcj, br5ovgcj, &xlpjcg3s, fasrkub3, ges1xpkr,
+                       wkumc9idtwk, &qemj9asg, &pvofyg8z);
 
-          if (qemj9asg != xlpjcg3s) {
-              Rprintf("Error in cqo_1: vc6hatuj is not of full xwdf5ltg.\n");
-              *zjkrtol8 = 2;
-              Free(wkumc9idtwk);
-              return;
-          }
+      if (qemj9asg != xlpjcg3s) {
+        Rprintf("Error in cqo_1: vc6hatuj is not of full xwdf5ltg.\n");
+        *zjkrtol8 = 2;
+        Free(wkumc9idtwk);
+        return;
+      }
+
+      if (qfx3vhct == 3 || qfx3vhct == 5) {
+        fpdlcqk9ghz9vuba1 = ghz9vuba + (2*hj3ftvzu-1) * *ftnjamu2;
+        fpdlcqk9ghz9vuba2 = ghz9vuba + (2*hj3ftvzu-2) * *ftnjamu2;
+        fpdlcqk9wpuarq2m1 = wpuarq2m +  2*hj3ftvzu-1;
+        fpdlcqk9wpuarq2m2 = wpuarq2m +  2*hj3ftvzu-2;
+        fpdlcqk9twk   = wkumc9idtwk;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m2 * *fpdlcqk9ghz9vuba2++;
+          *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m1 * *fpdlcqk9ghz9vuba1++;
+          fpdlcqk9wpuarq2m1 += *npjlv3mr;
+          fpdlcqk9wpuarq2m2 += *npjlv3mr;
+        }
+      } else {
+        fpdlcqk9ghz9vuba1 = ghz9vuba + (hj3ftvzu-1) * *ftnjamu2;
+        fpdlcqk9twk   = wkumc9idtwk;
+        fpdlcqk9wpuarq2m  = wpuarq2m + hj3ftvzu-1;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m * *fpdlcqk9ghz9vuba1++;
+          fpdlcqk9wpuarq2m  += *npjlv3mr;
+        }
+      }
 
-          if (qfx3vhct == 3 || qfx3vhct == 5) {
-            fpdlcqk9ghz9vuba1 = ghz9vuba + (2*hj3ftvzu-1) * *ftnjamu2;
-            fpdlcqk9ghz9vuba2 = ghz9vuba + (2*hj3ftvzu-2) * *ftnjamu2;
-            fpdlcqk9wpuarq2m1 = wpuarq2m +  2*hj3ftvzu-1;
-            fpdlcqk9wpuarq2m2 = wpuarq2m +  2*hj3ftvzu-2;
-            fpdlcqk9twk   = wkumc9idtwk;
-            for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m2 * *fpdlcqk9ghz9vuba2++;
-              *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m1 * *fpdlcqk9ghz9vuba1++;
-              fpdlcqk9wpuarq2m1 += *npjlv3mr;
-              fpdlcqk9wpuarq2m2 += *npjlv3mr;
-            }
-          } else {
-              fpdlcqk9ghz9vuba1 = ghz9vuba + (hj3ftvzu-1) * *ftnjamu2;
-              fpdlcqk9twk   = wkumc9idtwk;
-              fpdlcqk9wpuarq2m  = wpuarq2m + hj3ftvzu-1;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m * *fpdlcqk9ghz9vuba1++;
-                  fpdlcqk9wpuarq2m  += *npjlv3mr;
-              }
-          }
+      ybnsqgo9 = 101;
 
-          ybnsqgo9 = 101;
+      F77_CALL(vdqrsl)(vc6hatuj, br5ovgcj, br5ovgcj, &qemj9asg, fasrkub3, wkumc9idtwk,
+                       &uylxqtc7, wkumc9idtwk + *br5ovgcj,
+                       zshtfg8c + (hj3ftvzu-1) * xlpjcg3s,
+                       &uylxqtc7, wkumc9idtwk + 2 * *br5ovgcj, &ybnsqgo9, &algpft4y);
 
-          F77_CALL(vdqrsl)(vc6hatuj, br5ovgcj, br5ovgcj, &qemj9asg, fasrkub3, wkumc9idtwk,
-                           &uylxqtc7, wkumc9idtwk + *br5ovgcj,
-                           zshtfg8c + (hj3ftvzu-1) * xlpjcg3s,
-                           &uylxqtc7, wkumc9idtwk + 2 * *br5ovgcj, &ybnsqgo9, &algpft4y);
 
+      fpdlcqk9twk     = wkumc9idtwk;
+      fpdlcqk9zshtfg8c = zshtfg8c + (hj3ftvzu-1) * xlpjcg3s;
+      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
+        *fpdlcqk9twk++ = *fpdlcqk9zshtfg8c++;
+      }
 
-          fpdlcqk9twk     = wkumc9idtwk;
-          fpdlcqk9zshtfg8c = zshtfg8c + (hj3ftvzu-1) * xlpjcg3s;
-          for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
-              *fpdlcqk9twk++ = *fpdlcqk9zshtfg8c++;
-          }
+      fpdlcqk9twk     = wkumc9idtwk;
+      fpdlcqk9zshtfg8c = zshtfg8c + (hj3ftvzu-1) * xlpjcg3s;
+      for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
+        *(fpdlcqk9zshtfg8c + ges1xpkr[gp1jxzuh-1] - 1) = *fpdlcqk9twk++;
+      }
 
-          fpdlcqk9twk     = wkumc9idtwk;
-          fpdlcqk9zshtfg8c = zshtfg8c + (hj3ftvzu-1) * xlpjcg3s;
-          for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) {
-              *(fpdlcqk9zshtfg8c + ges1xpkr[gp1jxzuh-1] - 1) = *fpdlcqk9twk++;
-          }
+      if (qfx3vhct == 3 || qfx3vhct == 5) {
 
-          if (qfx3vhct == 3 || qfx3vhct == 5) {
-
-              fpdlcqk9m0ibglfx2 = m0ibglfx   + 2 * hj3ftvzu -2;
-              fpdlcqk9m0ibglfx1 = m0ibglfx   + 2 * hj3ftvzu -1;
-              fpdlcqk9twk     = wkumc9idtwk + 2 * *br5ovgcj;
-              fpdlcqk9wpuarq2m2   = wpuarq2m     + 2 * hj3ftvzu -2;
-              fpdlcqk9wpuarq2m1   = wpuarq2m     + 2 * hj3ftvzu -1;
-
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9m0ibglfx2  = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m2;
-                  *fpdlcqk9m0ibglfx1  = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m1;
-                   fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-                   fpdlcqk9m0ibglfx2 += *wy1vqfzu;
-                   fpdlcqk9wpuarq2m1   += *npjlv3mr;
-                   fpdlcqk9wpuarq2m2   += *npjlv3mr;
-              }
+        fpdlcqk9m0ibglfx2 = m0ibglfx   + 2 * hj3ftvzu -2;
+        fpdlcqk9m0ibglfx1 = m0ibglfx   + 2 * hj3ftvzu -1;
+        fpdlcqk9twk     = wkumc9idtwk + 2 * *br5ovgcj;
+        fpdlcqk9wpuarq2m2   = wpuarq2m     + 2 * hj3ftvzu -2;
+        fpdlcqk9wpuarq2m1   = wpuarq2m     + 2 * hj3ftvzu -1;
 
-              if (unhycz0e == 1) {
-                  fpdlcqk9m0ibglfx = m0ibglfx + 2*hj3ftvzu-2;
-                  fpdlcqk9vm4xjosb   = vm4xjosb;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
-                       fpdlcqk9m0ibglfx += *wy1vqfzu;
-                  }
-              }
-          } else {
-              fpdlcqk9m0ibglfx  = m0ibglfx   +     hj3ftvzu -1;
-              fpdlcqk9twk     = wkumc9idtwk + 2 * *br5ovgcj;
-              fpdlcqk9wpuarq2m    = wpuarq2m     +     hj3ftvzu -1;
-
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9m0ibglfx   = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m;
-                   fpdlcqk9m0ibglfx  += *wy1vqfzu;
-                   fpdlcqk9wpuarq2m    += *npjlv3mr;
-              }
-              if (unhycz0e == 1) {
-                  fpdlcqk9m0ibglfx = m0ibglfx +   hj3ftvzu-1;
-                  fpdlcqk9vm4xjosb   = vm4xjosb;
-                  for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                      *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
-                       fpdlcqk9m0ibglfx += *wy1vqfzu;
-                  }
-              }
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9m0ibglfx2  = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m2;
+          *fpdlcqk9m0ibglfx1  = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m1;
+           fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+           fpdlcqk9m0ibglfx2 += *wy1vqfzu;
+           fpdlcqk9wpuarq2m1   += *npjlv3mr;
+           fpdlcqk9wpuarq2m2   += *npjlv3mr;
+        }
+
+        if (unhycz0e == 1) {
+          fpdlcqk9m0ibglfx = m0ibglfx + 2*hj3ftvzu-2;
+          fpdlcqk9vm4xjosb   = vm4xjosb;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
+             fpdlcqk9m0ibglfx += *wy1vqfzu;
+          }
+        }
+      } else {
+        fpdlcqk9m0ibglfx  = m0ibglfx   +     hj3ftvzu -1;
+        fpdlcqk9twk     = wkumc9idtwk + 2 * *br5ovgcj;
+        fpdlcqk9wpuarq2m    = wpuarq2m     +     hj3ftvzu -1;
+
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          *fpdlcqk9m0ibglfx   = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m;
+           fpdlcqk9m0ibglfx  += *wy1vqfzu;
+           fpdlcqk9wpuarq2m    += *npjlv3mr;
+        }
+        if (unhycz0e == 1) {
+          fpdlcqk9m0ibglfx = m0ibglfx +   hj3ftvzu-1;
+          fpdlcqk9vm4xjosb   = vm4xjosb;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++;
+             fpdlcqk9m0ibglfx += *wy1vqfzu;
           }
+        }
+      }
+
+      yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
+                    afpc0kns, &qfx3vhct, &hj3ftvzu);
 
-          yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
-                        afpc0kns, &qfx3vhct, &hj3ftvzu);
+      yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                   t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                   &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu,
+                   &dn3iasxug, &vsoihn1r, &pqneb2ra);
 
+      wiptsjx8 = fabs(tlq9wpes[hj3ftvzu] - rpto5qwb) / (1.0e0 +
+               fabs(tlq9wpes[hj3ftvzu]));
+      if (wiptsjx8 < bh2vgiay) {
+        zjkrtol8[hj3ftvzu] = 0;
+        xui7hqwl[7] = kcm6jfob;
+        if (qfx3vhct == 3 || qfx3vhct == 5) {
           yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
                        t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
                        &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu,
-                       &dn3iasxug, &vsoihn1r, &pqneb2ra);
-
-          wiptsjx8 = fabs(tlq9wpes[hj3ftvzu] - rpto5qwb) / (1.0e0 +
-                   fabs(tlq9wpes[hj3ftvzu]));
-          if (wiptsjx8 < bh2vgiay) {
-              zjkrtol8[hj3ftvzu] = 0;
-              xui7hqwl[7] = kcm6jfob;
-              if (qfx3vhct == 3 || qfx3vhct == 5) {
-                  yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                               t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                               &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu,
-                               &dn3iasxug, &vsoihn1r, &yu6izdrc);
-              }
-              Totdev += tlq9wpes[hj3ftvzu];
-              goto ceqzd1hi1011;
-          } else {
-              rpto5qwb = tlq9wpes[hj3ftvzu];
-          }
+                       &dn3iasxug, &vsoihn1r, &yu6izdrc);
+        }
+        Totdev += tlq9wpes[hj3ftvzu];
+        goto ceqzd1hi1011;
+      } else {
+        rpto5qwb = tlq9wpes[hj3ftvzu];
       }
+    }
 
-      Rprintf("cqo_1; no convergence for Species ");
-      Rprintf("number %3d. Trying internal starting values.\n", hj3ftvzu);
-      if (f7svlajr == 1) {
-          f7svlajr = 0;
-          xui7hqwl[8] = 1;
-          goto ceqzd1hi653;
-      }
+    Rprintf("cqo_1; no convergence for Species ");
+    Rprintf("number %3d. Trying internal starting values.\n", hj3ftvzu);
+    if (f7svlajr == 1) {
+      f7svlajr = 0;
+      xui7hqwl[8] = 1;
+      goto ceqzd1hi653;
+    }
 
-      *zjkrtol8 = 3;
-       zjkrtol8[hj3ftvzu] = 2;
-      Rprintf("cqo_1; no convergence for Species ");
-      Rprintf("number %3d. Continuing on with other species.\n", hj3ftvzu);
-      Totdev += tlq9wpes[hj3ftvzu];
+    *zjkrtol8 = 3;
+     zjkrtol8[hj3ftvzu] = 2;
+    Rprintf("cqo_1; no convergence for Species ");
+    Rprintf("number %3d. Continuing on with other species.\n", hj3ftvzu);
+    Totdev += tlq9wpes[hj3ftvzu];
 
   ceqzd1hi1011: hmayv1xt = 3.0e0;
   }
 
 
-  if(zjkrtol8[0] == -1)
-      for(ayfnwr1v = 1; ayfnwr1v <= *afpc0kns; ayfnwr1v++)
-          if(zjkrtol8[ayfnwr1v] != 0) zjkrtol8[0] = 1;
-  if(zjkrtol8[0] == -1)
+  if (zjkrtol8[0] == -1)
+    for (ayfnwr1v = 1; ayfnwr1v <= *afpc0kns; ayfnwr1v++)
+      if (zjkrtol8[ayfnwr1v] != 0) zjkrtol8[0] = 1;
+  if (zjkrtol8[0] == -1)
       zjkrtol8[0] = 0;
 
   *tlq9wpes = Totdev;
@@ -2088,85 +2112,85 @@ void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
   fpdlcqk9lncwkfq7   = lncwkfq7;
   fpdlcqk9yxiwebc5  = wkumc9idyxiwebc5;
   for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          fxnhilr3 = 0.0e0;
-          fpdlcqk9k7hulceq   = k7hulceq + (hpmwnav2-1) * *eoviz2fb;
-          fpdlcqk9atujnxb8  = atujnxb8 + ayfnwr1v-1;
-          for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
-              fxnhilr3      += *fpdlcqk9atujnxb8 * *fpdlcqk9k7hulceq++;
-              fpdlcqk9atujnxb8 += *ftnjamu2;
-          }
-          *fpdlcqk9yxiwebc5++ = *fpdlcqk9lncwkfq7++ = fxnhilr3;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      fxnhilr3 = 0.0e0;
+      fpdlcqk9k7hulceq   = k7hulceq + (hpmwnav2-1) * *eoviz2fb;
+      fpdlcqk9atujnxb8  = atujnxb8 + ayfnwr1v-1;
+      for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
+        fxnhilr3      += *fpdlcqk9atujnxb8 * *fpdlcqk9k7hulceq++;
+        fpdlcqk9atujnxb8 += *ftnjamu2;
       }
+      *fpdlcqk9yxiwebc5++ = *fpdlcqk9lncwkfq7++ = fxnhilr3;
+    }
   }
   if (vtsou9pz == 1) {
-      cqo_1(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
-            m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
-            vc6hatuj, fasrkub3, ges1xpkr,
-            ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-            zjkrtol8, xui7hqwl, wkumc9iddev0, wkumc9idajul8wkv, y7sdgtqi);
+    cqo_1(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
+          m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
+          vc6hatuj, fasrkub3, ges1xpkr,
+          ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+          zjkrtol8, xui7hqwl, wkumc9iddev0, wkumc9idajul8wkv, y7sdgtqi);
 
   } else {
-      cqo_2(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
-            m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
-            vc6hatuj, fasrkub3, ges1xpkr,
-            ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-            zjkrtol8, xui7hqwl, wkumc9iddev0, wkumc9idajul8wkv, y7sdgtqi);
+    cqo_2(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
+          m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
+          vc6hatuj, fasrkub3, ges1xpkr,
+          ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+          zjkrtol8, xui7hqwl, wkumc9iddev0, wkumc9idajul8wkv, y7sdgtqi);
   }
 
 
   fpdlcqk9atujnxb8 = atujnxb8;
   for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9atujnxb8 *= *ydcnh9xl;
-           fpdlcqk9atujnxb8++;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9atujnxb8 *= *ydcnh9xl;
+       fpdlcqk9atujnxb8++;
+    }
   }
 
   for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) {
-      for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
-              fpdlcqk9lncwkfq7  =       lncwkfq7  + (hpmwnav2-1) * *ftnjamu2;
-              fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5  + (hpmwnav2-1) * *ftnjamu2;
-              fpdlcqk9atujnxb8  =       atujnxb8  + (xvr7bonh-1) * *ftnjamu2;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++ + *fpdlcqk9atujnxb8++;
-              }
-
-
-          xui7hqwl[4] = 2;
+    for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
+          fpdlcqk9lncwkfq7  =       lncwkfq7  + (hpmwnav2-1) * *ftnjamu2;
+          fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5  + (hpmwnav2-1) * *ftnjamu2;
+          fpdlcqk9atujnxb8  =       atujnxb8  + (xvr7bonh-1) * *ftnjamu2;
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++ + *fpdlcqk9atujnxb8++;
+          }
 
-          for (gp1jxzuh = 1; gp1jxzuh <= exrkcn5d; gp1jxzuh++)
-              zshtfg8c[gp1jxzuh-1] = wkumc9idajul8wkv[gp1jxzuh-1];
 
-          if (vtsou9pz == 1) {
-              cqo_1(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
-                    m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
-                    vc6hatuj, fasrkub3, ges1xpkr,
-                    ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-                    zjkrtol8, xui7hqwl,
-                    tlq9wpes, zshtfg8c, y7sdgtqi);
-          } else {
-              cqo_2(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
-                    m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
-                    vc6hatuj, fasrkub3, ges1xpkr,
-                    ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-                    zjkrtol8, xui7hqwl,
-                    tlq9wpes, zshtfg8c, y7sdgtqi);
-          }
+      xui7hqwl[4] = 2;
 
-          if (*zjkrtol8 != 0) {
-              Rprintf("Error in dcqo1: zjkrtol8 = %d\n", *zjkrtol8);
-              Rprintf("Continuing.\n");
-          }
-          *fpdlcqk9kpzavbj3mat++ = (*tlq9wpes - *wkumc9iddev0) / *ydcnh9xl;
-      }
+      for (gp1jxzuh = 1; gp1jxzuh <= exrkcn5d; gp1jxzuh++)
+        zshtfg8c[gp1jxzuh-1] = wkumc9idajul8wkv[gp1jxzuh-1];
 
-      if (xwdf5ltg > 1) {
-          fpdlcqk9lncwkfq7  =        lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
-          fpdlcqk9yxiwebc5 =  wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-               *fpdlcqk9lncwkfq7++  = *fpdlcqk9yxiwebc5++;
-      }
+      if (vtsou9pz == 1) {
+        cqo_1(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
+              m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
+              vc6hatuj, fasrkub3, ges1xpkr,
+              ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+              zjkrtol8, xui7hqwl,
+              tlq9wpes, zshtfg8c, y7sdgtqi);
+      } else {
+        cqo_2(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck,
+              m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce,
+              vc6hatuj, fasrkub3, ges1xpkr,
+              ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+              zjkrtol8, xui7hqwl,
+              tlq9wpes, zshtfg8c, y7sdgtqi);
+      }
+
+      if (*zjkrtol8 != 0) {
+        Rprintf("Error in dcqo1: zjkrtol8 = %d\n", *zjkrtol8);
+        Rprintf("Continuing.\n");
+      }
+      *fpdlcqk9kpzavbj3mat++ = (*tlq9wpes - *wkumc9iddev0) / *ydcnh9xl;
+    }
+
+    if (xwdf5ltg > 1) {
+      fpdlcqk9lncwkfq7  =        lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
+      fpdlcqk9yxiwebc5 =  wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+         *fpdlcqk9lncwkfq7++  = *fpdlcqk9yxiwebc5++;
+    }
   }
 
   Free(wkumc9idajul8wkv);   Free(wkumc9iddev0);   Free(wkumc9idyxiwebc5);
@@ -2182,27 +2206,29 @@ void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
 
 
 void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
-                 double m0ibglfx[], double t8hwvalr[], double ghz9vuba[],
-                 double rbne6ouj[], double wpuarq2m[],
-                 double vc6hatuj[], double fasrkub3[], int ges1xpkr[],
-                 int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr,
-                 int *zjkrtol8, int xui7hqwl[],
-                 double tlq9wpes[], double zshtfg8c[],
-                 double y7sdgtqi[], int psdvgce3[], int *qfozcl5b,
-                 double hdnw2fts[], double lamvec[], double wbkq9zyi[],
-                 int ezlgm2up[], int lqsahu0r[], int which[],
-                 double kispwgx3[],
-                 double mbvnaor6[],
-                 double hjm2ktyr[],
-                 int jnxpuym2[], int hnpt1zym[], int iz2nbfjc[],
-                 double ifys6woa[], double rpyis2kc[], double gkdx5jals[],
-                 int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]) {
+           double m0ibglfx[], double t8hwvalr[], double ghz9vuba[],
+           double rbne6ouj[], double wpuarq2m[],
+           double vc6hatuj[], double fasrkub3[], int ges1xpkr[],
+           int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr,
+           int *zjkrtol8, int xui7hqwl[],
+           double tlq9wpes[], double zshtfg8c[],
+           double y7sdgtqi[], int psdvgce3[], int *qfozcl5b,
+           double hdnw2fts[], double lamvec[], double wbkq9zyi[],
+           int ezlgm2up[], int lqsahu0r[], int which[],
+           double kispwgx3[],
+           double mbvnaor6[],
+           double hjm2ktyr[],
+           int jnxpuym2[], int hnpt1zym[], int iz2nbfjc[],
+           double ifys6woa[], double rpyis2kc[], double gkdx5jals[],
+           int nbzjkpi3[], int lindex[],
+           int acpios9q[], int jwbkl9fp[]) {
 
 
 
 
 
-  int    hj3ftvzu, ehtjigf4, kvowz9ht, yu6izdrc = 0, pqneb2ra = 1, xwdf5ltg = xui7hqwl[0],
+  int    hj3ftvzu, ehtjigf4, kvowz9ht,
+         yu6izdrc = 0, pqneb2ra = 1, xwdf5ltg = xui7hqwl[0],
          f7svlajr, qfx3vhct, c5aesxkul, wr0lbopv, vtsou9pz, xlpjcg3s,
          sedf7mxb, kcm6jfob, lensmo = (xwdf5ltg == 1 ? 2 : 4) * *afpc0kns;
   double rpto5qwb, dn3iasxug, wiptsjx8, bh2vgiay, uaf2xgqy, vsoihn1r,
@@ -2210,6 +2236,9 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
   double *fpdlcqk9kispwgx3;
 
 
+  int    len_1spp_ifys6woa;
+
+
   double hmayv1xt = 0.0, Totdev = 0.0e0;
 
 
@@ -2238,9 +2267,9 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
   wkumc9idhdnw2fts  = Calloc(lensmo                        , double);
   wkumc9idwbkq9zyi  = Calloc(lensmo                        , double);
 
-  for(ayfnwr1v = 0; ayfnwr1v < lensmo; ayfnwr1v++) {
-      wkumc9idhdnw2fts[ayfnwr1v] = hdnw2fts[ayfnwr1v];
-      wkumc9idwbkq9zyi[ayfnwr1v] = wbkq9zyi[ayfnwr1v];
+  for (ayfnwr1v = 0; ayfnwr1v < lensmo; ayfnwr1v++) {
+    wkumc9idhdnw2fts[ayfnwr1v] = hdnw2fts[ayfnwr1v];
+    wkumc9idwbkq9zyi[ayfnwr1v] = wbkq9zyi[ayfnwr1v];
   }
 
 
@@ -2255,16 +2284,16 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
 
   vtsou9pz = xui7hqwl[11];
   if (vtsou9pz != 1 || lyma1kwc != xwdf5ltg) {
-      Rprintf("Error: 'vtsou9pz' != 1, or 'lyma1kwc' != 'xwdf5ltg', in vcao6!\n");
-      *zjkrtol8 = 4;
-      Free(wkumc9idui8ysltq);    Free(wkumc9idlxyst1eb);   Free(wkumc9idzyodca3j);
-      Free(wkumc9idhdnw2fts);    Free(wkumc9idwbkq9zyi);
-      return;
+    Rprintf("Error: 'vtsou9pz' != 1, or 'lyma1kwc' != 'xwdf5ltg', in vcao6!\n");
+    *zjkrtol8 = 4;
+    Free(wkumc9idui8ysltq);    Free(wkumc9idlxyst1eb);   Free(wkumc9idzyodca3j);
+    Free(wkumc9idhdnw2fts);    Free(wkumc9idwbkq9zyi);
+    return;
   }
   wr0lbopv = xui7hqwl[17];
   dn3iasxug  = y7sdgtqi[0];
   uaf2xgqy = sqrt(dn3iasxug);
-      vsoihn1r = log(dn3iasxug);
+  vsoihn1r = log(dn3iasxug);
   bh2vgiay   = y7sdgtqi[1];
   rsynp1go = y7sdgtqi[2];
 
@@ -2276,162 +2305,178 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
 
 
 
+  len_1spp_ifys6woa = lindex[lyma1kwc] - 1;
+
+
+
+
+
+
   *zjkrtol8 = 1;
 
   for (hj3ftvzu = 1; hj3ftvzu <= *afpc0kns; hj3ftvzu++) {
-      ceqzd1hi653:  hmayv1xt = 1.0;
+    ceqzd1hi653:  hmayv1xt = 1.0;
 
-      qes4mujl = (qfx3vhct == 3 || qfx3vhct == 5) ?  2 * hj3ftvzu - 1 : hj3ftvzu;
+    qes4mujl = (qfx3vhct == 3 || qfx3vhct == 5) ?  2 * hj3ftvzu - 1 : hj3ftvzu;
 
-      if (f7svlajr == 0) {
-          yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2,
-                       wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu, ufgqj9ck, &wr0lbopv);
-      } else
-      if (f7svlajr != 1) {
-          Rprintf("Failure due to bad input of 'f7svlajr' variable\n");
-          *zjkrtol8 = 6;
-          Free(wkumc9idui8ysltq);    Free(wkumc9idlxyst1eb);   Free(wkumc9idzyodca3j);
-          Free(wkumc9idhdnw2fts);    Free(wkumc9idwbkq9zyi);
-          return;
-      }
+    if (f7svlajr == 0) {
+      yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2,
+                   wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu, ufgqj9ck, &wr0lbopv);
+    } else
+    if (f7svlajr != 1) {
+      Rprintf("Failure due to bad input of 'f7svlajr' variable\n");
+      *zjkrtol8 = 6;
+      Free(wkumc9idui8ysltq);    Free(wkumc9idlxyst1eb);   Free(wkumc9idzyodca3j);
+      Free(wkumc9idhdnw2fts);    Free(wkumc9idwbkq9zyi);
+      return;
+    }
 
-      yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
-                    afpc0kns, &qfx3vhct, &hj3ftvzu);
+    yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
+                  afpc0kns, &qfx3vhct, &hj3ftvzu);
 
-      if (f7svlajr == 2) {
-          yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                       t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                       &kvowz9ht, m0ibglfx, &rpto5qwb, &hj3ftvzu,
-                       &dn3iasxug, &vsoihn1r, &pqneb2ra);
-      } else {
-          rpto5qwb = -1.0e0;
-      }
+    if (f7svlajr == 2) {
+      yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                   t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                   &kvowz9ht, m0ibglfx, &rpto5qwb, &hj3ftvzu,
+                   &dn3iasxug, &vsoihn1r, &pqneb2ra);
+    } else {
+      rpto5qwb = -1.0e0;
+    }
 
-      for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) {
+    for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) {
 
-          yiumjq3nflncwkfq76(lncwkfq7, vc6hatuj, ftnjamu2, br5ovgcj, &xwdf5ltg, &qfx3vhct);
+      yiumjq3nflncwkfq76(lncwkfq7, vc6hatuj, ftnjamu2, br5ovgcj, &xwdf5ltg, &qfx3vhct);
 
-          psdvgce3[6] = 0;
+      psdvgce3[6] = 0;
 
-          yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx,
-                       t8hwvalr, ghz9vuba, rbne6ouj,
-                       wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy,
-                       ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-                       &hj3ftvzu, &qfx3vhct, zjkrtol8, &yu6izdrc, hmayv1xtvm4xjosb);
+      yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx,
+                   t8hwvalr, ghz9vuba, rbne6ouj,
+                   wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy,
+                   ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+                   &hj3ftvzu, &qfx3vhct, zjkrtol8, &yu6izdrc, hmayv1xtvm4xjosb);
 
 
-          fpdlcqk9lxyst1eb = wkumc9idlxyst1eb;
-          fpdlcqk9zyodca3j    = wkumc9idzyodca3j;
-          fpdlcqk9m0ibglfx1 =  m0ibglfx + qes4mujl-1;
-          fpdlcqk9wpuarq2m1   =    wpuarq2m + qes4mujl-1;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              fpdlcqk9m0ibglfx2 = fpdlcqk9m0ibglfx1;
-              fpdlcqk9wpuarq2m2   = fpdlcqk9wpuarq2m1;
-              for (kij0gwer = 1; kij0gwer <= *qfozcl5b; kij0gwer++) {
-                 *fpdlcqk9lxyst1eb++ = *fpdlcqk9m0ibglfx2++;
-                 *fpdlcqk9zyodca3j++    = *fpdlcqk9wpuarq2m2++;
-              }
-              fpdlcqk9m0ibglfx1 += *wy1vqfzu;
-              fpdlcqk9wpuarq2m1   += *npjlv3mr;
-          }
+      fpdlcqk9lxyst1eb = wkumc9idlxyst1eb;
+      fpdlcqk9zyodca3j    = wkumc9idzyodca3j;
+      fpdlcqk9m0ibglfx1 =  m0ibglfx + qes4mujl-1;
+      fpdlcqk9wpuarq2m1   =    wpuarq2m + qes4mujl-1;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        fpdlcqk9m0ibglfx2 = fpdlcqk9m0ibglfx1;
+        fpdlcqk9wpuarq2m2   = fpdlcqk9wpuarq2m1;
+        for (kij0gwer = 1; kij0gwer <= *qfozcl5b; kij0gwer++) {
+          *fpdlcqk9lxyst1eb++ = *fpdlcqk9m0ibglfx2++;
+          *fpdlcqk9zyodca3j++    = *fpdlcqk9wpuarq2m2++;
+        }
+        fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+        fpdlcqk9wpuarq2m1   += *npjlv3mr;
+      }
 
 
 
-          sedf7mxb = 0; // 20100416 a stop gap. Used for xwdf5ltg==2 only i think.
-          ehtjigf4 = xwdf5ltg * (hj3ftvzu-1);
+        sedf7mxb = 0; // 20100416: a stopgap; used only when xwdf5ltg == 2, I think.
+        ehtjigf4 = xwdf5ltg * (hj3ftvzu-1);
 
-          if (kcm6jfob == 1) {
-            for (kij0gwer = 1; kij0gwer <= lyma1kwc; kij0gwer++) {
-                fpdlcqk9kispwgx3 = kispwgx3 + (ehtjigf4 + hnpt1zym[kij0gwer-1]-1) * *ftnjamu2;
-                for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-                    *fpdlcqk9kispwgx3++ = 0.0e0;
-            }
-          } else {
-                     wbkq9zyi[       ehtjigf4 + hnpt1zym[0]-1] =
-                wkumc9idwbkq9zyi[       ehtjigf4 + hnpt1zym[0]-1];
-                     hdnw2fts[       ehtjigf4 + hnpt1zym[0]-1] =
-                wkumc9idhdnw2fts[       ehtjigf4 + hnpt1zym[0]-1];
-              if (xwdf5ltg == 2) {
-                     wbkq9zyi[       ehtjigf4 + hnpt1zym[1]-1] =
-                wkumc9idwbkq9zyi[       ehtjigf4 + hnpt1zym[1]-1]; // wkumc9idr3eoxkzp;
-                     hdnw2fts[sedf7mxb + ehtjigf4 + hnpt1zym[1]-1] =
-                wkumc9idhdnw2fts[sedf7mxb + ehtjigf4 + hnpt1zym[1]-1]; // wkumc9idwld4qctn;
-              }
+        if (kcm6jfob == 1) {
+          for (kij0gwer = 1; kij0gwer <= lyma1kwc; kij0gwer++) {
+            fpdlcqk9kispwgx3 = kispwgx3 + (ehtjigf4 + hnpt1zym[kij0gwer-1]-1) * *ftnjamu2;
+            for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+              *fpdlcqk9kispwgx3++ = 0.0e0;
           }
-
-          Yee_vbfa(psdvgce3, fjcasv7g,
-                          mbvnaor6, ghz9vuba + (qes4mujl-1) * *ftnjamu2,
-                   rbne6ouj + (qes4mujl-1) * *ftnjamu2,
-                          hdnw2fts + sedf7mxb + ehtjigf4 + hnpt1zym[0] - 1,
-                          lamvec +        ehtjigf4 + hnpt1zym[0] - 1,
-                          wbkq9zyi +        ehtjigf4 + hnpt1zym[0] - 1,
-                   ezlgm2up, lqsahu0r, which,
-                   kispwgx3 + (ehtjigf4 + *hnpt1zym - 1) * *ftnjamu2, wkumc9idlxyst1eb,
-                   zshtfg8c + (hj3ftvzu - 1) * xlpjcg3s, wkumc9idui8ysltq,
-                   vc6hatuj, fasrkub3, ges1xpkr,
-                   wkumc9idzyodca3j, hjm2ktyr,
-                   jnxpuym2, hnpt1zym, iz2nbfjc,
-                   ifys6woa + (ehtjigf4 + hnpt1zym[0] - 1) * *ftnjamu2,
-                   rpyis2kc + (hj3ftvzu-1) * (nbzjkpi3[xwdf5ltg] - 1), gkdx5jals,
-                   nbzjkpi3, acpios9q, jwbkl9fp);
-
-
-
-          y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
-          xumj5dnk = psdvgce3[13];
-          if (xumj5dnk != 0) {
-            Rprintf("vcao6: Error... exiting; error code = %d\n", xumj5dnk);
-            *zjkrtol8 = 8;
-            Free(wkumc9idui8ysltq);    Free(wkumc9idlxyst1eb);   Free(wkumc9idzyodca3j);
-            Free(wkumc9idhdnw2fts);    Free(wkumc9idwbkq9zyi);
-            return;
+        } else {
+                 wbkq9zyi[       ehtjigf4 + hnpt1zym[0]-1] =
+            wkumc9idwbkq9zyi[       ehtjigf4 + hnpt1zym[0]-1];
+                 hdnw2fts[       ehtjigf4 + hnpt1zym[0]-1] =
+            wkumc9idhdnw2fts[       ehtjigf4 + hnpt1zym[0]-1];
+          if (xwdf5ltg == 2) {
+                 wbkq9zyi[       ehtjigf4 + hnpt1zym[1]-1] =
+            wkumc9idwbkq9zyi[       ehtjigf4 + hnpt1zym[1]-1]; // wkumc9idr3eoxkzp;
+                 hdnw2fts[sedf7mxb + ehtjigf4 + hnpt1zym[1]-1] =
+            wkumc9idhdnw2fts[sedf7mxb + ehtjigf4 + hnpt1zym[1]-1]; // wkumc9idwld4qctn;
           }
+        }
 
-          fpdlcqk9lxyst1eb = wkumc9idlxyst1eb;
-          fpdlcqk9m0ibglfx1 =       m0ibglfx + qes4mujl-1;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              fpdlcqk9m0ibglfx2 = fpdlcqk9m0ibglfx1;
-              for (kij0gwer = 1; kij0gwer <= *qfozcl5b; kij0gwer++) {
-                  *fpdlcqk9m0ibglfx2++ = *fpdlcqk9lxyst1eb++;
-              }
-              fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+        Yee_vbfa(psdvgce3, fjcasv7g,
+                        mbvnaor6, ghz9vuba + (qes4mujl-1) * *ftnjamu2,
+                 rbne6ouj + (qes4mujl-1) * *ftnjamu2,
+                        hdnw2fts + sedf7mxb + ehtjigf4 + hnpt1zym[0] - 1,
+                        lamvec +        ehtjigf4 + hnpt1zym[0] - 1,
+                        wbkq9zyi +        ehtjigf4 + hnpt1zym[0] - 1,
+                 ezlgm2up, lqsahu0r, which,
+                 kispwgx3 + (ehtjigf4 + *hnpt1zym - 1) * *ftnjamu2, wkumc9idlxyst1eb,
+                 zshtfg8c + (hj3ftvzu - 1) * xlpjcg3s, wkumc9idui8ysltq,
+                 vc6hatuj, fasrkub3, ges1xpkr,
+                 wkumc9idzyodca3j, hjm2ktyr,
+                 jnxpuym2, hnpt1zym, iz2nbfjc,
+
+
+
+
+                 ifys6woa + ehtjigf4 * len_1spp_ifys6woa,
+
+
+
+                 rpyis2kc + (hj3ftvzu-1) * (nbzjkpi3[xwdf5ltg] - 1), gkdx5jals,
+                 nbzjkpi3, lindex,  // 20130525; lindex added
+                 acpios9q, jwbkl9fp);
+
+
+
+
+        y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
+        xumj5dnk = psdvgce3[13];
+        if (xumj5dnk != 0) {
+          Rprintf("vcao6: Error... exiting; error code = %d\n", xumj5dnk);
+          *zjkrtol8 = 8;
+          Free(wkumc9idui8ysltq);    Free(wkumc9idlxyst1eb);   Free(wkumc9idzyodca3j);
+          Free(wkumc9idhdnw2fts);    Free(wkumc9idwbkq9zyi);
+          return;
+        }
+
+        fpdlcqk9lxyst1eb = wkumc9idlxyst1eb;
+        fpdlcqk9m0ibglfx1 =       m0ibglfx + qes4mujl-1;
+        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+          fpdlcqk9m0ibglfx2 = fpdlcqk9m0ibglfx1;
+          for (kij0gwer = 1; kij0gwer <= *qfozcl5b; kij0gwer++) {
+            *fpdlcqk9m0ibglfx2++ = *fpdlcqk9lxyst1eb++;
           }
+          fpdlcqk9m0ibglfx1 += *wy1vqfzu;
+        }
+
+        yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
+                      afpc0kns, &qfx3vhct, &hj3ftvzu);
 
-          yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu,
-                        afpc0kns, &qfx3vhct, &hj3ftvzu);
+        yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
+                     t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
+                     &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu,
+                     &dn3iasxug, &vsoihn1r, &pqneb2ra);
 
+      wiptsjx8 = fabs(tlq9wpes[hj3ftvzu] - rpto5qwb) / (1.0e0 +
+               fabs(tlq9wpes[hj3ftvzu]));
+
+      if (wiptsjx8 < bh2vgiay) {
+        *zjkrtol8 = 0;
+        xui7hqwl[7] = kcm6jfob;
+        if (qfx3vhct == 3 || qfx3vhct == 5) {
           yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
                        t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
                        &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu,
-                       &dn3iasxug, &vsoihn1r, &pqneb2ra);
-
-          wiptsjx8 = fabs(tlq9wpes[hj3ftvzu] - rpto5qwb) / (1.0e0 +
-                   fabs(tlq9wpes[hj3ftvzu]));
-
-          if (wiptsjx8 < bh2vgiay) {
-              *zjkrtol8 = 0;
-              xui7hqwl[7] = kcm6jfob;
-              if (qfx3vhct == 3 || qfx3vhct == 5) {
-                  yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck,
-                               t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns,
-                               &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu,
-                               &dn3iasxug, &vsoihn1r, &yu6izdrc);
-              }
-              Totdev += tlq9wpes[hj3ftvzu];
-              goto ceqzd1hi1011;
-          } else {
-              rpto5qwb = tlq9wpes[hj3ftvzu];
-          }
+                       &dn3iasxug, &vsoihn1r, &yu6izdrc);
+        }
+        Totdev += tlq9wpes[hj3ftvzu];
+        goto ceqzd1hi1011;
+      } else {
+        rpto5qwb = tlq9wpes[hj3ftvzu];
       }
+    }
 
-      if (f7svlajr == 1) {
-          f7svlajr = 0;
-          xui7hqwl[8] = 1;
-          goto ceqzd1hi653;
-      }
+    if (f7svlajr == 1) {
+      f7svlajr = 0;
+      xui7hqwl[8] = 1;
+      goto ceqzd1hi653;
+    }
 
-      *zjkrtol8 = 3;
-      Totdev += tlq9wpes[hj3ftvzu];
+    *zjkrtol8 = 3;
+    Totdev += tlq9wpes[hj3ftvzu];
 
   ceqzd1hi1011: hmayv1xt = 2.0e0;
   }
@@ -2470,7 +2515,8 @@ void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
                   int iz2nbfjc[],
                   double ifys6woa[],
                   double rpyis2kc[], double gkdx5jals[],
-                  int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]) {
+                  int nbzjkpi3[], int lindex[],
+                  int acpios9q[], int jwbkl9fp[]) {
 
 
 
@@ -2505,90 +2551,92 @@ void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
   fpdlcqk9lncwkfq7   = lncwkfq7;
   fpdlcqk9yxiwebc5  = wkumc9idyxiwebc5;
   for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          fxnhilr3 = 0.0e0;
-          fpdlcqk9k7hulceq   = k7hulceq + (hpmwnav2-1) * *eoviz2fb;
-          fpdlcqk9atujnxb8  = atujnxb8 + ayfnwr1v-1;
-          for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
-              fxnhilr3      += *fpdlcqk9atujnxb8 * *fpdlcqk9k7hulceq++;
-              fpdlcqk9atujnxb8 += *ftnjamu2;
-          }
-          *fpdlcqk9yxiwebc5++ = *fpdlcqk9lncwkfq7++ = fxnhilr3;
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      fxnhilr3 = 0.0e0;
+      fpdlcqk9k7hulceq   = k7hulceq + (hpmwnav2-1) * *eoviz2fb;
+      fpdlcqk9atujnxb8  = atujnxb8 + ayfnwr1v-1;
+      for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
+        fxnhilr3      += *fpdlcqk9atujnxb8 * *fpdlcqk9k7hulceq++;
+        fpdlcqk9atujnxb8 += *ftnjamu2;
       }
+      *fpdlcqk9yxiwebc5++ = *fpdlcqk9lncwkfq7++ = fxnhilr3;
+    }
   }
 
   if (vtsou9pz == 1) {
-      vcao6(lncwkfq7, tlgduey8, ufgqj9ck,
-            m0ibglfx, t8hwvalr, ghz9vuba,
-            rbne6ouj, wpuarq2m,
-            vc6hatuj, fasrkub3, ges1xpkr,
-            ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-            zjkrtol8, xui7hqwl,
-            wkumc9iddev0, ajul8wkv,
-            y7sdgtqi, psdvgce3, qfozcl5b,
-            hdnw2fts, lamvec, wbkq9zyi,
-            ezlgm2up, lqsahu0r, which,
-            kispwgx3,
-            mbvnaor6,
-            hjm2ktyr,
-            jnxpuym2, hnpt1zym, iz2nbfjc,
-            ifys6woa, rpyis2kc, gkdx5jals,
-            nbzjkpi3, acpios9q, jwbkl9fp);
-
-      y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
+    vcao6(lncwkfq7, tlgduey8, ufgqj9ck,
+          m0ibglfx, t8hwvalr, ghz9vuba,
+          rbne6ouj, wpuarq2m,
+          vc6hatuj, fasrkub3, ges1xpkr,
+          ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+          zjkrtol8, xui7hqwl,
+          wkumc9iddev0, ajul8wkv,
+          y7sdgtqi, psdvgce3, qfozcl5b,
+          hdnw2fts, lamvec, wbkq9zyi,
+          ezlgm2up, lqsahu0r, which,
+          kispwgx3,
+          mbvnaor6,
+          hjm2ktyr,
+          jnxpuym2, hnpt1zym, iz2nbfjc,
+          ifys6woa, rpyis2kc, gkdx5jals,
+          nbzjkpi3, lindex,
+          acpios9q, jwbkl9fp);
+
+    y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
   }
 
   fpdlcqk9atujnxb8 = atujnxb8;
   for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-          *fpdlcqk9atujnxb8 *= ydcnh9xl;
-           fpdlcqk9atujnxb8++;
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+      *fpdlcqk9atujnxb8 *= ydcnh9xl;
+       fpdlcqk9atujnxb8++;
+    }
   }
 
   for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) {
-      fpdlcqk9atujnxb8  =  atujnxb8;  //  + (xvr7bonh-1) * *ftnjamu2;
-      for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-              *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++ +  *fpdlcqk9atujnxb8++;
-          }
-          xui7hqwl[4] = 0;
-
-
-          if (vtsou9pz == 1) {
-              vcao6(lncwkfq7, tlgduey8, ufgqj9ck,
-                    m0ibglfx, t8hwvalr, ghz9vuba,
-                    rbne6ouj, wpuarq2m,
-                    vc6hatuj, fasrkub3, ges1xpkr,
-                    ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
-                    zjkrtol8, xui7hqwl,
-                    tlq9wpes, zshtfg8c,
-                    y7sdgtqi, psdvgce3, qfozcl5b,
-                    hdnw2fts, lamvec, wbkq9zyi,
-                    ezlgm2up, lqsahu0r, which,
-                    kispwgx3,
-                    mbvnaor6,
-                    hjm2ktyr,
-                    jnxpuym2, hnpt1zym, iz2nbfjc,
-                    ifys6woa, rpyis2kc, gkdx5jals,
-                    nbzjkpi3, acpios9q, jwbkl9fp);
-
-              y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
-          }
-
-          if (*zjkrtol8 != 0) {
-              Rprintf("Warning: failured to converge in vdcao6. \n");
-              Rprintf("Continuing.\n");
-          }
-          *fpdlcqk9kpzavbj3mat++ = (*tlq9wpes - *wkumc9iddev0) / ydcnh9xl;
-      }
-
-      if (xwdf5ltg > 1) {
-          fpdlcqk9lncwkfq7  =       lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
-          fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2;
-          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
-              *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++;
-      }
+    fpdlcqk9atujnxb8  =  atujnxb8;  //  + (xvr7bonh-1) * *ftnjamu2;
+    for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+        *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++ +  *fpdlcqk9atujnxb8++;
+      }
+      xui7hqwl[4] = 0;
+
+
+      if (vtsou9pz == 1) {
+        vcao6(lncwkfq7, tlgduey8, ufgqj9ck,
+              m0ibglfx, t8hwvalr, ghz9vuba,
+              rbne6ouj, wpuarq2m,
+              vc6hatuj, fasrkub3, ges1xpkr,
+              ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr,
+              zjkrtol8, xui7hqwl,
+              tlq9wpes, zshtfg8c,
+              y7sdgtqi, psdvgce3, qfozcl5b,
+              hdnw2fts, lamvec, wbkq9zyi,
+              ezlgm2up, lqsahu0r, which,
+              kispwgx3,
+              mbvnaor6,
+              hjm2ktyr,
+              jnxpuym2, hnpt1zym, iz2nbfjc,
+              ifys6woa, rpyis2kc, gkdx5jals,
+              nbzjkpi3, lindex,
+              acpios9q, jwbkl9fp);
+
+        y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v;
+      }
+
+      if (*zjkrtol8 != 0) {
+        Rprintf("Warning: failured to converge in vdcao6. \n");
+        Rprintf("Continuing.\n");
+      }
+      *fpdlcqk9kpzavbj3mat++ = (*tlq9wpes - *wkumc9iddev0) / ydcnh9xl;
+    }
+
+    if (xwdf5ltg > 1) {
+      fpdlcqk9lncwkfq7  =       lncwkfq7 + (hpmwnav2-1) * *ftnjamu2;
+      fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2;
+      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++)
+        *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++;
+    }
   }
 
   Free(wkumc9idyxiwebc5);    Free(wkumc9iddev0 );
@@ -2650,49 +2698,49 @@ void yiumjq3npnm1or(double *objzgdk0, double *lfu2qhid) {
   Q34 = 1.0e0;
 
   if (*objzgdk0 < -ULIMIT) {
-      *lfu2qhid = 2.753624e-89;
-      return;
+    *lfu2qhid = 2.753624e-89;
+    return;
   }
   if (*objzgdk0 >  ULIMIT) {
-      *lfu2qhid = 1.0e0;
-      return;
+    *lfu2qhid = 1.0e0;
+    return;
   }
 
   y = *objzgdk0 / SQRT2;
   if (y < 0.0e0) {
-      y = -y;
-      sn = -1;
+    y = -y;
+    sn = -1;
   } else {
-      sn = 1;
+    sn = 1;
   }
   y2 = y * y;
   y4 = y2 * y2;
   y6 = y4 * y2;
   if (y < 0.46875e0) {
-      R1 = P10 + P11 * y2 + P12 * y4 + P13 * y6;
-      R2 = Q10 + Q11 * y2 + Q12 * y4 + Q13 * y6;
-      erf = y * R1 / R2;
-      *lfu2qhid = (sn == 1) ? 0.5e0 + 0.5 * erf : 0.5e0 - 0.5 * erf;
+    R1 = P10 + P11 * y2 + P12 * y4 + P13 * y6;
+    R2 = Q10 + Q11 * y2 + Q12 * y4 + Q13 * y6;
+    erf = y * R1 / R2;
+    *lfu2qhid = (sn == 1) ? 0.5e0 + 0.5 * erf : 0.5e0 - 0.5 * erf;
   } else
   if (y < 4.0e0) {
-      y3 = y2 * y;
-      y5 = y4 * y;
-      y7 = y6 * y;
-      R1 = P20 + P21 * y + P22 * y2 + P23 * y3 +
-          P24 * y4 + P25 * y5 + P26 * y6 + P27 * y7;
-      R2 = Q20 + Q21 * y + Q22 * y2 + Q23 * y3 +
-          Q24 * y4 + Q25 * y5 + Q26 * y6 + Q27 * y7;
-      erfc = exp(-y2) * R1 / R2;
-      *lfu2qhid = (sn == 1) ? 1.0 - 0.5 * erfc : 0.5 * erfc;
+    y3 = y2 * y;
+    y5 = y4 * y;
+    y7 = y6 * y;
+    R1 = P20 + P21 * y + P22 * y2 + P23 * y3 +
+         P24 * y4 + P25 * y5 + P26 * y6 + P27 * y7;
+    R2 = Q20 + Q21 * y + Q22 * y2 + Q23 * y3 +
+         Q24 * y4 + Q25 * y5 + Q26 * y6 + Q27 * y7;
+    erfc = exp(-y2) * R1 / R2;
+    *lfu2qhid = (sn == 1) ? 1.0 - 0.5 * erfc : 0.5 * erfc;
   } else {
-      z = y4;
-      z2 = z * z;
-      z3 = z2 * z;
-      z4 = z2 * z2;
-      R1 = P30 + P31 * z + P32 * z2 + P33 * z3 + P34 * z4;
-      R2 = Q30 + Q31 * z + Q32 * z2 + Q33 * z3 + Q34 * z4;
-      erfc = (exp(-y2)/y) * (1.0 / SQRTPI + R1 / (R2 * y2));
-      *lfu2qhid = (sn == 1) ? 1.0 - 0.5 * erfc : 0.5 * erfc;
+    z = y4;
+    z2 = z * z;
+    z3 = z2 * z;
+    z4 = z2 * z2;
+    R1 = P30 + P31 * z + P32 * z2 + P33 * z3 + P34 * z4;
+    R2 = Q30 + Q31 * z + Q32 * z2 + Q33 * z3 + Q34 * z4;
+    erfc = (exp(-y2)/y) * (1.0 / SQRTPI + R1 / (R2 * y2));
+    *lfu2qhid = (sn == 1) ? 1.0 - 0.5 * erfc : 0.5 * erfc;
   }
 }
 
@@ -2703,7 +2751,7 @@ void yiumjq3npnm1ow(double objzgdk0[], double lfu2qhid[], int *f8yswcat) {
   int    ayfnwr1v;
 
   for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-      yiumjq3npnm1or(objzgdk0++, lfu2qhid++);
+    yiumjq3npnm1or(objzgdk0++, lfu2qhid++);
   }
 }
 
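Note on the numerics above: the C routine yiumjq3npnm1or (with yiumjq3npnm1ow as its element-wise loop) and the Ratfor-generated pnm1or/pnm1ow in the new src/cqof.f below evaluate the standard normal distribution function through piecewise rational approximations to erf and erfc, with extreme arguments cut off at +/- ULIMIT (20 in the Fortran source). A minimal sketch of the identity these routines target, assuming a C99 libm erf() and using the hypothetical helper name std_normal_cdf (neither the call nor the name is part of the patch):

    #include <math.h>

    /* Standard normal CDF via the error function:
       Phi(x) = (1 + erf(x / sqrt(2))) / 2.
       The package code reproduces this with its own piecewise
       rational approximations instead of calling erf() directly. */
    static double std_normal_cdf(double x) {
        return 0.5 * (1.0 + erf(x / sqrt(2.0)));
    }

Carrying its own approximation keeps the C and Fortran paths self-contained, presumably so results do not depend on the platform's libm.
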
diff --git a/src/cqof.f b/src/cqof.f
new file mode 100644
index 0000000..0c32920
--- /dev/null
+++ b/src/cqof.f
@@ -0,0 +1,2306 @@
+C Output from Public domain Ratfor, version 1.01
+      subroutine pnm1or(objzgdk0, lfu2qhid)
+      implicit logical (a-z)
+      double precision objzgdk0, lfu2qhid
+      integer sn
+      double precision r1, r2, y, y2, y3, y4, y5, y6, y7
+      double precision erf, erfc, z, z2, z3, z4
+      double precision sqrt2, sqrtpi, ulimit, p10,p11,p12,p13, q10,q11,q
+     *12,q13
+      double precision p20,p21,p22,p23,p24,p25,p26,p27
+      double precision q20,q21,q22,q23,q24,q25,q26,q27
+      double precision p30,p31,p32,p33,p34
+      double precision q30,q31,q32,q33,q34
+      sqrt2 = 1.414213562373095049d0
+      sqrtpi = 1.772453850905516027d0
+      ulimit = 20.0d0
+      p10 = 242.66795523053175d0
+      p11 = 21.979261618294152d0
+      p12 = 6.9963834886191355d0
+      p13 = -.035609843701815385d0
+      q10 = 215.05887586986120d0
+      q11 = 91.164905404514901d0
+      q12 = 15.082797630407787d0
+      q13 = 1.0d0
+      p20 = 300.4592610201616005d0
+      p21 = 451.9189537118729422d0
+      p22 = 339.3208167343436870d0
+      p23 = 152.9892850469404039d0
+      p24 = 43.16222722205673530d0
+      p25 = 7.211758250883093659d0
+      p26 = .5641955174789739711d0
+      p27 = -.0000001368648573827167067d0
+      q20 = 300.4592609569832933d0
+      q21 = 790.9509253278980272d0
+      q22 = 931.3540948506096211d0
+      q23 = 638.9802644656311665d0
+      q24 = 277.5854447439876434d0
+      q25 = 77.00015293522947295d0
+      q26 = 12.78272731962942351d0
+      q27 = 1.0d0
+      p30 = -.00299610707703542174d0
+      p31 = -.0494730910623250734d0
+      p32 = -.226956593539686930d0
+      p33 = -.278661308609647788d0
+      p34 = -.0223192459734184686d0
+      q30 = .0106209230528467918d0
+      q31 = .191308926107829841d0
+      q32 = 1.05167510706793207d0
+      q33 = 1.98733201817135256d0
+      q34 = 1.0d0
+      if(objzgdk0 .lt. -ulimit)then
+      lfu2qhid = 2.753624d-89
+      return
+      endif
+      if(objzgdk0 .gt. ulimit)then
+      lfu2qhid = 1.0d0
+      return
+      endif
+      y = objzgdk0 / sqrt2
+      if(y .lt. 0.0d0)then
+      y = -y
+      sn = -1
+      else
+      sn = 1
+      endif
+      y2 = y * y
+      y4 = y2 * y2
+      y6 = y4 * y2
+      if(y .lt. 0.46875d0)then
+      r1 = p10 + p11 * y2 + p12 * y4 + p13 * y6
+      r2 = q10 + q11 * y2 + q12 * y4 + q13 * y6
+      erf = y * r1 / r2
+      if(sn .eq. 1)then
+      lfu2qhid = 0.5d0 + 0.5*erf
+      else
+      lfu2qhid = 0.5d0 - 0.5*erf
+      endif
+      else
+      if(y .lt. 4.0d0)then
+      y3 = y2 * y
+      y5 = y4 * y
+      y7 = y6 * y
+      r1 = p20 + p21 * y + p22 * y2 + p23 * y3 + p24 * y4 + p25 * y5 + p
+     *26 * y6 + p27 * y7
+      r2 = q20 + q21 * y + q22 * y2 + q23 * y3 + q24 * y4 + q25 * y5 + q
+     *26 * y6 + q27 * y7
+      erfc = dexp(-y2) * r1 / r2
+      if(sn .eq. 1)then
+      lfu2qhid = 1.0 - 0.5*erfc
+      else
+      lfu2qhid = 0.5*erfc
+      endif
+      else
+      z = y4
+      z2 = z * z
+      z3 = z2 * z
+      z4 = z2 * z2
+      r1 = p30 + p31 * z + p32 * z2 + p33 * z3 + p34 * z4
+      r2 = q30 + q31 * z + q32 * z2 + q33 * z3 + q34 * z4
+      erfc = (dexp(-y2)/y) * (1.0 / sqrtpi + r1 / (r2 * y2))
+      if(sn .eq. 1)then
+      lfu2qhid = 1.0d0 - 0.5*erfc
+      else
+      lfu2qhid = 0.5*erfc
+      endif
+      endif
+      endif
+      return
+      end
+      subroutine pnm1ow(objzgdk0, lfu2qhid, kuzxj1lo)
+      implicit logical (a-z)
+      integer kuzxj1lo, ayfnwr1v
+      double precision objzgdk0(kuzxj1lo), lfu2qhid(kuzxj1lo)
+      do23016 ayfnwr1v=1,kuzxj1lo 
+      call pnm1or(objzgdk0(ayfnwr1v), lfu2qhid(ayfnwr1v))
+23016 continue
+23017 continue
+      return
+      end
+      subroutine n2howibc2a(objzgdk0, i9mwnvqt, lfu2qhid)
+      implicit logical (a-z)
+      double precision objzgdk0, i9mwnvqt, lfu2qhid
+      double precision xd4mybgja
+      if(1.0d0 - objzgdk0 .ge. 1.0d0)then
+      lfu2qhid = -8.12589d0 / (3.0*dsqrt(i9mwnvqt))
+      else
+      if(1.0d0 - objzgdk0 .le. 0.0d0)then
+      lfu2qhid = 8.12589d0 / (3.0*dsqrt(i9mwnvqt))
+      else
+      call pnm1or(1.0d0-objzgdk0, xd4mybgja)
+      xd4mybgja = xd4mybgja / (3.0*dsqrt(i9mwnvqt))
+      lfu2qhid = -3.0d0 * dlog(1.0d0 + xd4mybgja)
+      endif
+      endif
+      return
+      end
+      subroutine zi8qrpsb(objzgdk0, lfu2qhid)
+      implicit logical (a-z)
+      double precision objzgdk0, lfu2qhid
+      if(1.0d0 - objzgdk0 .ge. 1.0d0)then
+      lfu2qhid = -35.0d0
+      else
+      if(1.0d0 - objzgdk0 .le. 0.0d0)then
+      lfu2qhid = 3.542106d0
+      else
+      lfu2qhid = dlog(-dlog(1.0d0 - objzgdk0))
+      endif
+      endif
+      return
+      end
+      subroutine g2vwexyk9(objzgdk0, lfu2qhid)
+      implicit logical (a-z)
+      double precision objzgdk0, lfu2qhid
+      if(1.0d0 - objzgdk0 .ge. 1.0d0)then
+      lfu2qhid = -34.53958d0
+      else
+      if(1.0d0 - objzgdk0 .le. 0.0d0)then
+      lfu2qhid = 34.53958d0
+      else
+      lfu2qhid = dlog(objzgdk0 / (1.0d0 - objzgdk0))
+      endif
+      endif
+      return
+      end
+      subroutine pkc4ejib(w8znmyce, beta, m0ibglfx, kuzxj1lo, wy1vqfzu, 
+     *br5ovgcj, xlpjcg3s, vtsou9pz, hj3ftvzu, qfx3vhct, unhycz0e, vm4xjo
+     *sb)
+      implicit logical (a-z)
+      integer kuzxj1lo, wy1vqfzu, br5ovgcj, xlpjcg3s, vtsou9pz, hj3ftvzu
+     *, qfx3vhct, unhycz0e
+      double precision w8znmyce(br5ovgcj,xlpjcg3s), beta(xlpjcg3s), m0ib
+     *glfx(wy1vqfzu,kuzxj1lo), vm4xjosb(kuzxj1lo)
+      integer ayfnwr1v, yq6lorbx, gp1jxzuh, i1loc, sedf7mxb
+      double precision vogkfwt8
+      if(vtsou9pz .eq. 1)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      sedf7mxb = 2*hj3ftvzu-1
+      do23034 ayfnwr1v=1,kuzxj1lo 
+      vogkfwt8 = 0.0d0
+      do23036 gp1jxzuh=1,xlpjcg3s 
+      vogkfwt8 = vogkfwt8 + w8znmyce(2*ayfnwr1v-1,gp1jxzuh) * beta(gp1jx
+     *zuh)
+23036 continue
+23037 continue
+      m0ibglfx(sedf7mxb,ayfnwr1v) = vogkfwt8
+23034 continue
+23035 continue
+      sedf7mxb = 2*hj3ftvzu
+      do23038 ayfnwr1v=1,kuzxj1lo 
+      vogkfwt8 = 0.0d0
+      do23040 gp1jxzuh=1,xlpjcg3s 
+      vogkfwt8 = vogkfwt8 + w8znmyce(2*ayfnwr1v ,gp1jxzuh) * beta(gp1jxz
+     *uh)
+23040 continue
+23041 continue
+      m0ibglfx(sedf7mxb,ayfnwr1v) = vogkfwt8
+23038 continue
+23039 continue
+      else
+      do23042 ayfnwr1v=1,br5ovgcj 
+      vogkfwt8 = 0.0d0
+      do23044 gp1jxzuh=1,xlpjcg3s 
+      vogkfwt8 = vogkfwt8 + w8znmyce(ayfnwr1v,gp1jxzuh) * beta(gp1jxzuh)
+23044 continue
+23045 continue
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = vogkfwt8
+23042 continue
+23043 continue
+      endif
+      else
+      i1loc = 1
+      do23046 ayfnwr1v=1,kuzxj1lo 
+      do23048 yq6lorbx=1,wy1vqfzu 
+      vogkfwt8 = 0.0d0
+      do23050 gp1jxzuh=1,xlpjcg3s 
+      vogkfwt8 = vogkfwt8 + w8znmyce(i1loc,gp1jxzuh) * beta(gp1jxzuh)
+23050 continue
+23051 continue
+      i1loc = i1loc + 1
+      m0ibglfx(yq6lorbx,ayfnwr1v) = vogkfwt8
+23048 continue
+23049 continue
+23046 continue
+23047 continue
+      endif
+      if(unhycz0e .eq. 1)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23056 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) 
+     *+ vm4xjosb(ayfnwr1v)
+23056 continue
+23057 continue
+      else
+      do23058 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = m0ibglfx(hj3ftvzu,ayfnwr1v) + vm4xjo
+     *sb(ayfnwr1v)
+23058 continue
+23059 continue
+      endif
+      endif
+      return
+      end
+      subroutine nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0k
+     *ns, qfx3vhct, hj3ftvzu)
+      implicit logical (a-z)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, qfx3vhct, hj3ftvzu
+      double precision m0ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns,ku
+     *zxj1lo)
+      integer ayfnwr1v, yq6lorbx
+      double precision o3jyipdf0
+      if(hj3ftvzu .eq. 0)then
+      if(qfx3vhct .eq. 1)then
+      do23064 ayfnwr1v=1,kuzxj1lo 
+      do23066 yq6lorbx=1,wy1vqfzu 
+      o3jyipdf0 = dexp(m0ibglfx(yq6lorbx,ayfnwr1v))
+      t8hwvalr(yq6lorbx,ayfnwr1v) = o3jyipdf0 / (1.0d0 + o3jyipdf0)
+23066 continue
+23067 continue
+23064 continue
+23065 continue
+      endif
+      if(qfx3vhct .eq. 2)then
+      do23070 ayfnwr1v=1,kuzxj1lo 
+      do23072 yq6lorbx=1,wy1vqfzu 
+      t8hwvalr(yq6lorbx,ayfnwr1v) = dexp(m0ibglfx(yq6lorbx,ayfnwr1v))
+23072 continue
+23073 continue
+23070 continue
+23071 continue
+      endif
+      if(qfx3vhct .eq. 4)then
+      do23076 ayfnwr1v=1,kuzxj1lo 
+      do23078 yq6lorbx=1,wy1vqfzu 
+      t8hwvalr(yq6lorbx,ayfnwr1v) = 1.0d0-dexp(-dexp(m0ibglfx(yq6lorbx,a
+     *yfnwr1v)))
+23078 continue
+23079 continue
+23076 continue
+23077 continue
+      endif
+      if(qfx3vhct .eq. 5)then
+      do23082 ayfnwr1v=1,kuzxj1lo 
+      do23084 yq6lorbx=1,afpc0kns 
+      t8hwvalr(yq6lorbx,ayfnwr1v) = dexp(m0ibglfx(2*yq6lorbx-1,ayfnwr1v)
+     *)
+23084 continue
+23085 continue
+23082 continue
+23083 continue
+      endif
+      if(qfx3vhct .eq. 3)then
+      do23088 ayfnwr1v=1,kuzxj1lo 
+      do23090 yq6lorbx=1,afpc0kns 
+      t8hwvalr(yq6lorbx,ayfnwr1v) = dexp(m0ibglfx(2*yq6lorbx-1,ayfnwr1v)
+     *)
+23090 continue
+23091 continue
+23088 continue
+23089 continue
+      endif
+      if(qfx3vhct .eq. 8)then
+      do23094 ayfnwr1v=1,kuzxj1lo 
+      do23096 yq6lorbx=1,wy1vqfzu 
+      t8hwvalr(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v)
+23096 continue
+23097 continue
+23094 continue
+23095 continue
+      endif
+      else
+      if(qfx3vhct .eq. 1)then
+      do23100 ayfnwr1v=1,kuzxj1lo 
+      o3jyipdf0 = dexp(m0ibglfx(hj3ftvzu,ayfnwr1v))
+      t8hwvalr(hj3ftvzu,ayfnwr1v) = o3jyipdf0 / (1.0d0 + o3jyipdf0)
+23100 continue
+23101 continue
+      endif
+      if(qfx3vhct .eq. 2)then
+      do23104 ayfnwr1v=1,kuzxj1lo 
+      t8hwvalr(hj3ftvzu,ayfnwr1v) = dexp(m0ibglfx(hj3ftvzu,ayfnwr1v))
+23104 continue
+23105 continue
+      endif
+      if(qfx3vhct .eq. 4)then
+      do23108 ayfnwr1v=1,kuzxj1lo 
+      t8hwvalr(hj3ftvzu,ayfnwr1v) = 1.0d0 - dexp(-dexp(m0ibglfx(hj3ftvzu
+     *,ayfnwr1v)))
+23108 continue
+23109 continue
+      endif
+      if(qfx3vhct .eq. 5)then
+      do23112 ayfnwr1v=1,kuzxj1lo 
+      t8hwvalr(hj3ftvzu,ayfnwr1v) = dexp(m0ibglfx(2*hj3ftvzu-1,ayfnwr1v)
+     *)
+23112 continue
+23113 continue
+      endif
+      if(qfx3vhct .eq. 3)then
+      do23116 ayfnwr1v=1,kuzxj1lo 
+      t8hwvalr(hj3ftvzu,ayfnwr1v) = dexp(m0ibglfx(2*hj3ftvzu-1,ayfnwr1v)
+     *)
+23116 continue
+23117 continue
+      endif
+      if(qfx3vhct .eq. 8)then
+      do23120 ayfnwr1v=1,kuzxj1lo 
+      t8hwvalr(hj3ftvzu,ayfnwr1v) = m0ibglfx(hj3ftvzu,ayfnwr1v)
+23120 continue
+23121 continue
+      endif
+      endif
+      return
+      end
+      subroutine shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, 
+     *wy1vqfzu, afpc0kns, dimw, m0ibglfx, dev, hj3ftvzu, n3iasxug, vsoih
+     *n1r, cll)
+      implicit logical (a-z)
+      integer qfx3vhct, kuzxj1lo, wy1vqfzu, afpc0kns, dimw, hj3ftvzu, cl
+     *l
+      double precision tlgduey8(kuzxj1lo, afpc0kns), wmat(kuzxj1lo, dimw
+     *), t8hwvalr(afpc0kns, kuzxj1lo), m0ibglfx(wy1vqfzu,kuzxj1lo), dev,
+     * n3iasxug, vsoihn1r
+      integer ayfnwr1v, yq6lorbx
+      double precision bzmd6ftv, txlvcey5, xd4mybgj, uqnkc6zg, hofjnx2e,
+     * smu, afwp5imx, ivqk2ywz, qvd7yktm
+      double precision hdqsx7bk, anopu9vi, jtnbu2hz
+      logical lbgwvp3q
+      bzmd6ftv = 0.0d0
+      if(hj3ftvzu .eq. 0)then
+      if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then
+      do23126 yq6lorbx=1,wy1vqfzu 
+      do23128 ayfnwr1v=1,kuzxj1lo 
+      if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then
+      ivqk2ywz = tlgduey8(ayfnwr1v,yq6lorbx) * dlog(tlgduey8(ayfnwr1v,yq
+     *6lorbx))
+      else
+      ivqk2ywz = 0.0d0
+      endif
+      if(tlgduey8(ayfnwr1v,yq6lorbx) .lt. 1.0d0)then
+      ivqk2ywz = ivqk2ywz + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * dlog
+     *(1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx))
+      endif
+      xd4mybgj = t8hwvalr(yq6lorbx,ayfnwr1v) * (1.0d0 - t8hwvalr(yq6lorb
+     *x,ayfnwr1v))
+      if(xd4mybgj .lt. n3iasxug)then
+      smu = t8hwvalr(yq6lorbx,ayfnwr1v)
+      if(smu .lt. n3iasxug)then
+      qvd7yktm = tlgduey8(ayfnwr1v,yq6lorbx) * vsoihn1r
+      else
+      qvd7yktm = tlgduey8(ayfnwr1v,yq6lorbx) * dlog(smu)
+      endif
+      afwp5imx = 1.0d0 - smu
+      if(afwp5imx .lt. n3iasxug)then
+      qvd7yktm = qvd7yktm + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * vsoi
+     *hn1r
+      else
+      qvd7yktm = qvd7yktm + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * dlog
+     *(afwp5imx)
+      endif
+      else
+      qvd7yktm = (tlgduey8(ayfnwr1v,yq6lorbx) * dlog(t8hwvalr(yq6lorbx,a
+     *yfnwr1v)) + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * dlog(1.0d0 - t
+     *8hwvalr(yq6lorbx,ayfnwr1v)))
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (ivqk2ywz - qvd7yktm)
+23128 continue
+23129 continue
+23126 continue
+23127 continue
+      endif
+      if(qfx3vhct .eq. 2)then
+      do23142 yq6lorbx=1,wy1vqfzu 
+      do23144 ayfnwr1v=1,kuzxj1lo 
+      if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then
+      xd4mybgj = t8hwvalr(yq6lorbx,ayfnwr1v) - tlgduey8(ayfnwr1v,yq6lorb
+     *x) + tlgduey8(ayfnwr1v,yq6lorbx) * dlog(tlgduey8(ayfnwr1v,yq6lorbx
+     *) / t8hwvalr(yq6lorbx,ayfnwr1v))
+      else
+      xd4mybgj = t8hwvalr(yq6lorbx,ayfnwr1v) - tlgduey8(ayfnwr1v,yq6lorb
+     *x)
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj
+23144 continue
+23145 continue
+23142 continue
+23143 continue
+      endif
+      if(qfx3vhct .eq. 5)then
+      do23150 yq6lorbx=1,afpc0kns 
+      do23152 ayfnwr1v=1,kuzxj1lo 
+      jtnbu2hz = dexp(m0ibglfx(2*yq6lorbx,ayfnwr1v))
+      call tldz5ion(jtnbu2hz, uqnkc6zg)
+      if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then
+      xd4mybgj = (jtnbu2hz - 1.0d0) * dlog(tlgduey8(ayfnwr1v,yq6lorbx)) 
+     *+ (dlog(jtnbu2hz)-tlgduey8(ayfnwr1v,yq6lorbx) / t8hwvalr(yq6lorbx,
+     *ayfnwr1v) - dlog(t8hwvalr(yq6lorbx,ayfnwr1v)) ) * jtnbu2hz - uqnkc
+     *6zg
+      else
+      xd4mybgj = -1000.0d0
+      endif
+      xd4mybgj = -xd4mybgj
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj
+23152 continue
+23153 continue
+23150 continue
+23151 continue
+      endif
+      if(qfx3vhct .eq. 3)then
+      if(cll .eq. 0)then
+      anopu9vi = 34.0d0
+      do23160 yq6lorbx=1,afpc0kns 
+      do23162 ayfnwr1v=1,kuzxj1lo 
+      if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .gt. anopu9vi)then
+      hdqsx7bk = dexp(anopu9vi)
+      lbgwvp3q = .true.
+      else
+      if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .lt. -anopu9vi)then
+      hdqsx7bk = dexp(-anopu9vi)
+      lbgwvp3q = .true.
+      else
+      hdqsx7bk = dexp(m0ibglfx(2*yq6lorbx,ayfnwr1v))
+      lbgwvp3q = .false.
+      endif
+      endif
+      if(tlgduey8(ayfnwr1v,yq6lorbx) .lt. 1.0d0)then
+      xd4mybgj = 1.0d0
+      else
+      xd4mybgj = tlgduey8(ayfnwr1v,yq6lorbx)
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (tlgduey8(ayfnwr1v,yq6lor
+     *bx) * dlog(xd4mybgj/t8hwvalr(yq6lorbx,ayfnwr1v)) + (tlgduey8(ayfnw
+     *r1v,yq6lorbx) + hdqsx7bk) * dlog((t8hwvalr(yq6lorbx,ayfnwr1v)+hdqs
+     *x7bk) / (hdqsx7bk+ tlgduey8(ayfnwr1v,yq6lorbx))))
+23162 continue
+23163 continue
+23160 continue
+23161 continue
+      else
+      anopu9vi = 34.0d0
+      do23170 yq6lorbx=1,afpc0kns 
+      do23172 ayfnwr1v=1,kuzxj1lo 
+      if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .gt. anopu9vi)then
+      hdqsx7bk = dexp(anopu9vi)
+      lbgwvp3q = .true.
+      else
+      if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .lt. -anopu9vi)then
+      hdqsx7bk = dexp(-anopu9vi)
+      lbgwvp3q = .true.
+      else
+      hdqsx7bk = dexp(m0ibglfx(2*yq6lorbx,ayfnwr1v))
+      lbgwvp3q = .false.
+      endif
+      endif
+      if( lbgwvp3q )then
+      uqnkc6zg = 0.0d0
+      hofjnx2e = 0.0d0
+      else
+      call tldz5ion(hdqsx7bk + tlgduey8(ayfnwr1v,yq6lorbx), uqnkc6zg)
+      call tldz5ion(hdqsx7bk, hofjnx2e)
+      endif
+      call tldz5ion(1.0d0 + tlgduey8(ayfnwr1v,yq6lorbx), txlvcey5)
+      xd4mybgj = hdqsx7bk * dlog(hdqsx7bk / (hdqsx7bk + t8hwvalr(yq6lorb
+     *x,ayfnwr1v))) + uqnkc6zg - hofjnx2e - txlvcey5
+      if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then
+      xd4mybgj = xd4mybgj + tlgduey8(ayfnwr1v,yq6lorbx) * dlog(t8hwvalr(
+     *yq6lorbx,ayfnwr1v) / (hdqsx7bk + t8hwvalr(yq6lorbx,ayfnwr1v)))
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj
+23172 continue
+23173 continue
+23170 continue
+23171 continue
+      bzmd6ftv = -bzmd6ftv / 2.0d0
+      endif
+      endif
+      if(qfx3vhct .eq. 8)then
+      do23184 yq6lorbx=1,wy1vqfzu 
+      do23186 ayfnwr1v=1,kuzxj1lo 
+      xd4mybgj = tlgduey8(ayfnwr1v,yq6lorbx) - t8hwvalr(yq6lorbx,ayfnwr1
+     *v)
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj**2
+23186 continue
+23187 continue
+23184 continue
+23185 continue
+      endif
+      else
+      if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then
+      do23190 ayfnwr1v=1,kuzxj1lo 
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then
+      ivqk2ywz = tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(tlgduey8(ayfnwr1v,hj
+     *3ftvzu))
+      else
+      ivqk2ywz = 0.0d0
+      endif
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .lt. 1.0d0)then
+      ivqk2ywz = ivqk2ywz + (1.0d0 - tlgduey8(ayfnwr1v,hj3ftvzu)) * dlog
+     *(1.0d0 - tlgduey8(ayfnwr1v,hj3ftvzu))
+      endif
+      xd4mybgj = t8hwvalr(hj3ftvzu,ayfnwr1v) * (1.0d0 - t8hwvalr(hj3ftvz
+     *u,ayfnwr1v))
+      if(xd4mybgj .lt. n3iasxug)then
+      smu = t8hwvalr(hj3ftvzu,ayfnwr1v)
+      if(smu .lt. n3iasxug)then
+      qvd7yktm = tlgduey8(ayfnwr1v,hj3ftvzu) * vsoihn1r
+      else
+      qvd7yktm = tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(smu)
+      endif
+      afwp5imx = 1.0d0 - smu
+      if(afwp5imx .lt. n3iasxug)then
+      qvd7yktm = qvd7yktm + (1.0d0-tlgduey8(ayfnwr1v,hj3ftvzu))*vsoihn1r
+      else
+      qvd7yktm = qvd7yktm + (1.0d0-tlgduey8(ayfnwr1v,hj3ftvzu))*dlog(afw
+     *p5imx)
+      endif
+      else
+      qvd7yktm = (tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(t8hwvalr(hj3ftvzu,a
+     *yfnwr1v)) + (1.0d0 - tlgduey8(ayfnwr1v,hj3ftvzu)) * dlog(1.0d0 - t
+     *8hwvalr(hj3ftvzu,ayfnwr1v)))
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (ivqk2ywz - qvd7yktm)
+23190 continue
+23191 continue
+      endif
+      if(qfx3vhct .eq. 2)then
+      do23204 ayfnwr1v=1,kuzxj1lo 
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then
+      xd4mybgj = t8hwvalr(hj3ftvzu,ayfnwr1v) - tlgduey8(ayfnwr1v,hj3ftvz
+     *u) + tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(tlgduey8(ayfnwr1v,hj3ftvzu
+     *) / t8hwvalr(hj3ftvzu,ayfnwr1v))
+      else
+      xd4mybgj = t8hwvalr(hj3ftvzu,ayfnwr1v) - tlgduey8(ayfnwr1v,hj3ftvz
+     *u)
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj
+23204 continue
+23205 continue
+      endif
+      if(qfx3vhct .eq. 5)then
+      do23210 ayfnwr1v=1,kuzxj1lo 
+      jtnbu2hz = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v))
+      call tldz5ion(jtnbu2hz, uqnkc6zg)
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then
+      xd4mybgj = (jtnbu2hz - 1.0d0) * dlog(tlgduey8(ayfnwr1v,hj3ftvzu)) 
+     *+ jtnbu2hz * (dlog(jtnbu2hz) - tlgduey8(ayfnwr1v,hj3ftvzu) / t8hwv
+     *alr(hj3ftvzu,ayfnwr1v) - dlog(t8hwvalr(hj3ftvzu,ayfnwr1v))) - uqnk
+     *c6zg
+      else
+      xd4mybgj = -1000.0d0
+      endif
+      xd4mybgj = -xd4mybgj
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj
+23210 continue
+23211 continue
+      endif
+      if(qfx3vhct .eq. 3)then
+      if(cll .eq. 0)then
+      anopu9vi = 34.0d0
+      do23218 ayfnwr1v=1,kuzxj1lo 
+      if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .gt. anopu9vi)then
+      hdqsx7bk = dexp(anopu9vi)
+      lbgwvp3q = .true.
+      else
+      if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .lt. -anopu9vi)then
+      hdqsx7bk = dexp(-anopu9vi)
+      lbgwvp3q = .true.
+      else
+      hdqsx7bk = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v))
+      lbgwvp3q = .false.
+      endif
+      endif
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .lt. 1.0d0)then
+      xd4mybgj = 1.0d0
+      else
+      xd4mybgj = tlgduey8(ayfnwr1v,hj3ftvzu)
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (tlgduey8(ayfnwr1v,hj3ftv
+     *zu) * dlog(xd4mybgj/t8hwvalr(hj3ftvzu,ayfnwr1v)) + (tlgduey8(ayfnw
+     *r1v,hj3ftvzu)+hdqsx7bk) * dlog((t8hwvalr(hj3ftvzu,ayfnwr1v) + hdqs
+     *x7bk) / ( hdqsx7bk+tlgduey8(ayfnwr1v,hj3ftvzu))))
+23218 continue
+23219 continue
+      else
+      do23226 ayfnwr1v=1,kuzxj1lo 
+      hdqsx7bk = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v))
+      call tldz5ion(hdqsx7bk + tlgduey8(ayfnwr1v,hj3ftvzu), uqnkc6zg)
+      call tldz5ion(hdqsx7bk, hofjnx2e)
+      call tldz5ion(1.0d0 + tlgduey8(ayfnwr1v,hj3ftvzu), txlvcey5)
+      xd4mybgj = hdqsx7bk * dlog(hdqsx7bk / (hdqsx7bk + t8hwvalr(hj3ftvz
+     *u,ayfnwr1v))) + uqnkc6zg - hofjnx2e - txlvcey5
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then
+      xd4mybgj = xd4mybgj + tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(t8hwvalr(
+     *hj3ftvzu,ayfnwr1v) / (hdqsx7bk + t8hwvalr(hj3ftvzu,ayfnwr1v)))
+      endif
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj
+23226 continue
+23227 continue
+      bzmd6ftv = -bzmd6ftv / 2.0d0
+      endif
+      endif
+      if(qfx3vhct .eq. 8)then
+      do23232 ayfnwr1v=1,kuzxj1lo 
+      xd4mybgj = tlgduey8(ayfnwr1v,hj3ftvzu) - t8hwvalr(hj3ftvzu,ayfnwr1
+     *v)
+      bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj**2
+23232 continue
+23233 continue
+      endif
+      endif
+      dev = 2.0d0 * bzmd6ftv
+      return
+      end
+      subroutine flncwkfq76(lncwkfq7, w8znmyce, kuzxj1lo, br5ovgcj, xwdf
+     *5ltg, qfx3vhct)
+      implicit logical (a-z)
+      integer kuzxj1lo, br5ovgcj, xwdf5ltg, qfx3vhct
+      double precision lncwkfq7(kuzxj1lo,xwdf5ltg), w8znmyce(br5ovgcj,*)
+      integer ayfnwr1v, sedf7mxb, hpmwnav2
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq.5 ))then
+      sedf7mxb = 1
+      do23236 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 1.0d0
+      w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0
+23236 continue
+23237 continue
+      sedf7mxb = sedf7mxb + 1
+      do23238 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 0.0d0
+      w8znmyce(2*ayfnwr1v, sedf7mxb) = 1.0d0
+23238 continue
+23239 continue
+      sedf7mxb = sedf7mxb + 1
+      do23240 hpmwnav2=1,xwdf5ltg 
+      do23242 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = lncwkfq7(ayfnwr1v,hpmwnav2)
+      w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0
+23242 continue
+23243 continue
+      sedf7mxb = sedf7mxb + 1
+23240 continue
+23241 continue
+      else
+      sedf7mxb = 1
+      do23244 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(ayfnwr1v,sedf7mxb) = 1.0d0
+23244 continue
+23245 continue
+      sedf7mxb = sedf7mxb + 1
+      do23246 hpmwnav2=1,xwdf5ltg 
+      do23248 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(ayfnwr1v,sedf7mxb)=lncwkfq7(ayfnwr1v,hpmwnav2)
+23248 continue
+23249 continue
+      sedf7mxb = sedf7mxb + 1
+23246 continue
+23247 continue
+      endif
+      return
+      end
+      subroutine flncwkfq71(lncwkfq7, w8znmyce, kuzxj1lo, xwdf5ltg, qfx3
+     *vhct, vm4xjosb, br5ovgcj, xlpjcg3s, hyqwtp6i, tgiyxdw1, dufozmt7, 
+     *kifxa0he, p1, unhycz0e)
+      implicit logical (a-z)
+      integer kuzxj1lo, xwdf5ltg, qfx3vhct, br5ovgcj, xlpjcg3s, hyqwtp6i
+     *, tgiyxdw1(hyqwtp6i), dufozmt7(hyqwtp6i), p1, unhycz0e
+      double precision lncwkfq7(kuzxj1lo,xwdf5ltg), w8znmyce(br5ovgcj,xl
+     *pjcg3s), kifxa0he(kuzxj1lo,p1)
+      double precision vm4xjosb(kuzxj1lo)
+      integer i0spbklx, ayfnwr1v, sedf7mxb, hpmwnav2
+      double precision tad5vhsu, uqnkc6zg
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23252 hpmwnav2=1,xwdf5ltg 
+      do23254 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,hpmwnav2) = lncwkfq7(ayfnwr1v,hpmwnav2)
+      w8znmyce(2*ayfnwr1v ,hpmwnav2) = 0.0d0
+23254 continue
+23255 continue
+23252 continue
+23253 continue
+      sedf7mxb = xwdf5ltg + 1
+      if(unhycz0e .eq. 0)then
+      do23258 i0spbklx=1,hyqwtp6i 
+      do23260 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spb
+     *klx)) * lncwkfq7(ayfnwr1v,dufozmt7(i0spbklx))
+      w8znmyce(2*ayfnwr1v ,sedf7mxb) = 0.0d0
+23260 continue
+23261 continue
+      sedf7mxb = sedf7mxb + 1
+23258 continue
+23259 continue
+      else
+      do23262 ayfnwr1v=1,kuzxj1lo 
+      tad5vhsu = 0.0d0
+      do23264 hpmwnav2=1,xwdf5ltg 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2)
+      tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg
+23264 continue
+23265 continue
+      vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu
+23262 continue
+23263 continue
+      endif
+      else
+      do23266 hpmwnav2=1,xwdf5ltg 
+      do23268 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(ayfnwr1v,hpmwnav2) = lncwkfq7(ayfnwr1v,hpmwnav2)
+23268 continue
+23269 continue
+23266 continue
+23267 continue
+      sedf7mxb = xwdf5ltg + 1
+      if(unhycz0e .eq. 0)then
+      do23272 i0spbklx=1,hyqwtp6i 
+      do23274 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(ayfnwr1v,sedf7mxb) = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)
+     *) * lncwkfq7(ayfnwr1v,dufozmt7(i0spbklx))
+23274 continue
+23275 continue
+      sedf7mxb = sedf7mxb + 1
+23272 continue
+23273 continue
+      else
+      do23276 ayfnwr1v=1,kuzxj1lo 
+      tad5vhsu = 0.0d0
+      do23278 hpmwnav2=1,xwdf5ltg 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2)
+      tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg
+23278 continue
+23279 continue
+      vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu
+23276 continue
+23277 continue
+      endif
+      endif
+      if(p1 .gt. 0)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23284 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 1.0d0
+      w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0
+23284 continue
+23285 continue
+      sedf7mxb = sedf7mxb + 1
+      do23286 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 0.0d0
+      w8znmyce(2*ayfnwr1v, sedf7mxb) = 1.0d0
+23286 continue
+23287 continue
+      sedf7mxb = sedf7mxb + 1
+      if(p1 .gt. 1)then
+      do23290 i0spbklx=2,p1 
+      do23292 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(2*ayfnwr1v-1,sedf7mxb) = kifxa0he(ayfnwr1v,i0spbklx)
+      w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0
+23292 continue
+23293 continue
+      sedf7mxb = sedf7mxb + 1
+23290 continue
+23291 continue
+      endif
+      else
+      do23294 i0spbklx=1,p1 
+      do23296 ayfnwr1v=1,kuzxj1lo 
+      w8znmyce(ayfnwr1v,sedf7mxb) = kifxa0he(ayfnwr1v,i0spbklx)
+23296 continue
+23297 continue
+      sedf7mxb = sedf7mxb + 1
+23294 continue
+23295 continue
+      endif
+      endif
+      return
+      end
+      subroutine flncwkfq72(lncwkfq7, w8znmyce, kuzxj1lo, wy1vqfzu, br5o
+     *vgcj, xwdf5ltg, qfx3vhct, afpc0kns, fmzq7aob, eu3oxvyb, hyqwtp6i, 
+     *tgiyxdw1, dufozmt7, unhycz0e, vm4xjosb)
+      implicit logical (a-z)
+      integer kuzxj1lo, wy1vqfzu, br5ovgcj, xwdf5ltg, qfx3vhct, afpc0kns
+     *, fmzq7aob, eu3oxvyb, hyqwtp6i, tgiyxdw1(hyqwtp6i), dufozmt7(hyqwt
+     *p6i), unhycz0e
+      double precision lncwkfq7(kuzxj1lo,xwdf5ltg), w8znmyce(br5ovgcj,*)
+     *, vm4xjosb(kuzxj1lo)
+      integer i0spbklx, ayfnwr1v, yq6lorbx, gp1jxzuh, ptr, sedf7mxb, hpm
+     *wnav2
+      double precision uqnkc6zg, tad5vhsu
+      do23298 gp1jxzuh=1,eu3oxvyb 
+      do23300 ayfnwr1v=1,br5ovgcj 
+      w8znmyce(ayfnwr1v,gp1jxzuh) = 0.0d0
+23300 continue
+23301 continue
+23298 continue
+23299 continue
+      sedf7mxb = 0
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23304 hpmwnav2=1,xwdf5ltg 
+      ptr = 1
+      do23306 ayfnwr1v=1,kuzxj1lo 
+      do23308 yq6lorbx=1,afpc0kns 
+      w8znmyce(ptr,sedf7mxb+yq6lorbx) = lncwkfq7(ayfnwr1v,hpmwnav2)
+      ptr = ptr + 2
+23308 continue
+23309 continue
+23306 continue
+23307 continue
+      sedf7mxb = sedf7mxb + afpc0kns
+23304 continue
+23305 continue
+      else
+      do23310 hpmwnav2=1,xwdf5ltg 
+      ptr = 0
+      do23312 ayfnwr1v=1,kuzxj1lo 
+      do23314 yq6lorbx=1,wy1vqfzu 
+      ptr = ptr + 1
+      w8znmyce(ptr,sedf7mxb+yq6lorbx) = lncwkfq7(ayfnwr1v,hpmwnav2)
+23314 continue
+23315 continue
+23312 continue
+23313 continue
+      sedf7mxb = sedf7mxb + wy1vqfzu
+23310 continue
+23311 continue
+      endif
+      if(fmzq7aob .eq. 0)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23320 i0spbklx=1,hyqwtp6i 
+      ptr = 1
+      do23322 ayfnwr1v=1,kuzxj1lo 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr
+     *1v,dufozmt7(i0spbklx))
+      do23324 yq6lorbx=1,afpc0kns 
+      w8znmyce(ptr,sedf7mxb+yq6lorbx) = uqnkc6zg
+      ptr = ptr + 2
+23324 continue
+23325 continue
+23322 continue
+23323 continue
+      sedf7mxb = sedf7mxb + afpc0kns
+23320 continue
+23321 continue
+      else
+      do23326 i0spbklx=1,hyqwtp6i 
+      ptr = 0
+      do23328 ayfnwr1v=1,kuzxj1lo 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr
+     *1v,dufozmt7(i0spbklx))
+      do23330 yq6lorbx=1,wy1vqfzu 
+      ptr = ptr + 1
+      w8znmyce(ptr,sedf7mxb+yq6lorbx) = uqnkc6zg
+23330 continue
+23331 continue
+23328 continue
+23329 continue
+      sedf7mxb = sedf7mxb + wy1vqfzu
+23326 continue
+23327 continue
+      endif
+      else
+      if(unhycz0e .eq. 1)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23336 ayfnwr1v=1,kuzxj1lo 
+      tad5vhsu = 0.0d0
+      do23338 hpmwnav2=1,xwdf5ltg 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2)
+      tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg
+23338 continue
+23339 continue
+      vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu
+23336 continue
+23337 continue
+      else
+      do23340 ayfnwr1v=1,kuzxj1lo 
+      tad5vhsu = 0.0d0
+      do23342 hpmwnav2=1,xwdf5ltg 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2)
+      tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg
+23342 continue
+23343 continue
+      vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu
+23340 continue
+23341 continue
+      endif
+      else
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23346 i0spbklx=1,hyqwtp6i 
+      ptr = 1
+      do23348 ayfnwr1v=1,kuzxj1lo 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr
+     *1v,dufozmt7(i0spbklx))
+      do23350 yq6lorbx=1,afpc0kns 
+      w8znmyce(ptr,sedf7mxb+i0spbklx) = uqnkc6zg
+      ptr = ptr + 2
+23350 continue
+23351 continue
+23348 continue
+23349 continue
+23346 continue
+23347 continue
+      sedf7mxb = sedf7mxb + hyqwtp6i
+      else
+      do23352 i0spbklx=1,hyqwtp6i 
+      ptr = 0
+      do23354 ayfnwr1v=1,kuzxj1lo 
+      uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr
+     *1v,dufozmt7(i0spbklx))
+      do23356 yq6lorbx=1,wy1vqfzu 
+      ptr = ptr + 1
+      w8znmyce(ptr,sedf7mxb+i0spbklx) = uqnkc6zg
+23356 continue
+23357 continue
+23354 continue
+23355 continue
+23352 continue
+23353 continue
+      sedf7mxb = sedf7mxb + hyqwtp6i
+      endif
+      endif
+      endif
+      return
+      end
+      subroutine ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu
+     *, afpc0kns, qfx3vhct, hj3ftvzu, wmat, wr0lbopv)
+      implicit logical (a-z)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, qfx3vhct, hj3ftvzu, wr0lbopv
+      double precision tlgduey8(kuzxj1lo,afpc0kns), m0ibglfx(wy1vqfzu,ku
+     *zxj1lo), y7sdgtqi(15)
+      double precision wmat(kuzxj1lo,*)
+      double precision vogkfwt8, cumw, gyuq8dex, g2vwexykp, qa8ltuhj, kw
+     *vo4ury, cpz4fgkx, fguvm9tyi, kinit
+      integer ayfnwr1v
+      if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4) .or. (qfx3vhct .eq. 3)
+     * .or. (qfx3vhct .eq. 5))then
+      vogkfwt8 = 0.0d0
+      cumw = 0.0d0
+      do23360 ayfnwr1v=1,kuzxj1lo 
+      vogkfwt8 = vogkfwt8 + tlgduey8(ayfnwr1v,hj3ftvzu) * wmat(ayfnwr1v,
+     *1)
+      cumw = cumw + wmat(ayfnwr1v,1)
+23360 continue
+23361 continue
+      gyuq8dex = vogkfwt8 / cumw
+      endif
+      if(qfx3vhct .eq. 1)then
+      call g2vwexyk9(gyuq8dex, g2vwexykp)
+      do23364 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = g2vwexykp
+23364 continue
+23365 continue
+      endif
+      if(qfx3vhct .eq. 2)then
+      do23368 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = dlog(tlgduey8(ayfnwr1v,hj3ftvzu) + 0
+     *.125d0)
+23368 continue
+23369 continue
+      endif
+      if(qfx3vhct .eq. 4)then
+      call zi8qrpsb(gyuq8dex, qa8ltuhj)
+      do23372 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = qa8ltuhj
+23372 continue
+23373 continue
+      endif
+      if(qfx3vhct .eq. 5)then
+      if(wr0lbopv .eq. 1)then
+      kwvo4ury = dlog(gyuq8dex + 0.03125d0)
+      cpz4fgkx = dlog(y7sdgtqi(3+afpc0kns+hj3ftvzu)+0.01d0)
+      do23378 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury
+      m0ibglfx(2*hj3ftvzu, ayfnwr1v) = cpz4fgkx
+23378 continue
+23379 continue
+      else
+      if(wr0lbopv .eq. 2)then
+      kwvo4ury = dlog((6.0/8.0)*gyuq8dex+0.000d0)
+      cpz4fgkx = dlog(y7sdgtqi(3+afpc0kns+hj3ftvzu)+0.01d0)
+      do23382 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx
+23382 continue
+23383 continue
+      else
+      cpz4fgkx = dlog(y7sdgtqi(3+afpc0kns+hj3ftvzu)+0.01d0)
+      do23384 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(tlgduey8(ayfnwr1v,hj3ftvzu)
+     * + 0.03125d0)
+      m0ibglfx(2*hj3ftvzu, ayfnwr1v) = cpz4fgkx
+23384 continue
+23385 continue
+      endif
+      endif
+      endif
+      if(qfx3vhct .eq. 3)then
+      if(wr0lbopv .eq. 1)then
+      kwvo4ury = dlog(gyuq8dex + 0.03125d0)
+      cpz4fgkx = dlog(y7sdgtqi(3+hj3ftvzu)+0.03125d0)
+      do23390 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury
+      m0ibglfx(2*hj3ftvzu,ayfnwr1v) = cpz4fgkx
+23390 continue
+23391 continue
+      else
+      if(wr0lbopv .eq. 2)then
+      kwvo4ury = dlog(gyuq8dex + 0.03125d0)
+      kinit = y7sdgtqi(3+hj3ftvzu)
+      cpz4fgkx = dlog(kinit)
+      do23394 ayfnwr1v=1,kuzxj1lo 
+      fguvm9tyi = tlgduey8(ayfnwr1v,hj3ftvzu) - gyuq8dex
+      if(fguvm9tyi .gt. 3.0 * gyuq8dex)then
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(dsqrt(tlgduey8(ayfnwr1v,hj3
+     *ftvzu)))
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx
+      else
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx
+      endif
+23394 continue
+23395 continue
+      else
+      if(wr0lbopv .eq. 3)then
+      kwvo4ury = dlog(gyuq8dex + 0.03125d0)
+      kinit = y7sdgtqi(3+hj3ftvzu)
+      cpz4fgkx = dlog(kinit)
+      do23400 ayfnwr1v=1,kuzxj1lo 
+      fguvm9tyi = tlgduey8(ayfnwr1v,hj3ftvzu) - gyuq8dex
+      if(fguvm9tyi .gt. gyuq8dex)then
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(0.5*(tlgduey8(ayfnwr1v,hj3f
+     *tvzu)+gyuq8dex))
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = dlog(kinit / (fguvm9tyi / gyuq8de
+     *x))
+      else
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .lt. (gyuq8dex / 4.0))then
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(gyuq8dex / 4.0)
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx
+      else
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx
+      endif
+      endif
+23400 continue
+23401 continue
+      else
+      cpz4fgkx = dlog(y7sdgtqi(3+hj3ftvzu))
+      do23406 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(tlgduey8(ayfnwr1v,hj3ftvzu)
+     * + 0.03125d0)
+      m0ibglfx(2*hj3ftvzu, ayfnwr1v) = cpz4fgkx
+23406 continue
+23407 continue
+      endif
+      endif
+      endif
+      endif
+      if(qfx3vhct .eq. 8)then
+      do23410 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = tlgduey8(ayfnwr1v,hj3ftvzu)
+23410 continue
+23411 continue
+      endif
+      return
+      end
+      subroutine dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, 
+     *rbne6ouj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, br5ovgcj, dimu, hj3ftvzu, qfx3vhct, zjkrtol8, unhycz
+     *0e, vm4xjosb)
+      implicit logical (a-z)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, hj3ftvzu, zj
+     *krtol8, unhycz0e
+      double precision tlgduey8(kuzxj1lo,afpc0kns), wmat(kuzxj1lo,*), m0
+     *ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo), vm4xjosb(k
+     *uzxj1lo), ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu)
+     *, wpuarq2m(dimu,kuzxj1lo), rsynp1go, n3iasxug, uaf2xgqy
+      integer ayfnwr1v, qfx3vhct
+      double precision xd4mybgja, xd4mybgjb, xd4mybgjc, anopu9vi
+      logical lbgwvp3q
+      double precision hdqsx7bk, dkdeta, dldk, ux3nadiw, ed2ldk2, n2kers
+     *mx, bzmd6ftvmat(1,1), kkmat(1,1), nm0eljqk(1,1)
+      integer hbsl0gto, dvhw1ulq, sguwj9ty
+      double precision jtnbu2hz, uqnkc6zgd, uqnkc6zgt, dldshape
+      double precision fvn3iasxug, xk7dnvei
+      integer okobr6tcex
+      br5ovgcj = 1
+      hbsl0gto = 1
+      n2kersmx = 0.990d0
+      n2kersmx = 0.995d0
+      if(qfx3vhct .eq. 1)then
+      do23414 ayfnwr1v=1,kuzxj1lo 
+      xd4mybgja = t8hwvalr(hj3ftvzu,ayfnwr1v) * (1.0d0 - t8hwvalr(hj3ftv
+     *zu,ayfnwr1v))
+      xd4mybgjb = xd4mybgja * wmat(ayfnwr1v,1)
+      if(xd4mybgja .lt. n3iasxug)then
+      xd4mybgja = n3iasxug
+      endif
+      if(xd4mybgjb .lt. n3iasxug)then
+      xd4mybgjb = n3iasxug
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb)
+      endif
+      rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu
+     *ey8(ayfnwr1v,hj3ftvzu)-t8hwvalr(hj3ftvzu,ayfnwr1v)) / xd4mybgja
+23414 continue
+23415 continue
+      endif
+      if(qfx3vhct .eq. 2)then
+      do23422 ayfnwr1v=1,kuzxj1lo 
+      xd4mybgja = t8hwvalr(hj3ftvzu,ayfnwr1v)
+      xd4mybgjb = xd4mybgja * wmat(ayfnwr1v,1)
+      if(xd4mybgjb .lt. n3iasxug)then
+      xd4mybgjb = n3iasxug
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb)
+      endif
+      rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb
+      if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then
+      xd4mybgjc = xd4mybgja
+      if(xd4mybgjc .lt. n3iasxug)then
+      xd4mybgjc = n3iasxug
+      endif
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu
+     *ey8(ayfnwr1v,hj3ftvzu)-xd4mybgjc)/xd4mybgjc
+      else
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) - 1.0d0
+      endif
+23422 continue
+23423 continue
+      endif
+      if(qfx3vhct .eq. 4)then
+      do23432 ayfnwr1v=1,kuzxj1lo 
+      if((t8hwvalr(hj3ftvzu,ayfnwr1v) .lt. n3iasxug) .or. (t8hwvalr(hj3f
+     *tvzu,ayfnwr1v) .gt. 1.0d0 - n3iasxug))then
+      xd4mybgja = n3iasxug
+      xd4mybgjb = xd4mybgja * wmat(ayfnwr1v,1)
+      if(xd4mybgjb .lt. n3iasxug)then
+      xd4mybgjb = n3iasxug
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb)
+      endif
+      rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu
+     *ey8(ayfnwr1v,hj3ftvzu)-t8hwvalr(hj3ftvzu,ayfnwr1v)) / xd4mybgja
+      else
+      xd4mybgja = -(1.0d0 - t8hwvalr(hj3ftvzu,ayfnwr1v)) * dlog(1.0d0 - 
+     *t8hwvalr(hj3ftvzu,ayfnwr1v))
+      if(xd4mybgja .lt. n3iasxug)then
+      xd4mybgja = n3iasxug
+      endif
+      xd4mybgjb = -xd4mybgja * wmat(ayfnwr1v,1) * dlog(1.0d0 - t8hwvalr(
+     *hj3ftvzu,ayfnwr1v)) / t8hwvalr(hj3ftvzu,ayfnwr1v)
+      if(xd4mybgjb .lt. n3iasxug)then
+      xd4mybgjb = n3iasxug
+      endif
+      rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb)
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu
+     *ey8(ayfnwr1v,hj3ftvzu)-t8hwvalr(hj3ftvzu,ayfnwr1v)) / xd4mybgja
+      endif
+23432 continue
+23433 continue
+      endif
+      if(qfx3vhct .eq. 5)then
+      fvn3iasxug = 1.0d-20
+      anopu9vi = 34.0d0
+      do23444 ayfnwr1v=1,kuzxj1lo 
+      if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .gt. anopu9vi)then
+      jtnbu2hz = dexp(anopu9vi)
+      lbgwvp3q = .true.
+      else
+      if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .lt. -anopu9vi)then
+      jtnbu2hz = dexp(-anopu9vi)
+      lbgwvp3q = .true.
+      else
+      jtnbu2hz = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v))
+      lbgwvp3q = .false.
+      endif
+      endif
+      call vdgam1(jtnbu2hz, uqnkc6zgd, okobr6tcex)
+      if(okobr6tcex .ne. 1)then
+      call intpr("error in dlgpwe0c okobr6tcex 1: ",-1,okobr6tcex,1)
+      endif
+      xk7dnvei = t8hwvalr(hj3ftvzu,ayfnwr1v)
+      if(xk7dnvei .lt. fvn3iasxug)then
+      xk7dnvei = fvn3iasxug
+      endif
+      dldshape = dlog(tlgduey8(ayfnwr1v,hj3ftvzu)) + dlog(jtnbu2hz) - dl
+     *og(xk7dnvei) + 1.0d0 - uqnkc6zgd - tlgduey8(ayfnwr1v,hj3ftvzu) / x
+     *k7dnvei
+      call vtgam1(jtnbu2hz, uqnkc6zgt, okobr6tcex)
+      if(okobr6tcex .ne. 1)then
+      call intpr("error in dlgpwe0c okobr6tcex 2: ",-1,okobr6tcex,1)
+      endif
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = wmat(ayfnwr1v,1) * jtnbu2hz
+      xd4mybgja = jtnbu2hz * uqnkc6zgt - 1.0d0
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu ) = wmat(ayfnwr1v,1) * jtnbu2hz * xd4
+     *mybgja
+      if(rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) .lt. n3iasxug)then
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = n3iasxug
+      wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftv
+     *zu-1))
+      endif
+      if(rbne6ouj(ayfnwr1v,2*hj3ftvzu) .lt. n3iasxug)then
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu) = n3iasxug
+      wpuarq2m(2*hj3ftvzu,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(2*hj3ftvzu,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftvzu
+     *))
+      endif
+      if(xd4mybgja .lt. fvn3iasxug)then
+      xd4mybgja = fvn3iasxug
+      endif
+      ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) 
+     *+ tlgduey8(ayfnwr1v,hj3ftvzu) / xk7dnvei - 1.0d0
+      ghz9vuba(ayfnwr1v,2*hj3ftvzu ) = m0ibglfx(2*hj3ftvzu ,ayfnwr1v) + 
+     *dldshape / xd4mybgja
+23444 continue
+23445 continue
+      endif
+      if(qfx3vhct .eq. 3)then
+      anopu9vi = 34.0d0
+      fvn3iasxug = 1.0d-20
+      do23464 ayfnwr1v=1,kuzxj1lo 
+      if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .gt. anopu9vi)then
+      hdqsx7bk = dexp(anopu9vi)
+      lbgwvp3q = .true.
+      else
+      if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .lt. -anopu9vi)then
+      hdqsx7bk = dexp(-anopu9vi)
+      lbgwvp3q = .true.
+      else
+      hdqsx7bk = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v))
+      lbgwvp3q = .false.
+      endif
+      endif
+      xk7dnvei = t8hwvalr(hj3ftvzu,ayfnwr1v)
+      if(xk7dnvei .lt. fvn3iasxug)then
+      xk7dnvei = fvn3iasxug
+      endif
+      call vdgam1(tlgduey8(ayfnwr1v,hj3ftvzu) + hdqsx7bk, xd4mybgja, oko
+     *br6tcex)
+      if(okobr6tcex .ne. 1)then
+      endif
+      call vdgam1(hdqsx7bk, xd4mybgjb, okobr6tcex)
+      if(okobr6tcex .ne. 1)then
+      endif
+      dldk = xd4mybgja - xd4mybgjb - (tlgduey8(ayfnwr1v,hj3ftvzu) + hdqs
+     *x7bk) / (xk7dnvei + hdqsx7bk) + 1.0d0 + dlog(hdqsx7bk / (xk7dnvei 
+     *+ hdqsx7bk))
+      dkdeta = hdqsx7bk
+      kkmat(1,1) = hdqsx7bk
+      nm0eljqk(1,1) = xk7dnvei
+      sguwj9ty = 5000
+      call enbin9(bzmd6ftvmat, kkmat, nm0eljqk, n2kersmx, hbsl0gto, dvhw
+     *1ulq, hbsl0gto, ux3nadiw, rsynp1go, sguwj9ty)
+      if(dvhw1ulq .ne. 1)then
+      zjkrtol8 = 5
+      return
+      endif
+      ed2ldk2 = -bzmd6ftvmat(1,1) - 1.0d0 / hdqsx7bk + 1.0d0 / (hdqsx7bk
+     * + xk7dnvei)
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = wmat(ayfnwr1v,1) * xk7dnvei * hd
+     *qsx7bk / (xk7dnvei + hdqsx7bk)
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu ) = wmat(ayfnwr1v,1) * hdqsx7bk * (-b
+     *zmd6ftvmat(1,1)*hdqsx7bk - 1.0d0 + hdqsx7bk / (hdqsx7bk + xk7dnvei
+     *))
+      if(rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) .lt. n3iasxug)then
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = n3iasxug
+      wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftv
+     *zu-1))
+      endif
+      if(rbne6ouj(ayfnwr1v,2*hj3ftvzu) .lt. n3iasxug)then
+      rbne6ouj(ayfnwr1v,2*hj3ftvzu) = n3iasxug
+      wpuarq2m(2*hj3ftvzu,ayfnwr1v) = uaf2xgqy
+      else
+      wpuarq2m(2*hj3ftvzu,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftvzu
+     *))
+      endif
+      ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) 
+     *+ tlgduey8(ayfnwr1v,hj3ftvzu) / xk7dnvei - 1.0d0
+      ghz9vuba(ayfnwr1v,2*hj3ftvzu ) = m0ibglfx(2*hj3ftvzu ,ayfnwr1v) + 
+     *dldk / (dkdeta * ed2ldk2)
+23464 continue
+23465 continue
+      endif
+      if(qfx3vhct .eq. 8)then
+      do23484 ayfnwr1v=1,kuzxj1lo 
+      rbne6ouj(ayfnwr1v,hj3ftvzu) = wmat(ayfnwr1v,1)
+      wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,hj3ftvzu))
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = tlgduey8(ayfnwr1v,hj3ftvzu)
+23484 continue
+23485 continue
+      endif
+      if(unhycz0e .eq. 1)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23490 ayfnwr1v=1,kuzxj1lo 
+      ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) = ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) 
+     *- vm4xjosb(ayfnwr1v)
+23490 continue
+23491 continue
+      else
+      do23492 ayfnwr1v=1,kuzxj1lo 
+      ghz9vuba(ayfnwr1v,hj3ftvzu) = ghz9vuba(ayfnwr1v,hj3ftvzu) - vm4xjo
+     *sb(ayfnwr1v)
+23492 continue
+23493 continue
+      endif
+      endif
+      return
+      end
+      subroutine cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4
+     *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj,
+     * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu,
+     * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm
+     *, y7sdgtqi)
+      implicit logical (a-z)
+      integer xui7hqwl(18), tgiyxdw1(*), dufozmt7(*)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge
+     *s1xpkr(*)
+      double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns)
+     *, kifxa0he(kuzxj1lo,*), wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1
+     *lo), vm4xjosb(kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo)
+      double precision ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy
+     *1vqfzu), wpuarq2m(dimu,kuzxj1lo), w8znmyce(br5ovgcj,*)
+      double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta
+     *(*), y7sdgtqi(*)
+      double precision twk(wy1vqfzu,kuzxj1lo,2), wkmm(wy1vqfzu * (wy1vqf
+     *zu + 1))
+      integer ayfnwr1v, yq6lorbx, gp1jxzuh, hyqwtp6i, ptr, i1loc, i2, iz
+     *ero0, iter, fmzq7aob, xwdf5ltg, dimw, f7svlajr, qfx3vhct, c5aesxku
+     *l
+      integer job, info, qemj9asg, xlpjcg3s, eu3oxvyb, vtsou9pz, unhycz0
+     *e, zaupqv9b
+      integer hbsl0gto, wr0lbopv
+      double precision rpto5qwb, n3iasxug, pvofyg8z, wiptsjx8, uylxqtc7,
+     * bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go
+      double precision hmayv1xt1, hmayv1xt2
+      integer x1jrewny
+      hbsl0gto = 1
+      x1jrewny = 0
+      kifxa0he(1,1) = 1
+      wkmm(1) = 0.0d0
+      xwdf5ltg = xui7hqwl(1)
+      fmzq7aob = xui7hqwl(2)
+      xlpjcg3s = xui7hqwl(3)
+      dimw = xui7hqwl(4)
+      f7svlajr = xui7hqwl(5)
+      qfx3vhct = xui7hqwl(6)
+      c5aesxkul = xui7hqwl(7)
+      xui7hqwl(9) = 0
+      eu3oxvyb = xui7hqwl(11)
+      vtsou9pz = xui7hqwl(12)
+      unhycz0e = xui7hqwl(14)
+      zaupqv9b = xui7hqwl(15)
+      wr0lbopv = xui7hqwl(18)
+      n3iasxug = y7sdgtqi(1)
+      uaf2xgqy = dsqrt(n3iasxug)
+      if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then
+      vsoihn1r = dlog(n3iasxug)
+      endif
+      bh2vgiay = y7sdgtqi(2)
+      rsynp1go = y7sdgtqi(3)
+      uylxqtc7 = 0.0d0
+      izero0 = 0
+      zjkrtol8 = 1
+      call qpsedg8xf(tgiyxdw1, dufozmt7, xwdf5ltg)
+      hyqwtp6i = xwdf5ltg * (xwdf5ltg+1) / 2
+      call flncwkfq72(lncwkfq7, w8znmyce, kuzxj1lo, wy1vqfzu, br5ovgcj, 
+     *xwdf5ltg, qfx3vhct, afpc0kns, fmzq7aob, eu3oxvyb, hyqwtp6i, tgiyxd
+     *w1, dufozmt7, unhycz0e, vm4xjosb)
+653   hmayv1xt2 = 1.0d0
+      if(f7svlajr .eq. 0)then
+      do23498 yq6lorbx=1,afpc0kns 
+      call ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu, afpc
+     *0kns, qfx3vhct, yq6lorbx, wmat, wr0lbopv)
+23498 continue
+23499 continue
+      else
+      if(f7svlajr .eq. 2)then
+      call pkc4ejib(w8znmyce, beta, m0ibglfx, kuzxj1lo, wy1vqfzu, br5ovg
+     *cj, xlpjcg3s, vtsou9pz, izero0, qfx3vhct, unhycz0e, vm4xjosb)
+      endif
+      endif
+      call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf
+     *x3vhct, izero0)
+      if(f7svlajr .eq. 2)then
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, rpto5qwb, izero0, n3iasxug, vsoihn1r
+     *, hbsl0gto)
+      else
+      rpto5qwb = -1.0d0
+      endif
+      do23504 iter=1,c5aesxkul 
+      do23506 yq6lorbx=1,afpc0kns 
+      call dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, rbne6o
+     *uj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqfzu, af
+     *pc0kns, br5ovgcj, dimu, yq6lorbx, qfx3vhct, zjkrtol8, unhycz0e, vm
+     *4xjosb)
+23506 continue
+23507 continue
+      do23508 yq6lorbx=1,xlpjcg3s 
+      do23510 ayfnwr1v=1,br5ovgcj 
+      vc6hatuj(ayfnwr1v,yq6lorbx) = w8znmyce(ayfnwr1v,yq6lorbx)
+23510 continue
+23511 continue
+23508 continue
+23509 continue
+      do23512 yq6lorbx=1,xlpjcg3s 
+      ptr = 1
+      do23514 i1loc=1,kuzxj1lo 
+      do23516 i2=1,wy1vqfzu 
+      vc6hatuj(ptr,yq6lorbx) = wpuarq2m(i2,i1loc) * vc6hatuj(ptr,yq6lorb
+     *x)
+      ptr = ptr + 1
+23516 continue
+23517 continue
+23514 continue
+23515 continue
+23512 continue
+23513 continue
+      do23518 gp1jxzuh=1,xlpjcg3s 
+      ges1xpkr(gp1jxzuh) = gp1jxzuh
+23518 continue
+23519 continue
+      pvofyg8z = 1.0d-7
+      call vqrdca(vc6hatuj,br5ovgcj,br5ovgcj,xlpjcg3s,fasrkub3,ges1xpkr,
+     *twk,qemj9asg,pvofyg8z)
+      if(qemj9asg .ne. xlpjcg3s)then
+      zjkrtol8 = 2
+      return
+      endif
+      do23522 ayfnwr1v=1,kuzxj1lo 
+      do23524 yq6lorbx=1,wy1vqfzu 
+      twk(yq6lorbx,ayfnwr1v,1) = wpuarq2m(yq6lorbx,ayfnwr1v) * ghz9vuba(
+     *ayfnwr1v,yq6lorbx)
+23524 continue
+23525 continue
+23522 continue
+23523 continue
+      job = 101
+      call vdqrsl(vc6hatuj,br5ovgcj,br5ovgcj,qemj9asg,fasrkub3, twk, uyl
+     *xqtc7, twk(1,1,2), beta, uylxqtc7,m0ibglfx,job,info)
+      do23526 ayfnwr1v=1,kuzxj1lo 
+      do23528 yq6lorbx=1,wy1vqfzu 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) / wpuarq
+     *2m(yq6lorbx,ayfnwr1v)
+23528 continue
+23529 continue
+23526 continue
+23527 continue
+      if(unhycz0e .eq. 1)then
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23534 ayfnwr1v=1,kuzxj1lo 
+      do23536 yq6lorbx=1,afpc0kns 
+      m0ibglfx(2*yq6lorbx-1,ayfnwr1v) = m0ibglfx(2*yq6lorbx-1,ayfnwr1v) 
+     *+ vm4xjosb(ayfnwr1v)
+23536 continue
+23537 continue
+23534 continue
+23535 continue
+      else
+      do23538 ayfnwr1v=1,kuzxj1lo 
+      do23540 yq6lorbx=1,wy1vqfzu 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + vm4xjo
+     *sb(ayfnwr1v)
+23540 continue
+23541 continue
+23538 continue
+23539 continue
+      endif
+      endif
+      call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf
+     *x3vhct, izero0)
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,izero0,n3iasxug,vsoihn1r, h
+     *bsl0gto)
+      wiptsjx8 = dabs(tlq9wpes - rpto5qwb) / (1.0d0 + dabs(tlq9wpes))
+      if(wiptsjx8 .lt. bh2vgiay)then
+      zjkrtol8 = 0
+      xui7hqwl(8) = iter
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,izero0,n3iasxug,vsoihn1r, i
+     *zero0)
+      endif
+      x1jrewny = 1
+      goto 20097
+      else
+      rpto5qwb = tlq9wpes
+      x1jrewny = 0
+      endif
+23504 continue
+23505 continue
+20097 hmayv1xt1 = 0.0d0
+      if(x1jrewny .eq. 1)then
+      return
+      endif
+      if(f7svlajr .eq. 1 .or. f7svlajr .eq. 2)then
+      f7svlajr = 0
+      xui7hqwl(9) = 1
+      goto 653
+      endif
+      zjkrtol8 = 3
+      return
+      end
+      subroutine cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4
+     *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj,
+     * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu,
+     * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm
+     *, y7sdgtqi)
+      implicit logical (a-z)
+      integer xui7hqwl(18), tgiyxdw1(*), dufozmt7(*)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge
+     *s1xpkr(*)
+      double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns)
+     *, wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1lo), vm4xjosb(kuzxj1lo
+     *), t8hwvalr(afpc0kns,kuzxj1lo), kifxa0he(kuzxj1lo,*), ghz9vuba(kuz
+     *xj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu), wpuarq2m(dimu,kuzxj1
+     *lo), w8znmyce(br5ovgcj,*)
+      double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta
+     *(*), y7sdgtqi(*)
+      double precision twk(br5ovgcj,3), wkmm(wy1vqfzu*(wy1vqfzu+1))
+      integer ayfnwr1v, hj3ftvzu, hyqwtp6i, izero0, iter, fmzq7aob, unhy
+     *cz0e, xwdf5ltg, dimw, f7svlajr, qfx3vhct, c5aesxkul
+      integer job, info, qemj9asg, xlpjcg3s, vtsou9pz, zaupqv9b
+      integer hbsl0gto, p1, wr0lbopv
+      double precision rpto5qwb, n3iasxug, pvofyg8z, wiptsjx8, uylxqtc7,
+     * bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go
+      integer gp1jxzuh
+      double precision aqg1vdmo, hmayv1xt
+      aqg1vdmo = 0.0d0
+      hbsl0gto = 1
+      wkmm(1) = 1.0d0
+      call intpr("entering cqo1f hbsl0gto ------------------------------
+     *-: ",-1,hbsl0gto,1)
+      call intpr("in cqo1f afpc0kns: ",-1,afpc0kns,1)
+      xwdf5ltg = xui7hqwl(1)
+      fmzq7aob = xui7hqwl(2)
+      xlpjcg3s = xui7hqwl(3)
+      dimw = xui7hqwl(4)
+      f7svlajr = xui7hqwl(5)
+      qfx3vhct = xui7hqwl(6)
+      c5aesxkul = xui7hqwl(7)
+      xui7hqwl(9) = 0
+      vtsou9pz = xui7hqwl(12)
+      if(vtsou9pz .ne. 1)then
+      zjkrtol8 = 4
+      return
+      endif
+      unhycz0e = xui7hqwl(14)
+      zaupqv9b = xui7hqwl(15)
+      p1 = xui7hqwl(16)
+      wr0lbopv = xui7hqwl(18)
+      call intpr("Entry to cqo1f: f7svlajr ",-1,f7svlajr,1)
+      n3iasxug = y7sdgtqi(1)
+      uaf2xgqy = dsqrt(n3iasxug)
+      if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then
+      vsoihn1r = dlog(n3iasxug)
+      endif
+      bh2vgiay = y7sdgtqi(2)
+      rsynp1go = y7sdgtqi(3)
+      uylxqtc7 = 0.0d0
+      izero0 = 0
+      zjkrtol8 = 1
+      call qpsedg8xf(tgiyxdw1, dufozmt7, xwdf5ltg)
+      hyqwtp6i = xwdf5ltg * (xwdf5ltg+1) / 2
+      call flncwkfq71(lncwkfq7, w8znmyce, kuzxj1lo, xwdf5ltg, qfx3vhct, 
+     *vm4xjosb, br5ovgcj, xlpjcg3s, hyqwtp6i, tgiyxdw1, dufozmt7, kifxa0
+     *he, p1, unhycz0e)
+      call dblepr("cqo1f: vm4xjosb()",-1,vm4xjosb,kuzxj1lo)
+      call dblepr("cqo1f: w8znmyce(,)",-1,w8znmyce,br5ovgcj*xlpjcg3s)
+      call dblepr("cqo1f: wmat(,1)",-1,wmat(1,1),kuzxj1lo)
+      do23554 hj3ftvzu=1,afpc0kns 
+      call intpr("cqo1f: hj3ftvzu======================: ",-1,hj3ftvzu,1
+     *)
+653   hmayv1xt = 1.0d0
+      if(f7svlajr .eq. 0)then
+      call intpr("cqo1f: calling ietam6 ",-1,hj3ftvzu,1)
+      call ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu, afpc
+     *0kns, qfx3vhct, hj3ftvzu, wmat, wr0lbopv)
+      else
+      if(f7svlajr .eq. 2)then
+      call intpr("cqo1f: calling pkc4ejib; vtsou9pz== ",-1,vtsou9pz,1)
+      call pkc4ejib(w8znmyce, beta(1+(hj3ftvzu-1)*xlpjcg3s), m0ibglfx, k
+     *uzxj1lo, wy1vqfzu, br5ovgcj, xlpjcg3s, vtsou9pz, hj3ftvzu, qfx3vhc
+     *t, unhycz0e, vm4xjosb)
+      endif
+      endif
+      call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf
+     *x3vhct, hj3ftvzu)
+      if(f7svlajr .eq. 2)then
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, rpto5qwb, hj3ftvzu, n3iasxug, vsoihn
+     *1r, hbsl0gto)
+      else
+      rpto5qwb = -1.0d0
+      endif
+      do23562 iter=1,c5aesxkul 
+      call intpr("iter: ",-1,iter,1)
+      call intpr("posn 7: ",-1,hbsl0gto,1)
+      call intpr("qfx3vhct: ",-1,qfx3vhct,1)
+      call dblepr("rpto5qwb",-1,rpto5qwb,1)
+      call dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, rbne6o
+     *uj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqfzu, af
+     *pc0kns, br5ovgcj, dimu, hj3ftvzu, qfx3vhct, zjkrtol8, unhycz0e, vm
+     *4xjosb)
+      call dblepr("cqo1f: m0ibglfx",-1,m0ibglfx,wy1vqfzu*kuzxj1lo)
+      call dblepr("cqo1f: wpuarq2m",-1,wpuarq2m,dimu*kuzxj1lo)
+      call dblepr("cqo1f: ghz9vuba",-1,ghz9vuba,kuzxj1lo*wy1vqfzu)
+      call dblepr("cqo1f: rbne6ouj",-1,rbne6ouj,kuzxj1lo*wy1vqfzu)
+      do23564 gp1jxzuh=1,xlpjcg3s 
+      do23566 ayfnwr1v=1,br5ovgcj 
+      vc6hatuj(ayfnwr1v,gp1jxzuh) = w8znmyce(ayfnwr1v,gp1jxzuh)
+23566 continue
+23567 continue
+23564 continue
+23565 continue
+      call intpr("posn 3: ",-1,hbsl0gto,1)
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23570 gp1jxzuh=1,xlpjcg3s 
+      do23572 ayfnwr1v=1,kuzxj1lo 
+      vc6hatuj(2*ayfnwr1v-1,gp1jxzuh) = wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) 
+     ** vc6hatuj(2*ayfnwr1v-1,gp1jxzuh)
+      vc6hatuj(2*ayfnwr1v ,gp1jxzuh) = wpuarq2m(2*hj3ftvzu ,ayfnwr1v) * 
+     *vc6hatuj(2*ayfnwr1v ,gp1jxzuh)
+23572 continue
+23573 continue
+23570 continue
+23571 continue
+      else
+      do23574 gp1jxzuh=1,xlpjcg3s 
+      do23576 ayfnwr1v=1,kuzxj1lo 
+      vc6hatuj(ayfnwr1v,gp1jxzuh) = wpuarq2m(hj3ftvzu,ayfnwr1v) * vc6hat
+     *uj(ayfnwr1v,gp1jxzuh)
+23576 continue
+23577 continue
+23574 continue
+23575 continue
+      endif
+      call intpr("posn 4: ",-1,hbsl0gto,1)
+      do23578 gp1jxzuh=1,xlpjcg3s 
+      ges1xpkr(gp1jxzuh) = gp1jxzuh
+23578 continue
+23579 continue
+      call dblepr("cqo1f: vc6hatuj",-1,vc6hatuj,br5ovgcj*xlpjcg3s)
+      call intpr("iter: ",-1,iter,1)
+      pvofyg8z = 1.0d-7
+      call vqrdca(vc6hatuj,br5ovgcj,br5ovgcj,xlpjcg3s,fasrkub3,ges1xpkr,
+     *twk,qemj9asg,pvofyg8z)
+      call intpr("ges1xpkr: ",-1,ges1xpkr,xlpjcg3s)
+      if(qemj9asg .ne. xlpjcg3s)then
+      zjkrtol8 = 2
+      return
+      endif
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23584 ayfnwr1v=1,kuzxj1lo 
+      twk(2*ayfnwr1v-1,1) = wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) * ghz9vuba(a
+     *yfnwr1v,2*hj3ftvzu-1)
+      twk(2*ayfnwr1v ,1) = wpuarq2m(2*hj3ftvzu ,ayfnwr1v) * ghz9vuba(ayf
+     *nwr1v,2*hj3ftvzu )
+23584 continue
+23585 continue
+      else
+      do23586 ayfnwr1v=1,kuzxj1lo
+      twk(ayfnwr1v,1) = wpuarq2m(hj3ftvzu,ayfnwr1v) * ghz9vuba(ayfnwr1v,
+     *hj3ftvzu)
+23586 continue
+23587 continue
+      endif
+      call intpr("posn 5: ",-1,hbsl0gto,1)
+      job = 101
+      call intpr("posn 6: ",-1,hbsl0gto,1)
+      call vdqrsl(vc6hatuj,br5ovgcj,br5ovgcj,qemj9asg,fasrkub3, twk(1,1)
+     *, uylxqtc7, twk(1,2), beta(1+(hj3ftvzu-1)*xlpjcg3s), uylxqtc7,twk(
+     *1,3),job,info)
+      call dblepr("beta(1+(hj3ftvzu-1)*xlpjcg3s)",-1,beta(1+(hj3ftvzu-1)
+     **xlpjcg3s),xlpjcg3s)
+      if(zaupqv9b .gt. 1)then
+      endif
+      do23590 gp1jxzuh=1,xlpjcg3s 
+      twk(gp1jxzuh,1) = beta((hj3ftvzu-1)*xlpjcg3s + gp1jxzuh)
+23590 continue
+23591 continue
+      do23592 gp1jxzuh=1,xlpjcg3s 
+      beta((hj3ftvzu-1)*xlpjcg3s + ges1xpkr(gp1jxzuh)) = twk(gp1jxzuh,1)
+23592 continue
+23593 continue
+      call intpr("posn 7: ",-1,hbsl0gto,1)
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      do23596 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = twk(2*ayfnwr1v-1,3) / wpuarq2m(2
+     **hj3ftvzu-1,ayfnwr1v)
+      m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = twk(2*ayfnwr1v ,3) / wpuarq2m(2*h
+     *j3ftvzu ,ayfnwr1v)
+23596 continue
+23597 continue
+      if(unhycz0e .eq. 1)then
+      do23600 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) 
+     *+ vm4xjosb(ayfnwr1v)
+23600 continue
+23601 continue
+      endif
+      else
+      do23602 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = twk(ayfnwr1v,3) / wpuarq2m(hj3ftvzu,
+     *ayfnwr1v)
+23602 continue
+23603 continue
+      if(unhycz0e .eq. 1)then
+      do23606 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(hj3ftvzu,ayfnwr1v) = m0ibglfx(hj3ftvzu,ayfnwr1v) + vm4xjo
+     *sb(ayfnwr1v)
+23606 continue
+23607 continue
+      endif
+      endif
+      call intpr("posn 8: ",-1,hbsl0gto,1)
+      call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf
+     *x3vhct, hj3ftvzu)
+      call intpr("posn 8b: ",-1,hbsl0gto,1)
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,hj3ftvzu,n3iasxug,vsoihn1r,
+     *hbsl0gto)
+      call intpr("posn 8c: ",-1,hbsl0gto,1)
+      wiptsjx8 = dabs(tlq9wpes - rpto5qwb) / (1.0d0 + dabs(tlq9wpes))
+      call intpr("cqo1f: iter -------------",-1,iter,1)
+      call dblepr("cqo1f: wiptsjx8",-1,wiptsjx8,1)
+      if(wiptsjx8 .lt. bh2vgiay)then
+      zjkrtol8 = 0
+      xui7hqwl(8)=iter
+      call intpr("cqo1f xui7hqwl(8): ",-1,xui7hqwl(8),1)
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,hj3ftvzu,n3iasxug,vsoihn1r,
+     * izero0)
+      endif
+      aqg1vdmo = aqg1vdmo + tlq9wpes
+      goto 1011
+      else
+      rpto5qwb = tlq9wpes
+      endif
+      call intpr("posn 9: ",-1,hbsl0gto,1)
+23562 continue
+23563 continue
+      call intpr("cqo1f; unsuccessful convergence: ",-1,hbsl0gto,1)
+      if(f7svlajr .eq. 1)then
+      f7svlajr = 0
+      xui7hqwl(9) = 1
+      goto 653
+      endif
+      zjkrtol8 = 3
+1011  hmayv1xt = 1.0d0
+23554 continue
+23555 continue
+      call intpr("exiting cqo1f hbsl0gto ============================ : 
+     *",-1,hbsl0gto,1)
+      tlq9wpes = aqg1vdmo
+      return
+      end
+      subroutine vcao6f(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, gh
+     *z9vuba, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo
+     *, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, tlq9wpes
+     *, beta, twk, wkmm, y7sdgtqi, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up
+     *, nef, which, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, 
+     *hjm2ktyr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkm
+     *m, work3, sgdub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbz
+     *jkpi3, acpios9q, itwk, jwbkl9fp)
+      implicit logical (a-z)
+      integer xui7hqwl(19)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge
+     *s1xpkr(*)
+      double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns)
+     *, wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns
+     *,kuzxj1lo)
+      double precision ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy
+     *1vqfzu), wpuarq2m(dimu,kuzxj1lo)
+      double precision vc6hatuj(br5ovgcj,2), fasrkub3(*), tlq9wpes, beta
+     *(*), y7sdgtqi(*)
+      double precision twk(br5ovgcj,3), wkmm(wy1vqfzu*(wy1vqfzu+1))
+      integer hj3ftvzu, ehtjigf4, izero0, iter, xwdf5ltg, dimw, f7svlajr
+     *, qfx3vhct, c5aesxkul
+      integer vtsou9pz, zaupqv9b, xlpjcg3s
+      integer hbsl0gto, sedf7mxb
+      double precision rpto5qwb, n3iasxug, wiptsjx8, uylxqtc7, bh2vgiay,
+     * uaf2xgqy, vsoihn1r, rsynp1go
+      double precision aqg1vdmo, hmayv1xt
+      integer psdvgce3(15), qfozcl5b, ezlgm2up(*),nef(*),which(*), jnxpu
+     *ym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*)
+      integer wr0lbopv, acpios9q(*), itwk(*), jwbkl9fp(*)
+      integer nbzjkpi3(*)
+      double precision kiye1wjz(*)
+      double precision ub4xioar(qfozcl5b,kuzxj1lo), kispwgx3(kuzxj1lo,*)
+     *,s0(wy1vqfzu), zyodca3j(qfozcl5b,kuzxj1lo), lxyst1eb(qfozcl5b,kuzx
+     *j1lo), mbvnaor6(kuzxj1lo,*), hjm2ktyr(qfozcl5b,*), work1(*), wk2(k
+     *uzxj1lo,qfozcl5b), work3(*), sgdub(*), bmb(*), ifys6woa(*), mwk(*)
+     *, rpyis2kc(*), zv2xfhei(*)
+      integer qes4mujl
+      integer ayfnwr1v, kij0gwer, xumj5dnk
+      integer irhm4cfa, lyma1kwc
+      double precision xbignn(2), lncrw8mg, ufkq9rpg, r3eoxkzp, wld4qctn
+      double precision zpcqv3uj, resss
+      double precision vm4xjosb(2)
+      lncrw8mg=0.0d0
+      ufkq9rpg=0.0d0
+      r3eoxkzp=0.0d0
+      wld4qctn=0.0d0
+      irhm4cfa = xui7hqwl(19)
+      aqg1vdmo = 0.0d0
+      hbsl0gto = 1
+      wkmm(1) = 1.0d0
+      twk(1,1) = 1.0d0
+      xwdf5ltg = xui7hqwl(1)
+      xlpjcg3s = xui7hqwl(3)
+      dimw = xui7hqwl(4)
+      f7svlajr = xui7hqwl(5)
+      qfx3vhct = xui7hqwl(6)
+      c5aesxkul = xui7hqwl(7)
+      xui7hqwl(9) = 0
+      lyma1kwc = xui7hqwl(11)
+      vtsou9pz = xui7hqwl(12)
+      if((vtsou9pz .ne. 1) .or. (lyma1kwc .ne. xwdf5ltg))then
+      zjkrtol8 = 4
+      return
+      endif
+      zaupqv9b = xui7hqwl(15)
+      wr0lbopv = xui7hqwl(18)
+      zpcqv3uj = y7sdgtqi(3+afpc0kns+afpc0kns+2)
+      n3iasxug = y7sdgtqi(1)
+      uaf2xgqy = dsqrt(n3iasxug)
+      if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then
+      vsoihn1r = dlog(n3iasxug)
+      endif
+      bh2vgiay = y7sdgtqi(2)
+      rsynp1go = y7sdgtqi(3)
+      uylxqtc7 = 0.0d0
+      izero0 = 0
+      zjkrtol8 = 1
+      do23618 hj3ftvzu=1,afpc0kns 
+653   hmayv1xt = 1.0d0
+      if(f7svlajr .eq. 0)then
+      call ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu, afpc
+     *0kns, qfx3vhct, hj3ftvzu, wmat, wr0lbopv)
+      else
+      if(f7svlajr .ne. 1)then
+      zjkrtol8 = 6
+      return
+      endif
+      endif
+      call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf
+     *x3vhct, hj3ftvzu)
+      if(f7svlajr .eq. 2)then
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, rpto5qwb, hj3ftvzu, n3iasxug, vsoihn
+     *1r, hbsl0gto)
+      else
+      rpto5qwb = -1.0d0
+      endif
+      do23626 iter=1,c5aesxkul 
+      call flncwkfq76(lncwkfq7, vc6hatuj, kuzxj1lo, br5ovgcj, xwdf5ltg, 
+     *qfx3vhct)
+      psdvgce3(7) = 0
+      call dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, rbne6o
+     *uj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqfzu, af
+     *pc0kns, br5ovgcj, dimu, hj3ftvzu, qfx3vhct, zjkrtol8, izero0, vm4x
+     *josb)
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      qes4mujl = 2*hj3ftvzu-1
+      else
+      qes4mujl = hj3ftvzu
+      endif
+      do23630 kij0gwer=1,qfozcl5b 
+      do23632 ayfnwr1v=1,kuzxj1lo 
+      zyodca3j(kij0gwer,ayfnwr1v) = wpuarq2m(qes4mujl-1+kij0gwer,ayfnwr1
+     *v)
+      lxyst1eb(kij0gwer,ayfnwr1v) = m0ibglfx(qes4mujl-1+kij0gwer,ayfnwr1
+     *v)
+23632 continue
+23633 continue
+23630 continue
+23631 continue
+      sedf7mxb = lyma1kwc * afpc0kns
+      ehtjigf4 = xwdf5ltg * (hj3ftvzu-1)
+      if(iter .eq. 1)then
+      lncrw8mg = kiye1wjz( ehtjigf4 + hnpt1zym(1))
+      ufkq9rpg = kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(1))
+      if(xwdf5ltg .eq. 2)then
+      r3eoxkzp = kiye1wjz( ehtjigf4 + hnpt1zym(2))
+      wld4qctn = kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(2))
+      endif
+      do23638 kij0gwer=1,lyma1kwc 
+      do23640 ayfnwr1v=1,kuzxj1lo 
+      kispwgx3(ayfnwr1v,ehtjigf4 + hnpt1zym(kij0gwer)) = 0.0d0
+23640 continue
+23641 continue
+23638 continue
+23639 continue
+      else
+      kiye1wjz( ehtjigf4 + hnpt1zym(1)) = lncrw8mg
+      kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(1)) = ufkq9rpg
+      if(xwdf5ltg .eq. 2)then
+      kiye1wjz( ehtjigf4 + hnpt1zym(2)) = r3eoxkzp
+      kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(2)) = wld4qctn
+      endif
+      endif
+      call vbfa(irhm4cfa,kuzxj1lo,qfozcl5b,psdvgce3, mbvnaor6, ghz9vuba(
+     *1,qes4mujl), rbne6ouj(1,qes4mujl), kiye1wjz( ehtjigf4 + hnpt1zym(1
+     *)), kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(1)), ezlgm2up,nef,whic
+     *h, ub4xioar,kispwgx3(1,ehtjigf4 + hnpt1zym(1)), lxyst1eb,s0, beta(
+     *1+(hj3ftvzu-1)*xlpjcg3s), cov,zpcqv3uj, vc6hatuj,fasrkub3, ges1xpk
+     *r, xbignn, zyodca3j, hjm2ktyr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nb
+     *fjc, work1, wk2, wwkmm, work3, sgdub, bmb, ifys6woa, mwk, ttwk, rp
+     *yis2kc(1+(hj3ftvzu-1)*(nbzjkpi3(1+xwdf5ltg)-1)), zv2xfhei, resss, 
+     *nbzjkpi3, acpios9q, itwk, jwbkl9fp)
+      y7sdgtqi(3+afpc0kns+afpc0kns+1) = resss
+      xumj5dnk = psdvgce3(14)
+      if(xumj5dnk .ne. 0)then
+      call intpr("vcao6f: exiting because of an error",-1,xumj5dnk,1)
+      zjkrtol8 = 8
+      return
+      endif
+      do23646 kij0gwer=1,qfozcl5b 
+      do23648 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(qes4mujl-1+kij0gwer,ayfnwr1v) = lxyst1eb(kij0gwer,ayfnwr1
+     *v)
+23648 continue
+23649 continue
+23646 continue
+23647 continue
+      call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf
+     *x3vhct, hj3ftvzu)
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes, hj3ftvzu, n3iasxug, vsoihn
+     *1r, hbsl0gto)
+      wiptsjx8 = dabs(tlq9wpes - rpto5qwb) / (1.0d0 + dabs(tlq9wpes))
+      if(wiptsjx8 .lt. bh2vgiay)then
+      zjkrtol8 = 0
+      xui7hqwl(8) = iter
+      if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then
+      call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf
+     *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,hj3ftvzu,n3iasxug,vsoihn1r,
+     * izero0)
+      endif
+      aqg1vdmo = aqg1vdmo + tlq9wpes
+      goto 1011
+      else
+      rpto5qwb = tlq9wpes
+      endif
+23626 continue
+23627 continue
+      if(f7svlajr .eq. 1)then
+      f7svlajr = 0
+      xui7hqwl(9) = 1
+      goto 653
+      endif
+      zjkrtol8 = 3
+1011  hmayv1xt = 1.0d0
+23618 continue
+23619 continue
+      tlq9wpes = aqg1vdmo
+      return
+      end
+      subroutine dcqof(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4
+     *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj,
+     * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu,
+     * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm
+     *, y7sdgtqi, atujnxb8, yxiwebc5, k7hulceq, p2, kpzavbj3, ydcnh9xl, 
+     *ajul8wkv)
+      implicit logical (a-z)
+      integer xui7hqwl(19), tgiyxdw1(*), dufozmt7(*)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge
+     *s1xpkr(*)
+      integer vtsou9pz
+      double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns)
+     *, kifxa0he(kuzxj1lo,*), wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1
+     *lo), vm4xjosb(kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo), ghz9vuba(kuz
+     *xj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu), wpuarq2m(dimu,kuzxj1
+     *lo), w8znmyce(br5ovgcj,*)
+      double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta
+     *(*), y7sdgtqi(*)
+      double precision twk(wy1vqfzu,kuzxj1lo,*), wkmm(wy1vqfzu*(wy1vqfzu
+     *+1))
+      integer p2
+      double precision atujnxb8(kuzxj1lo,p2), yxiwebc5(kuzxj1lo,*), k7hu
+     *lceq(p2,*), kpzavbj3(p2,*), ydcnh9xl, ajul8wkv(*)
+      integer ayfnwr1v, xvr7bonh, hpmwnav2, xwdf5ltg, idlosrw8, gp1jxzuh
+     *, exrkcn5d, wr0lbopv
+      double precision summ, dev0
+      xwdf5ltg = xui7hqwl(1)
+      idlosrw8 = xui7hqwl(5)
+      vtsou9pz = xui7hqwl(12)
+      exrkcn5d = xui7hqwl(13)
+      wr0lbopv = xui7hqwl(18)
+      do23656 hpmwnav2=1,xwdf5ltg 
+      do23658 ayfnwr1v=1,kuzxj1lo 
+      summ = 0.0d0
+      do23660 xvr7bonh=1,p2 
+      summ = summ + atujnxb8(ayfnwr1v,xvr7bonh) * k7hulceq(xvr7bonh,hpmw
+     *nav2)
+23660 continue
+23661 continue
+      yxiwebc5(ayfnwr1v,hpmwnav2) = summ
+      lncwkfq7(ayfnwr1v,hpmwnav2) = summ
+23658 continue
+23659 continue
+23656 continue
+23657 continue
+      if(vtsou9pz.eq.1)then
+      call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd
+     *gtqi)
+      else
+      call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd
+     *gtqi)
+      endif
+      do23664 xvr7bonh=1,p2 
+      do23666 ayfnwr1v=1,kuzxj1lo 
+      atujnxb8(ayfnwr1v,xvr7bonh) = ydcnh9xl * atujnxb8(ayfnwr1v,xvr7bon
+     *h)
+23666 continue
+23667 continue
+23664 continue
+23665 continue
+      do23668 hpmwnav2=1,xwdf5ltg 
+      do23670 xvr7bonh=1,p2 
+      do23672 ayfnwr1v=1,kuzxj1lo 
+      lncwkfq7(ayfnwr1v,hpmwnav2)=yxiwebc5(ayfnwr1v,hpmwnav2)+atujnxb8(a
+     *yfnwr1v,xvr7bonh)
+23672 continue
+23673 continue
+      xui7hqwl(5) = 2
+      do23674 gp1jxzuh=1,exrkcn5d 
+      beta(gp1jxzuh) = ajul8wkv(gp1jxzuh)
+23674 continue
+23675 continue
+      if(vtsou9pz.eq.1)then
+      call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd
+     *gtqi)
+      else
+      call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd
+     *gtqi)
+      endif
+      if(zjkrtol8 .ne. 0)then
+      return
+      endif
+      kpzavbj3(xvr7bonh,hpmwnav2) = (tlq9wpes - dev0) / ydcnh9xl
+23670 continue
+23671 continue
+      if(xwdf5ltg .gt. 1)then
+      do23682 ayfnwr1v=1,kuzxj1lo 
+      lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2)
+23682 continue
+23683 continue
+      endif
+23668 continue
+23669 continue
+      xui7hqwl(5) = idlosrw8
+      return
+      end
+      subroutine vdcaof(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, gh
+     *z9vuba, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo
+     *, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, tlq9wpes
+     *, beta, twk, wkmm, y7sdgtqi, atujnxb8, yxiwebc5, k7hulceq, p2, kpz
+     *avbj3, ajul8wkv, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up, nef, which
+     *, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, hjm2ktyr, jn
+     *xpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkmm, work3, sg
+     *dub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbzjkpi3, acpio
+     *s9q, itwk, jwbkl9fp)
+      implicit logical (a-z)
+      integer xui7hqwl(19)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge
+     *s1xpkr(*)
+      integer vtsou9pz
+      double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns)
+     *, wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns
+     *,kuzxj1lo), ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfz
+     *u), wpuarq2m(dimu,kuzxj1lo)
+      double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta
+     *(*), y7sdgtqi(*)
+      double precision twk(wy1vqfzu,kuzxj1lo,*)
+      double precision wkmm(wy1vqfzu*(wy1vqfzu+1))
+      integer p2
+      double precision atujnxb8(kuzxj1lo,p2), yxiwebc5(kuzxj1lo,*), k7hu
+     *lceq(p2,*), kpzavbj3(p2,*), ydcnh9xl, ajul8wkv(*)
+      integer ayfnwr1v, pp, hpmwnav2, xwdf5ltg, idlosrw8, exrkcn5d, wr0l
+     *bopv
+      double precision summ, dev0
+      integer psdvgce3(15), qfozcl5b, ezlgm2up(*),nef(*),which(*), jnxpu
+     *ym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*), nbzjkpi3(2), acpios
+     *9q(*), itwk(*), jwbkl9fp(2)
+      double precision kiye1wjz(*)
+      double precision ub4xioar(qfozcl5b,kuzxj1lo), kispwgx3(kuzxj1lo,*)
+     *,s0(wy1vqfzu), zyodca3j(qfozcl5b,kuzxj1lo)
+      double precision lxyst1eb(qfozcl5b,kuzxj1lo), mbvnaor6(kuzxj1lo,*)
+     *, hjm2ktyr(qfozcl5b,*), work1(*), wk2(kuzxj1lo,qfozcl5b), work3(*)
+     *, sgdub(*), bmb(*), ifys6woa(*), mwk(*), rpyis2kc(*), zv2xfhei(*),
+     * resss
+      integer irhm4cfa
+      double precision zpcqv3uj
+      resss = 0.0d0
+      irhm4cfa = 0
+      xwdf5ltg = xui7hqwl(1)
+      idlosrw8 = xui7hqwl(5)
+      vtsou9pz = xui7hqwl(12)
+      exrkcn5d = xui7hqwl(13)
+      wr0lbopv = xui7hqwl(18)
+      zpcqv3uj = y7sdgtqi(3+afpc0kns+afpc0kns+2)
+      ydcnh9xl = y7sdgtqi(3+afpc0kns+afpc0kns+3)
+      do23684 hpmwnav2=1,xwdf5ltg 
+      do23686 ayfnwr1v=1,kuzxj1lo 
+      summ = 0.0d0
+      do23688 pp=1,p2 
+      summ = summ + atujnxb8(ayfnwr1v,pp) * k7hulceq(pp,hpmwnav2)
+23688 continue
+23689 continue
+      yxiwebc5(ayfnwr1v,hpmwnav2) = summ
+      lncwkfq7(ayfnwr1v,hpmwnav2) = summ
+23686 continue
+23687 continue
+23684 continue
+23685 continue
+      if(vtsou9pz.eq.1)then
+      call vcao6f(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba
+     *, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo, wy1v
+     *qfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, dev0, ajul8wkv
+     *, twk, wkmm, y7sdgtqi, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up, nef,
+     * which, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, hjm2kt
+     *yr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkmm, wor
+     *k3, sgdub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbzjkpi3,
+     * acpios9q, itwk, jwbkl9fp)
+      y7sdgtqi(3+afpc0kns+afpc0kns+1) = resss
+      else
+      endif
+      do23692 pp=1,p2 
+      do23694 ayfnwr1v=1,kuzxj1lo 
+      atujnxb8(ayfnwr1v,pp) = ydcnh9xl * atujnxb8(ayfnwr1v,pp)
+23694 continue
+23695 continue
+23692 continue
+23693 continue
+      do23696 hpmwnav2=1,xwdf5ltg 
+      do23698 pp=1,p2 
+      do23700 ayfnwr1v=1,kuzxj1lo 
+      lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) + atujnx
+     *b8(ayfnwr1v,pp)
+23700 continue
+23701 continue
+      xui7hqwl(5) = 0
+      if(vtsou9pz.eq.1)then
+      call vcao6f(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba
+     *, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo, wy1v
+     *qfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, tlq9wpes, beta
+     *, twk, wkmm, y7sdgtqi, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up, nef,
+     * which, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, hjm2kt
+     *yr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkmm, wor
+     *k3, sgdub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbzjkpi3,
+     * acpios9q, itwk, jwbkl9fp)
+      y7sdgtqi(3+afpc0kns+afpc0kns+1) = resss
+      else
+      endif
+      if(zjkrtol8 .ne. 0)then
+      return
+      endif
+      kpzavbj3(pp,hpmwnav2) = (tlq9wpes - dev0) / ydcnh9xl
+23698 continue
+23699 continue
+      if(xwdf5ltg .gt. 1)then
+      do23708 ayfnwr1v=1,kuzxj1lo 
+      lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2)
+23708 continue
+23709 continue
+      endif
+23696 continue
+23697 continue
+      xui7hqwl(5) = idlosrw8
+      return
+      end
+      subroutine duqof(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4
+     *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj,
+     * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu,
+     * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm
+     *, y7sdgtqi, yxiwebc5, kpzavbj3, ydcnh9xl, ajul8wkv)
+      implicit logical (a-z)
+      integer xui7hqwl(19), tgiyxdw1(*), dufozmt7(*)
+      integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge
+     *s1xpkr(*)
+      integer vtsou9pz
+      double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns)
+     *, kifxa0he(kuzxj1lo,*), wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1
+     *lo), vm4xjosb(kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo), ghz9vuba(kuz
+     *xj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu), wpuarq2m(dimu,kuzxj1
+     *lo), w8znmyce(br5ovgcj,*)
+      double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta
+     *(*), y7sdgtqi(*)
+      double precision twk(wy1vqfzu,kuzxj1lo,*), wkmm(wy1vqfzu*(wy1vqfzu
+     *+1))
+      double precision yxiwebc5(kuzxj1lo,*), kpzavbj3(kuzxj1lo,*), ydcnh
+     *9xl, ajul8wkv(*)
+      integer ayfnwr1v, hpmwnav2, xwdf5ltg, idlosrw8, gp1jxzuh, exrkcn5d
+      double precision dev0
+      xwdf5ltg = xui7hqwl(1)
+      idlosrw8 = xui7hqwl(5)
+      vtsou9pz = xui7hqwl(12)
+      exrkcn5d = xui7hqwl(13)
+      if(vtsou9pz.eq.1)then
+      call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd
+     *gtqi)
+      else
+      call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd
+     *gtqi)
+      endif
+      do23712 hpmwnav2=1,xwdf5ltg 
+      do23714 ayfnwr1v=1,kuzxj1lo 
+      lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) + ydcnh9
+     *xl
+      xui7hqwl(5) = 2
+      do23716 gp1jxzuh=1,exrkcn5d 
+      beta(gp1jxzuh) = ajul8wkv(gp1jxzuh)
+23716 continue
+23717 continue
+      if(vtsou9pz.eq.1)then
+      call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd
+     *gtqi)
+      else
+      call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb,
+     * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk
+     *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt
+     *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd
+     *gtqi)
+      endif
+      if(zjkrtol8 .ne. 0)then
+      return
+      endif
+      kpzavbj3(ayfnwr1v,hpmwnav2) = (tlq9wpes - dev0) / ydcnh9xl
+      lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2)
+23714 continue
+23715 continue
+23712 continue
+23713 continue
+      xui7hqwl(5) = idlosrw8
+      return
+      end
diff --git a/src/ei.f b/src/ei.f
new file mode 100644
index 0000000..5575ee2
--- /dev/null
+++ b/src/ei.f
@@ -0,0 +1,535 @@
+      SUBROUTINE calcei(ARG,RESULT,INT)
+C----------------------------------------------------------------------
+C
+C This Fortran 77 packet computes the exponential integrals Ei(x),
+C  E1(x), and  exp(-x)*Ei(x)  for real arguments  x  where
+C
+C           integral (from t=-infinity to t=x) (exp(t)/t),  x > 0,
+C  Ei(x) =
+C          -integral (from t=-x to t=infinity) (exp(t)/t),  x < 0,
+C
+C  and where the first integral is a principal value integral.
+C  The packet contains three function type subprograms: EI, EONE,
+C  and EXPEI;  and one subroutine type subprogram: CALCEI.  The
+C  calling statements for the primary entries are
+C
+C                 Y = EI(X),            where  X .NE. 0,
+C
+C                 Y = EONE(X),          where  X .GT. 0,
+C  and
+C                 Y = EXPEI(X),         where  X .NE. 0,
+C
+C  and where the entry points correspond to the functions Ei(x),
+C  E1(x), and exp(-x)*Ei(x), respectively.  The routine CALCEI
+C  is intended for internal packet use only, all computations within
+C  the packet being concentrated in this routine.  The function
+C  subprograms invoke CALCEI with the Fortran statement
+C         CALL CALCEI(ARG,RESULT,INT)
+C  where the parameter usage is as follows
+C
+C     Function                  Parameters for CALCEI
+C       Call                 ARG             RESULT         INT
+C
+C      EI(X)              X .NE. 0          Ei(X)            1
+C      EONE(X)            X .GT. 0         -Ei(-X)           2
+C      EXPEI(X)           X .NE. 0          exp(-X)*Ei(X)    3
+C
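+C  As a minimal usage sketch (assuming the function subprograms below
+C  are compiled and linked with this file), E1(x) at x = 2 could be
+C  obtained through the EONE entry:
+C
+C        DOUBLE PRECISION EONE, Y
+C        Y = EONE(2.0D0)
+C
+C  Here Y should be close to 0.04890, the value of E1(2).
+C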
+C  The main computation involves evaluation of rational Chebyshev
+C  approximations published in Math. Comp. 22, 641-649 (1968), and
+C  Math. Comp. 23, 289-303 (1969) by Cody and Thacher.  This
+C  transportable program is patterned after the machine-dependent
+C  FUNPACK packet  NATSEI,  but cannot match that version for
+C  efficiency or accuracy.  This version uses rational functions
+C  that theoretically approximate the exponential integrals to
+C  at least 18 significant decimal digits.  The accuracy achieved
+C  depends on the arithmetic system, the compiler, the intrinsic
+C  functions, and proper selection of the machine-dependent
+C  constants.
+C
+C
+C*******************************************************************
+C*******************************************************************
+C
+C Explanation of machine-dependent constants
+C
+C   beta = radix for the floating-point system.
+C   minexp = smallest representable power of beta.
+C   maxexp = smallest power of beta that overflows.
+C   XBIG = largest argument acceptable to EONE; solution to
+C          equation:
+C                     exp(-x)/x * (1 + 1/x) = beta ** minexp.
+C   XINF = largest positive machine number; approximately
+C                     beta ** maxexp
+C   XMAX = largest argument acceptable to EI; solution to
+C          equation:  exp(x)/x * (1 + 1/x) = beta ** maxexp.
+C
+C     Approximate values for some important machines are:
+C
+C                           beta      minexp      maxexp
+C
+C  CRAY-1        (S.P.)       2       -8193        8191
+C  Cyber 180/185 
+C    under NOS   (S.P.)       2        -975        1070
+C  IEEE (IBM/XT,
+C    SUN, etc.)  (S.P.)       2        -126         128
+C  IEEE (IBM/XT,
+C    SUN, etc.)  (D.P.)       2       -1022        1024
+C  IBM 3033      (D.P.)      16         -65          63
+C  VAX D-Format  (D.P.)       2        -128         127
+C  VAX G-Format  (D.P.)       2       -1024        1023
+C
+C                           XBIG       XINF       XMAX
+C
+C  CRAY-1        (S.P.)    5670.31  5.45E+2465   5686.21
+C  Cyber 180/185 
+C    under NOS   (S.P.)     669.31  1.26E+322     748.28
+C  IEEE (IBM/XT,
+C    SUN, etc.)  (S.P.)      82.93  3.40E+38       93.24
+C  IEEE (IBM/XT,
+C    SUN, etc.)  (D.P.)     701.84  1.79D+308     716.35
+C  IBM 3033      (D.P.)     175.05  7.23D+75      179.85
+C  VAX D-Format  (D.P.)      84.30  1.70D+38       92.54
+C  VAX G-Format  (D.P.)     703.22  8.98D+307     715.66
+C
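+C     Worked check (added annotation, not in the original): for IEEE
+C     double precision, beta ** maxexp = 2**1024 ~ 1.8D+308, matching
+C     the XINF entry above, and XMAX ~ 716.35 is where
+C                exp(x)/x * (1 + 1/x) = 2**1024,
+C     i.e. the point at which EI first overflows; likewise
+C     XBIG ~ 701.84 solves exp(-x)/x * (1 + 1/x) = 2**(-1022),
+C     the argument beyond which EONE underflows to zero.
+C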
+C*******************************************************************
+C*******************************************************************
+C
+C Error returns
+C
+C  The following table shows the types of error that may be
+C  encountered in this routine and the function value supplied
+C  in each case.
+C
+C       Error       Argument         Function values for
+C                    Range         EI      EXPEI     EONE
+C
+C     UNDERFLOW  (-)X .GT. XBIG     0        -         0
+C     OVERFLOW      X .GE. XMAX    XINF      -         -
+C     ILLEGAL X       X = 0       -XINF    -XINF     XINF
+C     ILLEGAL X      X .LT. 0       -        -     USE ABS(X)
+C
+C Intrinsic functions required are:
+C
+C     ABS, SQRT, EXP
+C
+C
+C  Author: W. J. Cody
+C          Mathematics and Computer Science Division
+C          Argonne National Laboratory
+C          Argonne, IL 60439
+C
+C  Latest modification: September 9, 1988
+C
+C----------------------------------------------------------------------
+      INTEGER I,INT
+CS    REAL
+      DOUBLE PRECISION 
+     1       A,ARG,B,C,D,EXP40,E,EI,F,FOUR,FOURTY,FRAC,HALF,ONE,P,
+     2       PLG,PX,P037,P1,P2,Q,QLG,QX,Q1,Q2,R,RESULT,S,SIX,SUMP,
+     3       SUMQ,T,THREE,TWELVE,TWO,TWO4,W,X,XBIG,XINF,XMAX,XMX0,
+     4       X0,X01,X02,X11,Y,YSQ,ZERO
+      DIMENSION  A(7),B(6),C(9),D(9),E(10),F(10),P(10),Q(10),R(10),
+     1   S(9),P1(10),Q1(9),P2(10),Q2(9),PLG(4),QLG(4),PX(10),QX(10)
+C----------------------------------------------------------------------
+C  Mathematical constants
+C   EXP40 = exp(40)
+C   X0 = zero of Ei
+C   X01/X11 + X02 = zero of Ei to extra precision
+C----------------------------------------------------------------------
+CS    DATA ZERO,P037,HALF,ONE,TWO/0.0E0,0.037E0,0.5E0,1.0E0,2.0E0/,
+CS   1     THREE,FOUR,SIX,TWELVE,TWO4/3.0E0,4.0E0,6.0E0,12.E0,24.0E0/,
+CS   2     FOURTY,EXP40/40.0E0,2.3538526683701998541E17/,
+CS   3     X01,X11,X02/381.5E0,1024.0E0,-5.1182968633365538008E-5/,
+CS   4     X0/3.7250741078136663466E-1/
+      DATA ZERO,P037,HALF,ONE,TWO/0.0D0,0.037D0,0.5D0,1.0D0,2.0D0/,
+     1     THREE,FOUR,SIX,TWELVE,TWO4/3.0D0,4.0D0,6.0D0,12.D0,24.0D0/,
+     2     FOURTY,EXP40/40.0D0,2.3538526683701998541D17/,
+     3     X01,X11,X02/381.5D0,1024.0D0,-5.1182968633365538008D-5/,
+     4     X0/3.7250741078136663466D-1/
+C----------------------------------------------------------------------
+C Machine-dependent constants
+C----------------------------------------------------------------------
+CS    DATA XINF/3.40E+38/,XMAX/93.246E0/,XBIG/82.93E0/
+      DATA XINF/1.79D+308/,XMAX/716.351D0/,XBIG/701.84D0/
+C----------------------------------------------------------------------
+C Coefficients  for -1.0 <= X < 0.0
+C----------------------------------------------------------------------
+CS    DATA A/1.1669552669734461083368E2, 2.1500672908092918123209E3,
+CS   1       1.5924175980637303639884E4, 8.9904972007457256553251E4,
+CS   2       1.5026059476436982420737E5,-1.4815102102575750838086E5,
+CS   3       5.0196785185439843791020E0/
+CS    DATA B/4.0205465640027706061433E1, 7.5043163907103936624165E2,
+CS   1       8.1258035174768735759855E3, 5.2440529172056355429883E4,
+CS   2       1.8434070063353677359298E5, 2.5666493484897117319268E5/
+      DATA A/1.1669552669734461083368D2, 2.1500672908092918123209D3,
+     1       1.5924175980637303639884D4, 8.9904972007457256553251D4,
+     2       1.5026059476436982420737D5,-1.4815102102575750838086D5,
+     3       5.0196785185439843791020D0/
+      DATA B/4.0205465640027706061433D1, 7.5043163907103936624165D2,
+     1       8.1258035174768735759855D3, 5.2440529172056355429883D4,
+     2       1.8434070063353677359298D5, 2.5666493484897117319268D5/
+C----------------------------------------------------------------------
+C Coefficients for -4.0 <= X < -1.0
+C----------------------------------------------------------------------
+CS    DATA C/3.828573121022477169108E-1, 1.107326627786831743809E+1,
+CS   1       7.246689782858597021199E+1, 1.700632978311516129328E+2,
+CS   2       1.698106763764238382705E+2, 7.633628843705946890896E+1,
+CS   3       1.487967702840464066613E+1, 9.999989642347613068437E-1,
+CS   4       1.737331760720576030932E-8/
+CS    DATA D/8.258160008564488034698E-2, 4.344836335509282083360E+0,
+CS   1       4.662179610356861756812E+1, 1.775728186717289799677E+2,
+CS   2       2.953136335677908517423E+2, 2.342573504717625153053E+2,
+CS   3       9.021658450529372642314E+1, 1.587964570758947927903E+1,
+CS   4       1.000000000000000000000E+0/
+      DATA C/3.828573121022477169108D-1, 1.107326627786831743809D+1,
+     1       7.246689782858597021199D+1, 1.700632978311516129328D+2,
+     2       1.698106763764238382705D+2, 7.633628843705946890896D+1,
+     3       1.487967702840464066613D+1, 9.999989642347613068437D-1,
+     4       1.737331760720576030932D-8/
+      DATA D/8.258160008564488034698D-2, 4.344836335509282083360D+0,
+     1       4.662179610356861756812D+1, 1.775728186717289799677D+2,
+     2       2.953136335677908517423D+2, 2.342573504717625153053D+2,
+     3       9.021658450529372642314D+1, 1.587964570758947927903D+1,
+     4       1.000000000000000000000D+0/
+C----------------------------------------------------------------------
+C Coefficients for X < -4.0
+C----------------------------------------------------------------------
+CS    DATA E/1.3276881505637444622987E+2,3.5846198743996904308695E+4,
+CS   1       1.7283375773777593926828E+5,2.6181454937205639647381E+5,
+CS   2       1.7503273087497081314708E+5,5.9346841538837119172356E+4,
+CS   3       1.0816852399095915622498E+4,1.0611777263550331766871E03,
+CS   4       5.2199632588522572481039E+1,9.9999999999999999087819E-1/
+CS    DATA F/3.9147856245556345627078E+4,2.5989762083608489777411E+5,
+CS   1       5.5903756210022864003380E+5,5.4616842050691155735758E+5,
+CS   2       2.7858134710520842139357E+5,7.9231787945279043698718E+4,
+CS   3       1.2842808586627297365998E+4,1.1635769915320848035459E+3,
+CS   4       5.4199632588522559414924E+1,1.0E0/
+      DATA E/1.3276881505637444622987D+2,3.5846198743996904308695D+4,
+     1       1.7283375773777593926828D+5,2.6181454937205639647381D+5,
+     2       1.7503273087497081314708D+5,5.9346841538837119172356D+4,
+     3       1.0816852399095915622498D+4,1.0611777263550331766871D03,
+     4       5.2199632588522572481039D+1,9.9999999999999999087819D-1/
+      DATA F/3.9147856245556345627078D+4,2.5989762083608489777411D+5,
+     1       5.5903756210022864003380D+5,5.4616842050691155735758D+5,
+     2       2.7858134710520842139357D+5,7.9231787945279043698718D+4,
+     3       1.2842808586627297365998D+4,1.1635769915320848035459D+3,
+     4       5.4199632588522559414924D+1,1.0D0/
+C----------------------------------------------------------------------
+C  Coefficients for rational approximation to ln(x/a), |1-x/a| < .1
+C----------------------------------------------------------------------
+CS    DATA PLG/-2.4562334077563243311E+01,2.3642701335621505212E+02,
+CS   1         -5.4989956895857911039E+02,3.5687548468071500413E+02/
+CS    DATA QLG/-3.5553900764052419184E+01,1.9400230218539473193E+02,
+CS   1         -3.3442903192607538956E+02,1.7843774234035750207E+02/
+      DATA PLG/-2.4562334077563243311D+01,2.3642701335621505212D+02,
+     1         -5.4989956895857911039D+02,3.5687548468071500413D+02/
+      DATA QLG/-3.5553900764052419184D+01,1.9400230218539473193D+02,
+     1         -3.3442903192607538956D+02,1.7843774234035750207D+02/
+C----------------------------------------------------------------------
+C Coefficients for  0.0 < X < 6.0,
+C  ratio of Chebyshev polynomials
+C----------------------------------------------------------------------
+CS    DATA P/-1.2963702602474830028590E01,-1.2831220659262000678155E03,
+CS   1       -1.4287072500197005777376E04,-1.4299841572091610380064E06,
+CS   2       -3.1398660864247265862050E05,-3.5377809694431133484800E08,
+CS   3        3.1984354235237738511048E08,-2.5301823984599019348858E10,
+CS   4        1.2177698136199594677580E10,-2.0829040666802497120940E11/
+CS    DATA Q/ 7.6886718750000000000000E01,-5.5648470543369082846819E03,
+CS   1        1.9418469440759880361415E05,-4.2648434812177161405483E06,
+CS   2        6.4698830956576428587653E07,-7.0108568774215954065376E08,
+CS   3        5.4229617984472955011862E09,-2.8986272696554495342658E10,
+CS   4        9.8900934262481749439886E10,-8.9673749185755048616855E10/
+      DATA P/-1.2963702602474830028590D01,-1.2831220659262000678155D03,
+     1       -1.4287072500197005777376D04,-1.4299841572091610380064D06,
+     2       -3.1398660864247265862050D05,-3.5377809694431133484800D08,
+     3        3.1984354235237738511048D08,-2.5301823984599019348858D10,
+     4        1.2177698136199594677580D10,-2.0829040666802497120940D11/
+      DATA Q/ 7.6886718750000000000000D01,-5.5648470543369082846819D03,
+     1        1.9418469440759880361415D05,-4.2648434812177161405483D06,
+     2        6.4698830956576428587653D07,-7.0108568774215954065376D08,
+     3        5.4229617984472955011862D09,-2.8986272696554495342658D10,
+     4        9.8900934262481749439886D10,-8.9673749185755048616855D10/
+C----------------------------------------------------------------------
+C J-fraction coefficients for 6.0 <= X < 12.0
+C----------------------------------------------------------------------
+CS    DATA R/-2.645677793077147237806E00,-2.378372882815725244124E00,
+CS   1       -2.421106956980653511550E01, 1.052976392459015155422E01,
+CS   2        1.945603779539281810439E01,-3.015761863840593359165E01,
+CS   3        1.120011024227297451523E01,-3.988850730390541057912E00,
+CS   4        9.565134591978630774217E00, 9.981193787537396413219E-1/
+CS    DATA S/ 1.598517957704779356479E-4, 4.644185932583286942650E00,
+CS   1        3.697412299772985940785E02,-8.791401054875438925029E00,
+CS   2        7.608194509086645763123E02, 2.852397548119248700147E01,
+CS   3        4.731097187816050252967E02,-2.369210235636181001661E02,
+CS   4        1.249884822712447891440E00/
+      DATA R/-2.645677793077147237806D00,-2.378372882815725244124D00,
+     1       -2.421106956980653511550D01, 1.052976392459015155422D01,
+     2        1.945603779539281810439D01,-3.015761863840593359165D01,
+     3        1.120011024227297451523D01,-3.988850730390541057912D00,
+     4        9.565134591978630774217D00, 9.981193787537396413219D-1/
+      DATA S/ 1.598517957704779356479D-4, 4.644185932583286942650D00,
+     1        3.697412299772985940785D02,-8.791401054875438925029D00,
+     2        7.608194509086645763123D02, 2.852397548119248700147D01,
+     3        4.731097187816050252967D02,-2.369210235636181001661D02,
+     4        1.249884822712447891440D00/
+C----------------------------------------------------------------------
+C J-fraction coefficients for 12.0 <= X < 24.0
+C----------------------------------------------------------------------
+CS    DATA P1/-1.647721172463463140042E00,-1.860092121726437582253E01,
+CS   1        -1.000641913989284829961E01,-2.105740799548040450394E01,
+CS   2        -9.134835699998742552432E-1,-3.323612579343962284333E01,
+CS   3         2.495487730402059440626E01, 2.652575818452799819855E01,
+CS   4        -1.845086232391278674524E00, 9.999933106160568739091E-1/
+CS    DATA Q1/ 9.792403599217290296840E01, 6.403800405352415551324E01,
+CS   1         5.994932325667407355255E01, 2.538819315630708031713E02,
+CS   2         4.429413178337928401161E01, 1.192832423968601006985E03,
+CS   3         1.991004470817742470726E02,-1.093556195391091143924E01,
+CS   4         1.001533852045342697818E00/
+      DATA P1/-1.647721172463463140042D00,-1.860092121726437582253D01,
+     1        -1.000641913989284829961D01,-2.105740799548040450394D01,
+     2        -9.134835699998742552432D-1,-3.323612579343962284333D01,
+     3         2.495487730402059440626D01, 2.652575818452799819855D01,
+     4        -1.845086232391278674524D00, 9.999933106160568739091D-1/
+      DATA Q1/ 9.792403599217290296840D01, 6.403800405352415551324D01,
+     1         5.994932325667407355255D01, 2.538819315630708031713D02,
+     2         4.429413178337928401161D01, 1.192832423968601006985D03,
+     3         1.991004470817742470726D02,-1.093556195391091143924D01,
+     4         1.001533852045342697818D00/
+C----------------------------------------------------------------------
+C J-fraction coefficients for  X .GE. 24.0
+C----------------------------------------------------------------------
+CS    DATA P2/ 1.75338801265465972390E02,-2.23127670777632409550E02,
+CS   1        -1.81949664929868906455E01,-2.79798528624305389340E01,
+CS   2        -7.63147701620253630855E00,-1.52856623636929636839E01,
+CS   3        -7.06810977895029358836E00,-5.00006640413131002475E00,
+CS   4        -3.00000000320981265753E00, 1.00000000000000485503E00/
+CS    DATA Q2/ 3.97845977167414720840E04, 3.97277109100414518365E00,
+CS   1         1.37790390235747998793E02, 1.17179220502086455287E02,
+CS   2         7.04831847180424675988E01,-1.20187763547154743238E01,
+CS   3        -7.99243595776339741065E00,-2.99999894040324959612E00,
+CS   4         1.99999999999048104167E00/
+      DATA P2/ 1.75338801265465972390D02,-2.23127670777632409550D02,
+     1        -1.81949664929868906455D01,-2.79798528624305389340D01,
+     2        -7.63147701620253630855D00,-1.52856623636929636839D01,
+     3        -7.06810977895029358836D00,-5.00006640413131002475D00,
+     4        -3.00000000320981265753D00, 1.00000000000000485503D00/
+      DATA Q2/ 3.97845977167414720840D04, 3.97277109100414518365D00,
+     1         1.37790390235747998793D02, 1.17179220502086455287D02,
+     2         7.04831847180424675988D01,-1.20187763547154743238D01,
+     3        -7.99243595776339741065D00,-2.99999894040324959612D00,
+     4         1.99999999999048104167D00/
+C----------------------------------------------------------------------
+      X = ARG
+      IF (X .EQ. ZERO) THEN
+            EI = -XINF
+            IF (INT .EQ. 2) EI = -EI
+         ELSE IF ((X .LT. ZERO) .OR. (INT .EQ. 2)) THEN 
+C----------------------------------------------------------------------
+C Calculate EI for negative argument or for E1.
+C----------------------------------------------------------------------
+            Y = ABS(X)
+            IF (Y .LE. ONE) THEN
+                  SUMP = A(7) * Y + A(1)
+                  SUMQ = Y + B(1)
+                  DO 110 I = 2, 6
+                     SUMP = SUMP * Y + A(I)
+                     SUMQ = SUMQ * Y + B(I)
+  110             CONTINUE
+                  EI = LOG(Y) - SUMP / SUMQ
+                  IF (INT .EQ. 3) EI = EI * EXP(Y)
+               ELSE IF (Y .LE. FOUR) THEN
+                  W = ONE / Y
+                  SUMP = C(1)
+                  SUMQ = D(1)
+                  DO 130 I = 2, 9
+                     SUMP = SUMP * W + C(I)
+                     SUMQ = SUMQ * W + D(I)
+  130             CONTINUE
+                  EI = - SUMP / SUMQ
+                  IF (INT .NE. 3) EI = EI * EXP(-Y)
+               ELSE
+                  IF ((Y .GT. XBIG) .AND. (INT .LT. 3)) THEN
+                        EI = ZERO
+                     ELSE
+                        W = ONE / Y
+                        SUMP = E(1) 
+                        SUMQ = F(1)
+                        DO 150 I = 2, 10
+                           SUMP = SUMP * W + E(I)
+                           SUMQ = SUMQ * W + F(I)
+  150                   CONTINUE
+                        EI = -W * (ONE - W * SUMP / SUMQ )
+                        IF (INT .NE. 3) EI = EI * EXP(-Y)
+                  END IF
+            END IF
+            IF (INT .EQ. 2) EI = -EI
+         ELSE IF (X .LT. SIX) THEN
+C----------------------------------------------------------------------
+C  To improve conditioning, rational approximations are expressed
+C    in terms of Chebyshev polynomials for 0 <= X < 6, and in
+C    continued fraction form for larger X.
+C----------------------------------------------------------------------
+            T = X + X
+            T = T / THREE - TWO
+            PX(1) = ZERO
+            QX(1) = ZERO
+            PX(2) = P(1)
+            QX(2) = Q(1)
+            DO 210 I = 2, 9
+               PX(I+1) = T * PX(I) - PX(I-1) + P(I)
+               QX(I+1) = T * QX(I) - QX(I-1) + Q(I)
+  210       CONTINUE
+            SUMP = HALF * T * PX(10) - PX(9) + P(10)
+            SUMQ = HALF * T * QX(10) - QX(9) + Q(10)
+            FRAC = SUMP / SUMQ
+            XMX0 = (X - X01/X11) - X02
+            IF (ABS(XMX0) .GE. P037) THEN
+                  EI = LOG(X/X0) + XMX0 * FRAC
+                  IF (INT .EQ. 3) EI = EXP(-X) * EI
+               ELSE
+C----------------------------------------------------------------------
+C Special approximation to  ln(X/X0)  for X close to X0
+C----------------------------------------------------------------------
+                  Y = XMX0 / (X + X0)
+                  YSQ = Y*Y
+                  SUMP = PLG(1)
+                  SUMQ = YSQ + QLG(1)
+                  DO 220 I = 2, 4
+                     SUMP = SUMP*YSQ + PLG(I)
+                     SUMQ = SUMQ*YSQ + QLG(I)
+  220             CONTINUE
+                  EI = (SUMP / (SUMQ*(X+X0)) + FRAC) * XMX0
+                  IF (INT .EQ. 3) EI = EXP(-X) * EI
+            END IF
+         ELSE IF (X .LT. TWELVE) THEN
+            FRAC = ZERO
+            DO 230 I = 1, 9
+               FRAC = S(I) / (R(I) + X + FRAC)
+  230       CONTINUE
+            EI = (R(10) + FRAC) / X
+            IF (INT .NE. 3) EI = EI * EXP(X)
+         ELSE IF (X .LE. TWO4) THEN
+            FRAC = ZERO
+            DO 240 I = 1, 9
+               FRAC = Q1(I) / (P1(I) + X + FRAC)
+  240       CONTINUE
+            EI = (P1(10) + FRAC) / X
+            IF (INT .NE. 3) EI = EI * EXP(X)
+         ELSE
+            IF ((X .GE. XMAX) .AND. (INT .LT. 3)) THEN
+                  EI = XINF
+               ELSE
+                  Y = ONE / X
+                  FRAC = ZERO
+                  DO 250 I = 1, 9
+                     FRAC = Q2(I) / (P2(I) + X + FRAC)
+  250             CONTINUE
+                  FRAC = P2(10) + FRAC
+                  EI = Y + Y * Y * FRAC
+                  IF (INT .NE. 3) THEN
+                        IF (X .LE. XMAX-TWO4) THEN
+                              EI = EI * EXP(X)
+                           ELSE
+C----------------------------------------------------------------------
+C Calculation reformulated to avoid premature overflow
+C----------------------------------------------------------------------
+                              EI = (EI * EXP(X-FOURTY)) * EXP40
+                        END IF
+                  END IF
+            END IF
+      END IF
+      RESULT = EI
+      RETURN
+C---------- Last line of CALCEI ----------
+      END
+
+
+
+      SUBROUTINE einlib(X, RESULT)
+C     FUNCTION   EINLIB(X)
+C--------------------------------------------------------------------
+C
+C This function program computes approximate values for the
+C   exponential integral  Ei(x), where  x  is real.
+C
+C  Author: W. J. Cody
+C
+C  Latest modification: January 12, 1988
+C  Latest modification: 20130629 by TWY
+C
+C--------------------------------------------------------------------
+      INTEGER INT
+CS    REAL  EI
+CS    REAL  X
+CS    REAL  RESULT
+      DOUBLE PRECISION  X
+CD    DOUBLE PRECISION  EI
+      DOUBLE PRECISION  RESULT
+C--------------------------------------------------------------------
+      INT = 1
+      CALL calcei(X,RESULT,INT)
+CD    EI = RESULT
+      RETURN
+C---------- Last line of EI ----------
+      END
+
+
+
+      SUBROUTINE expeinl(X, RESULT)
+C     FUNCTION   EXPEINL(X)
+C--------------------------------------------------------------------
+C
+C This function program computes approximate values for the
+C   function  exp(-x) * Ei(x), where  Ei(x)  is the exponential
+C   integral, and  x  is real.
+C
+C  Author: W. J. Cody
+C
+C  Latest modification: January 12, 1988
+C  Latest modification: 20130629 by TWY
+C
+C--------------------------------------------------------------------
+      INTEGER INT
+CS    REAL  EXPEI
+CS    REAL  X
+CS    REAL  RESULT
+CD    DOUBLE PRECISION  EXPEI
+      DOUBLE PRECISION  X
+      DOUBLE PRECISION  RESULT
+C--------------------------------------------------------------------
+      INT = 3
+      CALL calcei(X,RESULT,INT)
+CD    EXPEI = RESULT
+      RETURN
+C---------- Last line of EXPEI ----------
+      END
+
+
+
+      SUBROUTINE eonenl(X, RESULT)
+C     FUNCTION   EONENL(X)
+C--------------------------------------------------------------------
+C
+C This function program computes approximate values for the
+C   exponential integral E1(x), where  x  is real.
+C
+C  Author: W. J. Cody
+C
+C  Latest modification: January 12, 1988
+C  Latest modification: 20130629 by TWY
+C
+C--------------------------------------------------------------------
+      INTEGER INT
+CS    REAL  EONE
+CS    REAL  X
+CS    REAL  RESULT
+CD    DOUBLE PRECISION  EONE
+      DOUBLE PRECISION  X
+      DOUBLE PRECISION  RESULT
+C--------------------------------------------------------------------
+      INT = 2
+      CALL calcei(X,RESULT,INT)
+CD    EONE = RESULT
+      RETURN
+C---------- Last line of EONE ----------
+      END
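+C
+C  Usage note (added annotation, not part of the original Cody code):
+C  the three subroutine wrappers above, einlib, eonenl and expeinl,
+C  only set the CALCEI flag (INT = 1, 2 and 3 respectively), so each
+C  can be reached through a two-argument call such as
+C
+C        DOUBLE PRECISION X, RESULT
+C        X = 2.0D0
+C        CALL einlib(X, RESULT)
+C
+C  after which RESULT approximates Ei(2.0).  The subroutine form
+C  (rather than Cody's original FUNCTION form) is presumably what
+C  allows these entry points to be reached from R via .Fortran().
+C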
diff --git a/src/lms.f b/src/lms.f
index 9ad9072..6b7edf4 100644
--- a/src/lms.f
+++ b/src/lms.f
@@ -1,5 +1,6 @@
-      subroutine dpdlyjn(psi, i9mwnvqt, mymu, sigma, kpzavbj3ative, 
-     &lfu2qhid)
+C Output from Public domain Ratfor, version 1.01
+      subroutine dpdlyjn(psi, i9mwnvqt, mymu, sigma, kpzavbj3ative, lfu2
+     *qhid)
       implicit logical (a-z)
       integer kpzavbj3ative
       double precision psi, i9mwnvqt, mymu, sigma, lfu2qhid(3)
@@ -12,196 +13,193 @@
       mymu = 0.0d0
       sigma = 1.0d0
       cc = (psi .ge. 0.0d0)
-      if(.not.(cc))goto 23000
+      if(cc)then
       bb = i9mwnvqt
       pos = (dabs(i9mwnvqt) .le. n3iasxug)
-      goto 23001
-23000 continue
+      else
       bb = -2.0d0 + i9mwnvqt
       pos = (dabs(i9mwnvqt-2.0d0) .le. n3iasxug)
-23001 continue
+      endif
       aa = 1.0d0 + psi * bb
-      if(.not.(kpzavbj3ative .ge. 0))goto 23002
-      if(.not.(pos))goto 23004
+      if(kpzavbj3ative .ge. 0)then
+      if(pos)then
       lfu2qhid(1) = psi
-      goto 23005
-23004 continue
+      else
       lfu2qhid(1) = aa / bb
-23005 continue
-23002 continue
-      if(.not.(kpzavbj3ative .ge. 1))goto 23006
-      if(.not.(pos))goto 23008
+      endif
+      endif
+      if(kpzavbj3ative .ge. 1)then
+      if(pos)then
       lfu2qhid(2) = (lfu2qhid(1)**2) / 2
-      goto 23009
-23008 continue
+      else
       uqnkc6zg = lfu2qhid(1)
       lfu2qhid(2) = (aa * (dlog(aa)/bb) - uqnkc6zg) / bb
-23009 continue
-23006 continue
-      if(.not.(kpzavbj3ative .ge. 2))goto 23010
-      if(.not.(pos))goto 23012
+      endif
+      endif
+      if(kpzavbj3ative .ge. 2)then
+      if(pos)then
       lfu2qhid(3) = (lfu2qhid(1)**3) / 3
-      goto 23013
-23012 continue
+      else
       uqnkc6zg = lfu2qhid(2) * 2.0d0
       lfu2qhid(3) = (aa * (dlog(aa)/bb) ** 2 - uqnkc6zg) / bb
-23013 continue
-23010 continue
+      endif
+      endif
       return
       end
-      subroutine gleg11(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, 
-     &lenkpzavbj3mat, lfu2qhid)
+      subroutine gleg11(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, le
+     *nkpzavbj3mat, lfu2qhid)
       implicit logical (a-z)
       integer lenkpzavbj3mat
       double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4), 
-     &lfu2qhid
+     *lfu2qhid
       integer hbsl0gto, itwo2, three3
       double precision psi, pim12, o3jyipdf(3), two12
       three3 = 3
       itwo2 = 2
       hbsl0gto = 1
       two12 = 1.41421356237309515d0
-      if(.not.(lenkpzavbj3mat .gt. 0))goto 23014
-      lfu2qhid = kpzavbj3mat(4) * (kpzavbj3mat(2)**2 + two12 * sigma * 
-     &ghz9vuba * kpzavbj3mat(3))
-      goto 23015
-23014 continue
+      if(lenkpzavbj3mat .gt. 0)then
+      lfu2qhid = kpzavbj3mat(4) * (kpzavbj3mat(2)**2 + two12 * sigma * g
+     *hz9vuba * kpzavbj3mat(3))
+      else
       pim12 = 0.564189583547756279d0
       psi = mymu + two12 * sigma * ghz9vuba
       call dpdlyjn(psi, i9mwnvqt, mymu, sigma, itwo2, o3jyipdf)
       lfu2qhid = (dexp(-ghz9vuba*ghz9vuba) * pim12) * (o3jyipdf(2)**2 + 
-     &(psi - mymu) * o3jyipdf(3)) / sigma**2
-23015 continue
+     *(psi - mymu) * o3jyipdf(3)) / sigma**2
+      endif
       return
       end
-      subroutine gleg12(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, 
-     &lenkpzavbj3mat, lfu2qhid)
+      subroutine gleg12(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, le
+     *nkpzavbj3mat, lfu2qhid)
       implicit logical (a-z)
       integer lenkpzavbj3mat
       double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4), 
-     &lfu2qhid
+     *lfu2qhid
       integer hbsl0gto, itwo2
       double precision psi, pim12, two12
       double precision tad5vhsu(3)
       itwo2 = 2
       hbsl0gto = 1
-      if(.not.(lenkpzavbj3mat .gt. 0))goto 23016
+      if(lenkpzavbj3mat .gt. 0)then
       lfu2qhid = kpzavbj3mat(4) * (-kpzavbj3mat(2))
-      goto 23017
-23016 continue
+      else
       pim12 = 0.564189583547756279d0
       two12 = 1.41421356237309515d0
       psi = mymu + two12 * sigma * ghz9vuba
       call dpdlyjn(psi, i9mwnvqt, mymu, sigma, hbsl0gto, tad5vhsu)
-      lfu2qhid = (dexp(-ghz9vuba*ghz9vuba) * pim12) * (-tad5vhsu(2)) / 
-     &sigma**2
-23017 continue
+      lfu2qhid = (dexp(-ghz9vuba*ghz9vuba) * pim12) * (-tad5vhsu(2)) / s
+     *igma**2
+      endif
       return
       end
-      subroutine gleg13(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, 
-     &lenkpzavbj3mat, lfu2qhid)
+      subroutine gleg13(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, le
+     *nkpzavbj3mat, lfu2qhid)
       implicit logical (a-z)
       integer lenkpzavbj3mat
       double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4), 
-     &lfu2qhid
+     *lfu2qhid
       integer hbsl0gto, itwo2
       double precision psi, mtpim12, two12
       double precision tad5vhsu(3)
       itwo2 = 2
       hbsl0gto = 1
-      if(.not.(lenkpzavbj3mat .gt. 0))goto 23018
-      lfu2qhid = kpzavbj3mat(4) * (-kpzavbj3mat(2)) * dsqrt(8.0d0) * 
-     &ghz9vuba
-      goto 23019
-23018 continue
+      if(lenkpzavbj3mat .gt. 0)then
+      lfu2qhid = kpzavbj3mat(4) * (-kpzavbj3mat(2)) * dsqrt(8.0d0) * ghz
+     *9vuba
+      else
       mtpim12 = -1.12837916709551256d0
       two12 = 1.41421356237309515d0
       psi = mymu + two12 * sigma * ghz9vuba
       call dpdlyjn(psi, i9mwnvqt, mymu, sigma, hbsl0gto, tad5vhsu)
-      lfu2qhid = dexp(-ghz9vuba*ghz9vuba) * mtpim12 * tad5vhsu(2) * (
-     &psi - mymu) / sigma**3
-23019 continue
+      lfu2qhid = dexp(-ghz9vuba*ghz9vuba) * mtpim12 * tad5vhsu(2) * (psi
+     * - mymu) / sigma**3
+      endif
       return
       end
       subroutine gint3(minx, maxx, wts, ahl0onwx, i9mwnvqt, mymu, sigma,
-     & kk, lfu2qhid, elemnt)
+     * kk, lfu2qhid, elemnt)
       implicit logical (a-z)
       integer kk, elemnt
-      double precision minx, maxx, wts(kk), ahl0onwx(kk), lfu2qhid, 
-     &i9mwnvqt, mymu, sigma
+      double precision minx, maxx, wts(kk), ahl0onwx(kk), lfu2qhid, i9mw
+     *nvqt, mymu, sigma
       integer gp1jxzuh, lenkpzavbj3mat
       double precision atx, dint, tint, kpzavbj3mat(4), midpt, range12
       lenkpzavbj3mat = 0
       midpt = 0.50d0 * (minx + maxx)
       range12 = 0.50d0 * (maxx - minx)
       dint = 0.0d0
-      if(.not.(elemnt .eq. 1))goto 23020
-      do 23022 gp1jxzuh=1,kk 
+      if(elemnt .eq. 1)then
+      do23022 gp1jxzuh=1,kk 
       atx = midpt + range12 * ahl0onwx(gp1jxzuh)
-      call gleg11(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, 
-     &lenkpzavbj3mat, tint)
+      call gleg11(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, lenkpzavbj3ma
+     *t, tint)
       dint = dint + tint * wts(gp1jxzuh)
 23022 continue
-      goto 23021
-23020 continue
-      if(.not.(elemnt .eq. 2))goto 23024
-      do 23026 gp1jxzuh=1,kk 
+23023 continue
+      else
+      if(elemnt .eq. 2)then
+      do23026 gp1jxzuh=1,kk 
       atx = midpt + range12 * ahl0onwx(gp1jxzuh)
-      call gleg12(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, 
-     &lenkpzavbj3mat, tint)
+      call gleg12(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, lenkpzavbj3ma
+     *t, tint)
       dint = dint + tint * wts(gp1jxzuh)
 23026 continue
-      goto 23025
-23024 continue
-      if(.not.(elemnt .eq. 3))goto 23028
-      do 23030 gp1jxzuh=1,kk 
+23027 continue
+      else
+      if(elemnt .eq. 3)then
+      do23030 gp1jxzuh=1,kk 
       atx = midpt + range12 * ahl0onwx(gp1jxzuh)
-      call gleg13(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, 
-     &lenkpzavbj3mat, tint)
+      call gleg13(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, lenkpzavbj3ma
+     *t, tint)
       dint = dint + tint * wts(gp1jxzuh)
 23030 continue
-23028 continue
-23025 continue
-23021 continue
+23031 continue
+      endif
+      endif
+      endif
       lfu2qhid = lfu2qhid + range12 * dint
       return
       end
-      subroutine yjngintf(minx, maxx, ahl0onwx, wts, kuzxj1lo, kk, 
-     &i9mwnvqt, mymu, sigma, lfu2qhid, qaltf0nz)
+      subroutine yjngintf(minx, maxx, ahl0onwx, wts, kuzxj1lo, kk, i9mwn
+     *vqt, mymu, sigma, lfu2qhid, qaltf0nz)
       implicit logical (a-z)
       integer kuzxj1lo, kk
-      double precision minx(kuzxj1lo), maxx(kuzxj1lo), wts(kk), 
-     &ahl0onwx(kk), i9mwnvqt(kuzxj1lo), mymu(kuzxj1lo), sigma(kuzxj1lo),
-     & lfu2qhid(3,kuzxj1lo), qaltf0nz
-      integer ayfnwr1v, iii, gp1jxzuh, lencomp, ipzbcvw3, hmayv1xt, 
-     &elemnt, hbsl0gto, itwo2
+      double precision minx(kuzxj1lo), maxx(kuzxj1lo), wts(kk), ahl0onwx
+     *(kk), i9mwnvqt(kuzxj1lo), mymu(kuzxj1lo), sigma(kuzxj1lo), lfu2qhi
+     *d(3,kuzxj1lo), qaltf0nz
+      integer ayfnwr1v, iii, gp1jxzuh, lencomp, ipzbcvw3, hmayv1xt, elem
+     *nt, hbsl0gto, itwo2
       double precision xd4mybgj, j4qgxvlk, wiptsjx8
       hbsl0gto = 1
       itwo2 = 2
       lencomp = 12
-      do 23032 ayfnwr1v = 1,kuzxj1lo 
-      do 23034 elemnt=1,3 
+      do23032 ayfnwr1v = 1,kuzxj1lo 
+      do23034 elemnt=1,3 
       j4qgxvlk = -10.0d0
-      do 23036 iii=2,lencomp 
+      do23036 iii=2,lencomp 
       ipzbcvw3 = 2 ** iii
       xd4mybgj = (maxx(ayfnwr1v) - minx(ayfnwr1v)) / ipzbcvw3
       lfu2qhid(elemnt,ayfnwr1v) = 0.0d0
-      do 23038 gp1jxzuh=1,ipzbcvw3 
-      call gint3(minx(ayfnwr1v)+(gp1jxzuh-1)*xd4mybgj, minx(ayfnwr1v)+
-     &gp1jxzuh*xd4mybgj, wts, ahl0onwx, i9mwnvqt(ayfnwr1v), mymu(
-     &ayfnwr1v), sigma(ayfnwr1v), kk, lfu2qhid(elemnt,ayfnwr1v), elemnt)
+      do23038 gp1jxzuh=1,ipzbcvw3 
+      call gint3(minx(ayfnwr1v)+(gp1jxzuh-1)*xd4mybgj, minx(ayfnwr1v)+gp
+     *1jxzuh*xd4mybgj, wts, ahl0onwx, i9mwnvqt(ayfnwr1v), mymu(ayfnwr1v)
+     *, sigma(ayfnwr1v), kk, lfu2qhid(elemnt,ayfnwr1v), elemnt)
 23038 continue
-      wiptsjx8 = dabs(lfu2qhid(elemnt,ayfnwr1v) - j4qgxvlk) / (1.0d0 + 
-     &dabs(lfu2qhid(elemnt,ayfnwr1v)))
-      if(.not.(wiptsjx8 .lt. qaltf0nz))goto 23040
+23039 continue
+      wiptsjx8 = dabs(lfu2qhid(elemnt,ayfnwr1v) - j4qgxvlk) / (1.0d0 + d
+     *abs(lfu2qhid(elemnt,ayfnwr1v)))
+      if(wiptsjx8 .lt. qaltf0nz)then
       goto 234
-      goto 23041
-23040 continue
+      else
       j4qgxvlk = lfu2qhid(elemnt,ayfnwr1v)
-23041 continue
+      endif
 23036 continue
+23037 continue
 234   hmayv1xt = 0
 23034 continue
+23035 continue
 23032 continue
+23033 continue
       return
       end
diff --git a/src/rgam.f b/src/rgam.f
index 1853d5b..45d3bb3 100644
--- a/src/rgam.f
+++ b/src/rgam.f
@@ -1,38 +1,39 @@
-      subroutine dnaoqj0l(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, 
-     &ankcghz2,coef,sz,ifys6woa, qcpiaj7f,wbkq9zyi,parms, scrtch, 
-     &gp0xjetb,l3zpbstu,e5knafcg,wep0oibc,fbd5yktj)
+C Output from Public domain Ratfor, version 1.01
+      subroutine dnaoqj0l(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz
+     *2,coef,sz,ifys6woa, qcpiaj7f,wbkq9zyi,parms, scrtch, gp0xjetb,l3zp
+     *bstu,e5knafcg,wep0oibc,fbd5yktj)
       implicit logical (a-z)
-      integer kuzxj1lo, nk, gp0xjetb, l3zpbstu(3), e5knafcg, wep0oibc, 
-     &fbd5yktj
+      integer kuzxj1lo, nk, gp0xjetb, l3zpbstu(3), e5knafcg, wep0oibc, f
+     *bd5yktj
       double precision penalt, pjb6wfoq, xs(kuzxj1lo), ys(kuzxj1lo), ws(
-     &kuzxj1lo), ankcghz2(nk+4), coef(nk), sz(kuzxj1lo), ifys6woa(
-     &kuzxj1lo), qcpiaj7f, wbkq9zyi, parms(3), scrtch(*)
-      call hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz2,
-     &coef,sz,ifys6woa, qcpiaj7f,l3zpbstu(1),wbkq9zyi,l3zpbstu(2), 
-     &l3zpbstu(3), parms(1),parms(2),parms(3), gp0xjetb, scrtch(1), 
-     &scrtch(nk+1),scrtch(2*nk+1),scrtch(3*nk+1),scrtch(4*nk+1), scrtch(
-     &5*nk+1),scrtch(6*nk+1),scrtch(7*nk+1),scrtch(8*nk+1), scrtch(9*nk+
-     &1),scrtch(9*nk+e5knafcg*nk+1),scrtch(9*nk+2*e5knafcg*nk+1), 
-     &e5knafcg,wep0oibc,fbd5yktj)
+     *kuzxj1lo), ankcghz2(nk+4), coef(nk), sz(kuzxj1lo), ifys6woa(kuzxj1
+     *lo), qcpiaj7f, wbkq9zyi, parms(3), scrtch(*)
+      call hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz2,coef
+     *,sz,ifys6woa, qcpiaj7f,l3zpbstu(1),wbkq9zyi,l3zpbstu(2), l3zpbstu(
+     *3), parms(1),parms(2),parms(3), gp0xjetb, scrtch(1), scrtch(nk+1),
+     *scrtch(2*nk+1),scrtch(3*nk+1),scrtch(4*nk+1), scrtch(5*nk+1),scrtc
+     *h(6*nk+1),scrtch(7*nk+1),scrtch(8*nk+1), scrtch(9*nk+1),scrtch(9*n
+     *k+e5knafcg*nk+1),scrtch(9*nk+2*e5knafcg*nk+1), e5knafcg,wep0oibc,f
+     *bd5yktj)
       return
       end
-      subroutine hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, 
-     &ankcghz2,coef,sz,ifys6woa, qcpiaj7f,icrit,i9mwnvqt,ispar, 
-     &c5aesxku, mynl7uaq,zustx4fw,tol, gp0xjetb, xwy, zvau2lct,f6lsuzax,
-     &fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,
-     &fulcp8wa,plj0trqx, e5knafcg,wep0oibc,fbd5yktj)
+      subroutine hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz
+     *2,coef,sz,ifys6woa, qcpiaj7f,icrit,i9mwnvqt,ispar, c5aesxku, mynl7
+     *uaq,zustx4fw,tol, gp0xjetb, xwy, zvau2lct,f6lsuzax,fvh2rwtc,dcfir2
+     *no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp8wa,plj0trq
+     *x, e5knafcg,wep0oibc,fbd5yktj)
       implicit logical (a-z)
-      integer kuzxj1lo,nk, icrit,ispar, gp0xjetb, e5knafcg,wep0oibc,
-     &fbd5yktj
+      integer kuzxj1lo,nk, icrit,ispar, gp0xjetb, e5knafcg,wep0oibc,fbd5
+     *yktj
       integer c5aesxku
-      double precision penalt,pjb6wfoq,xs(kuzxj1lo),ys(kuzxj1lo),ws(
-     &kuzxj1lo), ankcghz2(nk+4), coef(nk),sz(kuzxj1lo),ifys6woa(
-     &kuzxj1lo), qcpiaj7f,i9mwnvqt,mynl7uaq,zustx4fw,tol, xwy(nk), 
-     &zvau2lct(nk),f6lsuzax(nk),fvh2rwtc(nk),dcfir2no(nk), xecbg0pf(nk),
-     &z4grbpiq(nk),d7glzhbj(nk),v2eydbxs(nk), buhyalv4(e5knafcg,nk),
-     &fulcp8wa(e5knafcg,nk),plj0trqx(wep0oibc,nk)
-      double precision t1,t2,ratio, a,b,c,d,e,qaltf0nz,xm,p,q,r,tol1,
-     &tol2,u,v,w, fu,fv,fw,fx,x, ax,bx
+      double precision penalt,pjb6wfoq,xs(kuzxj1lo),ys(kuzxj1lo),ws(kuzx
+     *j1lo), ankcghz2(nk+4), coef(nk),sz(kuzxj1lo),ifys6woa(kuzxj1lo), q
+     *cpiaj7f,i9mwnvqt,mynl7uaq,zustx4fw,tol, xwy(nk), zvau2lct(nk),f6ls
+     *uzax(nk),fvh2rwtc(nk),dcfir2no(nk), xecbg0pf(nk),z4grbpiq(nk),d7gl
+     *zhbj(nk),v2eydbxs(nk), buhyalv4(e5knafcg,nk),fulcp8wa(e5knafcg,nk)
+     *,plj0trqx(wep0oibc,nk)
+      double precision t1,t2,ratio, a,b,c,d,e,qaltf0nz,xm,p,q,r,tol1,tol
+     *2,u,v,w, fu,fv,fw,fx,x, ax,bx
       integer ayfnwr1v, viter
       double precision yjpnro8d, hmayv1xt
       yjpnro8d = 8.0d88
@@ -41,35 +42,37 @@
       u = 0.5d0
       ratio = 0.5d0
       ayfnwr1v = 1
-23000 if(.not.(ayfnwr1v.le.kuzxj1lo))goto 23002
-      if(.not.(ws(ayfnwr1v).gt.0.0d0))goto 23003
+23000 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23002
+      if(ws(ayfnwr1v).gt.0.0d0)then
       ws(ayfnwr1v) = dsqrt(ws(ayfnwr1v))
-23003 continue
-       ayfnwr1v = ayfnwr1v+1
+      endif
+23001 ayfnwr1v = ayfnwr1v+1
       goto 23000
 23002 continue
-      if(.not.(gp0xjetb .eq. 0))goto 23005
+      if(gp0xjetb .eq. 0)then
       call zosq7hub(xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs,ankcghz2,nk)
-      call gt9iulbf(xs,ys,ws,ankcghz2, kuzxj1lo,nk, xwy,zvau2lct,
-     &f6lsuzax,fvh2rwtc,dcfir2no)
+      call gt9iulbf(xs,ys,ws,ankcghz2, kuzxj1lo,nk, xwy,zvau2lct,f6lsuza
+     *x,fvh2rwtc,dcfir2no)
       t1 = 0.0d0 
       t2 = 0.0d0
-      do 23007 ayfnwr1v = 3,nk-3 
+      do23007 ayfnwr1v = 3,nk-3 
       t1 = t1 + zvau2lct(ayfnwr1v) 
 23007 continue
-      do 23009 ayfnwr1v = 3,nk-3 
+23008 continue
+      do23009 ayfnwr1v = 3,nk-3 
       t2 = t2 + xecbg0pf(ayfnwr1v) 
 23009 continue
+23010 continue
       ratio = t1/t2
       gp0xjetb = 1
-23005 continue
-      if(.not.(ispar .eq. 1))goto 23011
-      call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, 
-     &ankcghz2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,
-     &f6lsuzax,fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, 
-     &buhyalv4,fulcp8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj)
+      endif
+      if(ispar .eq. 1)then
+      call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, ankcghz
+     *2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,fvh2
+     *rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp
+     *8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj)
       return
-23011 continue
+      endif
       ax = mynl7uaq 
       bx = zustx4fw
       c = 0.381966011250105097d0
@@ -82,109 +85,103 @@
       x = v
       e = 0.0d0
       i9mwnvqt = ratio * dexp((-2.0d0 + x*6.0d0) * dlog(16.0d0))
-      call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, 
-     &ankcghz2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,
-     &f6lsuzax,fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, 
-     &buhyalv4,fulcp8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj)
+      call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, ankcghz
+     *2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,fvh2
+     *rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp
+     *8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj)
       fx = qcpiaj7f
       fv = fx
       fw = fx
-23013 if(.not.(fbd5yktj .eq. 0))goto 23014
+23013 if(fbd5yktj .eq. 0)then
       viter = viter + 1
       xm = 0.5d0*(a + b)
       tol1 = qaltf0nz*dabs(x) + tol/3.0d0
       tol2 = 2.0d0*tol1
-      if(.not.((dabs(x - xm) .le. (tol2 - 0.5d0*(b - a))) .or.(viter 
-     &.gt. c5aesxku)))goto 23015
+      if((dabs(x - xm) .le. (tol2 - 0.5d0*(b - a))) .or. (viter .gt. c5a
+     *esxku))then
       go to 90
-23015 continue
-      if(.not.((dabs(e) .le. tol1) .or.(fx .ge. yjpnro8d) .or.(fv .ge. 
-     &yjpnro8d) .or.(fw .ge. yjpnro8d)))goto 23017
+      endif
+      if((dabs(e) .le. tol1) .or. (fx .ge. yjpnro8d) .or. (fv .ge. yjpnr
+     *o8d) .or. (fw .ge. yjpnro8d))then
       go to 40
-23017 continue
+      endif
       r = (x - w)*(fx - fv)
       q = (x - v)*(fx - fw)
       p = (x - v)*q - (x - w)*r
       q = 2.0d0 * (q - r)
-      if(.not.(q .gt. 0.0d0))goto 23019
+      if(q .gt. 0.0d0)then
       p = -p
-23019 continue
+      endif
       q = dabs(q)
       r = e
       e = d
-      if(.not.((dabs(p) .ge. dabs(0.5d0*q*r)) .or.(q .eq. 0.0d0)))goto 2
-     &3021
+      if((dabs(p) .ge. dabs(0.5d0*q*r)) .or. (q .eq. 0.0d0))then
       go to 40
-23021 continue
-      if(.not.((p .le. q*(a - x)) .or. (p .ge. q*(b - x))))goto 23023
+      endif
+      if((p .le. q*(a - x)) .or. (p .ge. q*(b - x)))then
       go to 40
-23023 continue
+      endif
       d = p/q
       u = x + d
-      if(.not.((u - a) .lt. tol2))goto 23025
+      if((u - a) .lt. tol2)then
       d = dsign(tol1, xm - x)
-23025 continue
-      if(.not.((b - u) .lt. tol2))goto 23027
+      endif
+      if((b - u) .lt. tol2)then
       d = dsign(tol1, xm - x)
-23027 continue
+      endif
       go to 50
-40    if(.not.(x .ge. xm))goto 23029
+40    if(x .ge. xm)then
       e = a - x
-      goto 23030
-23029 continue
+      else
       e = b - x
-23030 continue
+      endif
       d = c*e
-50    if(.not.(dabs(d) .ge. tol1))goto 23031
+50    if(dabs(d) .ge. tol1)then
       u = x + d
-      goto 23032
-23031 continue
+      else
       u = x + dsign(tol1, d)
-23032 continue
+      endif
       i9mwnvqt = ratio * dexp((-2.0d0 + u*6.0) * dlog(16.0d0))
-      call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, 
-     &ankcghz2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,
-     &f6lsuzax,fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, 
-     &buhyalv4,fulcp8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj)
+      call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, ankcghz
+     *2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,fvh2
+     *rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp
+     *8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj)
       fu = qcpiaj7f
-      if(.not.(fu .gt. yjpnro8d))goto 23033
+      if(fu .gt. yjpnro8d)then
       fu = 2.0d0 * yjpnro8d
-23033 continue
-      if(.not.(fu .le. fx))goto 23035
-      if(.not.(u .ge. x))goto 23037
+      endif
+      if(fu .le. fx)then
+      if(u .ge. x)then
       a = x
-      goto 23038
-23037 continue
+      else
       b = x
-23038 continue
+      endif
       v = w
       fv = fw
       w = x
       fw = fx
       x = u
       fx = fu
-      goto 23036
-23035 continue
-      if(.not.(u .lt. x))goto 23039
+      else
+      if(u .lt. x)then
       a = u
-      goto 23040
-23039 continue
+      else
       b = u
-23040 continue
-      if(.not.((fu .le. fw) .or. (w .eq. x)))goto 23041
+      endif
+      if((fu .le. fw) .or. (w .eq. x))then
       v = w
       fv = fw
       w = u
       fw = fu
-      goto 23042
-23041 continue
-      if(.not.((fu .le. fv) .or. (v .eq. x) .or. (v .eq. w)))goto 23043
+      else
+      if((fu .le. fv) .or. (v .eq. x) .or. (v .eq. w))then
       v = u
       fv = fu
-23043 continue
-23042 continue
-23036 continue
+      endif
+      endif
+      endif
       goto 23013
+      endif
 23014 continue
 90    hmayv1xt = 0.0d0
       i9mwnvqt = ratio * dexp((-2.0d0 + x*6.0d0) * dlog(16.0d0))
@@ -195,8 +192,8 @@
       subroutine zosq7hub(xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs,tb,nb)
       implicit logical (a-z)
       integer nb
-      double precision xecbg0pf(nb),z4grbpiq(nb),d7glzhbj(nb),v2eydbxs(
-     &nb),tb(nb+4)
+      double precision xecbg0pf(nb),z4grbpiq(nb),d7glzhbj(nb),v2eydbxs(n
+     *b),tb(nb+4)
       integer dqlr5bse,ilo,pqzfxw4i, three3, ifour4, nbp1
       integer ayfnwr1v,iii,yq6lorbx
       integer i2svdbx3tk
@@ -206,116 +203,121 @@
       three3 = 3
       ifour4 = 4
       nbp1 = nb + 1
-      do 23045 ayfnwr1v = 1,nb 
+      do23045 ayfnwr1v = 1,nb 
       xecbg0pf(ayfnwr1v) = 0.0d0
       z4grbpiq(ayfnwr1v) = 0.0d0
       d7glzhbj(ayfnwr1v) = 0.0d0
       v2eydbxs(ayfnwr1v) = 0.0d0 
 23045 continue
+23046 continue
       ilo = 1
-      do 23047 ayfnwr1v = 1,nb 
+      do23047 ayfnwr1v = 1,nb 
       call vinterv(tb(1), nbp1 ,tb(ayfnwr1v),dqlr5bse,pqzfxw4i)
       call vbsplvd(tb,ifour4,tb(ayfnwr1v),dqlr5bse,work,g9fvdrbw,three3)
-      do 23049 iii = 1,4 
+      do23049 iii = 1,4 
       yw1(iii) = g9fvdrbw(iii,3) 
 23049 continue
-      call vbsplvd(tb,ifour4,tb(ayfnwr1v+1),dqlr5bse,work,g9fvdrbw,
-     &three3)
-      do 23051 iii = 1,4 
+23050 continue
+      call vbsplvd(tb,ifour4,tb(ayfnwr1v+1),dqlr5bse,work,g9fvdrbw,three
+     *3)
+      do23051 iii = 1,4 
       yw2(iii) = g9fvdrbw(iii,3) - yw1(iii) 
 23051 continue
+23052 continue
       wpt = tb(ayfnwr1v+1) - tb(ayfnwr1v)
-      if(.not.(dqlr5bse .ge. 4))goto 23053
-      do 23055 iii = 1,4 
+      if(dqlr5bse .ge. 4)then
+      do23055 iii = 1,4 
       yq6lorbx = iii
       i2svdbx3tk = dqlr5bse-4+iii
       xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt * (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
+     *yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50
+     * + yw2(iii)*yw2(yq6lorbx)*othird)
       yq6lorbx = iii+1
-      if(.not.(yq6lorbx .le. 4))goto 23057
-      z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
-23057 continue
+      if(yq6lorbx .le. 4)then
+      z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
+      endif
       yq6lorbx = iii+2
-      if(.not.(yq6lorbx .le. 4))goto 23059
-      d7glzhbj(i2svdbx3tk) = d7glzhbj(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
-23059 continue
+      if(yq6lorbx .le. 4)then
+      d7glzhbj(i2svdbx3tk) = d7glzhbj(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
+      endif
       yq6lorbx = iii+3
-      if(.not.(yq6lorbx .le. 4))goto 23061
-      v2eydbxs(i2svdbx3tk) = v2eydbxs(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
-23061 continue
+      if(yq6lorbx .le. 4)then
+      v2eydbxs(i2svdbx3tk) = v2eydbxs(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
+      endif
 23055 continue
-      goto 23054
-23053 continue
-      if(.not.(dqlr5bse .eq. 3))goto 23063
-      do 23065 iii = 1,3 
+23056 continue
+      else
+      if(dqlr5bse .eq. 3)then
+      do23065 iii = 1,3 
       yq6lorbx = iii
       i2svdbx3tk = dqlr5bse-3+iii
-      xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
+      xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
       yq6lorbx = iii+1
-      if(.not.(yq6lorbx .le. 3))goto 23067
-      z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
-23067 continue
+      if(yq6lorbx .le. 3)then
+      z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
+      endif
       yq6lorbx = iii+2
-      if(.not.(yq6lorbx .le. 3))goto 23069
-      d7glzhbj(i2svdbx3tk) = d7glzhbj(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
-23069 continue
+      if(yq6lorbx .le. 3)then
+      d7glzhbj(i2svdbx3tk) = d7glzhbj(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
+      endif
 23065 continue
-      goto 23064
-23063 continue
-      if(.not.(dqlr5bse .eq. 2))goto 23071
-      do 23073 iii = 1,2 
+23066 continue
+      else
+      if(dqlr5bse .eq. 2)then
+      do23073 iii = 1,2 
       yq6lorbx = iii
       i2svdbx3tk = dqlr5bse-2+iii
-      xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
+      xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
       yq6lorbx = iii+1
-      if(.not.(yq6lorbx .le. 2))goto 23075
-      z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
-23075 continue
+      if(yq6lorbx .le. 2)then
+      z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
+      endif
 23073 continue
-      goto 23072
-23071 continue
-      if(.not.(dqlr5bse .eq. 1))goto 23077
-      do 23079 iii = 1,1 
+23074 continue
+      else
+      if(dqlr5bse .eq. 1)then
+      do23079 iii = 1,1 
       yq6lorbx = iii
       i2svdbx3tk = dqlr5bse-1+iii
-      xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(
-     &yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.
-     &50 + yw2(iii)*yw2(yq6lorbx)*othird)
+      xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y
+     *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 
+     *+ yw2(iii)*yw2(yq6lorbx)*othird)
 23079 continue
-23077 continue
-23072 continue
-23064 continue
-23054 continue
+23080 continue
+      endif
+      endif
+      endif
+      endif
 23047 continue
+23048 continue
       return
       end
-      subroutine vmnweiy2(buhyalv4,fulcp8wa,plj0trqx, e5knafcg,nk,
-     &wep0oibc,iflag)
+      subroutine vmnweiy2(buhyalv4,fulcp8wa,plj0trqx, e5knafcg,nk,wep0oi
+     *bc,iflag)
       implicit logical (a-z)
       integer e5knafcg,nk,wep0oibc,iflag
-      double precision buhyalv4(e5knafcg,nk), fulcp8wa(e5knafcg,nk), 
-     &plj0trqx(wep0oibc,nk)
+      double precision buhyalv4(e5knafcg,nk), fulcp8wa(e5knafcg,nk), plj
+     *0trqx(wep0oibc,nk)
       integer ayfnwr1v, yq6lorbx, gp1jxzuh
       double precision wjm3(3),wjm2(2),wjm1(1),c0,c1,c2,c3
       double precision pcsuow9k, qdbgu6oi, upwkh5xz, rul5fnyd, ueydbrg6,
-     & plce2srm, k3yvomnh, bfdjhu7l, ctfvwdu0
+     * plce2srm, k3yvomnh, bfdjhu7l, ctfvwdu0
       c1 = 0.0d0
       c2 = 0.0d0
       c3 = 0.0d0
@@ -325,35 +327,32 @@
       wjm2(1) = 0.0d0
       wjm2(2) = 0.0d0
       wjm1(1) = 0.0d0
-      do 23081 ayfnwr1v = 1,nk 
+      do23081 ayfnwr1v = 1,nk 
       yq6lorbx = nk-ayfnwr1v+1
       c0 = 1.0d0 / buhyalv4(4,yq6lorbx)
-      if(.not.(yq6lorbx .le. (nk-3)))goto 23083
+      if(yq6lorbx .le. (nk-3))then
       c1 = buhyalv4(1,yq6lorbx+3)*c0
       c2 = buhyalv4(2,yq6lorbx+2)*c0
       c3 = buhyalv4(3,yq6lorbx+1)*c0
-      goto 23084
-23083 continue
-      if(.not.(yq6lorbx .eq. (nk-2)))goto 23085
+      else
+      if(yq6lorbx .eq. (nk-2))then
       c1 = 0.0d0
       c2 = buhyalv4(2,yq6lorbx+2)*c0
       c3 = buhyalv4(3,yq6lorbx+1)*c0
-      goto 23086
-23085 continue
-      if(.not.(yq6lorbx .eq. (nk-1)))goto 23087
+      else
+      if(yq6lorbx .eq. (nk-1))then
       c1 = 0.0d0
       c2 = 0.0d0
       c3 = buhyalv4(3,yq6lorbx+1)*c0
-      goto 23088
-23087 continue
-      if(.not.(yq6lorbx .eq. nk))goto 23089
+      else
+      if(yq6lorbx .eq. nk)then
       c1 = 0.0d0
       c2 = 0.0d0
       c3 = 0.0d0
-23089 continue
-23088 continue
-23086 continue
-23084 continue
+      endif
+      endif
+      endif
+      endif
       pcsuow9k = c1*wjm3(1)
       qdbgu6oi = c2*wjm3(2)
       upwkh5xz = c3*wjm3(3)
@@ -366,8 +365,8 @@
       fulcp8wa(1,yq6lorbx) = 0.0d0 - (pcsuow9k+qdbgu6oi+upwkh5xz)
       fulcp8wa(2,yq6lorbx) = 0.0d0 - (rul5fnyd+ueydbrg6+plce2srm)
       fulcp8wa(3,yq6lorbx) = 0.0d0 - (k3yvomnh+bfdjhu7l+ctfvwdu0)
-      fulcp8wa(4,yq6lorbx) = c0**2 + c1*(pcsuow9k + 2.0d0*(qdbgu6oi + 
-     &upwkh5xz)) + c2*(ueydbrg6 + 2.0d0* plce2srm) + c3*ctfvwdu0
+      fulcp8wa(4,yq6lorbx) = c0**2 + c1*(pcsuow9k + 2.0d0*(qdbgu6oi + up
+     *wkh5xz)) + c2*(ueydbrg6 + 2.0d0* plce2srm) + c3*ctfvwdu0
       wjm3(1) = wjm2(1)
       wjm3(2) = wjm2(2)
       wjm3(3) = fulcp8wa(2,yq6lorbx)
@@ -375,53 +374,57 @@
       wjm2(2) = fulcp8wa(3,yq6lorbx)
       wjm1(1) = fulcp8wa(4,yq6lorbx)
 23081 continue
-      if(.not.(iflag .eq. 0))goto 23091
+23082 continue
+      if(iflag .eq. 0)then
       return
-23091 continue
-      do 23093 ayfnwr1v = 1,nk 
+      endif
+      do23093 ayfnwr1v = 1,nk 
       yq6lorbx = nk-ayfnwr1v+1
       gp1jxzuh = 1
-23095 if(.not.(gp1jxzuh.le.4.and.yq6lorbx+gp1jxzuh-1.le.nk))goto 23097
-      plj0trqx(yq6lorbx,yq6lorbx+gp1jxzuh-1) = fulcp8wa(5-gp1jxzuh,
-     &yq6lorbx)
-       gp1jxzuh = gp1jxzuh+1
+23095 if(.not.(gp1jxzuh .le. 4 .and. yq6lorbx+gp1jxzuh-1 .le. nk))goto 2
+     *3097
+      plj0trqx(yq6lorbx,yq6lorbx+gp1jxzuh-1) = fulcp8wa(5-gp1jxzuh,yq6lo
+     *rbx)
+23096 gp1jxzuh = gp1jxzuh+1
       goto 23095
 23097 continue
 23093 continue
-      do 23098 ayfnwr1v = 1,nk 
+23094 continue
+      do23098 ayfnwr1v = 1,nk 
       yq6lorbx = nk-ayfnwr1v+1
       gp1jxzuh = yq6lorbx-4
-23100 if(.not.(gp1jxzuh.ge.1))goto 23102
+23100 if(.not.(gp1jxzuh .ge. 1))goto 23102
       c0 = 1.0 / buhyalv4(4,gp1jxzuh) 
       c1 = buhyalv4(1,gp1jxzuh+3)*c0
       c2 = buhyalv4(2,gp1jxzuh+2)*c0 
       c3 = buhyalv4(3,gp1jxzuh+1)*c0
-      plj0trqx(gp1jxzuh,yq6lorbx) = 0.0d0- ( c1*plj0trqx(gp1jxzuh+3,
-     &yq6lorbx) + c2*plj0trqx(gp1jxzuh+2,yq6lorbx) + c3*plj0trqx(
-     &gp1jxzuh+1,yq6lorbx) )
-       gp1jxzuh = gp1jxzuh-1
+      plj0trqx(gp1jxzuh,yq6lorbx) = 0.0d0- ( c1*plj0trqx(gp1jxzuh+3,yq6l
+     *orbx) + c2*plj0trqx(gp1jxzuh+2,yq6lorbx) + c3*plj0trqx(gp1jxzuh+1,
+     *yq6lorbx) )
+23101 gp1jxzuh = gp1jxzuh-1
       goto 23100
 23102 continue
 23098 continue
+23099 continue
       return
       end
-      subroutine wmhctl9x(penalt,pjb6wfoq,x,y,w, kuzxj1lo,nk,icrit, 
-     &ankcghz2,coef,sz,ifys6woa, qcpiaj7f, i9mwnvqt, xwy, zvau2lct,
-     &f6lsuzax,fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, 
-     &buhyalv4,fulcp8wa,plj0trqx, e5knafcg,wep0oibc,info)
+      subroutine wmhctl9x(penalt,pjb6wfoq,x,y,w, kuzxj1lo,nk,icrit, ankc
+     *ghz2,coef,sz,ifys6woa, qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,
+     *fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,f
+     *ulcp8wa,plj0trqx, e5knafcg,wep0oibc,info)
       implicit logical (a-z)
       integer kuzxj1lo,nk,icrit, e5knafcg,wep0oibc,info
-      double precision penalt,pjb6wfoq,x(kuzxj1lo),y(kuzxj1lo),w(
-     &kuzxj1lo)
-      double precision ankcghz2(nk+4), coef(nk),sz(kuzxj1lo),ifys6woa(
-     &kuzxj1lo), qcpiaj7f, i9mwnvqt, xwy(nk)
-      double precision zvau2lct(nk),f6lsuzax(nk),fvh2rwtc(nk),dcfir2no(
-     &nk)
-      double precision xecbg0pf(nk),z4grbpiq(nk),d7glzhbj(nk),v2eydbxs(
-     &nk), buhyalv4(e5knafcg,nk),fulcp8wa(e5knafcg,nk),plj0trqx(
-     &wep0oibc,nk)
-      double precision resss, work(16), b0,b1,b2,b3,qaltf0nz, g9fvdrbw(
-     &4,1), xv,eqdf
+      double precision penalt,pjb6wfoq,x(kuzxj1lo),y(kuzxj1lo),w(kuzxj1l
+     *o)
+      double precision ankcghz2(nk+4), coef(nk),sz(kuzxj1lo),ifys6woa(ku
+     *zxj1lo), qcpiaj7f, i9mwnvqt, xwy(nk)
+      double precision zvau2lct(nk),f6lsuzax(nk),fvh2rwtc(nk),dcfir2no(n
+     *k)
+      double precision xecbg0pf(nk),z4grbpiq(nk),d7glzhbj(nk),v2eydbxs(n
+     *k), buhyalv4(e5knafcg,nk),fulcp8wa(e5knafcg,nk),plj0trqx(wep0oibc,
+     *nk)
+      double precision resss, work(16), b0,b1,b2,b3,qaltf0nz, g9fvdrbw(4
+     *,1), xv,eqdf
       double precision qtce8hzo
       double precision rxeqjn0y
       integer izero0, three3, ilo, pqzfxw4i, yq6lorbx, ayfnwr1v
@@ -433,105 +436,113 @@
       ifour4 = 4
       hbsl0gto = 1
       nkp1 = nk + 1
-      do 23103 ayfnwr1v = 1,nk 
+      do23103 ayfnwr1v = 1,nk 
       coef(ayfnwr1v) = xwy(ayfnwr1v) 
 23103 continue
-      do 23105 ayfnwr1v = 1,nk 
-      buhyalv4(4,ayfnwr1v) = zvau2lct(ayfnwr1v)+i9mwnvqt*xecbg0pf(
-     &ayfnwr1v) 
+23104 continue
+      do23105 ayfnwr1v = 1,nk 
+      buhyalv4(4,ayfnwr1v) = zvau2lct(ayfnwr1v)+i9mwnvqt*xecbg0pf(ayfnwr
+     *1v) 
 23105 continue
-      do 23107 ayfnwr1v = 1,(nk-1) 
-      buhyalv4(3,ayfnwr1v+1) = f6lsuzax(ayfnwr1v)+i9mwnvqt*z4grbpiq(
-     &ayfnwr1v) 
+23106 continue
+      do23107 ayfnwr1v = 1,(nk-1) 
+      buhyalv4(3,ayfnwr1v+1) = f6lsuzax(ayfnwr1v)+i9mwnvqt*z4grbpiq(ayfn
+     *wr1v) 
 23107 continue
-      do 23109 ayfnwr1v = 1,(nk-2) 
-      buhyalv4(2,ayfnwr1v+2) = fvh2rwtc(ayfnwr1v)+i9mwnvqt*d7glzhbj(
-     &ayfnwr1v) 
+23108 continue
+      do23109 ayfnwr1v = 1,(nk-2) 
+      buhyalv4(2,ayfnwr1v+2) = fvh2rwtc(ayfnwr1v)+i9mwnvqt*d7glzhbj(ayfn
+     *wr1v) 
 23109 continue
-      do 23111 ayfnwr1v = 1,(nk-3) 
-      buhyalv4(1,ayfnwr1v+3) = dcfir2no(ayfnwr1v)+i9mwnvqt*v2eydbxs(
-     &ayfnwr1v) 
+23110 continue
+      do23111 ayfnwr1v = 1,(nk-3) 
+      buhyalv4(1,ayfnwr1v+3) = dcfir2no(ayfnwr1v)+i9mwnvqt*v2eydbxs(ayfn
+     *wr1v) 
 23111 continue
+23112 continue
       call dpbfa8(buhyalv4,e5knafcg,nk,three3,info)
-      if(.not.(info .ne. 0))goto 23113
+      if(info .ne. 0)then
       return
-23113 continue
+      endif
       call dpbsl8(buhyalv4,e5knafcg,nk,three3,coef)
       icoef = 1
-      do 23115 ayfnwr1v = 1,kuzxj1lo 
+      do23115 ayfnwr1v = 1,kuzxj1lo 
       xv = x(ayfnwr1v)
       call wbvalue(ankcghz2,coef, nk,ifour4,xv,izero0, sz(ayfnwr1v))
 23115 continue
-      if(.not.(icrit .eq. 0))goto 23117
+23116 continue
+      if(icrit .eq. 0)then
       return
-23117 continue
-      call vmnweiy2(buhyalv4,fulcp8wa,plj0trqx, e5knafcg,nk,wep0oibc,
-     &izero0)
-      do 23119 ayfnwr1v = 1,kuzxj1lo 
+      endif
+      call vmnweiy2(buhyalv4,fulcp8wa,plj0trqx, e5knafcg,nk,wep0oibc,ize
+     *ro0)
+      do23119 ayfnwr1v = 1,kuzxj1lo 
       xv = x(ayfnwr1v)
       call vinterv(ankcghz2(1), nkp1 ,xv,dqlr5bse,pqzfxw4i)
-      if(.not.(pqzfxw4i .eq. -1))goto 23121
+      if(pqzfxw4i .eq. -1)then
       dqlr5bse = 4 
       xv = ankcghz2(4) + qaltf0nz 
-23121 continue
-      if(.not.(pqzfxw4i .eq. 1))goto 23123
+      endif
+      if(pqzfxw4i .eq. 1)then
       dqlr5bse = nk 
       xv = ankcghz2(nk+1) - qaltf0nz 
-23123 continue
+      endif
       yq6lorbx = dqlr5bse-3
       call vbsplvd(ankcghz2,ifour4,xv,dqlr5bse,work,g9fvdrbw,hbsl0gto)
       b0 = g9fvdrbw(1,1)
       b1 = g9fvdrbw(2,1)
       b2 = g9fvdrbw(3,1)
       b3 = g9fvdrbw(4,1)
-      qtce8hzo = (b0 *(fulcp8wa(4,yq6lorbx)*b0 + 2.0d0*(fulcp8wa(3,
-     &yq6lorbx)*b1 + fulcp8wa(2,yq6lorbx)*b2 + fulcp8wa(1,yq6lorbx)*b3))
-     & + b1 *(fulcp8wa(4,yq6lorbx+1)*b1 + 2.0d0*(fulcp8wa(3,yq6lorbx+1)*
-     &b2 + fulcp8wa(2,yq6lorbx+1)*b3)) + b2 *(fulcp8wa(4,yq6lorbx+2)*b2 
-     &+ 2.0d0* fulcp8wa(3,yq6lorbx+2)*b3 )+ b3**2* fulcp8wa(4,yq6lorbx+
-     &3)) * w(ayfnwr1v)**2
+      qtce8hzo = (b0 *(fulcp8wa(4,yq6lorbx)*b0 + 2.0d0*(fulcp8wa(3,yq6lo
+     *rbx)*b1 + fulcp8wa(2,yq6lorbx)*b2 + fulcp8wa(1,yq6lorbx)*b3)) + b1
+     * *(fulcp8wa(4,yq6lorbx+1)*b1 + 2.0d0*(fulcp8wa(3,yq6lorbx+1)*b2 + 
+     *fulcp8wa(2,yq6lorbx+1)*b3)) + b2 *(fulcp8wa(4,yq6lorbx+2)*b2 + 2.0
+     *d0* fulcp8wa(3,yq6lorbx+2)*b3 )+ b3**2* fulcp8wa(4,yq6lorbx+3)) * 
+     *w(ayfnwr1v)**2
       ifys6woa(ayfnwr1v) = qtce8hzo
 23119 continue
-      if(.not.(icrit .eq. 1))goto 23125
+23120 continue
+      if(icrit .eq. 1)then
       resss = 0.0d0 
       eqdf = 0.0d0 
       rxeqjn0y = 0.0d0
-      do 23127 ayfnwr1v = 1,kuzxj1lo 
+      do23127 ayfnwr1v = 1,kuzxj1lo 
       resss = resss + ((y(ayfnwr1v)-sz(ayfnwr1v))*w(ayfnwr1v))**2
       eqdf = eqdf + ifys6woa(ayfnwr1v)
       rxeqjn0y = rxeqjn0y + w(ayfnwr1v)*w(ayfnwr1v)
 23127 continue
-      qcpiaj7f = (resss/rxeqjn0y)/((1.0d0-(pjb6wfoq+penalt*eqdf)/
-     &rxeqjn0y)**2)
-      goto 23126
-23125 continue
-      if(.not.(icrit .eq. 2))goto 23129
+23128 continue
+      qcpiaj7f = (resss/rxeqjn0y)/((1.0d0-(pjb6wfoq+penalt*eqdf)/rxeqjn0
+     *y)**2)
+      else
+      if(icrit .eq. 2)then
       qcpiaj7f = 0.0d0
       rxeqjn0y = 0.0d0
-      do 23131 ayfnwr1v = 1,kuzxj1lo 
-      qcpiaj7f = qcpiaj7f + (((y(ayfnwr1v)-sz(ayfnwr1v))*w(ayfnwr1v))/(
-     &1.0d0-ifys6woa(ayfnwr1v)))**2
+      do23131 ayfnwr1v = 1,kuzxj1lo 
+      qcpiaj7f = qcpiaj7f + (((y(ayfnwr1v)-sz(ayfnwr1v))*w(ayfnwr1v))/(1
+     *.0d0-ifys6woa(ayfnwr1v)))**2
       rxeqjn0y = rxeqjn0y + w(ayfnwr1v)*w(ayfnwr1v)
 23131 continue
+23132 continue
       qcpiaj7f = qcpiaj7f / rxeqjn0y
-      goto 23130
-23129 continue
+      else
       qcpiaj7f = 0.0d0
-      do 23133 ayfnwr1v = 1,kuzxj1lo 
+      do23133 ayfnwr1v = 1,kuzxj1lo 
       qcpiaj7f = qcpiaj7f+ifys6woa(ayfnwr1v)
 23133 continue
+23134 continue
       qcpiaj7f = 3.0d0 + (pjb6wfoq-qcpiaj7f)**2
-23130 continue
-23126 continue
+      endif
+      endif
       return
       end
-      subroutine gt9iulbf(he7mqnvy,ghz9vuba,w,gkdx5jal, rvy1fpli,
-     &kuzxj1lo, bhcji9glto,zvau2lct,f6lsuzax,fvh2rwtc,dcfir2no)
+      subroutine gt9iulbf(he7mqnvy,ghz9vuba,w,gkdx5jal, rvy1fpli,kuzxj1l
+     *o, bhcji9glto,zvau2lct,f6lsuzax,fvh2rwtc,dcfir2no)
       implicit logical (a-z)
       integer rvy1fpli,kuzxj1lo
       double precision he7mqnvy(rvy1fpli),ghz9vuba(rvy1fpli),w(rvy1fpli)
-     &,gkdx5jal(kuzxj1lo+4), bhcji9glto(kuzxj1lo), zvau2lct(kuzxj1lo),
-     &f6lsuzax(kuzxj1lo),fvh2rwtc(kuzxj1lo),dcfir2no(kuzxj1lo)
+     *,gkdx5jal(kuzxj1lo+4), bhcji9glto(kuzxj1lo), zvau2lct(kuzxj1lo),f6
+     *lsuzax(kuzxj1lo),fvh2rwtc(kuzxj1lo),dcfir2no(kuzxj1lo)
       double precision qaltf0nz,g9fvdrbw(4,1),work(16)
       double precision w2svdbx3tk, wv2svdbx3tk
       integer yq6lorbx,ayfnwr1v,ilo,dqlr5bse,pqzfxw4i, nhnpt1zym1
@@ -539,66 +550,66 @@
       hbsl0gto = 1
       ifour4 = 4
       nhnpt1zym1 = kuzxj1lo + 1
-      do 23135 ayfnwr1v = 1,kuzxj1lo 
+      do23135 ayfnwr1v = 1,kuzxj1lo 
       bhcji9glto(ayfnwr1v) = 0.0d0 
       zvau2lct(ayfnwr1v) = 0.0d0 
       f6lsuzax(ayfnwr1v) = 0.0d0
       fvh2rwtc(ayfnwr1v) = 0.0d0 
       dcfir2no(ayfnwr1v) = 0.0d0
 23135 continue
+23136 continue
       ilo = 1
       qaltf0nz = 0.1d-9
-      do 23137 ayfnwr1v = 1,rvy1fpli 
+      do23137 ayfnwr1v = 1,rvy1fpli 
       call vinterv(gkdx5jal(1), nhnpt1zym1 ,he7mqnvy(ayfnwr1v),dqlr5bse,
-     &pqzfxw4i)
-      if(.not.(pqzfxw4i .eq. 1))goto 23139
-      if(.not.(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz)))
-     &goto 23141
+     *pqzfxw4i)
+      if(pqzfxw4i .eq. 1)then
+      if(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz))then
       dqlr5bse = dqlr5bse-1
-      goto 23142
-23141 continue
+      else
       return
-23142 continue
-23139 continue
-      call vbsplvd(gkdx5jal,ifour4,he7mqnvy(ayfnwr1v),dqlr5bse,work,
-     &g9fvdrbw,hbsl0gto)
+      endif
+      endif
+      call vbsplvd(gkdx5jal,ifour4,he7mqnvy(ayfnwr1v),dqlr5bse,work,g9fv
+     *drbw,hbsl0gto)
       yq6lorbx = dqlr5bse-4+1
       w2svdbx3tk = w(ayfnwr1v)**2
       wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(1,1)
-      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*
-     &ghz9vuba(ayfnwr1v)
-      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(1,
-     &1)
-      f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(2,
-     &1)
-      fvh2rwtc(yq6lorbx) = fvh2rwtc(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,
-     &1)
-      dcfir2no(yq6lorbx) = dcfir2no(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,
-     &1)
+      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba
+     *(ayfnwr1v)
+      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(1,1
+     *)
+      f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(2,1
+     *)
+      fvh2rwtc(yq6lorbx) = fvh2rwtc(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,1
+     *)
+      dcfir2no(yq6lorbx) = dcfir2no(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1
+     *)
       yq6lorbx = dqlr5bse-4+2
       wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(2,1)
-      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*
-     &ghz9vuba(ayfnwr1v)
-      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(2,
-     &1)
-      f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,
-     &1)
-      fvh2rwtc(yq6lorbx) = fvh2rwtc(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,
-     &1)
+      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba
+     *(ayfnwr1v)
+      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(2,1
+     *)
+      f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,1
+     *)
+      fvh2rwtc(yq6lorbx) = fvh2rwtc(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1
+     *)
       yq6lorbx = dqlr5bse-4+3
       wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(3,1)
-      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*
-     &ghz9vuba(ayfnwr1v)
-      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,
-     &1)
-      f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,
-     &1)
+      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba
+     *(ayfnwr1v)
+      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,1
+     *)
+      f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1
+     *)
       yq6lorbx = dqlr5bse
       wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(4,1)
-      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*
-     &ghz9vuba(ayfnwr1v)
-      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,
-     &1)
+      bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba
+     *(ayfnwr1v)
+      zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1
+     *)
 23137 continue
+23138 continue
       return
       end
diff --git a/src/rgam3.c b/src/rgam3.c
index cc81eee..1fe0b1d 100644
--- a/src/rgam3.c
+++ b/src/rgam3.c
@@ -290,7 +290,8 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
           goto a3bdsirf;
         }
         if (wkumc9idp <= wkumc9idq * (wkumc9ida - wkumc9idx) ||
-            wkumc9idp >= wkumc9idq * (wkumc9idb - wkumc9idx))    goto a3bdsirf;
+            wkumc9idp >= wkumc9idq * (wkumc9idb - wkumc9idx))
+          goto a3bdsirf;
 
         wkumc9idd = wkumc9idp / wkumc9idq;
 
@@ -300,7 +301,8 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
         wkumc9idu = wkumc9idx + wkumc9idd;
 
         if (wkumc9idu - wkumc9ida < Tol2 ||
-            wkumc9idb - wkumc9idu < Tol2)  wkumc9idd = fsign(Tol1, wkumc9idxm - wkumc9idx);
+            wkumc9idb - wkumc9idu < Tol2)
+          wkumc9idd = fsign(Tol1, wkumc9idxm - wkumc9idx);
 
         goto ceqzd1hi50;
 
@@ -330,26 +332,29 @@ ceqzd1hi50: wkumc9idu = wkumc9idx +
             wkumc9idfu = 2.0e0 * yjpnro8d;
 
         if (wkumc9idfu <= wkumc9idfx) {
-            if (wkumc9idu >= wkumc9idx) wkumc9ida = wkumc9idx; else wkumc9idb = wkumc9idx;
-            wkumc9idv = wkumc9idw; wkumc9idfv = wkumc9idfw;
-            wkumc9idw = wkumc9idx; wkumc9idfw = wkumc9idfx;
-            wkumc9idx = wkumc9idu; wkumc9idfx = wkumc9idfu;
+          if (wkumc9idu >= wkumc9idx) wkumc9ida = wkumc9idx; else wkumc9idb = wkumc9idx;
+          wkumc9idv = wkumc9idw; wkumc9idfv = wkumc9idfw;
+          wkumc9idw = wkumc9idx; wkumc9idfw = wkumc9idfx;
+          wkumc9idx = wkumc9idu; wkumc9idfx = wkumc9idfu;
         } else {
-            if (wkumc9idu < wkumc9idx) wkumc9ida = wkumc9idu; else wkumc9idb = wkumc9idu;
-            if (wkumc9idfu <= wkumc9idfw || wkumc9idw == wkumc9idx) {
-                wkumc9idv = wkumc9idw; wkumc9idfv = wkumc9idfw;
-                wkumc9idw = wkumc9idu; wkumc9idfw = wkumc9idfu;
-            } else
-            if (wkumc9idfu <= wkumc9idfv || wkumc9idv == wkumc9idx || wkumc9idv == wkumc9idw){
-                wkumc9idv = wkumc9idu; wkumc9idfv = wkumc9idfu;
+        if (wkumc9idu < wkumc9idx) wkumc9ida = wkumc9idu; else wkumc9idb = wkumc9idu;
+          if (wkumc9idfu <= wkumc9idfw || wkumc9idw == wkumc9idx) {
+            wkumc9idv = wkumc9idw; wkumc9idfv = wkumc9idfw;
+            wkumc9idw = wkumc9idu; wkumc9idfw = wkumc9idfu;
+          } else
+            if (wkumc9idfu <= wkumc9idfv ||
+                wkumc9idv  == wkumc9idx  ||
+                wkumc9idv  == wkumc9idw) {
+              wkumc9idv  = wkumc9idu;
+              wkumc9idfv = wkumc9idfu;
             }
         }
     }
     L_End: bk3ymcih = 0.0e0;
 
-        *wbkq9zyi = wkumc9idx;
-        *qcpiaj7f = wkumc9idfx;
-        return;
+    *wbkq9zyi = wkumc9idx;
+    *qcpiaj7f = wkumc9idfx;
+    return;
 }
 
 
@@ -381,112 +386,112 @@ void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], d
   for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
 
 
-      F77_CALL(vinterv)(gkdx5jal, &nkplus1, gkdx5jal + ayfnwr1v-1, &dqlr5bse, &pqzfxw4i);
+    F77_CALL(vinterv)(gkdx5jal, &nkplus1, gkdx5jal + ayfnwr1v-1, &dqlr5bse, &pqzfxw4i);
 
-      F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, gkdx5jal + ayfnwr1v - 1, &dqlr5bse, ms0qypiw,
-                        g9fvdrbw, &bvsquk3z);
+    F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, gkdx5jal + ayfnwr1v - 1, &dqlr5bse, ms0qypiw,
+                      g9fvdrbw, &bvsquk3z);
 
-      for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) {
-          yw1[gp1jxzuh-1] = g9fvdrbw[gp1jxzuh-1 + 2*4];
-       }
+    for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) {
+      yw1[gp1jxzuh-1] = g9fvdrbw[gp1jxzuh-1 + 2*4];
+    }
 
-      F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, gkdx5jal + ayfnwr1v, &dqlr5bse, ms0qypiw,
-                        g9fvdrbw, &bvsquk3z);
+    F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, gkdx5jal + ayfnwr1v, &dqlr5bse, ms0qypiw,
+                      g9fvdrbw, &bvsquk3z);
 
-      for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) { 
-          yw2[gp1jxzuh-1] = g9fvdrbw[gp1jxzuh-1 + 2*4] - yw1[gp1jxzuh-1];
+    for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) { 
+      yw2[gp1jxzuh-1] = g9fvdrbw[gp1jxzuh-1 + 2*4] - yw1[gp1jxzuh-1];
+    }
+    wrk1 = gkdx5jal[ayfnwr1v] - gkdx5jal[ayfnwr1v-1];
+
+    if (dqlr5bse >= 4) {
+      for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) {
+        yq6lorbx = gp1jxzuh;
+        urohxe6t = dqlr5bse - 4 + gp1jxzuh;
+        xecbg0pf[urohxe6t-1] +=
+           wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                  (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                   yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 +
+                   yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        yq6lorbx = gp1jxzuh + 1;
+        if (yq6lorbx <= 4) {
+          z4grbpiq[urohxe6t-1] +=
+            wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                   (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                    yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                    yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        }
+        yq6lorbx = gp1jxzuh + 2;
+        if (yq6lorbx <= 4) {
+          d7glzhbj[urohxe6t-1] +=
+          wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                 (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                  yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                  yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        }
+        yq6lorbx = gp1jxzuh + 3;
+        if (yq6lorbx <= 4) {
+          v2eydbxs[urohxe6t-1] +=
+          wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                 (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                  yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                  yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        }
+      }
+    } else if (dqlr5bse == 3) {
+      for (gp1jxzuh = 1; gp1jxzuh <= 3; gp1jxzuh++) {
+        yq6lorbx = gp1jxzuh;
+        urohxe6t = dqlr5bse - 3 + gp1jxzuh;
+        xecbg0pf[urohxe6t-1] +=
+           wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                  (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                   yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                   yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        yq6lorbx = gp1jxzuh + 1;
+        if (yq6lorbx <= 3) {
+          z4grbpiq[urohxe6t-1] +=
+              wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                     (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                      yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                      yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        }
+        yq6lorbx = gp1jxzuh + 2;
+        if (yq6lorbx <= 3) {
+          d7glzhbj[urohxe6t-1] +=
+             wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                    (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                     yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                     yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        }
+      }
+    } else if (dqlr5bse == 2) {
+      for (gp1jxzuh = 1; gp1jxzuh <= 2; gp1jxzuh++) {
+        yq6lorbx = gp1jxzuh;
+        urohxe6t = dqlr5bse - 2 + gp1jxzuh;
+        xecbg0pf[urohxe6t-1] +=
+          wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                 (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                  yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                  yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        yq6lorbx = gp1jxzuh + 1;
+        if (yq6lorbx <= 2) {
+          z4grbpiq[urohxe6t-1] +=
+            wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                   (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                    yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                    yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
+        }
       }
-      wrk1 = gkdx5jal[ayfnwr1v] - gkdx5jal[ayfnwr1v-1];
-
-      if (dqlr5bse >= 4) {
-          for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) {
-              yq6lorbx = gp1jxzuh;
-              urohxe6t = dqlr5bse - 4 + gp1jxzuh;
-              xecbg0pf[urohxe6t-1] +=
-                 wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                         yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 +
-                         yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              yq6lorbx = gp1jxzuh + 1;
-              if (yq6lorbx <= 4) {
-                 z4grbpiq[urohxe6t-1] +=
-                 wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                         yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                         yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              }
-              yq6lorbx = gp1jxzuh + 2;
-              if (yq6lorbx <= 4) {
-                 d7glzhbj[urohxe6t-1] +=
-                 wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                         yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                         yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              }
-              yq6lorbx = gp1jxzuh + 3;
-              if (yq6lorbx <= 4) {
-                 v2eydbxs[urohxe6t-1] +=
-                 wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                         yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                         yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              }
-          }
-      } else if (dqlr5bse == 3) {
-          for (gp1jxzuh = 1; gp1jxzuh <= 3; gp1jxzuh++) {
-              yq6lorbx = gp1jxzuh;
-              urohxe6t = dqlr5bse - 3 + gp1jxzuh;
-              xecbg0pf[urohxe6t-1] +=
-                 wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                         yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                         yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              yq6lorbx = gp1jxzuh + 1;
-              if (yq6lorbx <= 3) {
-                 z4grbpiq[urohxe6t-1] +=
-                     wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                            (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                             yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                             yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              }
-              yq6lorbx = gp1jxzuh + 2;
-              if (yq6lorbx <= 3) {
-                  d7glzhbj[urohxe6t-1] +=
-                     wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                            (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                             yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                             yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              }
-          }
-      } else if (dqlr5bse == 2) {
-          for (gp1jxzuh = 1; gp1jxzuh <= 2; gp1jxzuh++) {
-              yq6lorbx = gp1jxzuh;
-              urohxe6t = dqlr5bse - 2 + gp1jxzuh;
-              xecbg0pf[urohxe6t-1] +=
-                wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                       (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                        yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              yq6lorbx = gp1jxzuh + 1;
-              if (yq6lorbx <= 2) {
-                  z4grbpiq[urohxe6t-1] +=
-                    wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                           (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                            yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                            yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-              }
-          }
-      } else if (dqlr5bse == 1) {
-          for (gp1jxzuh = 1; gp1jxzuh <= 1; gp1jxzuh++) {
-              yq6lorbx = gp1jxzuh;
-              urohxe6t = dqlr5bse - 1 + gp1jxzuh;
-              xecbg0pf[urohxe6t-1] +=
-                wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                       (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
-                        yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
-                        yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
-          }
+    } else if (dqlr5bse == 1) {
+      for (gp1jxzuh = 1; gp1jxzuh <= 1; gp1jxzuh++) {
+        yq6lorbx = gp1jxzuh;
+        urohxe6t = dqlr5bse - 1 + gp1jxzuh;
+        xecbg0pf[urohxe6t-1] +=
+          wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                 (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] +
+                  yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50  +
+                  yw2[gp1jxzuh-1]*yw2[yq6lorbx-1]  * othird);
       }
+    }
   }
 }
 
@@ -515,81 +520,81 @@ void n5aioudkvmnweiy2(double buhyalv4[], double fulcp8wa[], double plj0trqx[], i
   wjm1[0] = 0.0e0;
 
   for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-      yq6lorbx = *acpios9q - ayfnwr1v + 1;
-      c0 = 1.0e0 / buhyalv4[3 + (yq6lorbx-1) * *xtov9rbf];
-      if (yq6lorbx <= (*acpios9q-3)) {
-          c1 = buhyalv4[0 + (yq6lorbx+2) * *xtov9rbf] * c0;
-          c2 = buhyalv4[1 + (yq6lorbx+1) * *xtov9rbf] * c0;
-          c3 = buhyalv4[2 + (yq6lorbx+0) * *xtov9rbf] * c0;
-      } else if (yq6lorbx == (*acpios9q - 2)) {
-          c1 = 0.0e0;
-          c2 = buhyalv4[1 + (yq6lorbx+1) * *xtov9rbf] * c0;
-          c3 = buhyalv4[2 +  yq6lorbx    * *xtov9rbf] * c0;
-      } else if (yq6lorbx == (*acpios9q - 1)) {
-          c1 =
-          c2 = 0.0e0;
-          c3 = buhyalv4[2 +  yq6lorbx    * *xtov9rbf] * c0;
-      } else if (yq6lorbx ==  *acpios9q) {
-          c1 =
-          c2 =
-          c3 = 0.0e0;
-      }
+    yq6lorbx = *acpios9q - ayfnwr1v + 1;
+    c0 = 1.0e0 / buhyalv4[3 + (yq6lorbx-1) * *xtov9rbf];
+    if (yq6lorbx <= (*acpios9q-3)) {
+      c1 = buhyalv4[0 + (yq6lorbx+2) * *xtov9rbf] * c0;
+      c2 = buhyalv4[1 + (yq6lorbx+1) * *xtov9rbf] * c0;
+      c3 = buhyalv4[2 + (yq6lorbx+0) * *xtov9rbf] * c0;
+    } else if (yq6lorbx == (*acpios9q - 2)) {
+      c1 = 0.0e0;
+      c2 = buhyalv4[1 + (yq6lorbx+1) * *xtov9rbf] * c0;
+      c3 = buhyalv4[2 +  yq6lorbx    * *xtov9rbf] * c0;
+    } else if (yq6lorbx == (*acpios9q - 1)) {
+      c1 =
+      c2 = 0.0e0;
+      c3 = buhyalv4[2 +  yq6lorbx    * *xtov9rbf] * c0;
+    } else if (yq6lorbx ==  *acpios9q) {
+      c1 =
+      c2 =
+      c3 = 0.0e0;
+    }
 
-      pcsuow9k = c1 * wjm3[0];
-      qdbgu6oi = c2 * wjm3[1];
-      upwkh5xz = c3 * wjm3[2];
-      rul5fnyd = c1 * wjm3[1];
-      ueydbrg6 = c2 * wjm2[0];
-      plce2srm = c3 * wjm2[1];
-      k3yvomnh = c1 * wjm3[2];
-      bfdjhu7l = c2 * wjm2[1];
-      ctfvwdu0 = c3 * wjm1[0];
-      fulcp8wa[0 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (pcsuow9k+qdbgu6oi+upwkh5xz);
-      fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (rul5fnyd+ueydbrg6+plce2srm);
-      fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (k3yvomnh+bfdjhu7l+ctfvwdu0);
-
-      fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf] = pow(c0, (double) 2.0) +
-                  c1 * (pcsuow9k + 2.0e0 * (qdbgu6oi + upwkh5xz)) +
-                  c2 * (ueydbrg6 + 2.0e0 *  plce2srm) +
-                  c3 *  ctfvwdu0;
-
-
-      wjm3[0] = wjm2[0];
-      wjm3[1] = wjm2[1];
-      wjm3[2] = fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf];
-      wjm2[0] = wjm1[0];
-      wjm2[1] = fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf];
-      wjm1[0] = fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf];
+    pcsuow9k = c1 * wjm3[0];
+    qdbgu6oi = c2 * wjm3[1];
+    upwkh5xz = c3 * wjm3[2];
+    rul5fnyd = c1 * wjm3[1];
+    ueydbrg6 = c2 * wjm2[0];
+    plce2srm = c3 * wjm2[1];
+    k3yvomnh = c1 * wjm3[2];
+    bfdjhu7l = c2 * wjm2[1];
+    ctfvwdu0 = c3 * wjm1[0];
+    fulcp8wa[0 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (pcsuow9k+qdbgu6oi+upwkh5xz);
+    fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (rul5fnyd+ueydbrg6+plce2srm);
+    fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (k3yvomnh+bfdjhu7l+ctfvwdu0);
+
+    fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf] = pow(c0, (double) 2.0) +
+                c1 * (pcsuow9k + 2.0e0 * (qdbgu6oi + upwkh5xz)) +
+                c2 * (ueydbrg6 + 2.0e0 *  plce2srm) +
+                c3 *  ctfvwdu0;
+
+
+    wjm3[0] = wjm2[0];
+    wjm3[1] = wjm2[1];
+    wjm3[2] = fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf];
+    wjm2[0] = wjm1[0];
+    wjm2[1] = fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf];
+    wjm1[0] = fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf];
   }
 
 
   if (*iflag == 0) {
-      return;
+    return;
   }
-      Rprintf("plj0trqx must not be a double of length one!\n");
-
-      for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-        yq6lorbx = *acpios9q - ayfnwr1v + 1;
-        for (gp1jxzuh = 1; gp1jxzuh <= 4 &&
-                         yq6lorbx + gp1jxzuh-1 <= *acpios9q; gp1jxzuh++) {
-             plj0trqx[yq6lorbx-1 + (yq6lorbx+gp1jxzuh-2) * *wep0oibc] =
-             fulcp8wa[4-gp1jxzuh + (yq6lorbx-1)        * *xtov9rbf];
-        }
+  Rprintf("plj0trqx must not be a double of length one!\n");
+
+    for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
+      yq6lorbx = *acpios9q - ayfnwr1v + 1;
+      for (gp1jxzuh = 1; gp1jxzuh <= 4 &&
+                       yq6lorbx + gp1jxzuh-1 <= *acpios9q; gp1jxzuh++) {
+           plj0trqx[yq6lorbx-1 + (yq6lorbx+gp1jxzuh-2) * *wep0oibc] =
+           fulcp8wa[4-gp1jxzuh + (yq6lorbx-1)        * *xtov9rbf];
       }
+    }
 
-      for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-          yq6lorbx = *acpios9q - ayfnwr1v + 1;
-          for (gp1jxzuh = yq6lorbx-4; gp1jxzuh >= 1; gp1jxzuh--) {
-              c0 = 1.0 / buhyalv4[3 + (gp1jxzuh-1) * *xtov9rbf];
-              c1 = buhyalv4[0 + (gp1jxzuh+2) * *xtov9rbf] * c0;
-              c2 = buhyalv4[1 + (gp1jxzuh+1) * *xtov9rbf] * c0;
-              c3 = buhyalv4[2 +  gp1jxzuh    * *xtov9rbf] * c0;
-                       plj0trqx[gp1jxzuh-1 + (yq6lorbx-1) * *wep0oibc] = 0.0e0 -
-                ( c1 * plj0trqx[gp1jxzuh+2 + (yq6lorbx-1) * *wep0oibc] +
-                  c2 * plj0trqx[gp1jxzuh+1 + (yq6lorbx-1) * *wep0oibc] +
-                  c3 * plj0trqx[gp1jxzuh   + (yq6lorbx-1) * *wep0oibc] );
-          }
+    for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
+      yq6lorbx = *acpios9q - ayfnwr1v + 1;
+      for (gp1jxzuh = yq6lorbx-4; gp1jxzuh >= 1; gp1jxzuh--) {
+        c0 = 1.0 / buhyalv4[3 + (gp1jxzuh-1) * *xtov9rbf];
+        c1 = buhyalv4[0 + (gp1jxzuh+2) * *xtov9rbf] * c0;
+        c2 = buhyalv4[1 + (gp1jxzuh+1) * *xtov9rbf] * c0;
+        c3 = buhyalv4[2 +  gp1jxzuh    * *xtov9rbf] * c0;
+                 plj0trqx[gp1jxzuh-1 + (yq6lorbx-1) * *wep0oibc] = 0.0e0 -
+          ( c1 * plj0trqx[gp1jxzuh+2 + (yq6lorbx-1) * *wep0oibc] +
+            c2 * plj0trqx[gp1jxzuh+1 + (yq6lorbx-1) * *wep0oibc] +
+            c3 * plj0trqx[gp1jxzuh   + (yq6lorbx-1) * *wep0oibc] );
       }
+    }
 }
 
 
@@ -648,10 +653,10 @@ void n5aioudkwmhctl9x(double *qgnl3toc, double sjwyig9t[],
   }
   F77_CALL(dpbsl8)(buhyalv4, xtov9rbf, acpios9q, &bvsquk3z, rpyis2kc);
 
-      chw8lzty = sjwyig9t;  qnwamo0e1 = imdvf4hx;
+  chw8lzty = sjwyig9t;  qnwamo0e1 = imdvf4hx;
   for (ayfnwr1v = 1; ayfnwr1v <= *kuzxj1lo; ayfnwr1v++) {
-      F77_CALL(wbvalue)(gkdx5jal, rpyis2kc, acpios9q, &h2dpsbkr,
-                        chw8lzty++, &yu6izdrc, qnwamo0e1++);
+    F77_CALL(wbvalue)(gkdx5jal, rpyis2kc, acpios9q, &h2dpsbkr,
+                      chw8lzty++, &yu6izdrc, qnwamo0e1++);
   }
 
 
diff --git a/src/specfun3.c b/src/specfun3.c
new file mode 100644
index 0000000..6f89273
--- /dev/null
+++ b/src/specfun3.c
@@ -0,0 +1,93 @@
+
+
+
+
+
+#include<math.h>
+#include<stdio.h>
+#include<stdlib.h>
+#include<R.h>
+#include<Rmath.h>
+
+
+
+void sf_C_expint(double *x, int *size, double *bzmd6ftv);
+void sf_C_expexpint(double *x, int *size, double *bzmd6ftv);
+void sf_C_expint_e1(double *x, int *size, double *bzmd6ftv);
+void VGAM_C_kend_tau(double *x, double *y, int *f8yswcat, double *bqelz3cy);
+
+
+void F77_NAME(einlib)(double*, double*);
+void F77_NAME(expeinl)(double*, double*);
+void F77_NAME(eonenl)(double*, double*);
+
+
+
+
+
+
+
+void sf_C_expint(double *x,
+                 int *size,
+                 double *bzmd6ftv) {
+  int ayfnwr1v;
+
+  for (ayfnwr1v = 0; ayfnwr1v < *size; ayfnwr1v++)
+    F77_NAME(einlib)(x + ayfnwr1v, bzmd6ftv + ayfnwr1v);
+}
+
+
+
+void sf_C_expexpint(double *x,
+                    int *size,
+                    double *bzmd6ftv) {
+  int ayfnwr1v;
+
+  for (ayfnwr1v = 0; ayfnwr1v < *size; ayfnwr1v++)
+    F77_NAME(expeinl)(x + ayfnwr1v, bzmd6ftv + ayfnwr1v);
+}
+
+
+
+void sf_C_expint_e1(double *x,
+                    int *size,
+                    double *bzmd6ftv) {
+  int ayfnwr1v;
+
+  for (ayfnwr1v = 0; ayfnwr1v < *size; ayfnwr1v++)
+    F77_NAME(eonenl)(x + ayfnwr1v, bzmd6ftv + ayfnwr1v);
+}
+
+
+
+
+void VGAM_C_kend_tau(double *x, double *y, int *f8yswcat, double *bqelz3cy) {
+
+
+  int ayfnwr1v, yq6lorbx, gp1jxzuh = *f8yswcat    ;
+  double q6zdcwxk1, q6zdcwxk2;
+
+  for (ayfnwr1v = 0; ayfnwr1v < 3; ayfnwr1v++)
+    bqelz3cy[ayfnwr1v] = 0.0;
+
+  for (ayfnwr1v = 0; ayfnwr1v < gp1jxzuh; ayfnwr1v++) {
+    for (yq6lorbx = ayfnwr1v + 1; yq6lorbx < *f8yswcat; yq6lorbx++) {
+      q6zdcwxk1 = x[ayfnwr1v] - x[yq6lorbx];
+      q6zdcwxk2 = y[ayfnwr1v] - y[yq6lorbx];
+
+      if (q6zdcwxk1 == 0.0 || q6zdcwxk2 == 0.0) {
+        bqelz3cy[1] += 1.0;
+      } else if ((q6zdcwxk1 < 0.0 && q6zdcwxk2 < 0.0) ||
+                 (q6zdcwxk1 > 0.0 && q6zdcwxk2 > 0.0)) {
+        bqelz3cy[0] += 1.0;
+      } else {
+        bqelz3cy[2] += 1.0;
+      }
+    }
+  }
+}
+
+
+
+
+
diff --git a/src/tyeepolygamma.f b/src/tyeepolygamma.f
new file mode 100644
index 0000000..775303a
--- /dev/null
+++ b/src/tyeepolygamma.f
@@ -0,0 +1,153 @@
+C Output from Public domain Ratfor, version 1.01
+      subroutine vdgam1(x, lfu2qhid, dvhw1ulq)
+      implicit logical (a-z)
+      double precision x, lfu2qhid
+      integer dvhw1ulq
+      double precision w, series, obr6tcex
+      dvhw1ulq = 1
+      if(x .le. 0.0d0)then
+      dvhw1ulq = 0
+      return
+      endif
+      if(x .lt. 6.0d0)then
+      call vdgam2(x + 6.0d0, obr6tcex, dvhw1ulq)
+      lfu2qhid = obr6tcex - 1.0d0/x - 1.0d0/(x + 1.0d0) - 1.0d0/(x + 2.0
+     *d0) - 1.0d0/(x + 3.0d0) - 1.0d0/(x + 4.0d0) - 1.0d0/(x + 5.0d0)
+      return
+      endif
+      w = 1.0d0 / (x * x)
+      series = ((w * (-1.0d0/12.0d0 + ((w * (1.0d0/120.0d0 + ((w * (-1.0
+     *d0/252.0d0 + ((w * (1.0d0/240.0d0 + ((w * (-1.0d0/132.0d0 + ((w * 
+     *(691.0d0/32760.0d0 + ((w * (-1.0d0/12.0d0 + (3617.0d0 * w)/8160.0d
+     *0)))))))))))))))))))))
+      lfu2qhid = ( dlog(x) - 0.5d0/x + series )
+      return
+      end
+      subroutine vdgam2(x, lfu2qhid, dvhw1ulq)
+      implicit logical (a-z)
+      double precision x, lfu2qhid
+      integer dvhw1ulq
+      double precision w, series, obr6tcex
+      dvhw1ulq = 1
+      if(x .le. 0.0d0)then
+      dvhw1ulq = 0
+      return
+      endif
+      if(x .lt. 6.0d0)then
+      call vdgam1(x + 6.0d0, obr6tcex, dvhw1ulq)
+      lfu2qhid = obr6tcex - 1.0d0/x - 1.0d0/(x + 1.0d0) - 1.0d0/(x + 2.0
+     *d0) - 1.0d0/(x + 3.0d0) - 1.0d0/(x + 4.0d0) - 1.0d0/(x + 5.0d0)
+      return
+      endif
+      w = 1.0d0 / (x * x)
+      series = ((w * (-1.0d0/12.0d0 + ((w * (1.0d0/120.0d0 + ((w * (-1.0
+     *d0/252.0d0 + ((w * (1.0d0/240.0d0 + ((w * (-1.0d0/132.0d0 + ((w * 
+     *(691.0d0/32760.0d0 + ((w * (-1.0d0/12.0d0 + (3617.0d0 * w)/8160.0d
+     *0)))))))))))))))))))))
+      lfu2qhid = ( dlog(x) - 0.5d0/x + series )
+      return
+      end
+      subroutine vtgam1(x, lfu2qhid, dvhw1ulq)
+      implicit logical (a-z)
+      double precision x, lfu2qhid
+      integer dvhw1ulq
+      double precision w, series, obr6tcex
+      dvhw1ulq = 1
+      if(x .le. 0.0d0)then
+      dvhw1ulq = 0
+      return
+      endif
+      if(x .lt. 6.0d0)then
+      call vtgam2(x + 6.0d0, obr6tcex, dvhw1ulq)
+      lfu2qhid = obr6tcex + 1.0d0/x**2 + 1.0d0/(x + 1.0d0)**2 + 1.0d0/(x
+     * + 2.0d0)**2 + 1.0d0/(x + 3.0d0)**2 + 1.0d0/(x + 4.0d0)**2 + 1.0d0
+     */(x + 5.0d0)**2
+      return
+      endif
+      w = 1.0d0 / (x * x)
+      series = 1.0d0 + (w * (1.0d0/6.0d0 + (w * (-1.0d0/30.0d0 + (w * (1
+     *.0d0/42.0d0 + (w * (-1.0d0/30.0d0 + (w * (5.0d0/66.0d0 + (w * (-69
+     *1.0d0/2370.0d0 + (w * (7.0d0/6.0d0 - (3617.0d0 * w)/510.0d0)))))))
+     *)))))))
+      lfu2qhid = 0.5d0 * w + series / x
+      return
+      end
+      subroutine vtgam2(x, lfu2qhid, dvhw1ulq)
+      implicit logical (a-z)
+      double precision x, lfu2qhid
+      integer dvhw1ulq
+      double precision w, series, obr6tcex
+      dvhw1ulq = 1
+      if(x .le. 0.0d0)then
+      dvhw1ulq = 0
+      return
+      endif
+      if(x .lt. 6.0d0)then
+      call vtgam1(x + 6.0d0, obr6tcex, dvhw1ulq)
+      lfu2qhid = obr6tcex + 1.0d0/x**2 + 1.0d0/(x + 1.0d0)**2 + 1.0d0/(x
+     * + 2.0d0)**2 + 1.0d0/(x + 3.0d0)**2 + 1.0d0/(x + 4.0d0)**2 + 1.0d0
+     */(x + 5.0d0)**2
+      return
+      endif
+      w = 1.0d0 / (x * x)
+      series = 1.0d0 + (w * (1.0d0/6.0d0 + (w * (-1.0d0/30.0d0 + (w * (1
+     *.0d0/42.0d0 + (w * (-1.0d0/30.0d0 + (w * (5.0d0/66.0d0 + (w * (-69
+     *1.0d0/2370.0d0 + (w * (7.0d0/6.0d0 - (3617.0d0 * w)/510.0d0)))))))
+     *)))))))
+      lfu2qhid = 0.5d0 * w + series / x
+      return
+      end
+      subroutine dgam1w(x, lfu2qhid, n, dvhw1ulq)
+      implicit logical (a-z)
+      integer n, dvhw1ulq
+      double precision x(n), lfu2qhid(n)
+      integer i, okobr6tcex
+      dvhw1ulq = 1
+      do23016 i=1,n 
+      call vdgam1(x(i), lfu2qhid(i), okobr6tcex)
+      if(okobr6tcex .ne. 1)then
+      dvhw1ulq = okobr6tcex
+      endif
+23016 continue
+23017 continue
+      return
+      end
+      subroutine tgam1w(x, lfu2qhid, n, dvhw1ulq)
+      implicit logical (a-z)
+      integer n, dvhw1ulq
+      double precision x(n), lfu2qhid(n)
+      integer i, okobr6tcex
+      dvhw1ulq = 1
+      do23020 i=1,n 
+      call vtgam1(x(i), lfu2qhid(i), okobr6tcex)
+      if(okobr6tcex .ne. 1)then
+      dvhw1ulq = okobr6tcex
+      endif
+23020 continue
+23021 continue
+      return
+      end
+      subroutine cum8sum(ci1oyxas, lfu2qhid, nlfu2qhid, valong, ntot, no
+     *tdvhw1ulq)
+      implicit logical (a-z)
+      integer nlfu2qhid, ntot, notdvhw1ulq
+      double precision ci1oyxas(ntot), lfu2qhid(nlfu2qhid), valong(ntot)
+      integer ayfnwr1v, iii
+      iii = 1
+      lfu2qhid(iii) = ci1oyxas(iii)
+      do23024 ayfnwr1v=2,ntot 
+      if(valong(ayfnwr1v) .gt. valong(ayfnwr1v-1))then
+      lfu2qhid(iii) = lfu2qhid(iii) + ci1oyxas(ayfnwr1v)
+      else
+      iii = iii + 1
+      lfu2qhid(iii) = ci1oyxas(ayfnwr1v)
+      endif
+23024 continue
+23025 continue
+      if(iii .eq. nlfu2qhid)then
+      notdvhw1ulq = 0
+      else
+      notdvhw1ulq = 1
+      endif
+      return
+      end
diff --git a/src/vgam.f b/src/vgam.f
index f689f0c..964ef3a 100644
--- a/src/vgam.f
+++ b/src/vgam.f
@@ -1,138 +1,145 @@
+C Output from Public domain Ratfor, version 1.01
       subroutine vbvs(kuzxj1lo,ankcghz2,rpyis2kc,nk,he7mqnvy,smat,order,
-     &wy1vqfzu)
+     *wy1vqfzu)
       integer kuzxj1lo, nk, order, wy1vqfzu
-      double precision ankcghz2(nk+4), rpyis2kc(nk,wy1vqfzu), he7mqnvy(
-     &kuzxj1lo), smat(kuzxj1lo,wy1vqfzu)
+      double precision ankcghz2(nk+4), rpyis2kc(nk,wy1vqfzu), he7mqnvy(k
+     *uzxj1lo), smat(kuzxj1lo,wy1vqfzu)
       double precision chw8lzty
       integer ayfnwr1v, yq6lorbx, ifour4
       ifour4 = 4
-      do 23000 yq6lorbx=1,wy1vqfzu 
-      do 23002 ayfnwr1v=1,kuzxj1lo 
+      do23000 yq6lorbx=1,wy1vqfzu 
+      do23002 ayfnwr1v=1,kuzxj1lo 
       chw8lzty = he7mqnvy(ayfnwr1v)
       call wbvalue(ankcghz2, rpyis2kc(1,yq6lorbx), nk, ifour4, chw8lzty,
-     & order, smat(ayfnwr1v,yq6lorbx))
+     * order, smat(ayfnwr1v,yq6lorbx))
 23002 continue
+23003 continue
 23000 continue
+23001 continue
       return
       end
       subroutine tfeswo7c(osiz4fxy, nk, wy1vqfzu, ldk, wbkq9zyi, sgmat)
       implicit logical (a-z)
       integer nk, wy1vqfzu, ldk
-      double precision osiz4fxy(ldk,nk*wy1vqfzu), wbkq9zyi(wy1vqfzu), 
-     &sgmat(nk,4)
+      double precision osiz4fxy(ldk,nk*wy1vqfzu), wbkq9zyi(wy1vqfzu), sg
+     *mat(nk,4)
       integer ayfnwr1v, yq6lorbx
-      do 23004 ayfnwr1v=1,nk 
-      do 23006 yq6lorbx=1,wy1vqfzu 
-      osiz4fxy(ldk,(ayfnwr1v-1)*wy1vqfzu+yq6lorbx) = osiz4fxy(ldk,(
-     &ayfnwr1v-1)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorbx) * sgmat(
-     &ayfnwr1v,1)
+      do23004 ayfnwr1v=1,nk 
+      do23006 yq6lorbx=1,wy1vqfzu 
+      osiz4fxy(ldk,(ayfnwr1v-1)*wy1vqfzu+yq6lorbx) = osiz4fxy(ldk,(ayfnw
+     *r1v-1)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorbx) * sgmat(ayfnwr1v,1)
 23006 continue
+23007 continue
 23004 continue
-      do 23008 ayfnwr1v=1,(nk-1) 
-      do 23010 yq6lorbx=1,wy1vqfzu 
-      osiz4fxy(ldk-wy1vqfzu,(ayfnwr1v-0)*wy1vqfzu+yq6lorbx) = osiz4fxy(
-     &ldk-wy1vqfzu,(ayfnwr1v-0)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorbx) 
-     &* sgmat(ayfnwr1v,2)
+23005 continue
+      do23008 ayfnwr1v=1,(nk-1) 
+      do23010 yq6lorbx=1,wy1vqfzu 
+      osiz4fxy(ldk-wy1vqfzu,(ayfnwr1v-0)*wy1vqfzu+yq6lorbx) = osiz4fxy(l
+     *dk-wy1vqfzu,(ayfnwr1v-0)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorbx) *
+     * sgmat(ayfnwr1v,2)
 23010 continue
+23011 continue
 23008 continue
-      do 23012 ayfnwr1v=1,(nk-2) 
-      do 23014 yq6lorbx=1,wy1vqfzu 
-      osiz4fxy(ldk-2*wy1vqfzu,(ayfnwr1v+1)*wy1vqfzu+yq6lorbx) = 
-     &osiz4fxy(ldk-2*wy1vqfzu,(ayfnwr1v+1)*wy1vqfzu+yq6lorbx) + 
-     &wbkq9zyi(yq6lorbx) * sgmat(ayfnwr1v,3)
+23009 continue
+      do23012 ayfnwr1v=1,(nk-2) 
+      do23014 yq6lorbx=1,wy1vqfzu 
+      osiz4fxy(ldk-2*wy1vqfzu,(ayfnwr1v+1)*wy1vqfzu+yq6lorbx) = osiz4fxy
+     *(ldk-2*wy1vqfzu,(ayfnwr1v+1)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorb
+     *x) * sgmat(ayfnwr1v,3)
 23014 continue
+23015 continue
 23012 continue
-      do 23016 ayfnwr1v=1,(nk-3) 
-      do 23018 yq6lorbx=1,wy1vqfzu 
-      osiz4fxy(ldk-3*wy1vqfzu,(ayfnwr1v+2)*wy1vqfzu+yq6lorbx) = 
-     &osiz4fxy(ldk-3*wy1vqfzu,(ayfnwr1v+2)*wy1vqfzu+yq6lorbx) + 
-     &wbkq9zyi(yq6lorbx) * sgmat(ayfnwr1v,4)
+23013 continue
+      do23016 ayfnwr1v=1,(nk-3) 
+      do23018 yq6lorbx=1,wy1vqfzu 
+      osiz4fxy(ldk-3*wy1vqfzu,(ayfnwr1v+2)*wy1vqfzu+yq6lorbx) = osiz4fxy
+     *(ldk-3*wy1vqfzu,(ayfnwr1v+2)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorb
+     *x) * sgmat(ayfnwr1v,4)
 23018 continue
+23019 continue
 23016 continue
+23017 continue
       return
       end
-      subroutine ybnagt8k(iii, cz8qdfyj, tesdm5kv, g9fvdrbw, osiz4fxy, 
-     &wmat, kxvq6sfw, nyfu9rod, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, 
-     &tgiyxdw1, dufozmt7)
+      subroutine ybnagt8k(iii, cz8qdfyj, tesdm5kv, g9fvdrbw, osiz4fxy, w
+     *mat, kxvq6sfw, nyfu9rod, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxd
+     *w1, dufozmt7)
       implicit logical (a-z)
-      integer iii, cz8qdfyj, tesdm5kv, kxvq6sfw, nyfu9rod, wy1vqfzu, 
-     &ldk, dimw, kuzxj1lo, nk, tgiyxdw1(*), dufozmt7(*)
-      double precision g9fvdrbw(4,*), osiz4fxy(ldk, nk*wy1vqfzu), wmat(
-     &kuzxj1lo,dimw)
+      integer iii, cz8qdfyj, tesdm5kv, kxvq6sfw, nyfu9rod, wy1vqfzu, ldk
+     *, dimw, kuzxj1lo, nk, tgiyxdw1(*), dufozmt7(*)
+      double precision g9fvdrbw(4,*), osiz4fxy(ldk, nk*wy1vqfzu), wmat(k
+     *uzxj1lo,dimw)
       double precision obr6tcex
       integer urohxe6t, nead, bcol, brow, biuvowq2, nbj8tdsk
       bcol = cz8qdfyj + tesdm5kv
       brow = cz8qdfyj
-      do 23020 urohxe6t=1,dimw 
-      obr6tcex = wmat(iii,urohxe6t) * g9fvdrbw(kxvq6sfw,1) * g9fvdrbw(
-     &nyfu9rod,1)
+      do23020 urohxe6t=1,dimw 
+      obr6tcex = wmat(iii,urohxe6t) * g9fvdrbw(kxvq6sfw,1) * g9fvdrbw(ny
+     *fu9rod,1)
       biuvowq2 = (brow-1)*wy1vqfzu + tgiyxdw1(urohxe6t)
       nbj8tdsk = (bcol-1)*wy1vqfzu + dufozmt7(urohxe6t)
       nead = nbj8tdsk - biuvowq2
-      osiz4fxy(ldk-nead, nbj8tdsk) = osiz4fxy(ldk-nead, nbj8tdsk) + 
-     &obr6tcex
-      if(.not.(tesdm5kv .gt. 0 .and. dufozmt7(urohxe6t) .ne. tgiyxdw1(
-     &urohxe6t)))goto 23022
+      osiz4fxy(ldk-nead, nbj8tdsk) = osiz4fxy(ldk-nead, nbj8tdsk) + obr6
+     *tcex
+      if(tesdm5kv .gt. 0 .and. dufozmt7(urohxe6t) .ne. tgiyxdw1(urohxe6t
+     *))then
       biuvowq2 = (brow-1)*wy1vqfzu + dufozmt7(urohxe6t)
       nbj8tdsk = (bcol-1)*wy1vqfzu + tgiyxdw1(urohxe6t)
       nead = nbj8tdsk - biuvowq2
-      osiz4fxy(ldk-nead, nbj8tdsk) = osiz4fxy(ldk-nead, nbj8tdsk) + 
-     &obr6tcex
-23022 continue
+      osiz4fxy(ldk-nead, nbj8tdsk) = osiz4fxy(ldk-nead, nbj8tdsk) + obr6
+     *tcex
+      endif
 23020 continue
+23021 continue
       return
       end
-      subroutine vsplin(he7mqnvy,rbne6ouj,wmat,kuzxj1lo,gkdx5jal, nk,
-     &ldk,wy1vqfzu,dimw, tgiyxdw1,dufozmt7, wkmm, wbkq9zyi, info, 
-     &t8hwvalr, rpyis2kc, osiz4fxy, btwy, sgdub, ui8ysltq, yzoe1rsp, 
-     &bmb, ifys6woa, dof, scrtch, fbd5yktj, truen)
+      subroutine vsplin(he7mqnvy,rbne6ouj,wmat,kuzxj1lo,gkdx5jal, nk,ldk
+     *,wy1vqfzu,dimw, tgiyxdw1,dufozmt7, wkmm, wbkq9zyi, info, t8hwvalr,
+     * rpyis2kc, osiz4fxy, btwy, sgdub, ui8ysltq, yzoe1rsp, bmb, ifys6wo
+     *a, dof, scrtch, fbd5yktj, truen)
       implicit logical (a-z)
       integer kuzxj1lo, nk, ldk, wy1vqfzu, dimw, tgiyxdw1(*), dufozmt7(*
-     &), info, fbd5yktj, truen
+     *), info, fbd5yktj, truen
       integer yzoe1rsp
       double precision he7mqnvy(kuzxj1lo), rbne6ouj(kuzxj1lo,wy1vqfzu), 
-     &wmat(kuzxj1lo,dimw), gkdx5jal(nk+4), wkmm(wy1vqfzu,wy1vqfzu,16), 
-     &wbkq9zyi(wy1vqfzu), t8hwvalr(kuzxj1lo,wy1vqfzu), rpyis2kc(nk,
-     &wy1vqfzu), osiz4fxy(ldk,nk*wy1vqfzu), btwy(wy1vqfzu,nk)
-      double precision sgdub(nk,wy1vqfzu), ui8ysltq(truen,wy1vqfzu), 
-     &bmb(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu), dof(wy1vqfzu)
-     &, scrtch(*)
+     *wmat(kuzxj1lo,dimw), gkdx5jal(nk+4), wkmm(wy1vqfzu,wy1vqfzu,16), w
+     *bkq9zyi(wy1vqfzu), t8hwvalr(kuzxj1lo,wy1vqfzu), rpyis2kc(nk,wy1vqf
+     *zu), osiz4fxy(ldk,nk*wy1vqfzu), btwy(wy1vqfzu,nk)
+      double precision sgdub(nk,wy1vqfzu), ui8ysltq(truen,wy1vqfzu), bmb
+     *(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu), dof(wy1vqfzu), s
+     *crtch(*)
       integer yq6lorbx, ayfnwr1v, dqlr5bse, pqzfxw4i, urohxe6t, icrit
       integer gp0xjetb, e5knafcg, wep0oibc, l3zpbstu(3), ispar, i1loc
-      double precision qaltf0nz, g9fvdrbw(4,1), ms0qypiw(16), penalt, 
-     &qcpiaj7f, fp6nozvx, waiez6nt, toldf, parms(3)
-      do 23024 yq6lorbx=1,wy1vqfzu 
-      if(.not.(wbkq9zyi(yq6lorbx) .eq. 0.0d0))goto 23026
+      double precision qaltf0nz, g9fvdrbw(4,1), ms0qypiw(16), penalt, qc
+     *piaj7f, fp6nozvx, waiez6nt, toldf, parms(3)
+      do23024 yq6lorbx=1,wy1vqfzu 
+      if(wbkq9zyi(yq6lorbx) .eq. 0.0d0)then
       ispar=0
       icrit=3
-      goto 23027
-23026 continue
+      else
       ispar=1
       icrit=1
-23027 continue
-      if(.not.((wy1vqfzu .eq. 1) .or. (dimw.eq.wy1vqfzu) .or. (ispar 
-     &.eq. 0)))goto 23028
+      endif
+      if((wy1vqfzu .eq. 1) .or. (dimw.eq.wy1vqfzu) .or. (ispar .eq. 0))t
+     *hen
       e5knafcg = 4
       fp6nozvx = 1.50d0
       waiez6nt = 0.00d0
       wep0oibc = 1
       toldf=0.001d0
-      if(.not.(wy1vqfzu.eq.1))goto 23030
+      if(wy1vqfzu.eq.1)then
       toldf=0.005d0
-      goto 23031
-23030 continue
-      if(.not.(wy1vqfzu.eq.2))goto 23032
+      else
+      if(wy1vqfzu.eq.2)then
       toldf=0.015d0
-      goto 23033
-23032 continue
-      if(.not.(wy1vqfzu.eq.3))goto 23034
+      else
+      if(wy1vqfzu.eq.3)then
       toldf=0.025d0
-      goto 23035
-23034 continue
+      else
       toldf=0.045d0
-23035 continue
-23033 continue
-23031 continue
+      endif
+      endif
+      endif
       l3zpbstu(1) = icrit
       l3zpbstu(2) = ispar
       l3zpbstu(3) = 300
@@ -140,398 +147,450 @@
       parms(2) = fp6nozvx
       parms(3) = toldf
       gp0xjetb=0
-      if(.not.((wy1vqfzu .eq. 1) .or. (dimw.eq.wy1vqfzu)))goto 23036
-      do 23038 ayfnwr1v=1,kuzxj1lo 
-      rbne6ouj(ayfnwr1v,yq6lorbx) = rbne6ouj(ayfnwr1v,yq6lorbx) / wmat(
-     &ayfnwr1v,yq6lorbx)
+      if((wy1vqfzu .eq. 1) .or. (dimw.eq.wy1vqfzu))then
+      do23038 ayfnwr1v=1,kuzxj1lo 
+      rbne6ouj(ayfnwr1v,yq6lorbx) = rbne6ouj(ayfnwr1v,yq6lorbx) / wmat(a
+     *yfnwr1v,yq6lorbx)
 23038 continue
-      call dnaoqj0l(penalt, dof(yq6lorbx), he7mqnvy, rbne6ouj(1,
-     &yq6lorbx), wmat(1,yq6lorbx), kuzxj1lo,nk, gkdx5jal,rpyis2kc(1,
-     &yq6lorbx), t8hwvalr(1,yq6lorbx), ifys6woa(1,yq6lorbx), qcpiaj7f,
-     &wbkq9zyi(yq6lorbx),parms, scrtch, gp0xjetb,l3zpbstu, e5knafcg,
-     &wep0oibc,fbd5yktj)
-      if(.not.(fbd5yktj .ne. 0))goto 23040
+23039 continue
+      call dnaoqj0l(penalt, dof(yq6lorbx), he7mqnvy, rbne6ouj(1,yq6lorbx
+     *), wmat(1,yq6lorbx), kuzxj1lo,nk, gkdx5jal,rpyis2kc(1,yq6lorbx), t
+     *8hwvalr(1,yq6lorbx), ifys6woa(1,yq6lorbx), qcpiaj7f,wbkq9zyi(yq6lo
+     *rbx),parms, scrtch, gp0xjetb,l3zpbstu, e5knafcg,wep0oibc,fbd5yktj)
+      if(fbd5yktj .ne. 0)then
       return
-23040 continue
-      do 23042 ayfnwr1v=1,kuzxj1lo 
+      endif
+      do23042 ayfnwr1v=1,kuzxj1lo 
       wmat(ayfnwr1v,yq6lorbx) = wmat(ayfnwr1v,yq6lorbx) * wmat(ayfnwr1v,
-     &yq6lorbx)
+     *yq6lorbx)
 23042 continue
-      if(.not.(yzoe1rsp .ne. 0))goto 23044
-      do 23046 ayfnwr1v=1,kuzxj1lo 
-      ui8ysltq(ayfnwr1v,yq6lorbx) = ifys6woa(ayfnwr1v,yq6lorbx) / wmat(
-     &ayfnwr1v,yq6lorbx)
+23043 continue
+      if(yzoe1rsp .ne. 0)then
+      do23046 ayfnwr1v=1,kuzxj1lo 
+      ui8ysltq(ayfnwr1v,yq6lorbx) = ifys6woa(ayfnwr1v,yq6lorbx) / wmat(a
+     *yfnwr1v,yq6lorbx)
 23046 continue
-23044 continue
-      goto 23037
-23036 continue
-      call dnaoqj0l(penalt, dof(yq6lorbx), he7mqnvy, btwy(1,yq6lorbx), 
-     &wmat(1,yq6lorbx), kuzxj1lo,nk, gkdx5jal,rpyis2kc(1,yq6lorbx),
-     &t8hwvalr(1,yq6lorbx), ifys6woa(1,yq6lorbx), qcpiaj7f,wbkq9zyi(
-     &yq6lorbx),parms, scrtch, gp0xjetb,l3zpbstu, e5knafcg,wep0oibc,
-     &fbd5yktj)
-      if(.not.(fbd5yktj .ne. 0))goto 23048
+23047 continue
+      endif
+      else
+      call dnaoqj0l(penalt, dof(yq6lorbx), he7mqnvy, btwy(1,yq6lorbx), w
+     *mat(1,yq6lorbx), kuzxj1lo,nk, gkdx5jal,rpyis2kc(1,yq6lorbx),t8hwva
+     *lr(1,yq6lorbx), ifys6woa(1,yq6lorbx), qcpiaj7f,wbkq9zyi(yq6lorbx),
+     *parms, scrtch, gp0xjetb,l3zpbstu, e5knafcg,wep0oibc,fbd5yktj)
+      if(fbd5yktj .ne. 0)then
       return
-23048 continue
-      do 23050 ayfnwr1v=1,kuzxj1lo 
+      endif
+      do23050 ayfnwr1v=1,kuzxj1lo 
       wmat(ayfnwr1v,yq6lorbx) = wmat(ayfnwr1v,yq6lorbx) * wmat(ayfnwr1v,
-     &yq6lorbx)
+     *yq6lorbx)
 23050 continue
-23037 continue
-      if(.not.(fbd5yktj .ne. 0))goto 23052
+23051 continue
+      endif
+      if(fbd5yktj .ne. 0)then
       return
-23052 continue
-23028 continue
+      endif
+      endif
 23024 continue
-      if(.not.((wy1vqfzu .eq. 1) .or. (dimw .eq. wy1vqfzu)))goto 23054
+23025 continue
+      if((wy1vqfzu .eq. 1) .or. (dimw .eq. wy1vqfzu))then
       return
-23054 continue
-      do 23056 ayfnwr1v=1,nk 
-      do 23058 yq6lorbx=1,wy1vqfzu 
+      endif
+      do23056 ayfnwr1v=1,nk 
+      do23058 yq6lorbx=1,wy1vqfzu 
       btwy(yq6lorbx,ayfnwr1v)=0.0d0
 23058 continue
+23059 continue
 23056 continue
-      do 23060 ayfnwr1v=1,(nk*wy1vqfzu) 
-      do 23062 yq6lorbx=1,ldk 
+23057 continue
+      do23060 ayfnwr1v=1,(nk*wy1vqfzu) 
+      do23062 yq6lorbx=1,ldk 
       osiz4fxy(yq6lorbx,ayfnwr1v) = 0.0d0
 23062 continue
+23063 continue
 23060 continue
+23061 continue
       qaltf0nz = 0.1d-9
-      do 23064 ayfnwr1v=1,kuzxj1lo 
-      call vinterv(gkdx5jal(1),(nk+1),he7mqnvy(ayfnwr1v),dqlr5bse,
-     &pqzfxw4i)
-      if(.not.(pqzfxw4i .eq. 1))goto 23066
-      if(.not.(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz)))
-     &goto 23068
+      do23064 ayfnwr1v=1,kuzxj1lo 
+      call vinterv(gkdx5jal(1),(nk+1),he7mqnvy(ayfnwr1v),dqlr5bse,pqzfxw
+     *4i)
+      if(pqzfxw4i .eq. 1)then
+      if(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz))then
       dqlr5bse=dqlr5bse-1
-      goto 23069
-23068 continue
+      else
       return
-23069 continue
-23066 continue
-      call vbsplvd(gkdx5jal,4,he7mqnvy(ayfnwr1v),dqlr5bse,ms0qypiw,
-     &g9fvdrbw,1)
+      endif
+      endif
+      call vbsplvd(gkdx5jal,4,he7mqnvy(ayfnwr1v),dqlr5bse,ms0qypiw,g9fvd
+     *rbw,1)
       yq6lorbx= dqlr5bse-4+1
-      do 23070 urohxe6t=1,wy1vqfzu 
-      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(
-     &ayfnwr1v,urohxe6t) * g9fvdrbw(1,1)
+      do23070 urohxe6t=1,wy1vqfzu 
+      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1
+     *v,urohxe6t) * g9fvdrbw(1,1)
 23070 continue
+23071 continue
       call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 1, 
-     &1, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *1, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       call ybnagt8k(ayfnwr1v, yq6lorbx, 1, g9fvdrbw, osiz4fxy, wmat, 1, 
-     &2, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *2, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       call ybnagt8k(ayfnwr1v, yq6lorbx, 2, g9fvdrbw, osiz4fxy, wmat, 1, 
-     &3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       call ybnagt8k(ayfnwr1v, yq6lorbx, 3, g9fvdrbw, osiz4fxy, wmat, 1, 
-     &4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       yq6lorbx= dqlr5bse-4+2
-      do 23072 urohxe6t=1,wy1vqfzu 
-      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(
-     &ayfnwr1v,urohxe6t) * g9fvdrbw(2,1)
+      do23072 urohxe6t=1,wy1vqfzu 
+      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1
+     *v,urohxe6t) * g9fvdrbw(2,1)
 23072 continue
+23073 continue
       call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 2, 
-     &2, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *2, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       call ybnagt8k(ayfnwr1v, yq6lorbx, 1, g9fvdrbw, osiz4fxy, wmat, 2, 
-     &3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       call ybnagt8k(ayfnwr1v, yq6lorbx, 2, g9fvdrbw, osiz4fxy, wmat, 2, 
-     &4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       yq6lorbx= dqlr5bse-4+3
-      do 23074 urohxe6t=1,wy1vqfzu 
-      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(
-     &ayfnwr1v,urohxe6t) * g9fvdrbw(3,1)
+      do23074 urohxe6t=1,wy1vqfzu 
+      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1
+     *v,urohxe6t) * g9fvdrbw(3,1)
 23074 continue
+23075 continue
       call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 3, 
-     &3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       call ybnagt8k(ayfnwr1v, yq6lorbx, 1, g9fvdrbw, osiz4fxy, wmat, 3, 
-     &4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
       yq6lorbx= dqlr5bse-4+4
-      do 23076 urohxe6t=1,wy1vqfzu 
-      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(
-     &ayfnwr1v,urohxe6t) * g9fvdrbw(4,1)
+      do23076 urohxe6t=1,wy1vqfzu 
+      btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1
+     *v,urohxe6t) * g9fvdrbw(4,1)
 23076 continue
+23077 continue
       call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 4, 
-     &4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
+     *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7)
 23064 continue
-      call zosq7hub(sgdub(1,1), sgdub(1,2), sgdub(1,3), sgdub(1,4), 
-     &gkdx5jal, nk)
+23065 continue
+      call zosq7hub(sgdub(1,1), sgdub(1,2), sgdub(1,3), sgdub(1,4), gkdx
+     *5jal, nk)
       call tfeswo7c(osiz4fxy, nk, wy1vqfzu, ldk, wbkq9zyi, sgdub)
       call vdpbfa7(osiz4fxy, ldk, nk*wy1vqfzu, ldk-1, info, sgdub)
-      if(.not.(info .ne. 0))goto 23078
+      if(info .ne. 0)then
       return
-23078 continue
+      endif
       call vdpbsl7(osiz4fxy, ldk, nk*wy1vqfzu, ldk-1, btwy, sgdub)
       i1loc = 0
-      do 23080 ayfnwr1v=1,nk 
-      do 23082 yq6lorbx=1,wy1vqfzu 
+      do23080 ayfnwr1v=1,nk 
+      do23082 yq6lorbx=1,wy1vqfzu 
       i1loc = i1loc + 1
       rpyis2kc(ayfnwr1v,yq6lorbx) = btwy(yq6lorbx,ayfnwr1v)
 23082 continue
+23083 continue
 23080 continue
-      call cn8kzpab(gkdx5jal, he7mqnvy, rpyis2kc, kuzxj1lo, nk, 
-     &wy1vqfzu, t8hwvalr)
+23081 continue
+      call cn8kzpab(gkdx5jal, he7mqnvy, rpyis2kc, kuzxj1lo, nk, wy1vqfzu
+     *, t8hwvalr)
       call vicb2(osiz4fxy, osiz4fxy, sgdub, wkmm, ldk-1, nk*wy1vqfzu)
-      call icpd0omv(osiz4fxy, he7mqnvy, gkdx5jal, ui8ysltq, ldk, 
-     &kuzxj1lo, nk, wy1vqfzu, yzoe1rsp, bmb, wkmm, wmat, ifys6woa, dimw,
-     & tgiyxdw1, dufozmt7, truen)
+      call icpd0omv(osiz4fxy, he7mqnvy, gkdx5jal, ui8ysltq, ldk, kuzxj1l
+     *o, nk, wy1vqfzu, yzoe1rsp, bmb, wkmm, wmat, ifys6woa, dimw, tgiyxd
+     *w1, dufozmt7, truen)
       return
       end
-      subroutine cn8kzpab(ankcghz2, he7mqnvy, rpyis2kc, kuzxj1lo, nk, 
-     &wy1vqfzu, t8hwvalr)
+      subroutine cn8kzpab(ankcghz2, he7mqnvy, rpyis2kc, kuzxj1lo, nk, wy
+     *1vqfzu, t8hwvalr)
       implicit logical (a-z)
       integer kuzxj1lo, nk, wy1vqfzu
-      double precision ankcghz2(nk+4), he7mqnvy(kuzxj1lo), rpyis2kc(nk,
-     &wy1vqfzu), t8hwvalr(kuzxj1lo,wy1vqfzu)
+      double precision ankcghz2(nk+4), he7mqnvy(kuzxj1lo), rpyis2kc(nk,w
+     *y1vqfzu), t8hwvalr(kuzxj1lo,wy1vqfzu)
       double precision chw8lzty
       integer ayfnwr1v, yq6lorbx, izero0, ifour4
       izero0 = 0
       ifour4 = 4
-      do 23084 ayfnwr1v=1,kuzxj1lo 
+      do23084 ayfnwr1v=1,kuzxj1lo 
       chw8lzty = he7mqnvy(ayfnwr1v)
-      do 23086 yq6lorbx=1,wy1vqfzu 
+      do23086 yq6lorbx=1,wy1vqfzu 
       call wbvalue(ankcghz2, rpyis2kc(1,yq6lorbx), nk, ifour4, chw8lzty,
-     & izero0, t8hwvalr(ayfnwr1v,yq6lorbx))
+     * izero0, t8hwvalr(ayfnwr1v,yq6lorbx))
 23086 continue
+23087 continue
 23084 continue
+23085 continue
       return
       end
-      subroutine vsuff9(kuzxj1lo,nef,ezlgm2up, he7mqnvy,tlgduey8,wmat, 
-     &pygsw6ko,pasjmo8g,wbar,uwbar,wpasjmo8g, wy1vqfzu, dimw, dimu, 
-     &tgiyxdw1, dufozmt7, work, work2, hjm2ktyr, kgwmz4ip, iz2nbfjc, 
-     &wuwbar, dvhw1ulq)
+      subroutine vsuff9(kuzxj1lo,nef,ezlgm2up, he7mqnvy,tlgduey8,wmat, p
+     *ygsw6ko,pasjmo8g,wbar,uwbar,wpasjmo8g, wy1vqfzu, dimw, dimu, tgiyx
+     *dw1, dufozmt7, work, work2, hjm2ktyr, kgwmz4ip, iz2nbfjc, wuwbar, 
+     *dvhw1ulq)
       implicit logical (a-z)
-      integer kuzxj1lo, nef, ezlgm2up(kuzxj1lo), wy1vqfzu, dimw, dimu, 
-     &kgwmz4ip, iz2nbfjc, wuwbar, dvhw1ulq, tgiyxdw1(*),dufozmt7(*)
+      integer kuzxj1lo, nef, ezlgm2up(kuzxj1lo), wy1vqfzu, dimw, dimu, k
+     *gwmz4ip, iz2nbfjc, wuwbar, dvhw1ulq, tgiyxdw1(*),dufozmt7(*)
       double precision he7mqnvy(kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqfzu), 
-     &wmat(kuzxj1lo,dimw), pygsw6ko(nef), pasjmo8g(nef,wy1vqfzu), wbar(
-     &nef,*), uwbar(dimu,nef), wpasjmo8g(nef,wy1vqfzu), work(wy1vqfzu,
-     &wy1vqfzu+1), work2(kgwmz4ip,kgwmz4ip+1), hjm2ktyr(wy1vqfzu,
-     &kgwmz4ip)
-      integer ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, 
-     &imk5wjxg
+     *wmat(kuzxj1lo,dimw), pygsw6ko(nef), pasjmo8g(nef,wy1vqfzu), wbar(n
+     *ef,*), uwbar(dimu,nef), wpasjmo8g(nef,wy1vqfzu), work(wy1vqfzu,wy1
+     *vqfzu+1), work2(kgwmz4ip,kgwmz4ip+1), hjm2ktyr(wy1vqfzu,kgwmz4ip)
+      integer ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, imk5wjxg
       integer oneint
       oneint = 1
-      if(.not.(iz2nbfjc .eq. 1))goto 23088
-      if(.not.((dimu .ne. dimw) .or. (kgwmz4ip .ne. wy1vqfzu)))goto 2309
-     &0
+      if(iz2nbfjc .eq. 1)then
+      if((dimu .ne. dimw) .or. (kgwmz4ip .ne. wy1vqfzu))then
       dvhw1ulq = 0
       return
-23090 continue
-23088 continue
+      endif
+      endif
       imk5wjxg = wy1vqfzu * (wy1vqfzu+1) / 2
-      if(.not.(dimw .gt. imk5wjxg))goto 23092
-23092 continue
+      if(dimw .gt. imk5wjxg)then
+      endif
       call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu)
-      do 23094 ayfnwr1v=1,kuzxj1lo 
+      do23094 ayfnwr1v=1,kuzxj1lo 
       pygsw6ko(ezlgm2up(ayfnwr1v))=he7mqnvy(ayfnwr1v)
 23094 continue
-      do 23096 yq6lorbx=1,wy1vqfzu 
-      do 23098 ayfnwr1v=1,nef 
+23095 continue
+      do23096 yq6lorbx=1,wy1vqfzu 
+      do23098 ayfnwr1v=1,nef 
       wpasjmo8g(ayfnwr1v,yq6lorbx) = 0.0d0
 23098 continue
+23099 continue
 23096 continue
-      do 23100 yq6lorbx=1,dimw 
-      do 23102 ayfnwr1v=1,nef 
+23097 continue
+      do23100 yq6lorbx=1,dimw 
+      do23102 ayfnwr1v=1,nef 
       wbar(ayfnwr1v,yq6lorbx) = 0.0d0
 23102 continue
+23103 continue
 23100 continue
-      if(.not.(dimw .ne. imk5wjxg))goto 23104
-      do 23106 gp1jxzuh=1,wy1vqfzu 
-      do 23108 yq6lorbx=1,wy1vqfzu 
+23101 continue
+      if(dimw .ne. imk5wjxg)then
+      do23106 gp1jxzuh=1,wy1vqfzu 
+      do23108 yq6lorbx=1,wy1vqfzu 
       work(yq6lorbx,gp1jxzuh) = 0.0d0
 23108 continue
+23109 continue
 23106 continue
-23104 continue
-      do 23110 ayfnwr1v=1,kuzxj1lo 
-      do 23112 yq6lorbx=1,dimw 
-      work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wmat(ayfnwr1v,
-     &yq6lorbx)
-      work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(
-     &yq6lorbx),dufozmt7(yq6lorbx))
+23107 continue
+      endif
+      do23110 ayfnwr1v=1,kuzxj1lo 
+      do23112 yq6lorbx=1,dimw 
+      work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wmat(ayfnwr1v,yq6lor
+     *bx)
+      work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(yq6lor
+     *bx),dufozmt7(yq6lorbx))
 23112 continue
-      do 23114 yq6lorbx=1,wy1vqfzu 
-      do 23116 gp1jxzuh=1,wy1vqfzu 
-      wpasjmo8g(ezlgm2up(ayfnwr1v),yq6lorbx) = wpasjmo8g(ezlgm2up(
-     &ayfnwr1v),yq6lorbx) + work(yq6lorbx,gp1jxzuh)*tlgduey8(ayfnwr1v,
-     &gp1jxzuh)
+23113 continue
+      do23114 yq6lorbx=1,wy1vqfzu 
+      do23116 gp1jxzuh=1,wy1vqfzu 
+      wpasjmo8g(ezlgm2up(ayfnwr1v),yq6lorbx) = wpasjmo8g(ezlgm2up(ayfnwr
+     *1v),yq6lorbx) + work(yq6lorbx,gp1jxzuh)*tlgduey8(ayfnwr1v,gp1jxzuh
+     *)
 23116 continue
+23117 continue
 23114 continue
-      do 23118 yq6lorbx=1,dimw 
-      wbar(ezlgm2up(ayfnwr1v),yq6lorbx) = wbar(ezlgm2up(ayfnwr1v),
-     &yq6lorbx) + wmat(ayfnwr1v,yq6lorbx)
+23115 continue
+      do23118 yq6lorbx=1,dimw 
+      wbar(ezlgm2up(ayfnwr1v),yq6lorbx) = wbar(ezlgm2up(ayfnwr1v),yq6lor
+     *bx) + wmat(ayfnwr1v,yq6lorbx)
 23118 continue
+23119 continue
 23110 continue
+23111 continue
       dvhw1ulq = 1
-      if(.not.(iz2nbfjc .eq. 1))goto 23120
-      do 23122 ayfnwr1v=1,nef 
-      do 23124 yq6lorbx=1,dimw 
-      work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wbar(ayfnwr1v,
-     &yq6lorbx)
-      work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(
-     &yq6lorbx),dufozmt7(yq6lorbx))
+      if(iz2nbfjc .eq. 1)then
+      do23122 ayfnwr1v=1,nef 
+      do23124 yq6lorbx=1,dimw 
+      work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wbar(ayfnwr1v,yq6lor
+     *bx)
+      work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(yq6lor
+     *bx),dufozmt7(yq6lorbx))
 23124 continue
-      do 23126 yq6lorbx=1,wy1vqfzu 
+23125 continue
+      do23126 yq6lorbx=1,wy1vqfzu 
       work(yq6lorbx,wy1vqfzu+1)=wpasjmo8g(ayfnwr1v,yq6lorbx)
 23126 continue
+23127 continue
       call vcholf(work, work(1,wy1vqfzu+1), wy1vqfzu, dvhw1ulq, oneint)
-      if(.not.(dvhw1ulq .ne. 1))goto 23128
+      if(dvhw1ulq .ne. 1)then
       return
-23128 continue
-      if(.not.(wuwbar .ne. 0))goto 23130
-      do 23132 yq6lorbx=1,dimw 
-      uwbar(yq6lorbx,ayfnwr1v) = work(tgiyxdw1(yq6lorbx),dufozmt7(
-     &yq6lorbx))
+      endif
+      if(wuwbar .ne. 0)then
+      do23132 yq6lorbx=1,dimw 
+      uwbar(yq6lorbx,ayfnwr1v) = work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lor
+     *bx))
 23132 continue
-23130 continue
-      do 23134 yq6lorbx=1,wy1vqfzu 
+23133 continue
+      endif
+      do23134 yq6lorbx=1,wy1vqfzu 
       pasjmo8g(ayfnwr1v,yq6lorbx)=work(yq6lorbx,wy1vqfzu+1)
 23134 continue
+23135 continue
 23122 continue
-      goto 23121
-23120 continue
-      if(.not.(dimw .ne. imk5wjxg))goto 23136
-      do 23138 yq6lorbx=1,wy1vqfzu 
-      do 23140 gp1jxzuh=1,wy1vqfzu 
+23123 continue
+      else
+      if(dimw .ne. imk5wjxg)then
+      do23138 yq6lorbx=1,wy1vqfzu 
+      do23140 gp1jxzuh=1,wy1vqfzu 
       work(yq6lorbx,gp1jxzuh) = 0.0d0
 23140 continue
+23141 continue
 23138 continue
-23136 continue
-      do 23142 ayfnwr1v=1,nef 
+23139 continue
+      endif
+      do23142 ayfnwr1v=1,nef 
       call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu)
-      do 23144 yq6lorbx=1,dimw 
-      work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wbar(ayfnwr1v,
-     &yq6lorbx)
-      work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(
-     &yq6lorbx),dufozmt7(yq6lorbx))
+      do23144 yq6lorbx=1,dimw 
+      work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wbar(ayfnwr1v,yq6lor
+     *bx)
+      work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(yq6lor
+     *bx),dufozmt7(yq6lorbx))
 23144 continue
-      do 23146 yq6lorbx=1,wy1vqfzu 
+23145 continue
+      do23146 yq6lorbx=1,wy1vqfzu 
       work(yq6lorbx,wy1vqfzu+1)=wpasjmo8g(ayfnwr1v,yq6lorbx)
 23146 continue
-      do 23148 yq6lorbx=1,kgwmz4ip 
-      do 23150 gp1jxzuh=yq6lorbx,kgwmz4ip 
+23147 continue
+      do23148 yq6lorbx=1,kgwmz4ip 
+      do23150 gp1jxzuh=yq6lorbx,kgwmz4ip 
       work2(yq6lorbx,gp1jxzuh) = 0.0d0
-      do 23152 urohxe6t=1,wy1vqfzu 
-      do 23154 bpvaqm5z=1,wy1vqfzu 
-      work2(yq6lorbx,gp1jxzuh) = work2(yq6lorbx,gp1jxzuh) + hjm2ktyr(
-     &urohxe6t,yq6lorbx) * work(urohxe6t,bpvaqm5z) * hjm2ktyr(bpvaqm5z,
-     &gp1jxzuh)
+      do23152 urohxe6t=1,wy1vqfzu 
+      do23154 bpvaqm5z=1,wy1vqfzu 
+      work2(yq6lorbx,gp1jxzuh) = work2(yq6lorbx,gp1jxzuh) + hjm2ktyr(uro
+     *hxe6t,yq6lorbx) * work(urohxe6t,bpvaqm5z) * hjm2ktyr(bpvaqm5z,gp1j
+     *xzuh)
 23154 continue
+23155 continue
 23152 continue
+23153 continue
 23150 continue
+23151 continue
 23148 continue
+23149 continue
       call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip)
-      do 23156 yq6lorbx=1,dimu 
-      wbar(ayfnwr1v,yq6lorbx) = work2(tgiyxdw1(yq6lorbx),dufozmt7(
-     &yq6lorbx))
+      do23156 yq6lorbx=1,dimu 
+      wbar(ayfnwr1v,yq6lorbx) = work2(tgiyxdw1(yq6lorbx),dufozmt7(yq6lor
+     *bx))
 23156 continue
-      do 23158 yq6lorbx=1,kgwmz4ip 
+23157 continue
+      do23158 yq6lorbx=1,kgwmz4ip 
       work2(yq6lorbx,kgwmz4ip+1) = 0.0d0
-      do 23160 urohxe6t=1,wy1vqfzu 
-      work2(yq6lorbx,kgwmz4ip+1) = work2(yq6lorbx,kgwmz4ip+1) + 
-     &hjm2ktyr(urohxe6t,yq6lorbx) * work(urohxe6t,wy1vqfzu+1)
+      do23160 urohxe6t=1,wy1vqfzu 
+      work2(yq6lorbx,kgwmz4ip+1) = work2(yq6lorbx,kgwmz4ip+1) + hjm2ktyr
+     *(urohxe6t,yq6lorbx) * work(urohxe6t,wy1vqfzu+1)
 23160 continue
+23161 continue
 23158 continue
-      do 23162 yq6lorbx=1,kgwmz4ip 
+23159 continue
+      do23162 yq6lorbx=1,kgwmz4ip 
       wpasjmo8g(ayfnwr1v,yq6lorbx) = work2(yq6lorbx,kgwmz4ip+1)
 23162 continue
-      call vcholf(work2, work2(1,kgwmz4ip+1), kgwmz4ip, dvhw1ulq, 
-     &oneint)
-      if(.not.(dvhw1ulq .ne. 1))goto 23164
+23163 continue
+      call vcholf(work2, work2(1,kgwmz4ip+1), kgwmz4ip, dvhw1ulq, oneint
+     *)
+      if(dvhw1ulq .ne. 1)then
       return
-23164 continue
-      if(.not.(wuwbar .ne. 0))goto 23166
-      do 23168 yq6lorbx=1,dimu 
-      uwbar(yq6lorbx,ayfnwr1v) = work2(tgiyxdw1(yq6lorbx),dufozmt7(
-     &yq6lorbx))
+      endif
+      if(wuwbar .ne. 0)then
+      do23168 yq6lorbx=1,dimu 
+      uwbar(yq6lorbx,ayfnwr1v) = work2(tgiyxdw1(yq6lorbx),dufozmt7(yq6lo
+     *rbx))
 23168 continue
-23166 continue
-      do 23170 yq6lorbx=1,kgwmz4ip 
+23169 continue
+      endif
+      do23170 yq6lorbx=1,kgwmz4ip 
       pasjmo8g(ayfnwr1v,yq6lorbx) = work2(yq6lorbx,kgwmz4ip+1)
 23170 continue
+23171 continue
 23142 continue
-23121 continue
+23143 continue
+      endif
       return
       end
-      subroutine icpd0omv(enaqpzk9, he7mqnvy, gkdx5jal, grmuyvx9, ldk, 
-     &kuzxj1lo, nk, wy1vqfzu, jzwsy6tp, bmb, work, wmat, ifys6woa, dimw,
-     & tgiyxdw1, dufozmt7, truen)
+      subroutine icpd0omv(enaqpzk9, he7mqnvy, gkdx5jal, grmuyvx9, ldk, k
+     *uzxj1lo, nk, wy1vqfzu, jzwsy6tp, bmb, work, wmat, ifys6woa, dimw, 
+     *tgiyxdw1, dufozmt7, truen)
       implicit logical (a-z)
       integer ldk, kuzxj1lo, nk, wy1vqfzu, jzwsy6tp, dimw, tgiyxdw1(*), 
-     &dufozmt7(*), truen
-      double precision enaqpzk9(ldk,nk*wy1vqfzu), he7mqnvy(kuzxj1lo), 
-     &gkdx5jal(nk+4), grmuyvx9(truen,wy1vqfzu), bmb(wy1vqfzu,wy1vqfzu), 
-     &work(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), ifys6woa(kuzxj1lo,
-     &wy1vqfzu)
-      integer ayfnwr1v, yq6lorbx, gp1jxzuh, dqlr5bse, pqzfxw4i, 
-     &urohxe6t, bpvaqm5z
+     *dufozmt7(*), truen
+      double precision enaqpzk9(ldk,nk*wy1vqfzu), he7mqnvy(kuzxj1lo), gk
+     *dx5jal(nk+4), grmuyvx9(truen,wy1vqfzu), bmb(wy1vqfzu,wy1vqfzu), wo
+     *rk(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), ifys6woa(kuzxj1lo,wy1v
+     *qfzu)
+      integer ayfnwr1v, yq6lorbx, gp1jxzuh, dqlr5bse, pqzfxw4i, urohxe6t
+     *, bpvaqm5z
       double precision qaltf0nz, ms0qypiw(16), g9fvdrbw(4,1)
-      if(.not.(jzwsy6tp .ne. 0))goto 23172
-      do 23174 gp1jxzuh=1,wy1vqfzu 
-      do 23176 ayfnwr1v=1,kuzxj1lo 
+      if(jzwsy6tp .ne. 0)then
+      do23174 gp1jxzuh=1,wy1vqfzu 
+      do23176 ayfnwr1v=1,kuzxj1lo 
       grmuyvx9(ayfnwr1v,gp1jxzuh) = 0.0d0
 23176 continue
+23177 continue
 23174 continue
-23172 continue
+23175 continue
+      endif
       qaltf0nz = 0.10d-9
       call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu)
-      do 23178 ayfnwr1v=1,kuzxj1lo 
-      do 23180 yq6lorbx=1,wy1vqfzu 
-      do 23182 gp1jxzuh=1,wy1vqfzu 
+      do23178 ayfnwr1v=1,kuzxj1lo 
+      do23180 yq6lorbx=1,wy1vqfzu 
+      do23182 gp1jxzuh=1,wy1vqfzu 
       bmb(yq6lorbx,gp1jxzuh)=0.0d0
 23182 continue
+23183 continue
 23180 continue
-      call vinterv(gkdx5jal(1), (nk+1), he7mqnvy(ayfnwr1v), dqlr5bse, 
-     &pqzfxw4i)
-      if(.not.(pqzfxw4i.eq. 1))goto 23184
-      if(.not.(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz)))
-     &goto 23186
+23181 continue
+      call vinterv(gkdx5jal(1), (nk+1), he7mqnvy(ayfnwr1v), dqlr5bse, pq
+     *zfxw4i)
+      if(pqzfxw4i.eq. 1)then
+      if(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz))then
       dqlr5bse=dqlr5bse-1
-      goto 23187
-23186 continue
+      else
       return
-23187 continue
-23184 continue
+      endif
+      endif
       call vbsplvd(gkdx5jal, 4, he7mqnvy(ayfnwr1v), dqlr5bse, ms0qypiw, 
-     &g9fvdrbw, 1)
+     *g9fvdrbw, 1)
       yq6lorbx= dqlr5bse-4+1
-      do 23188 urohxe6t=yq6lorbx,yq6lorbx+3 
+      do23188 urohxe6t=yq6lorbx,yq6lorbx+3 
       call vsel(urohxe6t, urohxe6t, wy1vqfzu, nk, ldk, enaqpzk9, work)
-      call o0xlszqr(wy1vqfzu, g9fvdrbw(urohxe6t-yq6lorbx+1,1) * 
-     &g9fvdrbw(urohxe6t-yq6lorbx+1,1), work, bmb)
+      call o0xlszqr(wy1vqfzu, g9fvdrbw(urohxe6t-yq6lorbx+1,1) * g9fvdrbw
+     *(urohxe6t-yq6lorbx+1,1), work, bmb)
 23188 continue
-      do 23190 urohxe6t=yq6lorbx,yq6lorbx+3 
-      do 23192 bpvaqm5z=urohxe6t+1,yq6lorbx+3 
+23189 continue
+      do23190 urohxe6t=yq6lorbx,yq6lorbx+3 
+      do23192 bpvaqm5z=urohxe6t+1,yq6lorbx+3 
       call vsel(urohxe6t, bpvaqm5z, wy1vqfzu, nk, ldk, enaqpzk9, work)
       call o0xlszqr(wy1vqfzu, 2.0d0 * g9fvdrbw(urohxe6t-yq6lorbx+1,1) * 
-     &g9fvdrbw(bpvaqm5z-yq6lorbx+1,1), work, bmb)
+     *g9fvdrbw(bpvaqm5z-yq6lorbx+1,1), work, bmb)
 23192 continue
+23193 continue
 23190 continue
-      if(.not.(jzwsy6tp .ne. 0))goto 23194
-      do 23196 yq6lorbx=1,wy1vqfzu 
+23191 continue
+      if(jzwsy6tp .ne. 0)then
+      do23196 yq6lorbx=1,wy1vqfzu 
       grmuyvx9(ayfnwr1v,yq6lorbx) = bmb(yq6lorbx,yq6lorbx)
 23196 continue
-23194 continue
+23197 continue
+      endif
       call ovjnsmt2(bmb, wmat, work, ifys6woa, wy1vqfzu, kuzxj1lo, dimw,
-     & tgiyxdw1, dufozmt7, ayfnwr1v)
+     * tgiyxdw1, dufozmt7, ayfnwr1v)
 23178 continue
+23179 continue
       return
       end
       subroutine o0xlszqr(wy1vqfzu, g9fvdrbw, work, bmb)
       implicit logical (a-z)
       integer wy1vqfzu
-      double precision g9fvdrbw, work(wy1vqfzu,wy1vqfzu), bmb(wy1vqfzu,
-     &wy1vqfzu)
+      double precision g9fvdrbw, work(wy1vqfzu,wy1vqfzu), bmb(wy1vqfzu,w
+     *y1vqfzu)
       integer yq6lorbx, gp1jxzuh
-      do 23198 yq6lorbx=1,wy1vqfzu 
-      do 23200 gp1jxzuh=1,wy1vqfzu 
+      do23198 yq6lorbx=1,wy1vqfzu 
+      do23200 gp1jxzuh=1,wy1vqfzu 
       work(yq6lorbx,gp1jxzuh) = work(yq6lorbx,gp1jxzuh) * g9fvdrbw
 23200 continue
+23201 continue
 23198 continue
-      do 23202 yq6lorbx=1,wy1vqfzu 
-      do 23204 gp1jxzuh=1,wy1vqfzu 
-      bmb(gp1jxzuh,yq6lorbx) = bmb(gp1jxzuh,yq6lorbx) + work(gp1jxzuh,
-     &yq6lorbx)
+23199 continue
+      do23202 yq6lorbx=1,wy1vqfzu 
+      do23204 gp1jxzuh=1,wy1vqfzu 
+      bmb(gp1jxzuh,yq6lorbx) = bmb(gp1jxzuh,yq6lorbx) + work(gp1jxzuh,yq
+     *6lorbx)
 23204 continue
+23205 continue
 23202 continue
+23203 continue
       return
       end
       subroutine vsel(s, t, wy1vqfzu, nk, ldk, minv, work)
@@ -539,215 +598,229 @@
       integer s, t, wy1vqfzu, nk, ldk
       double precision minv(ldk,nk*wy1vqfzu), work(wy1vqfzu,wy1vqfzu)
       integer ayfnwr1v, yq6lorbx, biuvowq2, nbj8tdsk
-      do 23206 ayfnwr1v=1,wy1vqfzu 
-      do 23208 yq6lorbx=1,wy1vqfzu 
+      do23206 ayfnwr1v=1,wy1vqfzu 
+      do23208 yq6lorbx=1,wy1vqfzu 
       work(ayfnwr1v,yq6lorbx) = 0.0d0
 23208 continue
+23209 continue
 23206 continue
-      if(.not.(s .ne. t))goto 23210
-      do 23212 ayfnwr1v=1,wy1vqfzu 
+23207 continue
+      if(s .ne. t)then
+      do23212 ayfnwr1v=1,wy1vqfzu 
       biuvowq2 = (s-1)*wy1vqfzu + ayfnwr1v
-      do 23214 yq6lorbx=1,wy1vqfzu 
+      do23214 yq6lorbx=1,wy1vqfzu 
       nbj8tdsk = (t-1)*wy1vqfzu + yq6lorbx
       work(ayfnwr1v,yq6lorbx) = minv(ldk-(nbj8tdsk-biuvowq2), nbj8tdsk)
 23214 continue
+23215 continue
 23212 continue
-      goto 23211
-23210 continue
-      do 23216 ayfnwr1v=1,wy1vqfzu 
+23213 continue
+      else
+      do23216 ayfnwr1v=1,wy1vqfzu 
       biuvowq2 = (s-1)*wy1vqfzu + ayfnwr1v
-      do 23218 yq6lorbx=ayfnwr1v,wy1vqfzu 
+      do23218 yq6lorbx=ayfnwr1v,wy1vqfzu 
       nbj8tdsk = (t-1)*wy1vqfzu + yq6lorbx
       work(ayfnwr1v,yq6lorbx) = minv(ldk-(nbj8tdsk-biuvowq2), nbj8tdsk)
 23218 continue
+23219 continue
 23216 continue
-      do 23220 ayfnwr1v=1,wy1vqfzu 
-      do 23222 yq6lorbx=ayfnwr1v+1,wy1vqfzu 
+23217 continue
+      do23220 ayfnwr1v=1,wy1vqfzu 
+      do23222 yq6lorbx=ayfnwr1v+1,wy1vqfzu 
       work(yq6lorbx,ayfnwr1v) = work(ayfnwr1v,yq6lorbx)
 23222 continue
+23223 continue
 23220 continue
-23211 continue
+23221 continue
+      endif
       return
       end
       subroutine ovjnsmt2(bmb, wmat, work, ifys6woa, wy1vqfzu, kuzxj1lo,
-     & dimw, tgiyxdw1, dufozmt7, iii)
+     * dimw, tgiyxdw1, dufozmt7, iii)
       implicit logical (a-z)
       integer wy1vqfzu, kuzxj1lo, dimw, tgiyxdw1(*), dufozmt7(*), iii
-      double precision bmb(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), 
-     &work(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu)
+      double precision bmb(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), work
+     *(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu)
       double precision q6zdcwxk, obr6tcex
       integer yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z
-      do 23224 bpvaqm5z=1,wy1vqfzu 
-      do 23226 yq6lorbx=1,wy1vqfzu 
-      do 23228 gp1jxzuh=1,wy1vqfzu 
+      do23224 bpvaqm5z=1,wy1vqfzu 
+      do23226 yq6lorbx=1,wy1vqfzu 
+      do23228 gp1jxzuh=1,wy1vqfzu 
       work(gp1jxzuh,yq6lorbx) = 0.0d0
 23228 continue
+23229 continue
 23226 continue
-      do 23230 urohxe6t=1,dimw 
+23227 continue
+      do23230 urohxe6t=1,dimw 
       obr6tcex = wmat(iii,urohxe6t)
       work(tgiyxdw1(urohxe6t),dufozmt7(urohxe6t)) = obr6tcex
       work(dufozmt7(urohxe6t),tgiyxdw1(urohxe6t)) = obr6tcex
 23230 continue
+23231 continue
       q6zdcwxk = 0.0d0
-      do 23232 yq6lorbx=1,wy1vqfzu 
-      q6zdcwxk = q6zdcwxk + bmb(bpvaqm5z,yq6lorbx) * work(yq6lorbx,
-     &bpvaqm5z)
+      do23232 yq6lorbx=1,wy1vqfzu 
+      q6zdcwxk = q6zdcwxk + bmb(bpvaqm5z,yq6lorbx) * work(yq6lorbx,bpvaq
+     *m5z)
 23232 continue
+23233 continue
       ifys6woa(iii,bpvaqm5z) = q6zdcwxk
 23224 continue
+23225 continue
       return
       end
       subroutine vicb2(enaqpzk9, wpuarq2m, d, uu, wy1vqfzu, kuzxj1lo)
       implicit logical (a-z)
       integer wy1vqfzu, kuzxj1lo
       double precision enaqpzk9(wy1vqfzu+1,kuzxj1lo), wpuarq2m(wy1vqfzu+
-     &1,kuzxj1lo), d(kuzxj1lo), uu(wy1vqfzu+1,wy1vqfzu+1)
+     *1,kuzxj1lo), d(kuzxj1lo), uu(wy1vqfzu+1,wy1vqfzu+1)
       integer ayfnwr1v, gp1jxzuh, lsvdbx3tk, uplim, sedf7mxb, hofjnx2e, 
-     &kij0gwer
+     *kij0gwer
       enaqpzk9(wy1vqfzu+1,kuzxj1lo) = 1.0d0 / d(kuzxj1lo)
       hofjnx2e = wy1vqfzu+1
       sedf7mxb = kuzxj1lo+1 - hofjnx2e
-      do 23234 kij0gwer=sedf7mxb,kuzxj1lo 
-      do 23236 ayfnwr1v=1,hofjnx2e 
+      do23234 kij0gwer=sedf7mxb,kuzxj1lo 
+      do23236 ayfnwr1v=1,hofjnx2e 
       uu(ayfnwr1v, kij0gwer-sedf7mxb+1) = wpuarq2m(ayfnwr1v, kij0gwer)
 23236 continue
+23237 continue
 23234 continue
+23235 continue
       ayfnwr1v = kuzxj1lo-1 
-23238 if(.not.(ayfnwr1v.ge.1))goto 23240
-      if(.not.(wy1vqfzu .lt. kuzxj1lo-ayfnwr1v))goto 23241
+23238 if(.not.(ayfnwr1v .ge. 1))goto 23240
+      if(wy1vqfzu .lt. kuzxj1lo-ayfnwr1v)then
       uplim = wy1vqfzu
-      goto 23242
-23241 continue
+      else
       uplim = kuzxj1lo-ayfnwr1v
-23242 continue
+      endif
       lsvdbx3tk=1
-23243 if(.not.(lsvdbx3tk.le.uplim))goto 23245
+23243 if(.not.(lsvdbx3tk .le. uplim))goto 23245
       enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = 0.0d0
       gp1jxzuh=1
-23246 if(.not.(gp1jxzuh.le.lsvdbx3tk))goto 23248
-      enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = enaqpzk9(-
-     &lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) - uu(-gp1jxzuh+wy1vqfzu+
-     &1,ayfnwr1v+gp1jxzuh -sedf7mxb+1) * enaqpzk9(gp1jxzuh-lsvdbx3tk+
-     &wy1vqfzu+1,ayfnwr1v+lsvdbx3tk)
-       gp1jxzuh=gp1jxzuh+1
+23246 if(.not.(gp1jxzuh .le. lsvdbx3tk))goto 23248
+      enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = enaqpzk9(-lsv
+     *dbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) - uu(-gp1jxzuh+wy1vqfzu+1,ay
+     *fnwr1v+gp1jxzuh -sedf7mxb+1) * enaqpzk9(gp1jxzuh-lsvdbx3tk+wy1vqfz
+     *u+1,ayfnwr1v+lsvdbx3tk)
+23247 gp1jxzuh=gp1jxzuh+1
       goto 23246
 23248 continue
-23249 if(.not.(gp1jxzuh.le.uplim))goto 23251
-      enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = enaqpzk9(-
-     &lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) - uu(-gp1jxzuh+wy1vqfzu+
-     &1,ayfnwr1v+gp1jxzuh -sedf7mxb+1) * enaqpzk9(lsvdbx3tk-gp1jxzuh+
-     &wy1vqfzu+1,ayfnwr1v+gp1jxzuh)
-       gp1jxzuh=gp1jxzuh+1
+23249 if(.not.(gp1jxzuh .le. uplim))goto 23251
+      enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = enaqpzk9(-lsv
+     *dbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) - uu(-gp1jxzuh+wy1vqfzu+1,ay
+     *fnwr1v+gp1jxzuh -sedf7mxb+1) * enaqpzk9(lsvdbx3tk-gp1jxzuh+wy1vqfz
+     *u+1,ayfnwr1v+gp1jxzuh)
+23250 gp1jxzuh=gp1jxzuh+1
       goto 23249
 23251 continue
-       lsvdbx3tk=lsvdbx3tk+1
+23244 lsvdbx3tk=lsvdbx3tk+1
       goto 23243
 23245 continue
       enaqpzk9(wy1vqfzu+1,ayfnwr1v) = 1.0d0 / d(ayfnwr1v)
       lsvdbx3tk = 1
-23252 if(.not.(lsvdbx3tk.le.uplim))goto 23254
-      enaqpzk9(wy1vqfzu+1,ayfnwr1v) = enaqpzk9(wy1vqfzu+1,ayfnwr1v) - 
-     &uu(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk -sedf7mxb+1) * 
-     &enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk)
-       lsvdbx3tk=lsvdbx3tk+1
+23252 if(.not.(lsvdbx3tk .le. uplim))goto 23254
+      enaqpzk9(wy1vqfzu+1,ayfnwr1v) = enaqpzk9(wy1vqfzu+1,ayfnwr1v) - uu
+     *(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk -sedf7mxb+1) * enaqpzk9(
+     *-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk)
+23253 lsvdbx3tk=lsvdbx3tk+1
       goto 23252
 23254 continue
-      if(.not.(ayfnwr1v .eq. sedf7mxb))goto 23255
+      if(ayfnwr1v .eq. sedf7mxb)then
       sedf7mxb = sedf7mxb-1
-      if(.not.(sedf7mxb .lt. 1))goto 23257
+      if(sedf7mxb .lt. 1)then
       sedf7mxb = 1
-      goto 23258
-23257 continue
+      else
       kij0gwer=hofjnx2e-1
-23259 if(.not.(kij0gwer.ge.1))goto 23261
+23259 if(.not.(kij0gwer .ge. 1))goto 23261
       gp1jxzuh=1
-23262 if(.not.(gp1jxzuh.le.hofjnx2e))goto 23264
+23262 if(.not.(gp1jxzuh .le. hofjnx2e))goto 23264
       uu(gp1jxzuh,kij0gwer+1) = uu(gp1jxzuh,kij0gwer)
-       gp1jxzuh=gp1jxzuh+1
+23263 gp1jxzuh=gp1jxzuh+1
       goto 23262
 23264 continue
-       kij0gwer=kij0gwer-1
+23260 kij0gwer=kij0gwer-1
       goto 23259
 23261 continue
       gp1jxzuh=1
-23265 if(.not.(gp1jxzuh.le.hofjnx2e))goto 23267
+23265 if(.not.(gp1jxzuh .le. hofjnx2e))goto 23267
       uu(gp1jxzuh,1) = wpuarq2m(gp1jxzuh,sedf7mxb)
-       gp1jxzuh=gp1jxzuh+1
+23266 gp1jxzuh=gp1jxzuh+1
       goto 23265
 23267 continue
-23258 continue
-23255 continue
-       ayfnwr1v = ayfnwr1v-1
+      endif
+      endif
+23239 ayfnwr1v = ayfnwr1v-1
       goto 23238
 23240 continue
       return
       end
-      subroutine ewg7qruh(sjwyig9tto,tlgduey8,wmat, kuzxj1lo,wy1vqfzu,
-     &ezlgm2up,nef, wbkq9zyi,dof,smo,cov, s0, xin,yin,rbne6ouj,win, 
-     &work1,work3, dimw, fbd5yktj, ldk, info, yzoe1rsp, sgdub, rpyis2kc,
-     & zv2xfhei, acpios9q,tgiyxdw1,dufozmt7, bmb, ifys6woa, wkmm, 
-     &iz2nbfjc,kgwmz4ip,ges1xpkr, hjm2ktyr, beta, fasrkub3, sout, 
-     &r0oydcxb, ub4xioar, effect, uwin)
+      subroutine ewg7qruh(sjwyig9tto,tlgduey8,wmat, kuzxj1lo,wy1vqfzu,ez
+     *lgm2up,nef, wbkq9zyi,dof,smo,cov, s0, xin,yin,rbne6ouj,win, work1,
+     *work3, dimw, fbd5yktj, ldk, info, yzoe1rsp, sgdub, rpyis2kc, zv2xf
+     *hei, acpios9q,tgiyxdw1,dufozmt7, bmb, ifys6woa, wkmm, iz2nbfjc,kgw
+     *mz4ip,ges1xpkr, hjm2ktyr, beta, fasrkub3, sout, r0oydcxb, ub4xioar
+     *, effect, uwin)
       implicit logical (a-z)
       integer kuzxj1lo,wy1vqfzu,ezlgm2up(kuzxj1lo),nef, dimw, fbd5yktj, 
-     &ldk, info, yzoe1rsp, acpios9q,tgiyxdw1(*),dufozmt7(*), iz2nbfjc, 
-     &kgwmz4ip, ges1xpkr(kgwmz4ip*2)
+     *ldk, info, yzoe1rsp, acpios9q,tgiyxdw1(*),dufozmt7(*), iz2nbfjc, k
+     *gwmz4ip, ges1xpkr(kgwmz4ip*2)
       double precision sjwyig9tto(kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqfzu)
-     &, wmat(kuzxj1lo,dimw), wbkq9zyi(kgwmz4ip), dof(kgwmz4ip), smo(
-     &kuzxj1lo,kgwmz4ip), cov(kuzxj1lo,kgwmz4ip)
+     *, wmat(kuzxj1lo,dimw), wbkq9zyi(kgwmz4ip), dof(kgwmz4ip), smo(kuzx
+     *j1lo,kgwmz4ip), cov(kuzxj1lo,kgwmz4ip)
       double precision s0(2*kgwmz4ip, 2*kgwmz4ip,2)
-      double precision work1(*), work3(*), sgdub(*), rpyis2kc(*), 
-     &zv2xfhei(acpios9q+4)
-      double precision xin(nef), yin(nef,wy1vqfzu), rbne6ouj(nef,
-     &wy1vqfzu), win(nef,*), bmb(*), ifys6woa(nef,kgwmz4ip), wkmm(
-     &wy1vqfzu,wy1vqfzu,16), hjm2ktyr(wy1vqfzu,kgwmz4ip)
+      double precision work1(*), work3(*), sgdub(*), rpyis2kc(*), zv2xfh
+     *ei(acpios9q+4)
+      double precision xin(nef), yin(nef,wy1vqfzu), rbne6ouj(nef,wy1vqfz
+     *u), win(nef,*), bmb(*), ifys6woa(nef,kgwmz4ip), wkmm(wy1vqfzu,wy1v
+     *qfzu,16), hjm2ktyr(wy1vqfzu,kgwmz4ip)
       double precision beta(2*kgwmz4ip), fasrkub3(2*kgwmz4ip), sout(nef,
-     &kgwmz4ip), r0oydcxb(kgwmz4ip,nef), ub4xioar(kgwmz4ip,nef), effect(
-     &nef*kgwmz4ip), uwin(*)
+     *kgwmz4ip), r0oydcxb(kgwmz4ip,nef), ub4xioar(kgwmz4ip,nef), effect(
+     *nef*kgwmz4ip), uwin(*)
       integer dimwin
-      integer ayfnwr1v, yq6lorbx, gp1jxzuh, rutyk8mg, xjc4ywlh, job, 
-     &qemj9asg, dvhw1ulq
+      integer ayfnwr1v, yq6lorbx, gp1jxzuh, rutyk8mg, xjc4ywlh, job, qem
+     *j9asg, dvhw1ulq
       integer oneint
       double precision xmin, xrange, pvofyg8z
       oneint = 1
-      if(.not.(iz2nbfjc .eq. 1))goto 23268
+      if(iz2nbfjc .eq. 1)then
       dimwin = dimw
-      goto 23269
-23268 continue
+      else
       dimwin = kgwmz4ip*(kgwmz4ip+1)/2
-23269 continue
+      endif
       call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu)
-      call vsuff9(kuzxj1lo,nef,ezlgm2up, sjwyig9tto,tlgduey8,wmat, xin,
-     &yin,win,uwin,rbne6ouj, wy1vqfzu, dimw, dimwin, tgiyxdw1, dufozmt7,
-     & wkmm, wkmm(1,1,3), hjm2ktyr, kgwmz4ip, iz2nbfjc, oneint, 
-     &dvhw1ulq)
-      if(.not.(dvhw1ulq .ne. 1))goto 23270
+      call vsuff9(kuzxj1lo,nef,ezlgm2up, sjwyig9tto,tlgduey8,wmat, xin,y
+     *in,win,uwin,rbne6ouj, wy1vqfzu, dimw, dimwin, tgiyxdw1, dufozmt7, 
+     *wkmm, wkmm(1,1,3), hjm2ktyr, kgwmz4ip, iz2nbfjc, oneint, dvhw1ulq)
+      if(dvhw1ulq .ne. 1)then
       return
-23270 continue
+      endif
       xmin = xin(1)
       xrange = xin(nef)-xin(1)
-      do 23272 ayfnwr1v=1,nef 
+      do23272 ayfnwr1v=1,nef 
       xin(ayfnwr1v) = (xin(ayfnwr1v)-xmin)/xrange
 23272 continue
+23273 continue
       ldk = 4*kgwmz4ip
       fbd5yktj = 0
-      do 23274 yq6lorbx=1,kgwmz4ip 
-      if(.not.(wbkq9zyi(yq6lorbx) .eq. 0.0d0))goto 23276
+      do23274 yq6lorbx=1,kgwmz4ip 
+      if(wbkq9zyi(yq6lorbx) .eq. 0.0d0)then
       dof(yq6lorbx) = dof(yq6lorbx) + 1.0d0
-23276 continue
+      endif
 23274 continue
+23275 continue
       call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip)
-      call vsplin(xin,rbne6ouj,win,nef,zv2xfhei, acpios9q,ldk,kgwmz4ip,
-     &dimwin, tgiyxdw1,dufozmt7, wkmm, wbkq9zyi, info, sout, rpyis2kc, 
-     &work3(1), work3(1+acpios9q*kgwmz4ip*ldk), sgdub, cov, yzoe1rsp, 
-     &bmb, ifys6woa, dof, work1, fbd5yktj, kuzxj1lo)
-      do 23278 yq6lorbx=1,kgwmz4ip 
+      call vsplin(xin,rbne6ouj,win,nef,zv2xfhei, acpios9q,ldk,kgwmz4ip,d
+     *imwin, tgiyxdw1,dufozmt7, wkmm, wbkq9zyi, info, sout, rpyis2kc, wo
+     *rk3(1), work3(1+acpios9q*kgwmz4ip*ldk), sgdub, cov, yzoe1rsp, bmb,
+     * ifys6woa, dof, work1, fbd5yktj, kuzxj1lo)
+      do23278 yq6lorbx=1,kgwmz4ip 
       dof(yq6lorbx) = -1.0d0
-      do 23280 ayfnwr1v=1,nef 
+      do23280 ayfnwr1v=1,nef 
       dof(yq6lorbx)=dof(yq6lorbx)+ifys6woa(ayfnwr1v,yq6lorbx)
 23280 continue
+23281 continue
 23278 continue
-      if(.not.(kgwmz4ip .ge. 1))goto 23282
+23279 continue
+      if(kgwmz4ip .ge. 1)then
       pvofyg8z = 1.0d-7
       rutyk8mg = nef*kgwmz4ip
       xjc4ywlh = 2*kgwmz4ip
@@ -756,70 +829,73 @@
       call x6kanjdh(xin, work3, nef, kgwmz4ip)
       call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip)
       call mxrbkut0f(uwin, work3, kgwmz4ip, xjc4ywlh, nef, wkmm(1,1,1), 
-     &wkmm(1,1,2), tgiyxdw1, dufozmt7, dimwin, rutyk8mg)
-      do 23284 gp1jxzuh=1,xjc4ywlh 
+     *wkmm(1,1,2), tgiyxdw1, dufozmt7, dimwin, rutyk8mg)
+      do23284 gp1jxzuh=1,xjc4ywlh 
       ges1xpkr(gp1jxzuh) = gp1jxzuh
 23284 continue
-      call vqrdca(work3,rutyk8mg,rutyk8mg,xjc4ywlh,fasrkub3,ges1xpkr,
-     &work1,qemj9asg,pvofyg8z)
+23285 continue
+      call vqrdca(work3,rutyk8mg,rutyk8mg,xjc4ywlh,fasrkub3,ges1xpkr,wor
+     *k1,qemj9asg,pvofyg8z)
       call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip)
-      call nudh6szqf(uwin,sout,r0oydcxb,dimwin,tgiyxdw1,dufozmt7,nef,
-     &kgwmz4ip,wkmm)
-      call vdqrsl(work3,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3,r0oydcxb,
-     &work1(1),effect,beta, work1(1),ub4xioar,job,info)
-      call vbksf(uwin,ub4xioar,kgwmz4ip,nef,wkmm,tgiyxdw1,dufozmt7,
-     &dimwin)
-      if(.not.(yzoe1rsp .ne. 0))goto 23286
+      call nudh6szqf(uwin,sout,r0oydcxb,dimwin,tgiyxdw1,dufozmt7,nef,kgw
+     *mz4ip,wkmm)
+      call vdqrsl(work3,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3,r0oydcxb,wor
+     *k1(1),effect,beta, work1(1),ub4xioar,job,info)
+      call vbksf(uwin,ub4xioar,kgwmz4ip,nef,wkmm,tgiyxdw1,dufozmt7,dimwi
+     *n)
+      if(yzoe1rsp .ne. 0)then
       call vrinvf9(work3, rutyk8mg, xjc4ywlh, dvhw1ulq, s0(1,1,1), s0(1,
-     &1,2))
-      if(.not.(dvhw1ulq .ne. 1))goto 23288
+     *1,2))
+      if(dvhw1ulq .ne. 1)then
       return
-23288 continue
-      do 23290 yq6lorbx=1,kgwmz4ip 
-      do 23292 ayfnwr1v=1,nef 
-      cov(ayfnwr1v,yq6lorbx) = cov(ayfnwr1v,yq6lorbx) - s0(yq6lorbx,
-     &yq6lorbx,1) - xin(ayfnwr1v) * (2.0d0 * s0(yq6lorbx,yq6lorbx+
-     &kgwmz4ip,1) + xin(ayfnwr1v) * s0(yq6lorbx+kgwmz4ip,yq6lorbx+
-     &kgwmz4ip,1))
+      endif
+      do23290 yq6lorbx=1,kgwmz4ip 
+      do23292 ayfnwr1v=1,nef 
+      cov(ayfnwr1v,yq6lorbx) = cov(ayfnwr1v,yq6lorbx) - s0(yq6lorbx,yq6l
+     *orbx,1) - xin(ayfnwr1v) * (2.0d0 * s0(yq6lorbx,yq6lorbx+kgwmz4ip,1
+     *) + xin(ayfnwr1v) * s0(yq6lorbx+kgwmz4ip,yq6lorbx+kgwmz4ip,1))
 23292 continue
+23293 continue
 23290 continue
-23286 continue
-      goto 23283
-23282 continue
+23291 continue
+      endif
+      else
       call dsrt0gem(nef, xin, win, sout, ub4xioar, cov, yzoe1rsp)
-23283 continue
-      do 23294 ayfnwr1v=1,nef 
-      do 23296 yq6lorbx=1,kgwmz4ip 
-      sout(ayfnwr1v,yq6lorbx) = sout(ayfnwr1v,yq6lorbx) - ub4xioar(
-     &yq6lorbx,ayfnwr1v)
+      endif
+      do23294 ayfnwr1v=1,nef 
+      do23296 yq6lorbx=1,kgwmz4ip 
+      sout(ayfnwr1v,yq6lorbx) = sout(ayfnwr1v,yq6lorbx) - ub4xioar(yq6lo
+     *rbx,ayfnwr1v)
 23296 continue
+23297 continue
 23294 continue
-      do 23298 yq6lorbx=1,kgwmz4ip 
-      call shm8ynte(kuzxj1lo, nef, ezlgm2up, sout(1,yq6lorbx), smo(1,
-     &yq6lorbx))
+23295 continue
+      do23298 yq6lorbx=1,kgwmz4ip 
+      call shm8ynte(kuzxj1lo, nef, ezlgm2up, sout(1,yq6lorbx), smo(1,yq6
+     *lorbx))
 23298 continue
+23299 continue
       return
       end
-      subroutine vbfa( n,wy1vqfzu,psdvgce3, he7mqnvy,tlgduey8,wmat,
-     &wbkq9zyi,dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, 
-     &beta,cov,zpcqv3uj, vc6hatuj,fasrkub3, ges1xpkr, xbig, wpuarq2m, 
-     &hjm2ktyr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, 
-     &wkmm, work3, sgdub, bmb, ifys6woa, mwk, twk, rpyis2kc, zv2xfhei, 
-     &resss, nbzjkpi3, acpios9q, itwk, jwbkl9fp)
+      subroutine vbfa( n,wy1vqfzu,psdvgce3, he7mqnvy,tlgduey8,wmat,wbkq9
+     *zyi,dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, beta,c
+     *ov,zpcqv3uj, vc6hatuj,fasrkub3, ges1xpkr, xbig, wpuarq2m, hjm2ktyr
+     *, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wkmm, work3,
+     * sgdub, bmb, ifys6woa, mwk, twk, rpyis2kc, zv2xfhei, resss, nbzjkp
+     *i3, acpios9q, itwk, jwbkl9fp)
       implicit logical (a-z)
-      integer irhm4cfa, n, wy1vqfzu, psdvgce3(15), ezlgm2up(*),nef(*),
-     &which(*), ges1xpkr(*)
-      integer jnxpuym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*), 
-     &nbzjkpi3(*), acpios9q(*), itwk(*), jwbkl9fp(*)
+      integer irhm4cfa, n, wy1vqfzu, psdvgce3(15), ezlgm2up(*),nef(*),wh
+     *ich(*), ges1xpkr(*)
+      integer jnxpuym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*), nbzjkp
+     *i3(*), acpios9q(*), itwk(*), jwbkl9fp(*)
       double precision he7mqnvy(*),tlgduey8(*),wmat(*),wbkq9zyi(*),dof(*
-     &), ub4xioar(*),kispwgx3(*), m0ibglfx(*), s0(wy1vqfzu), beta(*),
-     &cov(*),zpcqv3uj, vc6hatuj(*),fasrkub3(*)
+     *), ub4xioar(*),kispwgx3(*), m0ibglfx(*), s0(wy1vqfzu), beta(*),cov
+     *(*),zpcqv3uj, vc6hatuj(*),fasrkub3(*)
       double precision xbig(*), wpuarq2m(*), hjm2ktyr(*), work1(*), wk2(
-     &n,wy1vqfzu,3), wkmm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), 
-     &bmb(*), ifys6woa(*), mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), 
-     &resss
+     *n,wy1vqfzu,3), wkmm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), bmb
+     *(*), ifys6woa(*), mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), resss
       integer p,q,yzoe1rsp,niter,gtrlbz3e, rutyk8mg, xjc4ywlh, lyma1kwc,
-     & dimw, dimu, fbd5yktj,ldk
+     * dimw, dimu, fbd5yktj,ldk
       integer iter
       integer xs4wtvlg
       integer ayfnwr1v, imk5wjxg, qemj9asg
@@ -828,9 +904,9 @@
       p=psdvgce3(2)
       q=psdvgce3(3)
       yzoe1rsp= 0
-      if(.not.(psdvgce3(4) .eq. 1))goto 23300
+      if(psdvgce3(4) .eq. 1)then
       yzoe1rsp = 1
-23300 continue
+      endif
       gtrlbz3e=psdvgce3(6)
       qemj9asg=psdvgce3(7)
       rutyk8mg=psdvgce3(9)
@@ -841,301 +917,330 @@
       fbd5yktj = 0
       ldk=psdvgce3(15)
       xs4wtvlg = 1
-      if(.not.(lyma1kwc .gt. 0))goto 23302
-      do 23304 ayfnwr1v=1,lyma1kwc 
+      if(lyma1kwc .gt. 0)then
+      do23304 ayfnwr1v=1,lyma1kwc 
       work1(ayfnwr1v) = dof(ayfnwr1v)
       work1(ayfnwr1v+lyma1kwc) = wbkq9zyi(ayfnwr1v)
       work1(ayfnwr1v+2*lyma1kwc) = dof(ayfnwr1v)
 23304 continue
-23302 continue
+23305 continue
+      endif
       iter = 0
-23306 if(.not.(xs4wtvlg .ne. 0))goto 23307
+23306 if(xs4wtvlg .ne. 0)then
       iter = iter+1
-      if(.not.(iter .gt. 1))goto 23308
-      if(.not.(lyma1kwc .gt. 0))goto 23310
-      do 23312 ayfnwr1v=1,lyma1kwc 
-      if(.not.(work1(ayfnwr1v+lyma1kwc).eq.0.0d0 .and.(dabs(work1(
-     &ayfnwr1v+2*lyma1kwc)-dof(ayfnwr1v))/dof(ayfnwr1v).gt.0.05d0)))
-     &goto 23314
+      if(iter .gt. 1)then
+      if(lyma1kwc .gt. 0)then
+      do23312 ayfnwr1v=1,lyma1kwc 
+      if(work1(ayfnwr1v+lyma1kwc).eq.0.0d0 .and. (dabs(work1(ayfnwr1v+2*
+     *lyma1kwc)-dof(ayfnwr1v))/dof(ayfnwr1v).gt.0.05d0))then
       work1(ayfnwr1v+2*lyma1kwc) = dof(ayfnwr1v)
       dof(ayfnwr1v)=work1(ayfnwr1v)
       wbkq9zyi(ayfnwr1v)=0.0d0
-      goto 23315
-23314 continue
+      else
       work1(ayfnwr1v+2*lyma1kwc) = dof(ayfnwr1v)
-23315 continue
+      endif
 23312 continue
-23310 continue
-23308 continue
-      call vbfa1(irhm4cfa,n,wy1vqfzu, he7mqnvy,tlgduey8,wmat,wbkq9zyi,
-     &dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, beta,cov,
-     &zpcqv3uj, vc6hatuj,fasrkub3, qemj9asg,ges1xpkr, xbig, wpuarq2m, 
-     &hjm2ktyr, jnxpuym2, hnpt1zym, fzm1ihwj(1), fzm1ihwj(1 + imk5wjxg),
-     & iz2nbfjc, work1(1+3*lyma1kwc), wkmm, work3, sgdub, bmb, ifys6woa,
-     & mwk, twk, rpyis2kc, zv2xfhei, resss, nbzjkpi3, acpios9q, itwk, 
-     &jwbkl9fp, p,q,yzoe1rsp,niter,gtrlbz3e, wk2(1,1,1), wk2(1,1,2), 
-     &wk2(1,1,3), rutyk8mg, xjc4ywlh, lyma1kwc, dimw, dimu, fbd5yktj, 
-     &ldk)
-      if(.not.(irhm4cfa .ne. 0))goto 23316
+23313 continue
+      endif
+      endif
+      call vbfa1(irhm4cfa,n,wy1vqfzu, he7mqnvy,tlgduey8,wmat,wbkq9zyi,do
+     *f, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, beta,cov,zpc
+     *qv3uj, vc6hatuj,fasrkub3, qemj9asg,ges1xpkr, xbig, wpuarq2m, hjm2k
+     *tyr, jnxpuym2, hnpt1zym, fzm1ihwj(1), fzm1ihwj(1 + imk5wjxg), iz2n
+     *bfjc, work1(1+3*lyma1kwc), wkmm, work3, sgdub, bmb, ifys6woa, mwk,
+     * twk, rpyis2kc, zv2xfhei, resss, nbzjkpi3, acpios9q, itwk, jwbkl9f
+     *p, p,q,yzoe1rsp,niter,gtrlbz3e, wk2(1,1,1), wk2(1,1,2), wk2(1,1,3)
+     *, rutyk8mg, xjc4ywlh, lyma1kwc, dimw, dimu, fbd5yktj, ldk)
+      if(irhm4cfa .ne. 0)then
       call vcall2(xs4wtvlg,w,y,m0ibglfx,beta,wpuarq2m)
-      goto 23317
-23316 continue
+      else
       xs4wtvlg = 0
-23317 continue
-      if(.not.(xs4wtvlg .ne. 0))goto 23318
+      endif
+      if(xs4wtvlg .ne. 0)then
       qemj9asg=0
-23318 continue
+      endif
       goto 23306
+      endif
 23307 continue
       psdvgce3(7) = qemj9asg
       psdvgce3(5) = niter
       psdvgce3(14) = fbd5yktj
       return
       end
-      subroutine vbfa1(irhm4cfa,kuzxj1lo,wy1vqfzu, he7mqnvy,tlgduey8,
-     &wmat,wbkq9zyi,dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,
-     &s0, beta,cov,zpcqv3uj, vc6hatuj,fasrkub3, qemj9asg,ges1xpkr, xbig,
-     & wpuarq2m, hjm2ktyr, jnxpuym2, hnpt1zym, tgiyxdw1, dufozmt7, 
-     &iz2nbfjc, work1, wkmm, work3, sgdub, bmb, ifys6woa, mwk, twk, 
-     &rpyis2kc, zv2xfhei, resss, nbzjkpi3, acpios9q, itwk, jwbkl9fp, p, 
-     &q, yzoe1rsp, niter, gtrlbz3e, ghz9vuba, oldmat, wk2, rutyk8mg, 
-     &xjc4ywlh, lyma1kwc, dimw, dimu, fbd5yktj, ldk)
+      subroutine vbfa1(irhm4cfa,kuzxj1lo,wy1vqfzu, he7mqnvy,tlgduey8,wma
+     *t,wbkq9zyi,dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0,
+     * beta,cov,zpcqv3uj, vc6hatuj,fasrkub3, qemj9asg,ges1xpkr, xbig, wp
+     *uarq2m, hjm2ktyr, jnxpuym2, hnpt1zym, tgiyxdw1, dufozmt7, iz2nbfjc
+     *, work1, wkmm, work3, sgdub, bmb, ifys6woa, mwk, twk, rpyis2kc, zv
+     *2xfhei, resss, nbzjkpi3, acpios9q, itwk, jwbkl9fp, p, q, yzoe1rsp,
+     * niter, gtrlbz3e, ghz9vuba, oldmat, wk2, rutyk8mg, xjc4ywlh, lyma1
+     *kwc, dimw, dimu, fbd5yktj, ldk)
       implicit logical (a-z)
       integer qemj9asg
       integer dufozmt7(*), tgiyxdw1(*)
-      integer p, q, yzoe1rsp, niter, gtrlbz3e, rutyk8mg, xjc4ywlh, 
-     &lyma1kwc, dimw, dimu, fbd5yktj, ldk
+      integer p, q, yzoe1rsp, niter, gtrlbz3e, rutyk8mg, xjc4ywlh, lyma1
+     *kwc, dimw, dimu, fbd5yktj, ldk
       integer irhm4cfa, kuzxj1lo, wy1vqfzu, ezlgm2up(kuzxj1lo,q),nef(q),
-     &which(q), ges1xpkr(xjc4ywlh)
-      integer jnxpuym2(q), hnpt1zym(q), iz2nbfjc(q), nbzjkpi3(q+1), 
-     &acpios9q(q), itwk(*), jwbkl9fp(q+1)
+     *which(q), ges1xpkr(xjc4ywlh)
+      integer jnxpuym2(q), hnpt1zym(q), iz2nbfjc(q), nbzjkpi3(q+1), acpi
+     *os9q(q), itwk(*), jwbkl9fp(q+1)
       double precision he7mqnvy(kuzxj1lo,p), tlgduey8(kuzxj1lo,wy1vqfzu)
-     &, wmat(kuzxj1lo,dimw), wbkq9zyi(lyma1kwc), dof(lyma1kwc)
-      double precision ub4xioar(wy1vqfzu,kuzxj1lo), kispwgx3(kuzxj1lo,
-     &lyma1kwc), m0ibglfx(wy1vqfzu,kuzxj1lo), s0(wy1vqfzu), beta(
-     &xjc4ywlh), cov(kuzxj1lo,lyma1kwc), zpcqv3uj, vc6hatuj(rutyk8mg,
-     &xjc4ywlh), fasrkub3(xjc4ywlh)
+     *, wmat(kuzxj1lo,dimw), wbkq9zyi(lyma1kwc), dof(lyma1kwc)
+      double precision ub4xioar(wy1vqfzu,kuzxj1lo), kispwgx3(kuzxj1lo,ly
+     *ma1kwc), m0ibglfx(wy1vqfzu,kuzxj1lo), s0(wy1vqfzu), beta(xjc4ywlh)
+     *, cov(kuzxj1lo,lyma1kwc), zpcqv3uj, vc6hatuj(rutyk8mg,xjc4ywlh), f
+     *asrkub3(xjc4ywlh)
       double precision xbig(rutyk8mg,xjc4ywlh), wpuarq2m(dimu,kuzxj1lo),
-     & hjm2ktyr(wy1vqfzu,lyma1kwc), work1(*), wk2(kuzxj1lo,wy1vqfzu), 
-     &wkmm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), bmb(*), ifys6woa(*
-     &), mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), resss
-      double precision ghz9vuba(kuzxj1lo,wy1vqfzu), oldmat(kuzxj1lo,
-     &wy1vqfzu)
+     * hjm2ktyr(wy1vqfzu,lyma1kwc), work1(*), wk2(kuzxj1lo,wy1vqfzu), wk
+     *mm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), bmb(*), ifys6woa(*),
+     * mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), resss
+      double precision ghz9vuba(kuzxj1lo,wy1vqfzu), oldmat(kuzxj1lo,wy1v
+     *qfzu)
       integer job,info,nefk
       integer ayfnwr1v, yq6lorbx, gp1jxzuh, wg1xifdy
-      double precision vo4mtexk, rd9beyfk,ratio, deltaf, z4vrscot,
-     &pvofyg8z
+      double precision vo4mtexk, rd9beyfk,ratio, deltaf, z4vrscot,pvofyg
+     *8z
       pvofyg8z = 1.0d-7
       job = 101
       info = 1
-      if(.not.(q .eq. 0))goto 23320
+      if(q .eq. 0)then
       gtrlbz3e = 1
-23320 continue
-      if(.not.(irhm4cfa .ne. 0))goto 23322
-      do 23324 yq6lorbx=1,xjc4ywlh 
-      do 23326 ayfnwr1v=1,rutyk8mg 
+      endif
+      if(irhm4cfa .ne. 0)then
+      do23324 yq6lorbx=1,xjc4ywlh 
+      do23326 ayfnwr1v=1,rutyk8mg 
       vc6hatuj(ayfnwr1v,yq6lorbx)=xbig(ayfnwr1v,yq6lorbx)
 23326 continue
+23327 continue
 23324 continue
-23322 continue
-      if(.not.(qemj9asg.eq.0))goto 23328
+23325 continue
+      endif
+      if(qemj9asg.eq.0)then
       call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu)
-      call mxrbkut0f(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, kuzxj1lo, 
-     &wkmm(1,1,1), wkmm(1,1,2), tgiyxdw1, dufozmt7, dimu, rutyk8mg)
-      do 23330 gp1jxzuh=1,xjc4ywlh 
+      call mxrbkut0f(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, kuzxj1lo, w
+     *kmm(1,1,1), wkmm(1,1,2), tgiyxdw1, dufozmt7, dimu, rutyk8mg)
+      do23330 gp1jxzuh=1,xjc4ywlh 
       ges1xpkr(gp1jxzuh) = gp1jxzuh
 23330 continue
+23331 continue
       call vqrdca(vc6hatuj,rutyk8mg,rutyk8mg,xjc4ywlh,fasrkub3,ges1xpkr,
-     &twk,qemj9asg,pvofyg8z)
-23328 continue
-      do 23332 yq6lorbx=1,wy1vqfzu 
-      do 23334 ayfnwr1v=1,kuzxj1lo 
+     *twk,qemj9asg,pvofyg8z)
+      endif
+      do23332 yq6lorbx=1,wy1vqfzu 
+      do23334 ayfnwr1v=1,kuzxj1lo 
       m0ibglfx(yq6lorbx,ayfnwr1v)=0.0d0
 23334 continue
-      if(.not.(q .gt. 0))goto 23336
-      do 23338 gp1jxzuh=1,q 
-      if(.not.(iz2nbfjc(gp1jxzuh).eq.1))goto 23340
-      do 23342 ayfnwr1v=1,kuzxj1lo 
-      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6lorbx-1)
+23335 continue
+      if(q .gt. 0)then
+      do23338 gp1jxzuh=1,q 
+      if(iz2nbfjc(gp1jxzuh).eq.1)then
+      do23342 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + kispwg
+     *x3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6lorbx-1)
 23342 continue
-      goto 23341
-23340 continue
-      do 23344 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
-      do 23346 ayfnwr1v=1,kuzxj1lo 
-      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &hjm2ktyr(yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(
-     &ayfnwr1v,hnpt1zym(gp1jxzuh)+wg1xifdy-1)
+23343 continue
+      else
+      do23344 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
+      do23346 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + hjm2kt
+     *yr(yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnp
+     *t1zym(gp1jxzuh)+wg1xifdy-1)
 23346 continue
+23347 continue
 23344 continue
-23341 continue
+23345 continue
+      endif
 23338 continue
-23336 continue
+23339 continue
+      endif
 23332 continue
+23333 continue
       niter = 0
       ratio = 1.0d0
-23348 if(.not.((ratio .gt. zpcqv3uj ) .and. (niter .lt. gtrlbz3e)))
-     &goto 23349
+23348 if((ratio .gt. zpcqv3uj ) .and. (niter .lt. gtrlbz3e))then
       niter = niter + 1
       deltaf = 0.0d0
-      do 23350 yq6lorbx=1,wy1vqfzu 
-      do 23352 ayfnwr1v=1,kuzxj1lo 
-      ghz9vuba(ayfnwr1v,yq6lorbx)=tlgduey8(ayfnwr1v,yq6lorbx)-m0ibglfx(
-     &yq6lorbx,ayfnwr1v)
+      do23350 yq6lorbx=1,wy1vqfzu 
+      do23352 ayfnwr1v=1,kuzxj1lo 
+      ghz9vuba(ayfnwr1v,yq6lorbx)=tlgduey8(ayfnwr1v,yq6lorbx)-m0ibglfx(y
+     *q6lorbx,ayfnwr1v)
 23352 continue
+23353 continue
 23350 continue
+23351 continue
       call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu)
-      call nudh6szqf(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,
-     &kuzxj1lo,wy1vqfzu,wkmm)
-      call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, 
-     &wk2,wk2, beta, wk2,ub4xioar,job,info)
+      call nudh6szqf(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzx
+     *j1lo,wy1vqfzu,wkmm)
+      call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, wk2
+     *,wk2, beta, wk2,ub4xioar,job,info)
       resss=0.0d0
-      do 23354 ayfnwr1v=1,kuzxj1lo 
-      do 23356 yq6lorbx=1,wy1vqfzu 
-      vo4mtexk = twk((ayfnwr1v-1)*wy1vqfzu+yq6lorbx) - ub4xioar(
-     &yq6lorbx,ayfnwr1v)
+      do23354 ayfnwr1v=1,kuzxj1lo 
+      do23356 yq6lorbx=1,wy1vqfzu 
+      vo4mtexk = twk((ayfnwr1v-1)*wy1vqfzu+yq6lorbx) - ub4xioar(yq6lorbx
+     *,ayfnwr1v)
       resss = resss + vo4mtexk * vo4mtexk
 23356 continue
+23357 continue
 23354 continue
-      call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,
-     &dufozmt7,dimu)
-      if(.not.(q .gt. 0))goto 23358
-      do 23360 gp1jxzuh=1,q 
-      do 23362 yq6lorbx=1,wy1vqfzu 
-      if(.not.(iz2nbfjc(gp1jxzuh).eq.1))goto 23364
-      do 23366 ayfnwr1v=1,kuzxj1lo 
-      oldmat(ayfnwr1v,yq6lorbx)=kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+
-     &yq6lorbx-1)
-      ghz9vuba(ayfnwr1v,yq6lorbx) = tlgduey8(ayfnwr1v,yq6lorbx) - 
-     &ub4xioar(yq6lorbx,ayfnwr1v) - m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &oldmat(ayfnwr1v,yq6lorbx)
+23355 continue
+      call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,dufoz
+     *mt7,dimu)
+      if(q .gt. 0)then
+      do23360 gp1jxzuh=1,q 
+      do23362 yq6lorbx=1,wy1vqfzu 
+      if(iz2nbfjc(gp1jxzuh).eq.1)then
+      do23366 ayfnwr1v=1,kuzxj1lo 
+      oldmat(ayfnwr1v,yq6lorbx)=kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6
+     *lorbx-1)
+      ghz9vuba(ayfnwr1v,yq6lorbx) = tlgduey8(ayfnwr1v,yq6lorbx) - ub4xio
+     *ar(yq6lorbx,ayfnwr1v) - m0ibglfx(yq6lorbx,ayfnwr1v) + oldmat(ayfnw
+     *r1v,yq6lorbx)
 23366 continue
-      goto 23365
-23364 continue
-      do 23368 ayfnwr1v=1,kuzxj1lo 
+23367 continue
+      else
+      do23368 ayfnwr1v=1,kuzxj1lo 
       oldmat(ayfnwr1v,yq6lorbx)=0.0d0
-      do 23370 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
-      oldmat(ayfnwr1v,yq6lorbx)=oldmat(ayfnwr1v,yq6lorbx) + hjm2ktyr(
-     &yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,
-     &hnpt1zym(gp1jxzuh)+wg1xifdy-1)
+      do23370 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
+      oldmat(ayfnwr1v,yq6lorbx)=oldmat(ayfnwr1v,yq6lorbx) + hjm2ktyr(yq6
+     *lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1zym(
+     *gp1jxzuh)+wg1xifdy-1)
 23370 continue
-      ghz9vuba(ayfnwr1v,yq6lorbx) = tlgduey8(ayfnwr1v,yq6lorbx) - 
-     &ub4xioar(yq6lorbx,ayfnwr1v) - m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &oldmat(ayfnwr1v,yq6lorbx)
+23371 continue
+      ghz9vuba(ayfnwr1v,yq6lorbx) = tlgduey8(ayfnwr1v,yq6lorbx) - ub4xio
+     *ar(yq6lorbx,ayfnwr1v) - m0ibglfx(yq6lorbx,ayfnwr1v) + oldmat(ayfnw
+     *r1v,yq6lorbx)
 23368 continue
-23365 continue
+23369 continue
+      endif
 23362 continue
+23363 continue
       nefk = nef(gp1jxzuh)
       call ewg7qruh(he7mqnvy(1,which(gp1jxzuh)),ghz9vuba,wmat, kuzxj1lo,
-     &wy1vqfzu,ezlgm2up(1,gp1jxzuh),nefk, wbkq9zyi(hnpt1zym(gp1jxzuh)), 
-     &dof(hnpt1zym(gp1jxzuh)), kispwgx3(1,hnpt1zym(gp1jxzuh)), cov(1,
-     &hnpt1zym(gp1jxzuh)), s0, mwk(1), mwk(1+nefk), mwk(1+nefk*(
-     &wy1vqfzu+1)), mwk(1+nefk*(2*wy1vqfzu+1)), work1, work3, dimw, 
-     &fbd5yktj, ldk, info, yzoe1rsp, sgdub, rpyis2kc(nbzjkpi3(gp1jxzuh))
-     &, zv2xfhei(jwbkl9fp(gp1jxzuh)), acpios9q(gp1jxzuh),tgiyxdw1, 
-     &dufozmt7, bmb, ifys6woa, wkmm, iz2nbfjc(gp1jxzuh),jnxpuym2(
-     &gp1jxzuh),itwk, hjm2ktyr(1,hnpt1zym(gp1jxzuh)), twk(1), twk(1+2*
-     &jnxpuym2(gp1jxzuh)), twk(1+4*jnxpuym2(gp1jxzuh)), twk(1+(4+nefk)*
-     &jnxpuym2(gp1jxzuh)), twk(1+(4+2*nefk)*jnxpuym2(gp1jxzuh)), twk(1+(
-     &4+3*nefk)*jnxpuym2(gp1jxzuh)), twk(1+(4+4*nefk)*jnxpuym2(gp1jxzuh)
-     &))
-      do 23372 yq6lorbx=1,wy1vqfzu 
-      if(.not.(iz2nbfjc(gp1jxzuh).eq.1))goto 23374
-      do 23376 ayfnwr1v=1,kuzxj1lo 
-      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6lorbx-1)
+     *wy1vqfzu,ezlgm2up(1,gp1jxzuh),nefk, wbkq9zyi(hnpt1zym(gp1jxzuh)), 
+     *dof(hnpt1zym(gp1jxzuh)), kispwgx3(1,hnpt1zym(gp1jxzuh)), cov(1,hnp
+     *t1zym(gp1jxzuh)), s0, mwk(1), mwk(1+nefk), mwk(1+nefk*(wy1vqfzu+1)
+     *), mwk(1+nefk*(2*wy1vqfzu+1)), work1, work3, dimw, fbd5yktj, ldk, 
+     *info, yzoe1rsp, sgdub, rpyis2kc(nbzjkpi3(gp1jxzuh)), zv2xfhei(jwbk
+     *l9fp(gp1jxzuh)), acpios9q(gp1jxzuh),tgiyxdw1, dufozmt7, bmb, ifys6
+     *woa, wkmm, iz2nbfjc(gp1jxzuh),jnxpuym2(gp1jxzuh),itwk, hjm2ktyr(1,
+     *hnpt1zym(gp1jxzuh)), twk(1), twk(1+2*jnxpuym2(gp1jxzuh)), twk(1+4*
+     *jnxpuym2(gp1jxzuh)), twk(1+(4+nefk)*jnxpuym2(gp1jxzuh)), twk(1+(4+
+     *2*nefk)*jnxpuym2(gp1jxzuh)), twk(1+(4+3*nefk)*jnxpuym2(gp1jxzuh)),
+     * twk(1+(4+4*nefk)*jnxpuym2(gp1jxzuh)))
+      do23372 yq6lorbx=1,wy1vqfzu 
+      if(iz2nbfjc(gp1jxzuh).eq.1)then
+      do23376 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + kispwg
+     *x3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6lorbx-1)
 23376 continue
-      goto 23375
-23374 continue
-      do 23378 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
-      do 23380 ayfnwr1v=1,kuzxj1lo 
-      m0ibglfx(yq6lorbx,ayfnwr1v)=m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &hjm2ktyr(yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(
-     &ayfnwr1v,hnpt1zym(gp1jxzuh)+wg1xifdy-1)
+23377 continue
+      else
+      do23378 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
+      do23380 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(yq6lorbx,ayfnwr1v)=m0ibglfx(yq6lorbx,ayfnwr1v) + hjm2ktyr
+     *(yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1
+     *zym(gp1jxzuh)+wg1xifdy-1)
 23380 continue
+23381 continue
 23378 continue
-23375 continue
-      do 23382 ayfnwr1v=1,kuzxj1lo 
-      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) - 
-     &oldmat(ayfnwr1v,yq6lorbx)
+23379 continue
+      endif
+      do23382 ayfnwr1v=1,kuzxj1lo 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) - oldmat
+     *(ayfnwr1v,yq6lorbx)
 23382 continue
+23383 continue
 23372 continue
-      do 23384 yq6lorbx=1,wy1vqfzu 
-      if(.not.(iz2nbfjc(gp1jxzuh) .eq. 1))goto 23386
+23373 continue
+      do23384 yq6lorbx=1,wy1vqfzu 
+      if(iz2nbfjc(gp1jxzuh) .eq. 1)then
       deltaf = deltaf + rd9beyfk(kuzxj1lo,oldmat(1,yq6lorbx),kispwgx3(1,
-     &hnpt1zym(gp1jxzuh)+yq6lorbx-1), wmat(1,yq6lorbx))
-      goto 23387
-23386 continue
-      do 23388 ayfnwr1v=1,kuzxj1lo 
+     *hnpt1zym(gp1jxzuh)+yq6lorbx-1), wmat(1,yq6lorbx))
+      else
+      do23388 ayfnwr1v=1,kuzxj1lo 
       twk(ayfnwr1v) = 0.0d0
-      do 23390 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
-      twk(ayfnwr1v) = twk(ayfnwr1v) + hjm2ktyr(yq6lorbx,hnpt1zym(
-     &gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+
-     &wg1xifdy-1)
+      do23390 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
+      twk(ayfnwr1v) = twk(ayfnwr1v) + hjm2ktyr(yq6lorbx,hnpt1zym(gp1jxzu
+     *h)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+wg1xifdy-1)
 23390 continue
+23391 continue
 23388 continue
-      deltaf = deltaf + rd9beyfk(kuzxj1lo, oldmat(1,yq6lorbx), twk, 
-     &wmat(1,yq6lorbx))
-23387 continue
+23389 continue
+      deltaf = deltaf + rd9beyfk(kuzxj1lo, oldmat(1,yq6lorbx), twk, wmat
+     *(1,yq6lorbx))
+      endif
 23384 continue
-      do 23392 yq6lorbx=1,wy1vqfzu 
-      do 23394 ayfnwr1v=1,kuzxj1lo 
-      ghz9vuba(ayfnwr1v,yq6lorbx)=tlgduey8(ayfnwr1v,yq6lorbx)-m0ibglfx(
-     &yq6lorbx,ayfnwr1v)
+23385 continue
+      do23392 yq6lorbx=1,wy1vqfzu 
+      do23394 ayfnwr1v=1,kuzxj1lo 
+      ghz9vuba(ayfnwr1v,yq6lorbx)=tlgduey8(ayfnwr1v,yq6lorbx)-m0ibglfx(y
+     *q6lorbx,ayfnwr1v)
 23394 continue
+23395 continue
 23392 continue
+23393 continue
       call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu)
-      call nudh6szqf(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,
-     &kuzxj1lo,wy1vqfzu,wkmm)
-      call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, 
-     &wk2,wk2, beta, wk2,ub4xioar,job,info)
-      call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,
-     &dufozmt7,dimu)
+      call nudh6szqf(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzx
+     *j1lo,wy1vqfzu,wkmm)
+      call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, wk2
+     *,wk2, beta, wk2,ub4xioar,job,info)
+      call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,dufoz
+     *mt7,dimu)
 23360 continue
-23358 continue
-      if(.not.(q .gt. 0))goto 23396
+23361 continue
+      endif
+      if(q .gt. 0)then
       z4vrscot=0.0d0
-      do 23398 yq6lorbx=1,wy1vqfzu 
-      do 23400 ayfnwr1v=1,kuzxj1lo 
+      do23398 yq6lorbx=1,wy1vqfzu 
+      do23400 ayfnwr1v=1,kuzxj1lo 
       z4vrscot = z4vrscot + wmat(ayfnwr1v,yq6lorbx) * m0ibglfx(yq6lorbx,
-     &ayfnwr1v)**2
+     *ayfnwr1v)**2
 23400 continue
+23401 continue
 23398 continue
-      if(.not.(z4vrscot .gt. 0.0d0))goto 23402
+23399 continue
+      if(z4vrscot .gt. 0.0d0)then
       ratio = dsqrt(deltaf/z4vrscot)
-      goto 23403
-23402 continue
+      else
       ratio = 0.0d0
-23403 continue
-23396 continue
-      if(.not.(niter .eq. 1))goto 23404
+      endif
+      endif
+      if(niter .eq. 1)then
       ratio = 1.0d0
-23404 continue
+      endif
       goto 23348
+      endif
 23349 continue
-      do 23406 yq6lorbx=1,xjc4ywlh 
+      do23406 yq6lorbx=1,xjc4ywlh 
       twk(yq6lorbx)=beta(yq6lorbx)
 23406 continue
-      do 23408 yq6lorbx=1,xjc4ywlh 
+23407 continue
+      do23408 yq6lorbx=1,xjc4ywlh 
       beta(ges1xpkr(yq6lorbx))=twk(yq6lorbx)
 23408 continue
-      do 23410 ayfnwr1v=1,kuzxj1lo 
-      do 23412 yq6lorbx=1,wy1vqfzu 
-      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + 
-     &ub4xioar(yq6lorbx,ayfnwr1v)
+23409 continue
+      do23410 ayfnwr1v=1,kuzxj1lo 
+      do23412 yq6lorbx=1,wy1vqfzu 
+      m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + ub4xio
+     *ar(yq6lorbx,ayfnwr1v)
 23412 continue
+23413 continue
 23410 continue
-      if(.not.((yzoe1rsp .ne. 0) .and. (q .gt. 0)))goto 23414
-      do 23416 gp1jxzuh=1,q 
-      do 23418 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
-      call shm8ynte(kuzxj1lo,nef(gp1jxzuh),ezlgm2up(1,gp1jxzuh), cov(1,
-     &hnpt1zym(gp1jxzuh)+wg1xifdy-1),oldmat)
-      do 23420 ayfnwr1v=1,kuzxj1lo 
+23411 continue
+      if((yzoe1rsp .ne. 0) .and. (q .gt. 0))then
+      do23416 gp1jxzuh=1,q 
+      do23418 wg1xifdy=1,jnxpuym2(gp1jxzuh) 
+      call shm8ynte(kuzxj1lo,nef(gp1jxzuh),ezlgm2up(1,gp1jxzuh), cov(1,h
+     *npt1zym(gp1jxzuh)+wg1xifdy-1),oldmat)
+      do23420 ayfnwr1v=1,kuzxj1lo 
       cov(ayfnwr1v,hnpt1zym(gp1jxzuh)+wg1xifdy-1) = oldmat(ayfnwr1v,1)
 23420 continue
+23421 continue
 23418 continue
+23419 continue
 23416 continue
-23414 continue
+23417 continue
+      endif
       return
       end
       subroutine x6kanjdh(he7mqnvy, xout, kuzxj1lo, wy1vqfzu)
@@ -1144,118 +1249,124 @@
       double precision he7mqnvy(kuzxj1lo), xout(*)
       integer ayfnwr1v, yq6lorbx, gp1jxzuh, iptr
       iptr=1
-      do 23422 yq6lorbx=1,wy1vqfzu 
-      do 23424 ayfnwr1v=1,kuzxj1lo 
-      do 23426 gp1jxzuh=1,wy1vqfzu 
-      if(.not.(yq6lorbx .eq. gp1jxzuh))goto 23428
+      do23422 yq6lorbx=1,wy1vqfzu 
+      do23424 ayfnwr1v=1,kuzxj1lo 
+      do23426 gp1jxzuh=1,wy1vqfzu 
+      if(yq6lorbx .eq. gp1jxzuh)then
       xout(iptr) = 1.0d0
-      goto 23429
-23428 continue
+      else
       xout(iptr) = 0.0d0
-23429 continue
+      endif
       iptr=iptr+1
 23426 continue
+23427 continue
 23424 continue
+23425 continue
 23422 continue
-      do 23430 yq6lorbx=1,wy1vqfzu 
-      do 23432 ayfnwr1v=1,kuzxj1lo 
-      do 23434 gp1jxzuh=1,wy1vqfzu 
-      if(.not.(yq6lorbx .eq. gp1jxzuh))goto 23436
+23423 continue
+      do23430 yq6lorbx=1,wy1vqfzu 
+      do23432 ayfnwr1v=1,kuzxj1lo 
+      do23434 gp1jxzuh=1,wy1vqfzu 
+      if(yq6lorbx .eq. gp1jxzuh)then
       xout(iptr) = he7mqnvy(ayfnwr1v)
-      goto 23437
-23436 continue
+      else
       xout(iptr) = 0.0d0
-23437 continue
+      endif
       iptr=iptr+1
 23434 continue
+23435 continue
 23432 continue
+23433 continue
 23430 continue
+23431 continue
       return
       end
-      double precision function rd9beyfk(kuzxj1lo, bhcji9gl, m0ibglfx, 
-     &po8rwsmy)
+      double precision function rd9beyfk(kuzxj1lo, bhcji9gl, m0ibglfx, p
+     *o8rwsmy)
       integer kuzxj1lo
       double precision bhcji9gl(kuzxj1lo), m0ibglfx(kuzxj1lo), po8rwsmy(
-     &kuzxj1lo)
+     *kuzxj1lo)
       integer ayfnwr1v
       double precision lm9vcjob, rxeqjn0y, work
       rxeqjn0y = 0.0d0
       lm9vcjob = 0.0d0
-      do 23438 ayfnwr1v=1,kuzxj1lo 
+      do23438 ayfnwr1v=1,kuzxj1lo 
       work = bhcji9gl(ayfnwr1v) - m0ibglfx(ayfnwr1v)
       rxeqjn0y = rxeqjn0y + po8rwsmy(ayfnwr1v)*work*work
       lm9vcjob = lm9vcjob + po8rwsmy(ayfnwr1v)
 23438 continue
-      if(.not.(lm9vcjob .gt. 0.0d0))goto 23440
+23439 continue
+      if(lm9vcjob .gt. 0.0d0)then
       rd9beyfk=rxeqjn0y/lm9vcjob
-      goto 23441
-23440 continue
+      else
       rd9beyfk=0.0d0
-23441 continue
+      endif
       return
       end
-      subroutine pitmeh0q(kuzxj1lo, bhcji9gl, po8rwsmy, lfu2qhid, 
-     &lm9vcjob)
+      subroutine pitmeh0q(kuzxj1lo, bhcji9gl, po8rwsmy, lfu2qhid, lm9vcj
+     *ob)
       implicit logical (a-z)
       integer kuzxj1lo
       double precision bhcji9gl(kuzxj1lo), po8rwsmy(kuzxj1lo), lfu2qhid,
-     & lm9vcjob
+     * lm9vcjob
       double precision rxeqjn0y
       integer ayfnwr1v
       lm9vcjob = 0.0d0
       rxeqjn0y = 0.0d0
-      do 23442 ayfnwr1v=1,kuzxj1lo 
+      do23442 ayfnwr1v=1,kuzxj1lo 
       rxeqjn0y = rxeqjn0y + bhcji9gl(ayfnwr1v) * po8rwsmy(ayfnwr1v)
       lm9vcjob = lm9vcjob + po8rwsmy(ayfnwr1v)
 23442 continue
-      if(.not.(lm9vcjob .gt. 0.0d0))goto 23444
+23443 continue
+      if(lm9vcjob .gt. 0.0d0)then
       lfu2qhid = rxeqjn0y / lm9vcjob
-      goto 23445
-23444 continue
+      else
       lfu2qhid = 0.0d0
-23445 continue
+      endif
       return
       end
-      subroutine dsrt0gem(kuzxj1lo, x, w, bhcji9gl, ub4xioar, cov, 
-     &yzoe1rsp)
+      subroutine dsrt0gem(kuzxj1lo, x, w, bhcji9gl, ub4xioar, cov, yzoe1
+     *rsp)
       implicit logical (a-z)
       integer kuzxj1lo
       integer yzoe1rsp
-      double precision x(kuzxj1lo), w(kuzxj1lo), bhcji9gl(kuzxj1lo), 
-     &ub4xioar(kuzxj1lo)
+      double precision x(kuzxj1lo), w(kuzxj1lo), bhcji9gl(kuzxj1lo), ub4
+     *xioar(kuzxj1lo)
       double precision cov(kuzxj1lo,*)
       integer ayfnwr1v
-      double precision pasjmo8g, pygsw6ko, q6zdcwxk, nsum, eck8vubt, 
-     &interc, bzmd6ftv, hofjnx2e, lm9vcjob
+      double precision pasjmo8g, pygsw6ko, q6zdcwxk, nsum, eck8vubt, int
+     *erc, bzmd6ftv, hofjnx2e, lm9vcjob
       call pitmeh0q(kuzxj1lo,bhcji9gl,w,pasjmo8g, lm9vcjob)
       call pitmeh0q(kuzxj1lo,x,w,pygsw6ko, lm9vcjob)
       nsum = 0.0d0
       q6zdcwxk = 0.0d0
-      do 23446 ayfnwr1v=1,kuzxj1lo 
+      do23446 ayfnwr1v=1,kuzxj1lo 
       hofjnx2e = x(ayfnwr1v)-pygsw6ko
-      nsum = nsum + hofjnx2e * (bhcji9gl(ayfnwr1v)-pasjmo8g) * w(
-     &ayfnwr1v)
+      nsum = nsum + hofjnx2e * (bhcji9gl(ayfnwr1v)-pasjmo8g) * w(ayfnwr1
+     *v)
       hofjnx2e = hofjnx2e * hofjnx2e
       q6zdcwxk = q6zdcwxk + hofjnx2e * w(ayfnwr1v)
 23446 continue
+23447 continue
       eck8vubt = nsum/q6zdcwxk
       interc = pasjmo8g - eck8vubt * pygsw6ko
-      do 23448 ayfnwr1v=1,kuzxj1lo 
+      do23448 ayfnwr1v=1,kuzxj1lo 
       ub4xioar(ayfnwr1v) = interc + eck8vubt * x(ayfnwr1v)
 23448 continue
+23449 continue
       bzmd6ftv = interc + eck8vubt * x(1)
-      if(.not.(yzoe1rsp .ne. 0))goto 23450
-      do 23452 ayfnwr1v=1,kuzxj1lo 
+      if(yzoe1rsp .ne. 0)then
+      do23452 ayfnwr1v=1,kuzxj1lo 
       hofjnx2e = x(ayfnwr1v)-pygsw6ko
-      if(.not.(w(ayfnwr1v) .gt. 0.0d0))goto 23454
-      cov(ayfnwr1v,1) = cov(ayfnwr1v,1) - 1.0d0/lm9vcjob - hofjnx2e * 
-     &hofjnx2e / q6zdcwxk
-      goto 23455
-23454 continue
+      if(w(ayfnwr1v) .gt. 0.0d0)then
+      cov(ayfnwr1v,1) = cov(ayfnwr1v,1) - 1.0d0/lm9vcjob - hofjnx2e * ho
+     *fjnx2e / q6zdcwxk
+      else
       cov(ayfnwr1v,1) = 0.0d0
-23455 continue
+      endif
 23452 continue
-23450 continue
+23453 continue
+      endif
       return
       end
       subroutine shm8ynte(kuzxj1lo, p, ezlgm2up, pygsw6ko, x)
@@ -1263,9 +1374,10 @@
       integer kuzxj1lo, p, ezlgm2up(kuzxj1lo)
       double precision pygsw6ko(p), x(kuzxj1lo)
       integer ayfnwr1v
-      do 23456 ayfnwr1v=1,kuzxj1lo 
+      do23456 ayfnwr1v=1,kuzxj1lo 
       x(ayfnwr1v) = pygsw6ko(ezlgm2up(ayfnwr1v))
 23456 continue
+23457 continue
       return
       end
       subroutine vankcghz2l2(x, kuzxj1lo, ankcghz2, rvy1fpli, ukgwt7na)
@@ -1273,27 +1385,28 @@
       integer kuzxj1lo, rvy1fpli, ukgwt7na
       double precision x(kuzxj1lo), ankcghz2(kuzxj1lo)
       integer ndk, yq6lorbx
-      if(.not.(ukgwt7na .eq. 0))goto 23458
-      if(.not.(kuzxj1lo .le. 40))goto 23460
+      if(ukgwt7na .eq. 0)then
+      if(kuzxj1lo .le. 40)then
       ndk = kuzxj1lo
-      goto 23461
-23460 continue
+      else
       ndk = 40 + dexp(0.25d0 * dlog(kuzxj1lo-40.0d0))
-23461 continue
-      goto 23459
-23458 continue
+      endif
+      else
       ndk = rvy1fpli - 6
-23459 continue
+      endif
       rvy1fpli = ndk + 6
-      do 23462 yq6lorbx = 1,3 
+      do23462 yq6lorbx = 1,3 
       ankcghz2(yq6lorbx) = x(1) 
 23462 continue
-      do 23464 yq6lorbx = 1,ndk 
+23463 continue
+      do23464 yq6lorbx = 1,ndk 
       ankcghz2(yq6lorbx+3) = x( 1 + (yq6lorbx-1)*(kuzxj1lo-1)/(ndk-1) ) 
 23464 continue
-      do 23466 yq6lorbx = 1,3 
+23465 continue
+      do23466 yq6lorbx = 1,3 
       ankcghz2(ndk+3+yq6lorbx) = x(kuzxj1lo) 
 23466 continue
+23467 continue
       return
       end
       subroutine pankcghz2l2(ankcghz2, kuzxj1lo, zo8wpibx, tol)
@@ -1301,22 +1414,24 @@
       integer kuzxj1lo, zo8wpibx(kuzxj1lo)
       double precision ankcghz2(kuzxj1lo), tol
       integer ayfnwr1v, cjop5bwm
-      do 23468 ayfnwr1v=1,4 
+      do23468 ayfnwr1v=1,4 
       zo8wpibx(ayfnwr1v) = 1
 23468 continue
+23469 continue
       cjop5bwm = 4
-      do 23470 ayfnwr1v=5,(kuzxj1lo-4) 
-      if(.not.((ankcghz2(ayfnwr1v) - ankcghz2(cjop5bwm) .ge. tol) .and.(
-     &ankcghz2(kuzxj1lo) - ankcghz2(ayfnwr1v) .ge. tol)))goto 23472
+      do23470 ayfnwr1v=5,(kuzxj1lo-4) 
+      if((ankcghz2(ayfnwr1v) - ankcghz2(cjop5bwm) .ge. tol) .and. (ankcg
+     *hz2(kuzxj1lo) - ankcghz2(ayfnwr1v) .ge. tol))then
       zo8wpibx(ayfnwr1v) = 1
       cjop5bwm = ayfnwr1v
-      goto 23473
-23472 continue
+      else
       zo8wpibx(ayfnwr1v) = 0
-23473 continue
+      endif
 23470 continue
-      do 23474 ayfnwr1v=(kuzxj1lo-3),kuzxj1lo 
+23471 continue
+      do23474 ayfnwr1v=(kuzxj1lo-3),kuzxj1lo 
       zo8wpibx(ayfnwr1v) = 1
 23474 continue
+23475 continue
       return
       end
diff --git a/src/vgam3.c b/src/vgam3.c
index 865b958..0b44033 100644
--- a/src/vgam3.c
+++ b/src/vgam3.c
@@ -255,12 +255,12 @@ void Yee_vbvs(int *f8yswcat, double gkdx5jal[], double rpyis2kc[],
   int    ayfnwr1v, yq6lorbx, h2dpsbkr = 4;
 
   for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-      chw8lzty = sjwyig9t;
-      for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
-          F77_CALL(wbvalue)(gkdx5jal, rpyis2kc, acpios9q, &h2dpsbkr,
-                            chw8lzty++, order, kispwgx3++);
-      }
-      rpyis2kc += *acpios9q;
+    chw8lzty = sjwyig9t;
+    for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
+      F77_CALL(wbvalue)(gkdx5jal, rpyis2kc, acpios9q, &h2dpsbkr,
+                        chw8lzty++, order, kispwgx3++);
+    }
+    rpyis2kc += *acpios9q;
   }
 }
 
@@ -275,51 +275,51 @@ void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk,
          *fpdlcqk9osiz4fxy;
 
 
-  fpdlcqk9osiz4fxy  = osiz4fxy + *ldk - 1;
+  fpdlcqk9osiz4fxy = osiz4fxy + *ldk - 1;
   fpdlcqk9xecbg0pf = xecbg0pf;
   for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-      fpdlcqk9wbkq9zyi = wbkq9zyi;
-      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-        *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9xecbg0pf;
-         fpdlcqk9osiz4fxy += *ldk;
-      }
-      fpdlcqk9xecbg0pf++;
+    fpdlcqk9wbkq9zyi = wbkq9zyi;
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9xecbg0pf;
+       fpdlcqk9osiz4fxy += *ldk;
+    }
+    fpdlcqk9xecbg0pf++;
   }
 
   fpdlcqk9osiz4fxy  = osiz4fxy + *wy1vqfzu * *ldk;
   fpdlcqk9osiz4fxy  = fpdlcqk9osiz4fxy + *ldk - *wy1vqfzu - 1;
   fpdlcqk9z4grbpiq = z4grbpiq;
   for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-      fpdlcqk9wbkq9zyi = wbkq9zyi;
-      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-        *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9z4grbpiq;
-         fpdlcqk9osiz4fxy += *ldk;
-      }
-      fpdlcqk9z4grbpiq++;
+    fpdlcqk9wbkq9zyi = wbkq9zyi;
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9z4grbpiq;
+       fpdlcqk9osiz4fxy += *ldk;
+    }
+    fpdlcqk9z4grbpiq++;
   }
 
   fpdlcqk9osiz4fxy  = osiz4fxy + *ldk + 2 * *wy1vqfzu * *ldk;
   fpdlcqk9osiz4fxy  = fpdlcqk9osiz4fxy - 2 * *wy1vqfzu - 1;
   fpdlcqk9d7glzhbj = d7glzhbj;
   for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-      fpdlcqk9wbkq9zyi = wbkq9zyi;
-      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-        *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9d7glzhbj;
-         fpdlcqk9osiz4fxy += *ldk;
-      }
-      fpdlcqk9d7glzhbj++;
+    fpdlcqk9wbkq9zyi = wbkq9zyi;
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9d7glzhbj;
+       fpdlcqk9osiz4fxy += *ldk;
+    }
+    fpdlcqk9d7glzhbj++;
   }
 
   fpdlcqk9osiz4fxy  = osiz4fxy + *ldk + 3 * *wy1vqfzu * *ldk;
   fpdlcqk9osiz4fxy  = fpdlcqk9osiz4fxy - 3 * *wy1vqfzu - 1;
   fpdlcqk9v2eydbxs = v2eydbxs;
   for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
-      fpdlcqk9wbkq9zyi = wbkq9zyi;
-      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-        *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9v2eydbxs;
-         fpdlcqk9osiz4fxy += *ldk;
-      }
-      fpdlcqk9v2eydbxs++;
+    fpdlcqk9wbkq9zyi = wbkq9zyi;
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9v2eydbxs;
+       fpdlcqk9osiz4fxy += *ldk;
+    }
+    fpdlcqk9v2eydbxs++;
   }
 }
 
@@ -338,22 +338,21 @@ void fapc0tnbybnagt8k(int *iii, int *cz8qdfyj, int *tesdm5kv,
   bcol = *cz8qdfyj + *tesdm5kv;
   brow = *cz8qdfyj;
   for (urohxe6t = 1; urohxe6t <= *kvowz9ht; urohxe6t++) {
-      tmp_wrk = rbne6ouj[*iii -1 + (urohxe6t-1) * *kuzxj1lo] *
-                g9fvdrbw[*kxvq6sfw-1] * g9fvdrbw[*nyfu9rod-1];
+    tmp_wrk = rbne6ouj[*iii -1 + (urohxe6t-1) * *kuzxj1lo] *
+              g9fvdrbw[*kxvq6sfw-1] * g9fvdrbw[*nyfu9rod-1];
 
-      biuvowq2 = (brow-1) * *wy1vqfzu + tgiyxdw1[urohxe6t-1];
-      nbj8tdsk = (bcol-1) * *wy1vqfzu + dufozmt7[urohxe6t-1];
+    biuvowq2 = (brow-1) * *wy1vqfzu + tgiyxdw1[urohxe6t-1];
+    nbj8tdsk = (bcol-1) * *wy1vqfzu + dufozmt7[urohxe6t-1];
+    nead = nbj8tdsk - biuvowq2;
+    osiz4fxy[*ldk - nead - 1 + (nbj8tdsk-1) * *ldk] += tmp_wrk;
+
+    if (*tesdm5kv > 0 && dufozmt7[urohxe6t-1] != tgiyxdw1[urohxe6t-1]) {
+      biuvowq2 = (brow-1) * *wy1vqfzu + dufozmt7[urohxe6t-1];
+      nbj8tdsk = (bcol-1) * *wy1vqfzu + tgiyxdw1[urohxe6t-1];
       nead = nbj8tdsk - biuvowq2;
       osiz4fxy[*ldk - nead - 1 + (nbj8tdsk-1) * *ldk] += tmp_wrk;
-
-      if (*tesdm5kv > 0 && dufozmt7[urohxe6t-1] != tgiyxdw1[urohxe6t-1]) {
-          biuvowq2 = (brow-1) * *wy1vqfzu + dufozmt7[urohxe6t-1];
-          nbj8tdsk = (bcol-1) * *wy1vqfzu + tgiyxdw1[urohxe6t-1];
-          nead = nbj8tdsk - biuvowq2;
-          osiz4fxy[*ldk - nead - 1 + (nbj8tdsk-1) * *ldk] += tmp_wrk;
-      }
+    }
   }
-
 }
 
 
@@ -606,19 +605,19 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
 
 
   if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu) {
-      Free_fapc0tnbyee_spline(wkumc9idosiz4fxy,  wkumc9idenaqpzk9,
-                             wkumc9idbtwy,   wkumc9idwk0,
-                             wkumc9idbk3ymcih,
-                             wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+    Free_fapc0tnbyee_spline(wkumc9idosiz4fxy,  wkumc9idenaqpzk9,
+                           wkumc9idbtwy,   wkumc9idwk0,
+                           wkumc9idbk3ymcih,
+                           wkumc9idtgiyxdw1, wkumc9iddufozmt7);
 
 
-  for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-    hdnw2fts[yq6lorbx-1] -= 1.0;  // Decrement it.
-  }
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      hdnw2fts[yq6lorbx-1] -= 1.0;  // Decrement it.
+    }
 
 
 
-      return;
+    return;
   }
 
 
@@ -830,12 +829,12 @@ void fapc0tnbcn8kzpab(double gkdx5jals[], double sjwyig9t[], double rpyis2kc[],
 
 
   for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) {
-      chw8lzty = sjwyig9t;
-      for (ayfnwr1v = 0; ayfnwr1v < *lqsahu0r; ayfnwr1v++) {
-          F77_CALL(wbvalue)(gkdx5jals, rpyis2kc, acpios9q, &h2dpsbkr,
-                            chw8lzty++, &yu6izdrc, t8hwvalr++);
-      }
-      rpyis2kc += *acpios9q;
+    chw8lzty = sjwyig9t;
+    for (ayfnwr1v = 0; ayfnwr1v < *lqsahu0r; ayfnwr1v++) {
+      F77_CALL(wbvalue)(gkdx5jals, rpyis2kc, acpios9q, &h2dpsbkr,
+                        chw8lzty++, &yu6izdrc, t8hwvalr++);
+    }
+    rpyis2kc += *acpios9q;
   }
 }
 
@@ -886,7 +885,14 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
          imk5wjxg   = *wy1vqfzu   * (*wy1vqfzu   + 1) / 2,
          n2colb = *kgwmz4ip *  *kgwmz4ip,
          n3colb = *kgwmz4ip * (*kgwmz4ip + 1) / 2;
-  wkumc9ideshvo2ic   = conmat;   wkumc9idonxjvw8u = conmat;
+
+  double hmayv1xt1 = 1.0, hmayv1xt2;
+
+
+
+
+  hmayv1xt2 = hmayv1xt1 + 1.0;
+  wkumc9ideshvo2ic   = &hmayv1xt2;   wkumc9idonxjvw8u = &hmayv1xt2;
 
   wkumc9idwk1a    = Calloc(zyojx5hw          , double);
   wkumc9idwk1b    = Calloc(*wy1vqfzu         , double);
@@ -934,87 +940,87 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
 
   ptri = ezlgm2up;  qnwamo0e = sjwyig9t;
   for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
-      pygsw6ko[(*ptri++) - 1] = *qnwamo0e++;
+    pygsw6ko[(*ptri++) - 1] = *qnwamo0e++;
   }
 
   if (*iz2nbfjc) {
-      qnwamo0e = onxjvw8u;
-      for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) {
-          for (ayfnwr1v = 0; ayfnwr1v < *lqsahu0r; ayfnwr1v++) {
-              *qnwamo0e++ = 0.0e0;
-          }
+    qnwamo0e = onxjvw8u;
+    for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) {
+      for (ayfnwr1v = 0; ayfnwr1v < *lqsahu0r; ayfnwr1v++) {
+        *qnwamo0e++ = 0.0e0;
       }
+    }
   }
 
   if (*iz2nbfjc) {
-      qnwamo0e = eshvo2ic;
-      for (yq6lorbx = 1; yq6lorbx <= *dim2eshvo2ic; yq6lorbx++) {
-          for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
-              *qnwamo0e++ = 0.0e0;
-          }
+    qnwamo0e = eshvo2ic;
+    for (yq6lorbx = 1; yq6lorbx <= *dim2eshvo2ic; yq6lorbx++) {
+      for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
+        *qnwamo0e++ = 0.0e0;
       }
+    }
   }
 
   for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+    for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
+      wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
+               (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] =
+      wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 +
+               (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] =
+           rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1)    * *ftnjamu2];
+    }
+
+    qnwamo0e1 = (*iz2nbfjc) ? eshvo2ic  : wkumc9ideshvo2ic;
+    qnwamo0e2 = (*iz2nbfjc) ? onxjvw8u : wkumc9idonxjvw8u;
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
+              qnwamo0e2[ezlgm2up[ayfnwr1v-1]-1 + (yq6lorbx-1) * *lqsahu0r] +=
+                wkumc9idwk1a[yq6lorbx   -1 + (gp1jxzuh-1) * *wy1vqfzu] *
+                     tlgduey8[ayfnwr1v   -1 + (gp1jxzuh-1) * *ftnjamu2];
+      }
+    }
+    for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
+       qnwamo0e1[ezlgm2up[ayfnwr1v-1]-1 + (yq6lorbx-1) * *lqsahu0r] +=
+             rbne6ouj[ayfnwr1v   -1 + (yq6lorbx-1) * *ftnjamu2];
+    }
+  }
+
+  *dvhw1ulq = 1;
+  if (*iz2nbfjc) {
+    for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
       for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
           wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
                    (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] =
           wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 +
                    (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] =
-               rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1)    * *ftnjamu2];
+               eshvo2ic[ayfnwr1v-1 + (yq6lorbx-1)    * *lqsahu0r];
       }
-
-      qnwamo0e1 = (*iz2nbfjc) ? eshvo2ic  : wkumc9ideshvo2ic;
-      qnwamo0e2 = (*iz2nbfjc) ? onxjvw8u : wkumc9idonxjvw8u;
       for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-          for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
-                    qnwamo0e2[ezlgm2up[ayfnwr1v-1]-1 + (yq6lorbx-1) * *lqsahu0r] +=
-                      wkumc9idwk1a[yq6lorbx   -1 + (gp1jxzuh-1) * *wy1vqfzu] *
-                           tlgduey8[ayfnwr1v   -1 + (gp1jxzuh-1) * *ftnjamu2];
-          }
-      }
-      for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
-               qnwamo0e1[ezlgm2up[ayfnwr1v-1]-1 + (yq6lorbx-1) * *lqsahu0r] +=
-                     rbne6ouj[ayfnwr1v   -1 + (yq6lorbx-1) * *ftnjamu2];
+       wkumc9idwk1b[yq6lorbx-1] =      onxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
       }
-  }
 
-  *dvhw1ulq = 1;
-  if (*iz2nbfjc) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
-          for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
+      fvlmz9iyjdbomp0g(wkumc9idwk1a, wkumc9idwk1b, wy1vqfzu, dvhw1ulq, &pqneb2ra);
+      if (*dvhw1ulq != 1) {
+        Rprintf("*dvhw1ulq != 1 after fvlmz9iyjdbomp0g in vsuff9.\n");
+        Free_fapc0tnbvsuff9(wkumc9idwk1a,    wkumc9idwk1b,
+                           wkumc9idwk2a,    wkumc9idwk2b,
+                           wkumc9ideshvo2ic,   wkumc9idonxjvw8u,
+                           wkumc9idtgiyxdw11, wkumc9iddufozmt71,
+                           wkumc9idtgiyxdw12, wkumc9iddufozmt72,
+                           iz2nbfjc);
+        return;
+      }
+      if (*wueshvo2ic) {
+        for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) {
+            ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] =
               wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
-                       (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] =
-              wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 +
-                       (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] =
-                   eshvo2ic[ayfnwr1v-1 + (yq6lorbx-1)    * *lqsahu0r];
-          }
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-           wkumc9idwk1b[yq6lorbx-1] =      onxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
-          }
-
-          fvlmz9iyjdbomp0g(wkumc9idwk1a, wkumc9idwk1b, wy1vqfzu, dvhw1ulq, &pqneb2ra);
-          if (*dvhw1ulq != 1) {
-            Rprintf("*dvhw1ulq != 1 after fvlmz9iyjdbomp0g in vsuff9.\n");
-            Free_fapc0tnbvsuff9(wkumc9idwk1a,    wkumc9idwk1b,
-                               wkumc9idwk2a,    wkumc9idwk2b,
-                               wkumc9ideshvo2ic,   wkumc9idonxjvw8u,
-                               wkumc9idtgiyxdw11, wkumc9iddufozmt71,
-                               wkumc9idtgiyxdw12, wkumc9iddufozmt72,
-                               iz2nbfjc);
-            return;
-          }
-          if (*wueshvo2ic) {
-            for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) {
-                ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] =
-                  wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
-                           (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu];
-            }
-          }
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-            pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk1b[yq6lorbx-1];
-          }
+                       (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu];
+        }
       }
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk1b[yq6lorbx-1];
+      }
+    }
   } else {
       qnwamo0e = wkumc9idwk1a;
       for (yq6lorbx = 1; yq6lorbx <= zyojx5hw; yq6lorbx++) {
@@ -1034,61 +1040,61 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
           wkumc9idwk1b[yq6lorbx-1] = wkumc9idonxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
         }
 
-          for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
-              for (gp1jxzuh = yq6lorbx; gp1jxzuh <= *kgwmz4ip; gp1jxzuh++) {
-                  wkumc9idwk2a[yq6lorbx-1 + (gp1jxzuh-1) * *kgwmz4ip] = 0.0e0;
-                  for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
-                      for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
-                         wkumc9idwk2a[yq6lorbx-1 + (gp1jxzuh-1) * *kgwmz4ip] +=
-                            conmat[urohxe6t-1 + (yq6lorbx-1) * *wy1vqfzu] *
-                         wkumc9idwk1a[urohxe6t-1 + (bpvaqm5z-1) * *wy1vqfzu] *
-                            conmat[bpvaqm5z-1 + (gp1jxzuh-1) * *wy1vqfzu];
-                      }
-                  }
-              }
+        for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
+          for (gp1jxzuh = yq6lorbx; gp1jxzuh <= *kgwmz4ip; gp1jxzuh++) {
+            wkumc9idwk2a[yq6lorbx-1 + (gp1jxzuh-1) * *kgwmz4ip] = 0.0e0;
+            for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
+              for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
+                wkumc9idwk2a[yq6lorbx-1 + (gp1jxzuh-1) * *kgwmz4ip] +=
+                   conmat[urohxe6t-1 + (yq6lorbx-1) * *wy1vqfzu] *
+                wkumc9idwk1a[urohxe6t-1 + (bpvaqm5z-1) * *wy1vqfzu] *
+                   conmat[bpvaqm5z-1 + (gp1jxzuh-1) * *wy1vqfzu];
+                }
+            }
           }
+        }
 
-          for (yq6lorbx = 1; yq6lorbx <= *dim2eshvo2ic; yq6lorbx++) {
-              eshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] =
-                wkumc9idwk2a[wkumc9idtgiyxdw12[yq6lorbx-1]-1 +
-                         (wkumc9iddufozmt72[yq6lorbx-1]-1) * *kgwmz4ip];
-          }
+        for (yq6lorbx = 1; yq6lorbx <= *dim2eshvo2ic; yq6lorbx++) {
+          eshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] =
+            wkumc9idwk2a[wkumc9idtgiyxdw12[yq6lorbx-1]-1 +
+                     (wkumc9iddufozmt72[yq6lorbx-1]-1) * *kgwmz4ip];
+        }
 
     for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
-        wkumc9idwk2b[yq6lorbx-1] = 0.0e0;
-        for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
-            wkumc9idwk2b[yq6lorbx-1] +=    conmat[urohxe6t-1 + (yq6lorbx-1) * *wy1vqfzu] *
-                                   wkumc9idwk1b[urohxe6t-1];
+      wkumc9idwk2b[yq6lorbx-1] = 0.0e0;
+      for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
+          wkumc9idwk2b[yq6lorbx-1] +=    conmat[urohxe6t-1 + (yq6lorbx-1) * *wy1vqfzu] *
+                                 wkumc9idwk1b[urohxe6t-1];
       }
   }
 
-          for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
-              onxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2b[yq6lorbx-1];
-          }
+      for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
+        onxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2b[yq6lorbx-1];
+      }
 
-          fvlmz9iyjdbomp0g(wkumc9idwk2a, wkumc9idwk2b, kgwmz4ip, dvhw1ulq, &pqneb2ra);
-          if (*dvhw1ulq != 1) {
-              Rprintf("*dvhw1ulq!=1 in vchol-vsuff9. Something gone wrong\n");
-              Free_fapc0tnbvsuff9(wkumc9idwk1a,    wkumc9idwk1b,
-                                 wkumc9idwk2a,    wkumc9idwk2b,
-                                 wkumc9ideshvo2ic,   wkumc9idonxjvw8u,
-                                 wkumc9idtgiyxdw11, wkumc9iddufozmt71,
-                                 wkumc9idtgiyxdw12, wkumc9iddufozmt72,
-                                 iz2nbfjc);
-              return;
-          }
+        fvlmz9iyjdbomp0g(wkumc9idwk2a, wkumc9idwk2b, kgwmz4ip, dvhw1ulq, &pqneb2ra);
+        if (*dvhw1ulq != 1) {
+            Rprintf("*dvhw1ulq!=1 in vchol-vsuff9. Something gone wrong\n");
+            Free_fapc0tnbvsuff9(wkumc9idwk1a,    wkumc9idwk1b,
+                               wkumc9idwk2a,    wkumc9idwk2b,
+                               wkumc9ideshvo2ic,   wkumc9idonxjvw8u,
+                               wkumc9idtgiyxdw11, wkumc9iddufozmt71,
+                               wkumc9idtgiyxdw12, wkumc9iddufozmt72,
+                               iz2nbfjc);
+            return;
+        }
 
-          if (*wueshvo2ic) {
-              for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) {
-                  ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] =
-                  wkumc9idwk2a[wkumc9idtgiyxdw12[yq6lorbx-1]-1  +
-                           (wkumc9iddufozmt72[yq6lorbx-1]-1) * *kgwmz4ip];
-              }
+        if (*wueshvo2ic) {
+          for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) {
+            ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] =
+            wkumc9idwk2a[wkumc9idtgiyxdw12[yq6lorbx-1]-1  +
+                     (wkumc9iddufozmt72[yq6lorbx-1]-1) * *kgwmz4ip];
           }
+        }
 
-          for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
-              pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2b[yq6lorbx-1];
-          }
+        for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
+          pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2b[yq6lorbx-1];
+        }
       }
   }
 
@@ -1126,7 +1132,8 @@ void fapc0tnbicpd0omv(double enaqpzk9[], double sjwyig9t[], double gkdx5jals[],
   fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1_, wkumc9iddufozmt7_, wy1vqfzu);
   ptri1 = wkumc9idtgiyxdw1_;   ptri2 = wkumc9iddufozmt7_;
   for (ayfnwr1v = 0; ayfnwr1v < imk5wjxg; ayfnwr1v++) {
-    (*ptri1++)--;  (*ptri2++)--;
+    (*ptri1++)--;
+    (*ptri2++)--;
   }
 
   wkumc9idwrk = Calloc(zyojx5hw, double);
@@ -1244,17 +1251,17 @@ void fapc0tnbo0xlszqr(int *wy1vqfzu, double *g9fvdrbw, double *quc6khaf, double
 
   qnwamo0e = quc6khaf;
   for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) {
-      for (gp1jxzuh = 0; gp1jxzuh < *wy1vqfzu; gp1jxzuh++) {
-          *quc6khaf *= *g9fvdrbw;
-          quc6khaf++;
-      }
+    for (gp1jxzuh = 0; gp1jxzuh < *wy1vqfzu; gp1jxzuh++) {
+      *quc6khaf *= *g9fvdrbw;
+      quc6khaf++;
+    }
   }
   quc6khaf = qnwamo0e;
   for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) {
-      for (gp1jxzuh = 0; gp1jxzuh < *wy1vqfzu; gp1jxzuh++) {
-          *bmb += *quc6khaf++;
-          bmb++;
-      }
+    for (gp1jxzuh = 0; gp1jxzuh < *wy1vqfzu; gp1jxzuh++) {
+      *bmb += *quc6khaf++;
+      bmb++;
+    }
   }
 }
 
@@ -1270,35 +1277,35 @@ void fapc0tnbvsel(int *nurohxe6t, int *nbpvaqm5z, int *wy1vqfzu, int *ldk,
 
   qnwamo0e = quc6khaf;
   for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
-      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-          *qnwamo0e++ = 0.0;
-      }
+    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+      *qnwamo0e++ = 0.0;
+    }
   }
 
   if (*nurohxe6t != *nbpvaqm5z) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
-          biuvowq2 = (*nurohxe6t - 1) * *wy1vqfzu + ayfnwr1v;
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              nbj8tdsk = (*nbpvaqm5z - 1) * *wy1vqfzu + yq6lorbx;
-              quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] =
-                  minv[*ldk - (nbj8tdsk-biuvowq2)-1 + (nbj8tdsk-1) * *ldk];
-          }
+    for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
+      biuvowq2 = (*nurohxe6t - 1) * *wy1vqfzu + ayfnwr1v;
+      for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        nbj8tdsk = (*nbpvaqm5z - 1) * *wy1vqfzu + yq6lorbx;
+        quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] =
+            minv[*ldk - (nbj8tdsk-biuvowq2)-1 + (nbj8tdsk-1) * *ldk];
       }
+    }
   } else {
-      for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
-          biuvowq2 = (*nurohxe6t - 1) * *wy1vqfzu + ayfnwr1v;
-          for (yq6lorbx = ayfnwr1v; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              nbj8tdsk = (*nbpvaqm5z - 1) * *wy1vqfzu + yq6lorbx;
-              quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] =
-                  minv[*ldk - (nbj8tdsk-biuvowq2)-1 + (nbj8tdsk-1) * *ldk];
-          }
+    for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
+      biuvowq2 = (*nurohxe6t - 1) * *wy1vqfzu + ayfnwr1v;
+      for (yq6lorbx = ayfnwr1v; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        nbj8tdsk = (*nbpvaqm5z - 1) * *wy1vqfzu + yq6lorbx;
+        quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] =
+            minv[*ldk - (nbj8tdsk-biuvowq2)-1 + (nbj8tdsk-1) * *ldk];
       }
-      for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
-          for (yq6lorbx = ayfnwr1v+1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              quc6khaf[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] =
-              quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu];
-          }
+    }
+    for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) {
+      for (yq6lorbx = ayfnwr1v+1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+        quc6khaf[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] =
+        quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu];
       }
+    }
   }
 }
 
@@ -1360,51 +1367,51 @@ void fapc0tnbvicb2(double enaqpzk9[], double wpuarq2m[], double Dvector[],
   hofjnx2e = *wy1vqfzu + 1;
   sedf7mxb = *f8yswcat + 1 - hofjnx2e;
   for (kij0gwer = sedf7mxb; kij0gwer <= *f8yswcat; kij0gwer++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= hofjnx2e; ayfnwr1v++) {
-          wkumc9iduu[ayfnwr1v-1 + (kij0gwer-sedf7mxb) * Mplus1] =
-             wpuarq2m[ayfnwr1v-1 + (kij0gwer-1   ) * Mplus1];
-      }
+    for (ayfnwr1v = 1; ayfnwr1v <= hofjnx2e; ayfnwr1v++) {
+      wkumc9iduu[ayfnwr1v-1 + (kij0gwer-sedf7mxb) * Mplus1] =
+         wpuarq2m[ayfnwr1v-1 + (kij0gwer-1   ) * Mplus1];
+    }
   }
 
   for (ayfnwr1v = *f8yswcat-1; ayfnwr1v >= 1; ayfnwr1v--) {
-      uplim = *wy1vqfzu < (*f8yswcat - ayfnwr1v) ? *wy1vqfzu : *f8yswcat - ayfnwr1v;
-
-      for (urohxe6t = 1; urohxe6t <= uplim; urohxe6t++) {
-          enaqpzk9[-urohxe6t+*wy1vqfzu + (ayfnwr1v+urohxe6t-1) * Mplus1] = 0.0e0;
-          for (gp1jxzuh = 1; gp1jxzuh <= urohxe6t; gp1jxzuh++) {
-              enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1     ) * Mplus1] -=
-            wkumc9iduu[-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh - sedf7mxb) * Mplus1] *
-        enaqpzk9[gp1jxzuh-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1     ) * Mplus1];
-          }
-
-          for ( ; gp1jxzuh <= uplim; gp1jxzuh++) {
-              enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1     ) * Mplus1] -=
-            wkumc9iduu[-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh - sedf7mxb) * Mplus1] *
-        enaqpzk9[urohxe6t-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh-1     ) * Mplus1];
-          }
+    uplim = *wy1vqfzu < (*f8yswcat - ayfnwr1v) ? *wy1vqfzu : *f8yswcat - ayfnwr1v;
+
+    for (urohxe6t = 1; urohxe6t <= uplim; urohxe6t++) {
+      enaqpzk9[-urohxe6t+*wy1vqfzu + (ayfnwr1v+urohxe6t-1) * Mplus1] = 0.0e0;
+      for (gp1jxzuh = 1; gp1jxzuh <= urohxe6t; gp1jxzuh++) {
+          enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1     ) * Mplus1] -=
+        wkumc9iduu[-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh - sedf7mxb) * Mplus1] *
+    enaqpzk9[gp1jxzuh-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1     ) * Mplus1];
       }
 
-      enaqpzk9[*wy1vqfzu + (ayfnwr1v-1) * Mplus1] = 1.0e0 / Dvector[ayfnwr1v-1];
-      for (urohxe6t = 1; urohxe6t <= uplim; urohxe6t++) {
-                   enaqpzk9[  *wy1vqfzu + (ayfnwr1v        - 1   ) * Mplus1] -=
-         wkumc9iduu[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t - sedf7mxb) * Mplus1] *
-           enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t - 1   ) * Mplus1];
+      for ( ; gp1jxzuh <= uplim; gp1jxzuh++) {
+          enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1     ) * Mplus1] -=
+        wkumc9iduu[-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh - sedf7mxb) * Mplus1] *
+    enaqpzk9[urohxe6t-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh-1     ) * Mplus1];
       }
+    }
+
+    enaqpzk9[*wy1vqfzu + (ayfnwr1v-1) * Mplus1] = 1.0e0 / Dvector[ayfnwr1v-1];
+    for (urohxe6t = 1; urohxe6t <= uplim; urohxe6t++) {
+                 enaqpzk9[  *wy1vqfzu + (ayfnwr1v        - 1   ) * Mplus1] -=
+       wkumc9iduu[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t - sedf7mxb) * Mplus1] *
+         enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t - 1   ) * Mplus1];
+    }
 
       if (ayfnwr1v == sedf7mxb) {
-          if (--sedf7mxb < 1) {
-              sedf7mxb = 1;
-          } else {
-              for (kij0gwer = hofjnx2e - 1; kij0gwer >= 1; kij0gwer--) {
-                  for (gp1jxzuh = 1; gp1jxzuh <= hofjnx2e; gp1jxzuh++) {
-                      wkumc9iduu[gp1jxzuh-1 +  kij0gwer    * Mplus1] =
-                      wkumc9iduu[gp1jxzuh-1 + (kij0gwer-1) * Mplus1];
-                  }
-              }
-              for (gp1jxzuh = 1; gp1jxzuh <= hofjnx2e; gp1jxzuh++) {
-                  wkumc9iduu[gp1jxzuh-1] = wpuarq2m[gp1jxzuh-1 + (sedf7mxb-1) * Mplus1];
-              }
+        if (--sedf7mxb < 1) {
+          sedf7mxb = 1;
+        } else {
+          for (kij0gwer = hofjnx2e - 1; kij0gwer >= 1; kij0gwer--) {
+            for (gp1jxzuh = 1; gp1jxzuh <= hofjnx2e; gp1jxzuh++) {
+              wkumc9iduu[gp1jxzuh-1 +  kij0gwer    * Mplus1] =
+              wkumc9iduu[gp1jxzuh-1 + (kij0gwer-1) * Mplus1];
+            }
           }
+          for (gp1jxzuh = 1; gp1jxzuh <= hofjnx2e; gp1jxzuh++) {
+            wkumc9iduu[gp1jxzuh-1] = wpuarq2m[gp1jxzuh-1 + (sedf7mxb-1) * Mplus1];
+          }
+        }
       }
   }
 
@@ -1598,7 +1605,7 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
                    kgwmz4ip, &xjc4ywlh, lqsahu0r, &npjlv3mreshvo2ic, &rutyk8mg);
 
       for (gp1jxzuh = 1; gp1jxzuh <= xjc4ywlh; gp1jxzuh++) {
-          wkumc9idges1xpkr[gp1jxzuh-1] = gp1jxzuh;
+        wkumc9idges1xpkr[gp1jxzuh-1] = gp1jxzuh;
       }
       F77_CALL(vqrdca)(wkumc9idwk4, &rutyk8mg, &rutyk8mg, &xjc4ywlh, wkumc9idfasrkub3,
                        wkumc9idges1xpkr, wkumc9idWrk1, &qemj9asg, &pvofyg8z);
@@ -1655,16 +1662,16 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
 
   fpdlcqk9ub4xioar = wkumc9idub4xioar;
   for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
-      for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
-          wkumc9idsout[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] -= *fpdlcqk9ub4xioar++;
-      }
+    for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
+      wkumc9idsout[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] -= *fpdlcqk9ub4xioar++;
+    }
   }
 
 
   for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
-      fapc0tnbshm8ynte(ftnjamu2,  /* lqsahu0r, */
-                    ezlgm2up, wkumc9idsout + (yq6lorbx-1) * *lqsahu0r,
-                              kispwgx3 + (yq6lorbx-1) * *ftnjamu2);
+    fapc0tnbshm8ynte(ftnjamu2,  /* lqsahu0r, */
+                  ezlgm2up, wkumc9idsout + (yq6lorbx-1) * *lqsahu0r,
+                            kispwgx3 + (yq6lorbx-1) * *ftnjamu2);
   }
 
   Free_fapc0tnbewg7qruh(wkumc9idWrk1,
@@ -1707,6 +1714,7 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g,
 
 
 
+
     int    *ftnjamu2, *wy1vqfzu;
     int     itdcb8ilk[1];
     double   tdcb8ilk[4];
@@ -1933,7 +1941,7 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
 
           cvnjhg2u = (*ueb8hndv == 1) ? 0 : 1;
           fapc0tnbewg7qruh(he7mqnvy+(which[gp1jxzuh-1]-1) * *ftnjamu2, wkumc9idghz9vuba, rbne6ouj,
-              ftnjamu2, wy1vqfzu,   ezlgm2up + (gp1jxzuh-1)   * *ftnjamu2, lqsahu0r + gp1jxzuh-1,
+              ftnjamu2, wy1vqfzu,   ezlgm2up + (gp1jxzuh-1)    * *ftnjamu2, lqsahu0r + gp1jxzuh-1,
                  wbkq9zyi +  hnpt1zym[gp1jxzuh-1]-1,
                  lamvec +  hnpt1zym[gp1jxzuh-1]-1,
                  hdnw2fts +  hnpt1zym[gp1jxzuh-1]-1,
@@ -2009,11 +2017,11 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
  
           fpdlcqk9ghz9vuba = wkumc9idghz9vuba;  fpdlcqk9tlgduey8 = tlgduey8;
           for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              fpdlcqk9m0ibglfx = m0ibglfx + yq6lorbx-1;
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                  *fpdlcqk9ghz9vuba++  = *fpdlcqk9tlgduey8++ - *fpdlcqk9m0ibglfx;
-                   fpdlcqk9m0ibglfx += *wy1vqfzu;
-              }
+            fpdlcqk9m0ibglfx = m0ibglfx + yq6lorbx-1;
+            for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+              *fpdlcqk9ghz9vuba++  = *fpdlcqk9tlgduey8++ - *fpdlcqk9m0ibglfx;
+               fpdlcqk9m0ibglfx += *wy1vqfzu;
+            }
           }
 
           fvlmz9iyC_nudh6szq(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu);
@@ -2027,14 +2035,14 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
       }
 
       if (*nhja0izq > 0) {
-          z4vrscot = 0.0e0;
-          for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-              for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                z4vrscot += rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] *
-                    pow(m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu], (double) 2.0);
-              }
+        z4vrscot = 0.0e0;
+        for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+            z4vrscot += rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] *
+                pow(m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu], (double) 2.0);
           }
-          g2dnwteb = (z4vrscot > 0.0e0) ? sqrt(deltaf / z4vrscot) : 0.0;
+        }
+        g2dnwteb = (z4vrscot > 0.0e0) ? sqrt(deltaf / z4vrscot) : 0.0;
       }
 
       if (*ueb8hndv == 1) {
@@ -2053,8 +2061,8 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
   fpdlcqk9m0ibglfx = m0ibglfx;  fpdlcqk9ub4xioar = wkumc9idub4xioar;
   for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
     for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-        *fpdlcqk9m0ibglfx   += *fpdlcqk9ub4xioar++;
-         fpdlcqk9m0ibglfx++;
+      *fpdlcqk9m0ibglfx   += *fpdlcqk9ub4xioar++;
+       fpdlcqk9m0ibglfx++;
     }
   }
 
@@ -2067,8 +2075,8 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
                       ui8ysltq + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2,
                       wkumc9idoldmat);
         for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
-                ui8ysltq[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] =
-           wkumc9idoldmat[ayfnwr1v-1];
+               ui8ysltq[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] =
+          wkumc9idoldmat[ayfnwr1v-1];
         }
       }
     }
@@ -2110,19 +2118,19 @@ void fapc0tnbx6kanjdh(double sjwyig9t[], double xout[], int *f8yswcat, int *wy1v
 
 
   for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
-          for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
-              xout[iptr++] = (yq6lorbx == gp1jxzuh) ? 1.0e0 : 0.0e0;
-          }
+    for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
+      for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
+        xout[iptr++] = (yq6lorbx == gp1jxzuh) ? 1.0e0 : 0.0e0;
       }
+    }
   }
 
   for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
-      for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
-          for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
-              xout[iptr++] = (yq6lorbx == gp1jxzuh) ? sjwyig9t[ayfnwr1v-1] : 0.0e0;
-          }
+    for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
+      for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
+        xout[iptr++] = (yq6lorbx == gp1jxzuh) ? sjwyig9t[ayfnwr1v-1] : 0.0e0;
       }
+    }
   }
 }
 
@@ -2135,8 +2143,8 @@ double fapc0tnbrd9beyfk(int *f8yswcat, double bhcji9gl[], double po8rwsmy[],
   double rd9beyfk, rxeqjn0y = 0.0, lm9vcjob = 0.0;
 
   for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-      lm9vcjob    += *po8rwsmy;
-      rxeqjn0y    += *po8rwsmy++ * pow(*bhcji9gl++ - *m0ibglfx++, (double) 2.0);
+    lm9vcjob    += *po8rwsmy;
+    rxeqjn0y    += *po8rwsmy++ * pow(*bhcji9gl++ - *m0ibglfx++, (double) 2.0);
   }
   rd9beyfk = (lm9vcjob > 0.0e0) ? (rxeqjn0y / lm9vcjob) : 0.0e0;
   return rd9beyfk;
@@ -2152,8 +2160,8 @@ void fapc0tnbpitmeh0q(int *f8yswcat, double bhcji9gl[], double po8rwsmy[],
 
   *lm9vcjob = 0.0e0;
   for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-      *lm9vcjob  += *po8rwsmy;
-      rxeqjn0yy  += *po8rwsmy++ * *bhcji9gl++;
+    *lm9vcjob  += *po8rwsmy;
+    rxeqjn0yy  += *po8rwsmy++ * *bhcji9gl++;
   }
   *lfu2qhid = (*lm9vcjob > 0.0e0) ? (rxeqjn0yy / *lm9vcjob) : 0.0e0;
 }
@@ -2175,30 +2183,30 @@ void fapc0tnbdsrt0gem(int *f8yswcat, double sjwyig9t[], double po8rwsmy[], doubl
 
   fpdlcqk9sjwyig9t = sjwyig9t;  fpdlcqk9bhcji9gl = bhcji9gl;  fpdlcqk9po8rwsmy = po8rwsmy; 
   for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-      qtce8hzo = *fpdlcqk9sjwyig9t++ - pygsw6ko;
-      nsum  += qtce8hzo * (*fpdlcqk9bhcji9gl++ - pasjmo8g) * *fpdlcqk9po8rwsmy;
-      qtce8hzo = pow(qtce8hzo, (double) 2.0);
-      q6zdcwxk  += qtce8hzo * *fpdlcqk9po8rwsmy++;
+    qtce8hzo = *fpdlcqk9sjwyig9t++ - pygsw6ko;
+    nsum  += qtce8hzo * (*fpdlcqk9bhcji9gl++ - pasjmo8g) * *fpdlcqk9po8rwsmy;
+    qtce8hzo = pow(qtce8hzo, (double) 2.0);
+    q6zdcwxk  += qtce8hzo * *fpdlcqk9po8rwsmy++;
   }
 
   eck8vubt = nsum / q6zdcwxk;
   intercept = pasjmo8g - eck8vubt * pygsw6ko;
   fpdlcqk9sjwyig9t = sjwyig9t; 
   for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-      *ub4xioar++ = intercept + eck8vubt * *fpdlcqk9sjwyig9t++;
+    *ub4xioar++ = intercept + eck8vubt * *fpdlcqk9sjwyig9t++;
   }
 
   if (*yzoe1rsp) {
-      fpdlcqk9sjwyig9t = sjwyig9t;  fpdlcqk9po8rwsmy = po8rwsmy;
-      for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-          qtce8hzo = *fpdlcqk9sjwyig9t++ - pygsw6ko;
-          if (*fpdlcqk9po8rwsmy++ > 0.0e0) {
-             *ui8ysltq -= (1.0e0 / lm9vcjob + pow(qtce8hzo, (double) 2.0) / q6zdcwxk);
-              ui8ysltq++;
-          } else {
-             *ui8ysltq++ = 0.0e0;
-          }
+    fpdlcqk9sjwyig9t = sjwyig9t;  fpdlcqk9po8rwsmy = po8rwsmy;
+    for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
+      qtce8hzo = *fpdlcqk9sjwyig9t++ - pygsw6ko;
+      if (*fpdlcqk9po8rwsmy++ > 0.0e0) {
+       *ui8ysltq -= (1.0e0 / lm9vcjob + pow(qtce8hzo, (double) 2.0) / q6zdcwxk);
+        ui8ysltq++;
+      } else {
+       *ui8ysltq++ = 0.0e0;
       }
+    }
   }
 }
 
@@ -2263,13 +2271,13 @@ void Yee_pknootl2(double *gkdx5jal, int *f8yswcat, int *zo8wpibx, double *Toler_
   }
 
   for (ayfnwr1v = 5; ayfnwr1v <= yq6lorbx; ayfnwr1v++) {
-      if ((gkdx5jal[ayfnwr1v -1] - gkdx5jal[cjop5bwm -1] >= *Toler_ankcghz2) &&
-          (gkdx5jal[  *f8yswcat -1] - gkdx5jal[ayfnwr1v -1] >= *Toler_ankcghz2)) {
-          *zo8wpibx++ = 1;
-          cjop5bwm = ayfnwr1v;
-      } else {
-          *zo8wpibx++ = 0;
-      }
+    if ((gkdx5jal[ayfnwr1v -1] - gkdx5jal[cjop5bwm -1] >= *Toler_ankcghz2) &&
+      (gkdx5jal[  *f8yswcat -1] - gkdx5jal[ayfnwr1v -1] >= *Toler_ankcghz2)) {
+      *zo8wpibx++ = 1;
+      cjop5bwm = ayfnwr1v;
+    } else {
+      *zo8wpibx++ = 0;
+    }
   }
 
   for (ayfnwr1v = *f8yswcat - 3; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
diff --git a/src/vlinpack1.f b/src/vlinpack1.f
index 5bc3be7..03f1351 100644
--- a/src/vlinpack1.f
+++ b/src/vlinpack1.f
@@ -1,3 +1,4 @@
+C Output from Public domain Ratfor, version 1.01
       subroutine vqrdca(x,ldx,n,p,fasrkub3,jpvt,work,xwdf5ltg,eps)
       implicit double precision (a-h,o-z)
       implicit integer (i-n)
@@ -9,17 +10,18 @@
       double precision x(ldx,p),fasrkub3(p),work(*),eps
       double precision vdnrm2,tt
       double precision ddot8,nrmxl,t
-      do 23000 j=1,p 
+      do23000 j=1,p 
       fasrkub3(j) = vdnrm2(n,x(1,j),ldx,1)
       work(j) = fasrkub3(j)
 23000 continue
+23001 continue
       l=1
       lup = min0(n,p)
       curpvt = p
-23002 if(.not.(l.le.lup))goto 23003
+23002 if(l.le.lup)then
       fasrkub3(l) = 0.0d0
       nrmxl = vdnrm2(n-l+1, x(l,l), ldx, 1)
-      if(.not.(nrmxl .lt. eps))goto 23004
+      if(nrmxl .lt. eps)then
       call dshift8(x,ldx,n,l,curpvt)
       jp = jpvt(l)
       t=fasrkub3(l)
@@ -30,51 +32,50 @@
       jpvt(jj)=jpvt(j)
       fasrkub3(jj)=fasrkub3(j)
       work(jj)=work(j)
-       j=j+1
+23007 j=j+1
       goto 23006
 23008 continue
       jpvt(curpvt)=jp
       fasrkub3(curpvt)=t
       work(curpvt)=tt
       curpvt=curpvt-1
-      if(.not.(lup.gt.curpvt))goto 23009
+      if(lup.gt.curpvt)then
       lup=curpvt
-23009 continue
-      goto 23005
-23004 continue
-      if(.not.(l.eq.n))goto 23011
+      endif
+      else
+      if(l.eq.n)then
       goto 23003
-23011 continue
-      if(.not.(x(l,l).ne.0.0d0))goto 23013
+      endif
+      if(x(l,l).ne.0.0d0)then
       nrmxl = dsign(nrmxl,x(l,l))
-23013 continue
+      endif
       call dscal8(n-l+1,1.0d0/nrmxl,x(l,l),1)
       x(l,l) = 1.0d0+x(l,l)
       j=l+1
 23015 if(.not.(j.le.curpvt))goto 23017
       t = -ddot8(n-l+1,x(l,l),1,x(l,j),1)/x(l,l)
       call daxpy8(n-l+1,t,x(l,l),1,x(l,j),1)
-      if(.not.(fasrkub3(j).ne.0.0d0))goto 23018
+      if(fasrkub3(j).ne.0.0d0)then
       tt = 1.0d0-(dabs(x(l,j))/fasrkub3(j))**2
       tt = dmax1(tt,0.0d0)
       t = tt
       tt = 1.0d0+0.05d0*tt*(fasrkub3(j)/work(j))**2
-      if(.not.(tt.ne.1.0d0))goto 23020
+      if(tt.ne.1.0d0)then
       fasrkub3(j) = fasrkub3(j)*dsqrt(t)
-      goto 23021
-23020 continue
+      else
       fasrkub3(j) = vdnrm2(n-l,x(l+1,j),ldx,1)
       work(j) = fasrkub3(j)
-23021 continue
-23018 continue
-       j=j+1
+      endif
+      endif
+23016 j=j+1
       goto 23015
 23017 continue
       fasrkub3(l) = x(l,l)
       x(l,l) = -nrmxl
       l=l+1
-23005 continue
+      endif
       goto 23002
+      endif
 23003 continue
       xwdf5ltg = lup
       return
diff --git a/src/vmux.f b/src/vmux.f
index 07b270e..94c47ed 100644
--- a/src/vmux.f
+++ b/src/vmux.f
@@ -1,142 +1,149 @@
+C Output from Public domain Ratfor, version 1.01
       subroutine qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu)
       implicit logical (a-z)
       integer wy1vqfzu, tgiyxdw1(*), dufozmt7(*)
       integer urohxe6t, bpvaqm5z, ayfnwr1v
       ayfnwr1v = 1
       urohxe6t = wy1vqfzu
-23000 if(.not.(urohxe6t.ge.1))goto 23002
-      do 23003 bpvaqm5z=1,urohxe6t 
+23000 if(.not.(urohxe6t .ge. 1))goto 23002
+      do23003 bpvaqm5z=1,urohxe6t 
       tgiyxdw1(ayfnwr1v) = bpvaqm5z
       ayfnwr1v = ayfnwr1v+1
 23003 continue
-       urohxe6t=urohxe6t-1
+23004 continue
+23001 urohxe6t=urohxe6t-1
       goto 23000
 23002 continue
       ayfnwr1v = 1
-      do 23005 urohxe6t=1,wy1vqfzu 
-      do 23007 bpvaqm5z=urohxe6t,wy1vqfzu 
+      do23005 urohxe6t=1,wy1vqfzu 
+      do23007 bpvaqm5z=urohxe6t,wy1vqfzu 
       dufozmt7(ayfnwr1v) = bpvaqm5z
       ayfnwr1v = ayfnwr1v+1
 23007 continue
+23008 continue
 23005 continue
+23006 continue
       return
       end
-      integer function viamf(cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1, 
-     &dufozmt7)
+      integer function viamf(cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1, duf
+     *ozmt7)
       integer cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1(*), dufozmt7(*)
       integer urohxe6t, imk5wjxg
       imk5wjxg = wy1vqfzu*(wy1vqfzu+1)/2
-      do 23009 urohxe6t=1,imk5wjxg 
-      if(.not.((tgiyxdw1(urohxe6t).eq.cz8qdfyj .and. dufozmt7(urohxe6t)
-     &.eq.rvy1fpli) .or.(tgiyxdw1(urohxe6t).eq.rvy1fpli .and. dufozmt7(
-     &urohxe6t).eq.cz8qdfyj)))goto 23011
+      do23009 urohxe6t=1,imk5wjxg 
+      if((tgiyxdw1(urohxe6t).eq.cz8qdfyj .and. dufozmt7(urohxe6t).eq.rvy
+     *1fpli) .or. (tgiyxdw1(urohxe6t).eq.rvy1fpli .and. dufozmt7(urohxe6
+     *t).eq.cz8qdfyj))then
       viamf = urohxe6t
       return
-23011 continue
+      endif
 23009 continue
+23010 continue
       viamf = 0
       return
       end
-      subroutine vm2af(mat, a, dimm, tgiyxdw1, dufozmt7, kuzxj1lo, 
-     &wy1vqfzu, upper)
+      subroutine vm2af(mat, a, dimm, tgiyxdw1, dufozmt7, kuzxj1lo, wy1vq
+     *fzu, upper)
       implicit logical (a-z)
       integer dimm, tgiyxdw1(dimm), dufozmt7(dimm), kuzxj1lo, wy1vqfzu, 
-     &upper
+     *upper
       double precision mat(dimm,kuzxj1lo), a(wy1vqfzu,wy1vqfzu,kuzxj1lo)
       integer ayfnwr1v, yq6lorbx, gp1jxzuh, imk5wjxg
       imk5wjxg = wy1vqfzu * (wy1vqfzu + 1) / 2
-      if(.not.(upper .eq. 1 .or. dimm .ne. imk5wjxg))goto 23013
+      if(upper .eq. 1 .or. dimm .ne. imk5wjxg)then
       ayfnwr1v = 1
-23015 if(.not.(ayfnwr1v.le.kuzxj1lo))goto 23017
+23015 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23017
       yq6lorbx = 1
-23018 if(.not.(yq6lorbx.le.wy1vqfzu))goto 23020
+23018 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23020
       gp1jxzuh = 1
-23021 if(.not.(gp1jxzuh.le.wy1vqfzu))goto 23023
+23021 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23023
       a(gp1jxzuh,yq6lorbx,ayfnwr1v) = 0.0d0
-       gp1jxzuh=gp1jxzuh+1
+23022 gp1jxzuh=gp1jxzuh+1
       goto 23021
 23023 continue
-       yq6lorbx=yq6lorbx+1
+23019 yq6lorbx=yq6lorbx+1
       goto 23018
 23020 continue
-       ayfnwr1v=ayfnwr1v+1
+23016 ayfnwr1v=ayfnwr1v+1
       goto 23015
 23017 continue
-23013 continue
-      do 23024 ayfnwr1v=1,kuzxj1lo 
-      do 23026 yq6lorbx=1,dimm 
-      a(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,
-     &ayfnwr1v)
-      if(.not.(upper .eq. 0))goto 23028
-      a(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,
-     &ayfnwr1v)
-23028 continue
+      endif
+      do23024 ayfnwr1v=1,kuzxj1lo 
+      do23026 yq6lorbx=1,dimm 
+      a(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,a
+     *yfnwr1v)
+      if(upper .eq. 0)then
+      a(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,a
+     *yfnwr1v)
+      endif
 23026 continue
+23027 continue
 23024 continue
+23025 continue
       return
       end
       subroutine nudh6szqf(wpuarq2m, tlgduey8, lfu2qhid, dimu, tgiyxdw1,
-     & dufozmt7, kuzxj1lo, wy1vqfzu, wk1200)
+     * dufozmt7, kuzxj1lo, wy1vqfzu, wk1200)
       implicit logical (a-z)
       integer dimu, tgiyxdw1(*), dufozmt7(*), kuzxj1lo, wy1vqfzu
-      double precision wpuarq2m(dimu,kuzxj1lo), tlgduey8(kuzxj1lo,
-     &wy1vqfzu), lfu2qhid(wy1vqfzu,kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu)
+      double precision wpuarq2m(dimu,kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqf
+     *zu), lfu2qhid(wy1vqfzu,kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu)
       double precision q6zdcwxk
       integer ayfnwr1v, yq6lorbx, bpvaqm5z, one, upper
       one = 1
       upper = 1
       ayfnwr1v = 1
-23030 if(.not.(ayfnwr1v.le.kuzxj1lo))goto 23032
+23030 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23032
       call vm2af(wpuarq2m(1,ayfnwr1v), wk1200, dimu, tgiyxdw1, dufozmt7,
-     & one, wy1vqfzu, upper)
+     * one, wy1vqfzu, upper)
       yq6lorbx = 1
-23033 if(.not.(yq6lorbx.le.wy1vqfzu))goto 23035
+23033 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23035
       q6zdcwxk = 0.0d0
       bpvaqm5z = yq6lorbx
-23036 if(.not.(bpvaqm5z.le.wy1vqfzu))goto 23038
-      q6zdcwxk = q6zdcwxk + wk1200(yq6lorbx,bpvaqm5z) * tlgduey8(
-     &ayfnwr1v,bpvaqm5z)
-       bpvaqm5z=bpvaqm5z+1
+23036 if(.not.(bpvaqm5z .le. wy1vqfzu))goto 23038
+      q6zdcwxk = q6zdcwxk + wk1200(yq6lorbx,bpvaqm5z) * tlgduey8(ayfnwr1
+     *v,bpvaqm5z)
+23037 bpvaqm5z=bpvaqm5z+1
       goto 23036
 23038 continue
       lfu2qhid(yq6lorbx,ayfnwr1v) = q6zdcwxk
-       yq6lorbx=yq6lorbx+1
+23034 yq6lorbx=yq6lorbx+1
       goto 23033
 23035 continue
-       ayfnwr1v=ayfnwr1v+1
+23031 ayfnwr1v=ayfnwr1v+1
       goto 23030
 23032 continue
       return
       end
-      subroutine vbksf(wpuarq2m, bvecto, wy1vqfzu, kuzxj1lo, wk1200, 
-     &tgiyxdw1, dufozmt7, dimu)
+      subroutine vbksf(wpuarq2m, bvecto, wy1vqfzu, kuzxj1lo, wk1200, tgi
+     *yxdw1, dufozmt7, dimu)
       implicit logical (a-z)
       integer wy1vqfzu, kuzxj1lo, tgiyxdw1(*), dufozmt7(*), dimu
-      double precision wpuarq2m(dimu,kuzxj1lo), bvecto(wy1vqfzu,
-     &kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu)
+      double precision wpuarq2m(dimu,kuzxj1lo), bvecto(wy1vqfzu,kuzxj1lo
+     *), wk1200(wy1vqfzu,wy1vqfzu)
       double precision q6zdcwxk
       integer ayfnwr1v, yq6lorbx, gp1jxzuh, upper, one
       upper = 1
       one = 1
       ayfnwr1v = 1
-23039 if(.not.(ayfnwr1v.le.kuzxj1lo))goto 23041
+23039 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23041
       call vm2af(wpuarq2m(1,ayfnwr1v), wk1200, dimu, tgiyxdw1, dufozmt7,
-     & one, wy1vqfzu, upper)
+     * one, wy1vqfzu, upper)
       yq6lorbx = wy1vqfzu
-23042 if(.not.(yq6lorbx.ge.1))goto 23044
+23042 if(.not.(yq6lorbx .ge. 1))goto 23044
       q6zdcwxk = bvecto(yq6lorbx,ayfnwr1v)
       gp1jxzuh = yq6lorbx+1
-23045 if(.not.(gp1jxzuh.le.wy1vqfzu))goto 23047
+23045 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23047
       q6zdcwxk = q6zdcwxk - wk1200(yq6lorbx,gp1jxzuh) * bvecto(gp1jxzuh,
-     &ayfnwr1v)
-       gp1jxzuh=gp1jxzuh+1
+     *ayfnwr1v)
+23046 gp1jxzuh=gp1jxzuh+1
       goto 23045
 23047 continue
       bvecto(yq6lorbx,ayfnwr1v) = q6zdcwxk / wk1200(yq6lorbx,yq6lorbx)
-       yq6lorbx=yq6lorbx-1
+23043 yq6lorbx=yq6lorbx-1
       goto 23042
 23044 continue
-       ayfnwr1v=ayfnwr1v+1
+23040 ayfnwr1v=ayfnwr1v+1
       goto 23039
 23041 continue
       return
@@ -149,168 +156,182 @@
       double precision q6zdcwxk, dsqrt
       integer ayfnwr1v, yq6lorbx, gp1jxzuh
       dvhw1ulq=1
-      do 23048 ayfnwr1v=1,wy1vqfzu
+      do23048 ayfnwr1v=1,wy1vqfzu
       q6zdcwxk = 0d0
-      do 23050 gp1jxzuh=1,ayfnwr1v-1 
-      q6zdcwxk = q6zdcwxk + wmat(gp1jxzuh,ayfnwr1v) * wmat(gp1jxzuh,
-     &ayfnwr1v)
+      do23050 gp1jxzuh=1,ayfnwr1v-1 
+      q6zdcwxk = q6zdcwxk + wmat(gp1jxzuh,ayfnwr1v) * wmat(gp1jxzuh,ayfn
+     *wr1v)
 23050 continue
+23051 continue
       wmat(ayfnwr1v,ayfnwr1v) = wmat(ayfnwr1v,ayfnwr1v) - q6zdcwxk
-      if(.not.(wmat(ayfnwr1v,ayfnwr1v) .le. 0d0))goto 23052
+      if(wmat(ayfnwr1v,ayfnwr1v) .le. 0d0)then
       dvhw1ulq = 0
       return
-23052 continue
+      endif
       wmat(ayfnwr1v,ayfnwr1v) = dsqrt(wmat(ayfnwr1v,ayfnwr1v))
-      do 23054 yq6lorbx=ayfnwr1v+1,wy1vqfzu
+      do23054 yq6lorbx=ayfnwr1v+1,wy1vqfzu
       q6zdcwxk = 0d0
-      do 23056 gp1jxzuh=1,ayfnwr1v-1 
-      q6zdcwxk = q6zdcwxk + wmat(gp1jxzuh,ayfnwr1v) * wmat(gp1jxzuh,
-     &yq6lorbx)
+      do23056 gp1jxzuh=1,ayfnwr1v-1 
+      q6zdcwxk = q6zdcwxk + wmat(gp1jxzuh,ayfnwr1v) * wmat(gp1jxzuh,yq6l
+     *orbx)
 23056 continue
-      wmat(ayfnwr1v,yq6lorbx) = (wmat(ayfnwr1v,yq6lorbx) - q6zdcwxk) / 
-     &wmat(ayfnwr1v,ayfnwr1v)
+23057 continue
+      wmat(ayfnwr1v,yq6lorbx) = (wmat(ayfnwr1v,yq6lorbx) - q6zdcwxk) / w
+     *mat(ayfnwr1v,ayfnwr1v)
 23054 continue
+23055 continue
 23048 continue
-      if(.not.(isolve .eq. 0))goto 23058
-      do 23060 ayfnwr1v=2,wy1vqfzu 
-      do 23062 yq6lorbx=1,ayfnwr1v-1 
+23049 continue
+      if(isolve .eq. 0)then
+      do23060 ayfnwr1v=2,wy1vqfzu 
+      do23062 yq6lorbx=1,ayfnwr1v-1 
       wmat(ayfnwr1v,yq6lorbx) = 0.0d0
 23062 continue
+23063 continue
       return
 23060 continue
-23058 continue
-      do 23064 yq6lorbx=1,wy1vqfzu 
+23061 continue
+      endif
+      do23064 yq6lorbx=1,wy1vqfzu 
       q6zdcwxk = bvecto(yq6lorbx)
-      do 23066 gp1jxzuh=1,yq6lorbx-1 
+      do23066 gp1jxzuh=1,yq6lorbx-1 
       q6zdcwxk = q6zdcwxk - wmat(gp1jxzuh,yq6lorbx) * bvecto(gp1jxzuh)
 23066 continue
+23067 continue
       bvecto(yq6lorbx) = q6zdcwxk / wmat(yq6lorbx,yq6lorbx)
 23064 continue
+23065 continue
       yq6lorbx = wy1vqfzu
-23068 if(.not.(yq6lorbx.ge.1))goto 23070
+23068 if(.not.(yq6lorbx .ge. 1))goto 23070
       q6zdcwxk = bvecto(yq6lorbx)
       gp1jxzuh = yq6lorbx+1
-23071 if(.not.(gp1jxzuh.le.wy1vqfzu))goto 23073
+23071 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23073
       q6zdcwxk = q6zdcwxk - wmat(yq6lorbx,gp1jxzuh) * bvecto(gp1jxzuh)
-       gp1jxzuh=gp1jxzuh+1
+23072 gp1jxzuh=gp1jxzuh+1
       goto 23071
 23073 continue
       bvecto(yq6lorbx) = q6zdcwxk / wmat(yq6lorbx,yq6lorbx)
-       yq6lorbx=yq6lorbx-1
+23069 yq6lorbx=yq6lorbx-1
       goto 23068
 23070 continue
       return
       end
-      subroutine mxrbkut0f(wpuarq2m, he7mqnvy, wy1vqfzu, xjc4ywlh, 
-     &kuzxj1lo, wk1200, wk3400, tgiyxdw1, dufozmt7, dimu, rutyk8mg)
+      subroutine mxrbkut0f(wpuarq2m, he7mqnvy, wy1vqfzu, xjc4ywlh, kuzxj
+     *1lo, wk1200, wk3400, tgiyxdw1, dufozmt7, dimu, rutyk8mg)
       implicit logical (a-z)
       integer dimu, wy1vqfzu, xjc4ywlh, kuzxj1lo, tgiyxdw1(*), dufozmt7(
-     &*), rutyk8mg
-      double precision wpuarq2m(dimu,kuzxj1lo), he7mqnvy(rutyk8mg,
-     &xjc4ywlh), wk1200(wy1vqfzu,wy1vqfzu), wk3400(wy1vqfzu,xjc4ywlh)
+     **), rutyk8mg
+      double precision wpuarq2m(dimu,kuzxj1lo), he7mqnvy(rutyk8mg,xjc4yw
+     *lh), wk1200(wy1vqfzu,wy1vqfzu), wk3400(wy1vqfzu,xjc4ywlh)
       double precision q6zdcwxk
       integer ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z
-      do 23074 yq6lorbx=1,wy1vqfzu 
-      do 23076 ayfnwr1v=1,wy1vqfzu 
+      do23074 yq6lorbx=1,wy1vqfzu 
+      do23076 ayfnwr1v=1,wy1vqfzu 
       wk1200(ayfnwr1v,yq6lorbx) = 0.0d0
 23076 continue
+23077 continue
 23074 continue
-      do 23078 ayfnwr1v=1,kuzxj1lo 
-      do 23080 bpvaqm5z=1,dimu 
-      wk1200(tgiyxdw1(bpvaqm5z), dufozmt7(bpvaqm5z)) = wpuarq2m(
-     &bpvaqm5z,ayfnwr1v)
+23075 continue
+      do23078 ayfnwr1v=1,kuzxj1lo 
+      do23080 bpvaqm5z=1,dimu 
+      wk1200(tgiyxdw1(bpvaqm5z), dufozmt7(bpvaqm5z)) = wpuarq2m(bpvaqm5z
+     *,ayfnwr1v)
 23080 continue
-      do 23082 gp1jxzuh=1,xjc4ywlh 
-      do 23084 yq6lorbx=1,wy1vqfzu 
-      wk3400(yq6lorbx,gp1jxzuh) = he7mqnvy((ayfnwr1v-1)*wy1vqfzu+
-     &yq6lorbx,gp1jxzuh)
+23081 continue
+      do23082 gp1jxzuh=1,xjc4ywlh 
+      do23084 yq6lorbx=1,wy1vqfzu 
+      wk3400(yq6lorbx,gp1jxzuh) = he7mqnvy((ayfnwr1v-1)*wy1vqfzu+yq6lorb
+     *x,gp1jxzuh)
 23084 continue
+23085 continue
 23082 continue
-      do 23086 gp1jxzuh=1,xjc4ywlh 
-      do 23088 yq6lorbx=1,wy1vqfzu 
+23083 continue
+      do23086 gp1jxzuh=1,xjc4ywlh 
+      do23088 yq6lorbx=1,wy1vqfzu 
       q6zdcwxk = 0d0
-      do 23090 bpvaqm5z=yq6lorbx,wy1vqfzu 
+      do23090 bpvaqm5z=yq6lorbx,wy1vqfzu 
       q6zdcwxk = q6zdcwxk + wk1200(yq6lorbx,bpvaqm5z) * wk3400(bpvaqm5z,
-     &gp1jxzuh)
+     *gp1jxzuh)
 23090 continue
+23091 continue
       he7mqnvy((ayfnwr1v-1)*wy1vqfzu+yq6lorbx,gp1jxzuh) = q6zdcwxk
 23088 continue
+23089 continue
 23086 continue
+23087 continue
 23078 continue
+23079 continue
       return
       end
-      subroutine vrinvf9(wpuarq2m, ldr, wy1vqfzu, dvhw1ulq, ks3wejcv, 
-     &work)
+      subroutine vrinvf9(wpuarq2m, ldr, wy1vqfzu, dvhw1ulq, ks3wejcv, wo
+     *rk)
       implicit logical (a-z)
       integer ldr, wy1vqfzu, dvhw1ulq
-      double precision wpuarq2m(ldr,wy1vqfzu), ks3wejcv(wy1vqfzu,
-     &wy1vqfzu), work(wy1vqfzu,wy1vqfzu)
+      double precision wpuarq2m(ldr,wy1vqfzu), ks3wejcv(wy1vqfzu,wy1vqfz
+     *u), work(wy1vqfzu,wy1vqfzu)
       double precision q6zdcwxk
       integer yq6lorbx, gp1jxzuh, col, uaoynef0
       dvhw1ulq = 1
       yq6lorbx = 1
-23092 if(.not.(yq6lorbx.le.wy1vqfzu))goto 23094
+23092 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23094
       col = 1
-23095 if(.not.(col.le.wy1vqfzu))goto 23097
+23095 if(.not.(col .le. wy1vqfzu))goto 23097
       work(yq6lorbx,col) = 0.0d0
-       col=col+1
+23096 col=col+1
       goto 23095
 23097 continue
-       yq6lorbx=yq6lorbx+1
+23093 yq6lorbx=yq6lorbx+1
       goto 23092
 23094 continue
       col = 1
-23098 if(.not.(col.le.wy1vqfzu))goto 23100
+23098 if(.not.(col .le. wy1vqfzu))goto 23100
       yq6lorbx = col
-23101 if(.not.(yq6lorbx.ge.1))goto 23103
-      if(.not.(yq6lorbx .eq. col))goto 23104
+23101 if(.not.(yq6lorbx .ge. 1))goto 23103
+      if(yq6lorbx .eq. col)then
       q6zdcwxk = 1.0d0
-      goto 23105
-23104 continue
+      else
       q6zdcwxk = 0.0d0
-23105 continue
+      endif
       gp1jxzuh = yq6lorbx+1
-23106 if(.not.(gp1jxzuh.le.col))goto 23108
+23106 if(.not.(gp1jxzuh .le. col))goto 23108
       q6zdcwxk = q6zdcwxk - wpuarq2m(yq6lorbx,gp1jxzuh) * work(gp1jxzuh,
-     &col)
-       gp1jxzuh=gp1jxzuh+1
+     *col)
+23107 gp1jxzuh=gp1jxzuh+1
       goto 23106
 23108 continue
-      if(.not.(wpuarq2m(yq6lorbx,yq6lorbx) .eq. 0.0d0))goto 23109
+      if(wpuarq2m(yq6lorbx,yq6lorbx) .eq. 0.0d0)then
       dvhw1ulq = 0
-      goto 23110
-23109 continue
+      else
       work(yq6lorbx,col) = q6zdcwxk / wpuarq2m(yq6lorbx,yq6lorbx)
-23110 continue
-       yq6lorbx=yq6lorbx-1
+      endif
+23102 yq6lorbx=yq6lorbx-1
       goto 23101
 23103 continue
-       col=col+1
+23099 col=col+1
       goto 23098
 23100 continue
       yq6lorbx = 1
-23111 if(.not.(yq6lorbx.le.wy1vqfzu))goto 23113
+23111 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23113
       col = yq6lorbx
-23114 if(.not.(col.le.wy1vqfzu))goto 23116
-      if(.not.(yq6lorbx .lt. col))goto 23117
+23114 if(.not.(col .le. wy1vqfzu))goto 23116
+      if(yq6lorbx .lt. col)then
       uaoynef0 = col
-      goto 23118
-23117 continue
+      else
       uaoynef0 = yq6lorbx
-23118 continue
+      endif
       q6zdcwxk = 0.0d0
       gp1jxzuh = uaoynef0
-23119 if(.not.(gp1jxzuh.le.wy1vqfzu))goto 23121
+23119 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23121
       q6zdcwxk = q6zdcwxk + work(yq6lorbx,gp1jxzuh) * work(col,gp1jxzuh)
-       gp1jxzuh=gp1jxzuh+1
+23120 gp1jxzuh=gp1jxzuh+1
       goto 23119
 23121 continue
       ks3wejcv(yq6lorbx,col) = q6zdcwxk
       ks3wejcv(col,yq6lorbx) = q6zdcwxk
-       col=col+1
+23115 col=col+1
       goto 23114
 23116 continue
-       yq6lorbx=yq6lorbx+1
+23112 yq6lorbx=yq6lorbx+1
       goto 23111
 23113 continue
       return
@@ -332,29 +353,29 @@
       hofjnx2e = hofjnx2e - (x+0.50d0) * dlog(hofjnx2e)
       q6zdcwxk=1.000000000190015d0
       yq6lorbx=1
-23122 if(.not.(yq6lorbx.le.6))goto 23124
+23122 if(.not.(yq6lorbx .le. 6))goto 23124
       y = y + 1.0d0
       q6zdcwxk = q6zdcwxk + xd4mybgj(yq6lorbx)/y
-       yq6lorbx=yq6lorbx+1
+23123 yq6lorbx=yq6lorbx+1
       goto 23122
 23124 continue
       lfu2qhid = -hofjnx2e + dlog(2.5066282746310005d0 * q6zdcwxk / x)
       return
       end
-      subroutine enbin9(bzmd6ftv, hdqsx7bk, nm0eljqk, n2kersmx, n, 
-     &dvhw1ulq, zy1mchbf, ux3nadiw, rsynp1go, sguwj9ty)
+      subroutine enbin9(bzmd6ftv, hdqsx7bk, nm0eljqk, n2kersmx, n, dvhw1
+     *ulq, zy1mchbf, ux3nadiw, rsynp1go, sguwj9ty)
       implicit logical (a-z)
       integer n, dvhw1ulq, zy1mchbf, sguwj9ty
-      double precision bzmd6ftv(n, zy1mchbf), hdqsx7bk(n, zy1mchbf), 
-     &nm0eljqk(n, zy1mchbf), n2kersmx, ux3nadiw, rsynp1go
+      double precision bzmd6ftv(n, zy1mchbf), hdqsx7bk(n, zy1mchbf), nm0
+     *eljqk(n, zy1mchbf), n2kersmx, ux3nadiw, rsynp1go
       integer ayfnwr1v, kij0gwer
-      double precision oxjgzv0e, btiehdm2, ydb, vjz5sxty, esql7umk, 
-     &pvcjl2na, mwuvskg1, ft3ijqmy, hmayv1xt, q6zdcwxk, plo6hkdr
+      double precision oxjgzv0e, btiehdm2, ydb, vjz5sxty, esql7umk, pvcj
+     *l2na, mwuvskg1, ft3ijqmy, hmayv1xt, q6zdcwxk, plo6hkdr
       real csi9ydge
-      if(.not.(n2kersmx .le. 0.80d0 .or. n2kersmx .ge. 1.0d0))goto 23125
+      if(n2kersmx .le. 0.80d0 .or. n2kersmx .ge. 1.0d0)then
       dvhw1ulq = 0
       return
-23125 continue
+      endif
       btiehdm2 = 100.0d0 * rsynp1go
       oxjgzv0e = 0.001d0
       dvhw1ulq = 1
@@ -362,33 +383,33 @@
 23127 if(.not.(kij0gwer.le.zy1mchbf))goto 23129
       ayfnwr1v=1
 23130 if(.not.(ayfnwr1v.le.n))goto 23132
-      vjz5sxty = nm0eljqk(ayfnwr1v,kij0gwer) / hdqsx7bk(ayfnwr1v,
-     &kij0gwer)
-      if(.not.((vjz5sxty .lt. oxjgzv0e) .or. (nm0eljqk(ayfnwr1v,
-     &kij0gwer) .gt. 1.0d5)))goto 23133
-      bzmd6ftv(ayfnwr1v,kij0gwer) = -nm0eljqk(ayfnwr1v,kij0gwer) * (1.
-     &0d0 + hdqsx7bk(ayfnwr1v,kij0gwer)/(hdqsx7bk(ayfnwr1v,kij0gwer) + 
-     &nm0eljqk(ayfnwr1v,kij0gwer))) / hdqsx7bk(ayfnwr1v,kij0gwer)**2
-      if(.not.(bzmd6ftv(ayfnwr1v,kij0gwer) .gt. -btiehdm2))goto 23135
+      vjz5sxty = nm0eljqk(ayfnwr1v,kij0gwer) / hdqsx7bk(ayfnwr1v,kij0gwe
+     *r)
+      if((vjz5sxty .lt. oxjgzv0e) .or. (nm0eljqk(ayfnwr1v,kij0gwer) .gt.
+     * 1.0d5))then
+      bzmd6ftv(ayfnwr1v,kij0gwer) = -nm0eljqk(ayfnwr1v,kij0gwer) * (1.0d
+     *0 + hdqsx7bk(ayfnwr1v,kij0gwer)/(hdqsx7bk(ayfnwr1v,kij0gwer) + nm0
+     *eljqk(ayfnwr1v,kij0gwer))) / hdqsx7bk(ayfnwr1v,kij0gwer)**2
+      if(bzmd6ftv(ayfnwr1v,kij0gwer) .gt. -btiehdm2)then
       bzmd6ftv(ayfnwr1v,kij0gwer) = -btiehdm2
-23135 continue
+      endif
       goto 20
-23133 continue
+      endif
       q6zdcwxk = 0.0d0
-      pvcjl2na = hdqsx7bk(ayfnwr1v,kij0gwer) / (hdqsx7bk(ayfnwr1v,
-     &kij0gwer) + nm0eljqk(ayfnwr1v,kij0gwer))
+      pvcjl2na = hdqsx7bk(ayfnwr1v,kij0gwer) / (hdqsx7bk(ayfnwr1v,kij0gw
+     *er) + nm0eljqk(ayfnwr1v,kij0gwer))
       mwuvskg1 = 1.0d0 - pvcjl2na
       csi9ydge = hdqsx7bk(ayfnwr1v,kij0gwer)
-      if(.not.(pvcjl2na .lt. btiehdm2))goto 23137
+      if(pvcjl2na .lt. btiehdm2)then
       pvcjl2na = btiehdm2
-23137 continue
-      if(.not.(mwuvskg1 .lt. btiehdm2))goto 23139
+      endif
+      if(mwuvskg1 .lt. btiehdm2)then
       mwuvskg1 = btiehdm2
-23139 continue
+      endif
       esql7umk = 100.0d0 + 15.0d0 * nm0eljqk(ayfnwr1v,kij0gwer)
-      if(.not.(esql7umk .lt. sguwj9ty))goto 23141
+      if(esql7umk .lt. sguwj9ty)then
       esql7umk = sguwj9ty
-23141 continue
+      endif
       ft3ijqmy = pvcjl2na ** csi9ydge
       ux3nadiw = ft3ijqmy
       plo6hkdr = (1.0d0 - ux3nadiw) / hdqsx7bk(ayfnwr1v,kij0gwer)**2
@@ -396,51 +417,52 @@
       ydb = 1.0d0
       ft3ijqmy = hdqsx7bk(ayfnwr1v,kij0gwer) * mwuvskg1 * ft3ijqmy
       ux3nadiw = ux3nadiw + ft3ijqmy
-      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + 
-     &ydb)**2
+      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb
+     *)**2
       q6zdcwxk = q6zdcwxk + plo6hkdr
       ydb = 2.0d0
-23143 if(.not.(((ux3nadiw .le. n2kersmx) .or. (plo6hkdr .gt. 1.0d-4)) .
-     &and.(ydb .lt. esql7umk)))goto 23144
+23143 if(((ux3nadiw .le. n2kersmx) .or. (plo6hkdr .gt. 1.0d-4)) .and. (y
+     *db .lt. esql7umk))then
       ft3ijqmy = (hdqsx7bk(ayfnwr1v,kij0gwer) - 1.0d0 + ydb) * mwuvskg1 
-     &* ft3ijqmy / ydb
+     ** ft3ijqmy / ydb
       ux3nadiw = ux3nadiw + ft3ijqmy
-      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + 
-     &ydb)**2
+      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb
+     *)**2
       q6zdcwxk = q6zdcwxk + plo6hkdr
       ydb = ydb + 1.0d0
       goto 23143
+      endif
 23144 continue
       bzmd6ftv(ayfnwr1v,kij0gwer) = -q6zdcwxk
 20    hmayv1xt = 0.0d0
-       ayfnwr1v=ayfnwr1v+1
+23131 ayfnwr1v=ayfnwr1v+1
       goto 23130
 23132 continue
-       kij0gwer=kij0gwer+1
+23128 kij0gwer=kij0gwer+1
       goto 23127
 23129 continue
       return
       end
-      subroutine enbin8(bzmd6ftv, hdqsx7bk, hsj9bzaq, n2kersmx, 
-     &kuzxj1lo, dvhw1ulq, zy1mchbf, ux3nadiw, rsynp1go)
+      subroutine enbin8(bzmd6ftv, hdqsx7bk, hsj9bzaq, n2kersmx, kuzxj1lo
+     *, dvhw1ulq, zy1mchbf, ux3nadiw, rsynp1go)
       implicit logical (a-z)
       integer kuzxj1lo, dvhw1ulq, zy1mchbf
       double precision bzmd6ftv(kuzxj1lo, zy1mchbf), hdqsx7bk(kuzxj1lo, 
-     &zy1mchbf), hsj9bzaq(kuzxj1lo, zy1mchbf), n2kersmx, ux3nadiw, 
-     &rsynp1go
+     *zy1mchbf), hsj9bzaq(kuzxj1lo, zy1mchbf), n2kersmx, ux3nadiw, rsynp
+     *1go
       integer ayfnwr1v, kij0gwer, esql7umk
       double precision ft3ijqmy, tad5vhsu, o3jyipdf, pq0hfucn, q6zdcwxk,
-     & d1, d2, plo6hkdr, hnu1vjyw
+     * d1, d2, plo6hkdr, hnu1vjyw
       logical pok1, pok2, pok12
       double precision oxjgzv0e, onemse, nm0eljqk, btiehdm2, ydb, kbig
       d1 = 0.0d0
       d2 = 0.0d0
       btiehdm2 = -100.0d0 * rsynp1go
       esql7umk = 3000
-      if(.not.(n2kersmx .le. 0.80d0 .or. n2kersmx .ge. 1.0d0))goto 23145
+      if(n2kersmx .le. 0.80d0 .or. n2kersmx .ge. 1.0d0)then
       dvhw1ulq = 0
       return
-23145 continue
+      endif
       kbig = 1.0d4
       oxjgzv0e = 0.001d0
       hnu1vjyw = 1.0d0 - rsynp1go
@@ -450,105 +472,102 @@
 23147 if(.not.(kij0gwer.le.zy1mchbf))goto 23149
       ayfnwr1v=1
 23150 if(.not.(ayfnwr1v.le.kuzxj1lo))goto 23152
-      if(.not.(hdqsx7bk(ayfnwr1v,kij0gwer) .gt. kbig))goto 23153
+      if(hdqsx7bk(ayfnwr1v,kij0gwer) .gt. kbig)then
       hdqsx7bk(ayfnwr1v,kij0gwer) = kbig
-23153 continue
-      if(.not.(hsj9bzaq(ayfnwr1v,kij0gwer) .lt. oxjgzv0e))goto 23155
+      endif
+      if(hsj9bzaq(ayfnwr1v,kij0gwer) .lt. oxjgzv0e)then
       hsj9bzaq(ayfnwr1v,kij0gwer) = oxjgzv0e
-23155 continue
-      if(.not.((hsj9bzaq(ayfnwr1v,kij0gwer) .gt. onemse)))goto 23157
+      endif
+      if((hsj9bzaq(ayfnwr1v,kij0gwer) .gt. onemse))then
       nm0eljqk = hdqsx7bk(ayfnwr1v,kij0gwer) * (1.0d0/hsj9bzaq(ayfnwr1v,
-     &kij0gwer) - 1.0d0)
-      bzmd6ftv(ayfnwr1v,kij0gwer) = -nm0eljqk * (1.0d0 + hdqsx7bk(
-     &ayfnwr1v,kij0gwer)/(hdqsx7bk(ayfnwr1v,kij0gwer) + nm0eljqk)) / 
-     &hdqsx7bk(ayfnwr1v,kij0gwer)**2
-      if(.not.(bzmd6ftv(ayfnwr1v,kij0gwer) .gt. btiehdm2))goto 23159
+     *kij0gwer) - 1.0d0)
+      bzmd6ftv(ayfnwr1v,kij0gwer) = -nm0eljqk * (1.0d0 + hdqsx7bk(ayfnwr
+     *1v,kij0gwer)/(hdqsx7bk(ayfnwr1v,kij0gwer) + nm0eljqk)) / hdqsx7bk(
+     *ayfnwr1v,kij0gwer)**2
+      if(bzmd6ftv(ayfnwr1v,kij0gwer) .gt. btiehdm2)then
       bzmd6ftv(ayfnwr1v,kij0gwer) = btiehdm2
-23159 continue
+      endif
       goto 20
-23157 continue
+      endif
       q6zdcwxk = 0.0d0
       pok1 = .true.
       pok2 = hsj9bzaq(ayfnwr1v,kij0gwer) .lt. (1.0d0-rsynp1go)
       pok12 = pok1 .and. pok2
-      if(.not.(pok12))goto 23161
-      d2 = hdqsx7bk(ayfnwr1v,kij0gwer) * dlog(hsj9bzaq(ayfnwr1v,
-     &kij0gwer))
+      if(pok12)then
+      d2 = hdqsx7bk(ayfnwr1v,kij0gwer) * dlog(hsj9bzaq(ayfnwr1v,kij0gwer
+     *))
       ux3nadiw = dexp(d2)
-      goto 23162
-23161 continue
+      else
       ux3nadiw = 0.0d0
-23162 continue
+      endif
       plo6hkdr = (1.0d0 - ux3nadiw) / hdqsx7bk(ayfnwr1v,kij0gwer)**2
       q6zdcwxk = q6zdcwxk + plo6hkdr
       call tldz5ion(hdqsx7bk(ayfnwr1v,kij0gwer), o3jyipdf)
       ydb = 1.0d0
       call tldz5ion(ydb + hdqsx7bk(ayfnwr1v,kij0gwer), tad5vhsu)
       pq0hfucn = 0.0d0
-      if(.not.(pok12))goto 23163
+      if(pok12)then
       d1 = dlog(1.0d0 - hsj9bzaq(ayfnwr1v,kij0gwer))
       ft3ijqmy = dexp(ydb * d1 + d2 + tad5vhsu - o3jyipdf - pq0hfucn)
-      goto 23164
-23163 continue
+      else
       ft3ijqmy = 0.0d0
-23164 continue
+      endif
       ux3nadiw = ux3nadiw + ft3ijqmy
-      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + 
-     &ydb)**2
+      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb
+     *)**2
       q6zdcwxk = q6zdcwxk + plo6hkdr
       ydb = 2.0d0
-23165 if(.not.((ux3nadiw .le. n2kersmx) .or. (plo6hkdr .gt. 1.0d-4)))
-     &goto 23166
-      tad5vhsu = tad5vhsu + dlog(ydb + hdqsx7bk(ayfnwr1v,kij0gwer) - 1.
-     &0d0)
+23165 if((ux3nadiw .le. n2kersmx) .or. (plo6hkdr .gt. 1.0d-4))then
+      tad5vhsu = tad5vhsu + dlog(ydb + hdqsx7bk(ayfnwr1v,kij0gwer) - 1.0
+     *d0)
       pq0hfucn = pq0hfucn + dlog(ydb)
-      if(.not.(pok12))goto 23167
+      if(pok12)then
       ft3ijqmy = dexp(ydb * d1 + d2 + tad5vhsu - o3jyipdf - pq0hfucn)
-      goto 23168
-23167 continue
+      else
       ft3ijqmy = 0.0d0
-23168 continue
+      endif
       ux3nadiw = ux3nadiw + ft3ijqmy
-      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + 
-     &ydb)**2
+      plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb
+     *)**2
       q6zdcwxk = q6zdcwxk + plo6hkdr
       ydb = ydb + 1.0d0
-      if(.not.(ydb .gt. 1.0d3))goto 23169
+      if(ydb .gt. 1.0d3)then
       goto 21
-23169 continue
+      endif
       goto 23165
+      endif
 23166 continue
 21    bzmd6ftv(ayfnwr1v,kij0gwer) = -q6zdcwxk
 20    tad5vhsu = 0.0d0
-       ayfnwr1v=ayfnwr1v+1
+23151 ayfnwr1v=ayfnwr1v+1
       goto 23150
 23152 continue
-       kij0gwer=kij0gwer+1
+23148 kij0gwer=kij0gwer+1
       goto 23147
 23149 continue
       return
       end
-      subroutine mbessi0(bvecto, kuzxj1lo, kpzavbj3, d0, d1, d2, 
-     &zjkrtol8, qaltf0nz)
+      subroutine mbessi0(bvecto, kuzxj1lo, kpzavbj3, d0, d1, d2, zjkrtol
+     *8, qaltf0nz)
       implicit logical (a-z)
       integer kuzxj1lo, kpzavbj3, zjkrtol8, c5aesxkus
       double precision bvecto(kuzxj1lo), d0(kuzxj1lo), d1(kuzxj1lo), d2(
-     &kuzxj1lo), qaltf0nz
+     *kuzxj1lo), qaltf0nz
       integer ayfnwr1v, gp1jxzuh
       double precision f0, t0, m0, f1, t1, m1, f2, t2, m2
       double precision toobig
       toobig = 20.0d0
       zjkrtol8 = 0
-      if(.not.(.not.(kpzavbj3 .eq. 0 .or. kpzavbj3 .eq. 1 .or. kpzavbj3 
-     &.eq. 2)))goto 23171
+      if(.not.(kpzavbj3 .eq. 0 .or. kpzavbj3 .eq. 1 .or. kpzavbj3 .eq. 2
+     *))then
       zjkrtol8 = 1
       return
-23171 continue
-      do 23173 gp1jxzuh=1,kuzxj1lo 
-      if(.not.(dabs(bvecto(gp1jxzuh)) .gt. toobig))goto 23175
+      endif
+      do23173 gp1jxzuh=1,kuzxj1lo 
+      if(dabs(bvecto(gp1jxzuh)) .gt. toobig)then
       zjkrtol8 = 1
       return
-23175 continue
+      endif
       t1 = bvecto(gp1jxzuh) / 2.0d0
       f1 = t1
       t0 = t1 * t1
@@ -556,19 +575,19 @@
       t2 = 0.50d0
       f2 = t2
       c5aesxkus = 15
-      if(.not.(dabs(bvecto(gp1jxzuh)) .gt. 10))goto 23177
+      if(dabs(bvecto(gp1jxzuh)) .gt. 10)then
       c5aesxkus = 25
-23177 continue
-      if(.not.(dabs(bvecto(gp1jxzuh)) .gt. 15))goto 23179
+      endif
+      if(dabs(bvecto(gp1jxzuh)) .gt. 15)then
       c5aesxkus = 35
-23179 continue
-      if(.not.(dabs(bvecto(gp1jxzuh)) .gt. 20))goto 23181
+      endif
+      if(dabs(bvecto(gp1jxzuh)) .gt. 20)then
       c5aesxkus = 40
-23181 continue
-      if(.not.(dabs(bvecto(gp1jxzuh)) .gt. 30))goto 23183
+      endif
+      if(dabs(bvecto(gp1jxzuh)) .gt. 30)then
       c5aesxkus = 55
-23183 continue
-      do 23185 ayfnwr1v=1,c5aesxkus 
+      endif
+      do23185 ayfnwr1v=1,c5aesxkus 
       m0 = (bvecto(gp1jxzuh) / (2.0d0*(ayfnwr1v+1.0d0))) ** 2.0
       m1 = m0 * (1.0d0 + 1.0d0/ayfnwr1v)
       m2 = m1 * (2.0d0*ayfnwr1v + 1.0d0) / (2.0d0*ayfnwr1v - 1.0d0)
@@ -578,21 +597,22 @@
       f0 = f0 + t0
       f1 = f1 + t1
       f2 = f2 + t2
-      if(.not.((dabs(t0) .lt. qaltf0nz) .and. (dabs(t1) .lt. qaltf0nz) 
-     &.and. (dabs(t2) .lt. qaltf0nz)))goto 23187
+      if((dabs(t0) .lt. qaltf0nz) .and. (dabs(t1) .lt. qaltf0nz) .and. (
+     *dabs(t2) .lt. qaltf0nz))then
       goto 23186
-23187 continue
+      endif
 23185 continue
 23186 continue
-      if(.not.(0 .le. kpzavbj3))goto 23189
+      if(0 .le. kpzavbj3)then
       d0(gp1jxzuh) = f0
-23189 continue
-      if(.not.(1 .le. kpzavbj3))goto 23191
+      endif
+      if(1 .le. kpzavbj3)then
       d1(gp1jxzuh) = f1
-23191 continue
-      if(.not.(2 .le. kpzavbj3))goto 23193
+      endif
+      if(2 .le. kpzavbj3)then
       d2(gp1jxzuh) = f2
-23193 continue
+      endif
 23173 continue
+23174 continue
       return
       end
diff --git a/src/vmux3.c b/src/vmux3.c
index 58ab442..3179cd1 100644
--- a/src/vmux3.c
+++ b/src/vmux3.c
@@ -504,7 +504,6 @@ void fvlmz9iyC_enbin8(double bzmd6ftvmat[], double hdqsx7bk[], double hsj9bzaq[]
   double onemse, nm0eljqk, ydb, btiehdm2 = -100.0 * *rsynp1go,
          kbig = 1.0e4, oxjgzv0e = 0.0010;
 
-  Rprintf("zz 20100122; this function fvlmz9iyC_enbin8 unchecked.\n");
   if (*n2kersmx <= 0.80e0 || *n2kersmx >= 1.0e0) {
       Rprintf("returning since n2kersmx <= 0.8 or >= 1\n");
       *dvhw1ulq = 0;
@@ -608,7 +607,6 @@ void fvlmz9iyC_mbessI0(double unvxka0m[], int *f8yswcat, int *kpzavbj3,
   int    ayfnwr1v, gp1jxzuh, c5aesxkus;
   double f0, t0, m0, f1, t1, m1, f2, t2, m2, Toobig = 20.0e0;
 
-  Rprintf("zz 20100122; this function fvlmz9iyC_mbessI0 unchecked.\n");
   *zjkrtol8 = 0;
   if (!(*kpzavbj3 == 0 || *kpzavbj3 == 1 || *kpzavbj3 == 2)) {
       Rprintf("Error in fvlmz9iyC_mbessI0: kpzavbj3 not in 0:2. Returning.\n");
diff --git a/src/zeta3.c b/src/zeta3.c
index d0172c9..8081d7c 100644
--- a/src/zeta3.c
+++ b/src/zeta3.c
@@ -25,21 +25,21 @@ void vzetawr(double sjwyig9t[], double *bqelz3cy, int *kpzavbj3, int *f8yswcat)
   qnwamo0e1 = bqelz3cy;
   qnwamo0e2 = sjwyig9t;
   if (*kpzavbj3 == 0) {
-      for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-          *qnwamo0e1++ = fvlmz9iyzeta8(*qnwamo0e2++, kxae8glp);
-      }
+    for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
+      *qnwamo0e1++ = fvlmz9iyzeta8(*qnwamo0e2++, kxae8glp);
+    }
   } else
   if (*kpzavbj3 == 1) {
-      for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-          *qnwamo0e1++ = fvlmz9iydzeta8(*qnwamo0e2++, kxae8glp);
-      }
+    for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
+      *qnwamo0e1++ = fvlmz9iydzeta8(*qnwamo0e2++, kxae8glp);
+    }
   } else
   if (*kpzavbj3 == 2) {
-      for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
-          *qnwamo0e1++ = fvlmz9iyddzeta8(*qnwamo0e2++, kxae8glp);
-      }
+    for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
+      *qnwamo0e1++ = fvlmz9iyddzeta8(*qnwamo0e2++, kxae8glp);
+    }
   } else {
-      Rprintf("Error: *kpzavbj3 must equal 0, 1 or 2 in C function vzetawr\n");
+    Rprintf("Error: *kpzavbj3 must equal 0, 1 or 2 in C function vzetawr\n");
   }
 }
 
@@ -122,41 +122,42 @@ double fvlmz9iydzeta8(double ghz9vuba, double kxae8glp[]) {
 double fvlmz9iyddzeta8(double ghz9vuba, double kxae8glp[]) {
 
 
-    int      ayfnwr1v, gp1jxzuh, uw3favmo, nsvdbx3tk, m2svdbx3tk;
-    double   q6zdcwxk, xvr7bonh, dh9mgvze, hpmwnav2, a2svdbx3tk, ugqvjoe5a, ugqvjoe5n, fred, fred2;
-
-    ayfnwr1v = 12;
-    gp1jxzuh = 8;
-
-    ugqvjoe5a = log( (double) ayfnwr1v );
-    a2svdbx3tk = ayfnwr1v * ayfnwr1v;
-    xvr7bonh = ghz9vuba / 2.000 / a2svdbx3tk;
-    dh9mgvze = 1.000 / ghz9vuba - ugqvjoe5a;
-    hpmwnav2 = 1.000 / ghz9vuba / ghz9vuba;
-    q6zdcwxk = kxae8glp[0] * xvr7bonh * (pow(dh9mgvze, (double) 2.0) - hpmwnav2);
-
-    for (uw3favmo = 2; uw3favmo < gp1jxzuh; uw3favmo++) {
-        m2svdbx3tk = uw3favmo + uw3favmo;
-        xvr7bonh *= (ghz9vuba + m2svdbx3tk - 3.000) *
-                  (ghz9vuba + m2svdbx3tk - 2.000) / (m2svdbx3tk -
-                  1.0) / m2svdbx3tk / a2svdbx3tk;
-        dh9mgvze += 1.000 / (ghz9vuba + m2svdbx3tk - 3.000) +
-                  1.000 / (ghz9vuba + m2svdbx3tk - 2.000);
-        hpmwnav2 += 1.000 / pow(ghz9vuba + m2svdbx3tk - 3.000, (double) 2.0) +
-                  1.000 / pow(ghz9vuba + m2svdbx3tk - 2.000, (double) 2.0);
-        q6zdcwxk += kxae8glp[uw3favmo-1] * xvr7bonh * (dh9mgvze * dh9mgvze - hpmwnav2);
-    }
-    fred  = pow((double) ayfnwr1v, (double) 1.0 - ghz9vuba);
-    fred2 = pow(ugqvjoe5a, (double) 2.0) * (1.0 / (ghz9vuba - 1.0) + 0.50 / ayfnwr1v);
-    q6zdcwxk = (q6zdcwxk + 2.0 / pow(ghz9vuba - 1.0, (double) 3.0) +
-            2.0 * ugqvjoe5a / pow(ghz9vuba - 1.0, (double) 2.0) + fred2) * fred;
-
-    for (nsvdbx3tk = 2; nsvdbx3tk < ayfnwr1v; nsvdbx3tk++) {
-        ugqvjoe5n = log( (double) nsvdbx3tk );
-        q6zdcwxk += pow(ugqvjoe5n, (double) 2.0) / exp(ugqvjoe5n * ghz9vuba);
-    }
+  int      ayfnwr1v, gp1jxzuh, uw3favmo, nsvdbx3tk, m2svdbx3tk;
+  double   q6zdcwxk, xvr7bonh, dh9mgvze, hpmwnav2, a2svdbx3tk, ugqvjoe5a, ugqvjoe5n,
+           fred1, fred2;
+
+  ayfnwr1v = 12;
+  gp1jxzuh = 8;
+
+  ugqvjoe5a = log( (double) ayfnwr1v );
+  a2svdbx3tk = ayfnwr1v * ayfnwr1v;
+  xvr7bonh = ghz9vuba / 2.000 / a2svdbx3tk;
+  dh9mgvze = 1.000 / ghz9vuba - ugqvjoe5a;
+  hpmwnav2 = 1.000 / ghz9vuba / ghz9vuba;
+  q6zdcwxk = kxae8glp[0] * xvr7bonh * (pow(dh9mgvze, (double) 2.0) - hpmwnav2);
+
+  for (uw3favmo = 2; uw3favmo < gp1jxzuh; uw3favmo++) {
+    m2svdbx3tk = uw3favmo + uw3favmo;
+    xvr7bonh *= (ghz9vuba + m2svdbx3tk - 3.000) *
+              (ghz9vuba + m2svdbx3tk - 2.000) / (m2svdbx3tk -
+              1.0) / m2svdbx3tk / a2svdbx3tk;
+    dh9mgvze += 1.000 / (ghz9vuba + m2svdbx3tk - 3.000) +
+              1.000 / (ghz9vuba + m2svdbx3tk - 2.000);
+    hpmwnav2 += 1.000 / pow(ghz9vuba + m2svdbx3tk - 3.000, (double) 2.0) +
+              1.000 / pow(ghz9vuba + m2svdbx3tk - 2.000, (double) 2.0);
+    q6zdcwxk += kxae8glp[uw3favmo-1] * xvr7bonh * (dh9mgvze * dh9mgvze - hpmwnav2);
+  }
+  fred1 = pow((double) ayfnwr1v, (double) 1.0 - ghz9vuba);
+  fred2 = pow(ugqvjoe5a, (double) 2.0) * (1.0 / (ghz9vuba - 1.0) + 0.50 / ayfnwr1v);
+  q6zdcwxk = (q6zdcwxk + 2.0 / pow(ghz9vuba - 1.0, (double) 3.0) +
+          2.0 * ugqvjoe5a / pow(ghz9vuba - 1.0, (double) 2.0) + fred2) * fred1;
 
-    return q6zdcwxk;
+  for (nsvdbx3tk = 2; nsvdbx3tk < ayfnwr1v; nsvdbx3tk++) {
+    ugqvjoe5n = log( (double) nsvdbx3tk );
+    q6zdcwxk += pow(ugqvjoe5n, (double) 2.0) / exp(ugqvjoe5n * ghz9vuba);
+  }
+
+  return q6zdcwxk;
 }
 
 
@@ -171,18 +172,18 @@ void vbecoef(double kxae8glp[]) {
 
 
 
-    kxae8glp[0] = 1.000 / 6.000;
-    kxae8glp[1] = -1.000 / 30.000;
-    kxae8glp[2] = 1.000 / 42.000;
-    kxae8glp[3] = -1.000 / 30.000;
-    kxae8glp[4] = 5.000 / 66.000;
-    kxae8glp[5] = -691.000 / 2730.000;
-    kxae8glp[6] = 7.000 / 6.000;
-    kxae8glp[7] = -3617.000 / 510.000;
-    kxae8glp[8] = 4386.700 / 79.800;
-    kxae8glp[9] = -1746.1100 / 3.3000;
-    kxae8glp[10] = 8545.1300 / 1.3800;
-    kxae8glp[11] = -2363.6409100 / 0.0273000;
+  kxae8glp[0] =  1.000 / 6.000;
+  kxae8glp[1] = -1.000 / 30.000;
+  kxae8glp[2] =  1.000 / 42.000;
+  kxae8glp[3] = -1.000 / 30.000;
+  kxae8glp[4] =  5.000 / 66.000;
+  kxae8glp[5] = -691.000 / 2730.000;
+  kxae8glp[6] =  7.000 / 6.000;
+  kxae8glp[7] = -3617.000 / 510.000;
+  kxae8glp[8] = 4386.700 / 79.800;
+  kxae8glp[9] = -1746.1100 / 3.3000;
+  kxae8glp[10] = 8545.1300 / 1.3800;
+  kxae8glp[11] = -2363.6409100 / 0.0273000;
 }
 
 
@@ -193,35 +194,35 @@ void conmax_Z(double *lamvec, double *nuvec, double *bqelz3cy,
               double *qaltf0nz) {
 
 
-    double *pq6zdcwxk, denom = 0.0, yq6lorbx, prevterm;
-    int    ayfnwr1v;
-
-    *qaltf0nz = 1.0e-6;
-
-    if (*kpzavbj3 == 0) {
-      pq6zdcwxk = bqelz3cy;
-      for (ayfnwr1v = 0; ayfnwr1v < *nlength; ayfnwr1v++) {
-        prevterm = 1.0 + *lamvec;
-        denom = 1.0;
-        *pq6zdcwxk = prevterm;
-        yq6lorbx = 2.0;
-
-        if (*nuvec == 0.0 && *lamvec >= 1.0) {
-          Rprintf("Error: series will not converge. Returning 0.0\n");
-          *pq6zdcwxk = 0.0;
-        } else {
-          while (prevterm > *qaltf0nz) {
-              denom = denom * pow(yq6lorbx, *lamvec);
-              prevterm = prevterm * *lamvec / denom;
-              *pq6zdcwxk += prevterm;
-              yq6lorbx += 1.0;
-          }
+  double *pq6zdcwxk, denom = 0.0, yq6lorbx, prevterm;
+  int    ayfnwr1v;
+
+  *qaltf0nz = 1.0e-6;
+
+  if (*kpzavbj3 == 0) {
+    pq6zdcwxk = bqelz3cy;
+    for (ayfnwr1v = 0; ayfnwr1v < *nlength; ayfnwr1v++) {
+      prevterm = 1.0 + *lamvec;
+      denom = 1.0;
+      *pq6zdcwxk = prevterm;
+      yq6lorbx = 2.0;
+
+      if (*nuvec == 0.0 && *lamvec >= 1.0) {
+        Rprintf("Error: series will not converge. Returning 0.0\n");
+        *pq6zdcwxk = 0.0;
+      } else {
+        while (prevterm > *qaltf0nz) {
+          denom = denom * pow(yq6lorbx, *lamvec);
+          prevterm = prevterm * *lamvec / denom;
+          *pq6zdcwxk += prevterm;
+          yq6lorbx += 1.0;
         }
-        lamvec++;
-        nuvec++;
-        pq6zdcwxk++;
       }
-    } else if (*kpzavbj3 == 1) {
+      lamvec++;
+      nuvec++;
+      pq6zdcwxk++;
+    }
+  } else if (*kpzavbj3 == 1) {
 
     } else if (*kpzavbj3 == 2) {
 
diff --git a/inst/doc/categoricalVGAM.Rnw b/vignettes/categoricalVGAM.Rnw
similarity index 87%
copy from inst/doc/categoricalVGAM.Rnw
copy to vignettes/categoricalVGAM.Rnw
index b5841f5..c4f98e0 100644
--- a/inst/doc/categoricalVGAM.Rnw
+++ b/vignettes/categoricalVGAM.Rnw
@@ -83,6 +83,7 @@
 
 <<echo=FALSE, results=hide>>=
 library("VGAM")
+library("VGAMdata")
 ps.options(pointsize = 12)
 options(width = 72, digits = 4)
 options(SweaveHooks = list(fig = function() par(las = 1)))
@@ -610,7 +611,7 @@ to~\texttt{gam()} \citep{gam:pack:2009}, e.g.,
 to fit a nonparametric proportional odds model
 \citep[cf.~p.179 of][]{mccu:neld:1989}
 to the pneumoconiosis data one could try
-<<eval=T>>=
+<<label = pneumocat, eval=T>>=
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
             cumulative(reverse = TRUE, parallel = TRUE), pneumo)
@@ -1005,7 +1006,7 @@ A toy example where $p=p_A$ and $q=p_B$ is
 abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
 fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
 coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
+Coef(fit)  # Estimated pA and pB
 @
 The function \texttt{Coef()}, which applies only to intercept-only models,
 applies to $g_{j}(\theta_{j})=\eta_{j}$
@@ -1246,251 +1247,13 @@ data sets in order to give a flavour of what is available in the package.
 
 
 
-\subsection{2008 World Fly Fishing Championships}
-\label{sec:jsscat.eg.WFFC}
 
-The World Fly Fishing Championships (WFFC)
-is a prestigious catch-and-release competition held annually.
-In 2008 it was held in New~Zealand during the month of March.
-The data was released and appears in~\VGAM{} as the data frames
-\texttt{wffc},
-\texttt{wffc.nc},
-\texttt{wffc.indiv} and
-\texttt{wffc.teams}.
-Details about the competition are found
-in the online help, as well as~\cite{yee:2010v}.
-
-
-Briefly, we will model the abundance of fish caught during each
-three-hour session amongst the 90 or so competitors (from about
-19~countries) who fished all their sessions. There were five~sectors
-(locations) labelled I--V for the Whanganui~River, Lake~Otamangakau,
-Lake~Rotoaira, Waihou~River and Waimakariri~River, respectively. The
-sessions were sequentially labelled 1--6 where odd and even
-numbers denote morning and afternoon respectively. There were
-three consecutive days of fishing during which each sector experienced
-a rest session.
-
-
-
-\cite{yee:2010v} fitted Poisson and negative
-binomial regressions to the numbers caught at each competitor-session
-combination.
-The negative
-binomial regression had an intercept-only for its
-index parameter~$k$ and
-$\Var(Y) = \mu(1+\mu / k)$.
-Both models had the log-linear relationship
-\begin{eqnarray}
-\label{eq:wffc.use.loglinear}
-\log \, \mu_{adsc} &=&
-\eta ~=~
-\beta_{(1)1} +
-\alpha_{s} +
-\beta_{a} +
-\gamma_{d} +
-\delta_{c}.
-\end{eqnarray}
-where $\mu = E(Y)$ is the mean number caught,
-$\beta_{(1)1}$~is the intercept,
-$\alpha_{s}$~are the sector effects for $s=1,\ldots,5$ sectors,
-$\delta_{c}$~are the ``competitor effects'' for $c=1,\ldots,91$ competitors
-(8~competitors who did not fish all~5 sessions were excluded),
-$\beta_{a}$~are the morning ($a=1$) and afternoon ($a=2$) effects,
-$\gamma_{d}$~are the day effects for
-day $d=1,2,3$.
-Recall for factors that the first level is baseline, e.g.,
-$\alpha_1=\beta_1=0$ etc.
-Not used here is $b=1,\ldots,19$ for which beat/boat was
-fished/used (e.g., fixed locations on the river).
-We will fit a proportional odds model with essentially the RHS
-of~(\ref{eq:wffc.use.loglinear}) as the linear predictor.
-
-
-
-Here is a peek at the data frame used.
-Each row of~\texttt{wffc.nc} is the number of captures
-by each sector-session-beat combination.
-<<>>=
-head(wffc.nc, 5)
-@ 
-
-
-We first process the data a little: create the regressor
-variables and restrict the analysis to anglers who fished all their
-sessions.
-Here, ``\texttt{nc}'' stands for numbers caught, and
-``\texttt{f}'' stands for factor.
-<<>>=
-fnc <- transform(wffc.nc,
-                 finame = factor(iname),
-                 fsector = factor(sector),
-                 fday = factor(ceiling(session / 2)),
-                 mornaft = 1 - (session %% 2),
-                 fbeatboat = factor(beatboat))
-
-fnc <- fnc[with(fnc, !is.element(comid, c(99,72,80,93,45,71,97,78))),] 
-fnc <- transform(fnc,
-                ordnum = ifelse(numbers <= 02, "few",
-                         ifelse(numbers <= 10, "more", "most")))
-fnc$ordnum <- ordered(fnc$ordnum, levels = c("few", "more", "most"))
-@
-The variable \texttt{ordnum} is ordinal with 3~levels.
-The cut-points chosen here were decided upon by manual
-inspection; they gave approximately the same numbers in each level:
-<<>>=
-with(fnc, table(ordnum))
-@
-
-
-
-Now we are in a position to fit a proportional odds model
-to mimic~(\ref{eq:wffc.use.loglinear}).
-<<>>=
-fit.pom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          family = cumulative(parallel = TRUE, reverse = TRUE),
-          data = fnc)
-@
-Here, we set \texttt{reverse = TRUE} so that the coefficients
-have the same direction as a logistic regression.
-It means that if a regression coefficient is positive then 
-an increasing value of an explanatory variable is associated
-with an increasing value of the response.
-One could have used \texttt{family = propodds} instead.
-
-
-
-Before interpreting some output let's check that the input was alright.
-<<>>=
-head(fit.pom@y, 3)
-colSums(fit.pom@y)
-@
-The checking indicates no problems with the input.
-
-
-
-Now let's look at some output. Note that the Whanganui~River,
-Mornings and Day~1 are the baseline levels of the factors. Also, the
-variable \texttt{mornaft} is 0~for morning and 1~for afternoons.
-Likewise, the factor \texttt{fday} has values \texttt{1}, \texttt{2}
-and \texttt{3}.
-<<>>=
-head(coef(fit.pom, matrix = TRUE), 10)
-#head(summary(fit.pom)@coef3, 10) # Old now since 0.7-10 is nicer
-@
-verifies the parallelism assumption.
-Standard errors and Wald statistics may be obtained by
-<<>>=
-head(coef(summary(fit.pom)), 10)
-@
-Not surprisingly, these results agree with the Poisson
-and negative binomial regressions
-\citep[reported in][]{yee:2010v}.
-The most glaring qualitative results are as follows.
-We use the rough rule of thumb that if
-the absolute value of the $t$~{statistic} is greater than~$2$
-then it is `statistically significant'.
-\begin{itemize}
-
-\item
-The two lakes were clearly less productive than the rivers.
-However, neither of the other two rivers were significantly different
-from the Whanganui~River.
-
-
-
-\item
-There is a noticeable day effect: the second day is not significantly
-different from the opening day but it is for the third day.
-The decreasing values of the fitted coefficients show there is an
-increasing catch-reduction (fish depletion if it were catch-and-keep)
-as the competition progressed. Replacing \texttt{fday} by a
-variable~\texttt{day} and entering that linearly gave a $t$~statistic
-of~$-4.0$: there is a significant decrease in catch over time.
-
-
-
-\item
-Mornings were more productive than afternoons. The $p$~value for this
-would be close to~5\%. This result is in line with the day effect: fishing
-often results in a `hammering' effect over time on fish populations,
-especially in small streams. Since the morning and afternoon sessions
-were fixed at 9.00am--12.00pm and 2.30--5.30pm daily, there was only
-$2\frac12$~hours for the fish to recover until the next angler arrived.
-
-
-\end{itemize}
-
-
-
-
-
-Let us check the proportional odds assumption with respect
-to the variable~\texttt{mornaft}.
-<<>>=
-fit.ppom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          cumulative(parallel = FALSE ~ 1 + mornaft, reverse = TRUE),
-          data = fnc)
-head(coef(fit.ppom, matrix = TRUE),  8)
-@
-As expected, all rows but~\texttt{(Intercept)} and~\texttt{mornaft}
-are identical due to the parallelism.
-Then
-<<>>=
-pchisq(deviance(fit.pom) - deviance(fit.ppom),
-       df = df.residual(fit.pom) - df.residual(fit.ppom), lower.tail=FALSE)
-@
-gives a likelihood ratio test $p$~value which is non-significant.
-Repeating the testing for each variable separately indicates
-that the parallelism assumption seems reasonable here except
-with~\texttt{fday} ($p$~value $\approx 0.012$).
-For this model
-<<>>=
-fit2.ppom <- vglm(ordnum ~
-          fsector +
-          mornaft +
-          fday +
-          finame,
-          family = cumulative(parallel = FALSE ~ 1 + fday, reverse = TRUE),
-          data = fnc)
-head(coef(fit2.ppom, matrix = TRUE), 8)
-@
-
-
-Some miscellaneous output is as follows.
-<<>>=
-head(fitted(fit2.ppom), 3)
-@
-are the fitted probabilities $\widehat{P}(Y={j})$ which sum to unity for
-each row.
-The $i$th~row of
-<<>>=
-head(predict(fit2.ppom), 3)
-@
-is $\widehat{\boldeta}(\bix_i)^{\top}$.
-The dimensions of the LM and VLM design matrices are
-<<>>=
-dim(model.matrix(fit2.ppom, type = "lm"))
-dim(model.matrix(fit2.ppom, type = "vlm"))
-@
-which shows the VLM matrix grows quickly with respect to~$M$.
-Lastly,
-<<>>=
-constraints(fit2.ppom)[c(1, 2, 5, 6)]
-@
-shows some of the constraint matrices,
-$\bH_1=\bI_2$
-and
-$\bH_2=\bH_5=\bH_6=\bone_2$
-(see Equations~\ref{eqn:constraints.VGAM}--\ref{eqn:lin.coefs4}).
+%20130919
+%Note: 
+%\subsection{2008 World Fly Fishing Championships}
+%\label{sec:jsscat.eg.WFFC}
+%are deleted since there are problems with accessing the \texttt{wffc.nc}
+%data etc. since they are now in \pkg{VGAMdata}.
 
 
 
@@ -1763,9 +1526,9 @@ ooo <- with(marital.nz, order(age))
 with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
      type="l", las=1, lwd=2, ylim=0:1,
      ylab="Fitted probabilities",
-     xlab="Age", # main="Marital status amongst NZ Male Europeans",
+     xlab="Age",  # main="Marital status amongst NZ Male Europeans",
      col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62, # x="topright",
+legend(x=52.5, y=0.62,  # x="topright",
        col=c(mycol[1], "black", mycol[-1]),
        lty=1:4,
        legend=colnames(fit.ms@y), lwd=2)
@@ -1888,8 +1651,8 @@ The fit was biplotted
  rows of $\widehat{\bA}$ plotted as labels) using
 <<figure=F>>=
 biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-#      xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
-       xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
+#      xlim=c(-1,6), ylim=c(-1.2,4),  # Use this if not scaled
+       xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2),  # Use this if scaled
        chull=TRUE, clty=2, ccol="blue")
 @
 to give Figure~\ref{fig:jsscat.eg.rrmlm2.backPain}.
diff --git a/inst/doc/categoricalVGAMbib.bib b/vignettes/categoricalVGAMbib.bib
similarity index 100%
rename from inst/doc/categoricalVGAMbib.bib
rename to vignettes/categoricalVGAMbib.bib
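
For reference, a minimal, untested sketch of how the deleted WFFC example would now begin, assuming (as the vignette comment above states) that the wffc.nc data frame has moved to the VGAMdata package:

  library("VGAM")
  library("VGAMdata")   # wffc, wffc.nc, wffc.indiv and wffc.teams now live here
  head(wffc.nc, 5)      # numbers caught per sector-session-beat combination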

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git


