[r-cran-vgam] 01/63: Import Upstream version 0.7-1
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:19 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 58f1354086521e923a6284262f90a2096af233cc
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:42 2017 +0100
Import Upstream version 0.7-1
---
DESCRIPTION | 18 +
DISCLAIMER | 9 +
NAMESPACE | 284 ++
NEWS | 738 ++++
R/aamethods.q | 546 +++
R/add1.vglm.q | 6 +
R/attrassign.R | 37 +
R/build.terms.s | 50 +
R/build.terms.vlm.q | 92 +
R/calibrate.q | 287 ++
R/cao.R | 177 +
R/cao.fit.q | 1817 +++++++++
R/coef.vlm.q | 211 +
R/cqo.R | 162 +
R/cqo.fit.q | 859 ++++
R/deviance.vlm.q | 54 +
R/effects.vglm.q | 25 +
R/family.basics.q | 839 ++++
R/family.binomial.q | 794 ++++
R/family.bivariate.q | 615 +++
R/family.categorical.q | 1107 ++++++
R/family.censored.q | 447 +++
R/family.extremes.q | 2256 +++++++++++
R/family.functions.q | 274 ++
R/family.genetic.q | 571 +++
R/family.glmgam.q | 790 ++++
R/family.loglin.q | 226 ++
R/family.mixture.q | 290 ++
R/family.nonlinear.q | 152 +
R/family.normal.q | 471 +++
R/family.positive.q | 359 ++
R/family.qreg.q | 909 +++++
R/family.rcqo.q | 346 ++
R/family.rrr.q | 3092 +++++++++++++++
R/family.survival.q | 284 ++
R/family.ts.q | 440 +++
R/family.univariate.q | 7967 ++++++++++++++++++++++++++++++++++++++
R/family.vglm.q | 30 +
R/family.zeroinf.q | 716 ++++
R/fitted.vlm.q | 79 +
R/generic.q | 38 +
R/links.q | 1105 ++++++
R/logLik.vlm.q | 32 +
R/model.matrix.vglm.q | 346 ++
R/mux.q | 349 ++
R/plot.vglm.q | 936 +++++
R/predict.vgam.q | 356 ++
R/predict.vglm.q | 230 ++
R/predict.vlm.q | 410 ++
R/print.summary.others.q | 33 +
R/print.vglm.q | 95 +
R/print.vlm.q | 47 +
R/qrrvglm.control.q | 125 +
R/qtplot.q | 764 ++++
R/residuals.vlm.q | 212 +
R/rrvglm.R | 195 +
R/rrvglm.control.q | 147 +
R/rrvglm.fit.q | 631 +++
R/s.q | 38 +
R/s.vam.q | 245 ++
R/smart.R | 1017 +++++
R/step.vglm.q | 17 +
R/summary.others.q | 35 +
R/summary.vgam.q | 245 ++
R/summary.vglm.q | 249 ++
R/summary.vlm.q | 193 +
R/uqo.R | 881 +++++
R/vgam.R | 281 ++
R/vgam.control.q | 129 +
R/vgam.fit.q | 446 +++
R/vgam.match.q | 85 +
R/vglm.R | 169 +
R/vglm.control.q | 150 +
R/vglm.fit.q | 462 +++
R/vlm.R | 190 +
R/vlm.wfit.q | 150 +
R/vsmooth.spline.q | 564 +++
R/zzz.R | 12 +
data/auuc.R | 6 +
data/bminz.txt | 701 ++++
data/car.all.R | 362 ++
data/chest.txt | 74 +
data/coalminers.txt | 10 +
data/enzyme.txt | 13 +
data/gew.txt | 21 +
data/hspider.R | 47 +
data/hunua.txt | 393 ++
data/lirat.txt | 59 +
data/nzc.txt | 27 +
data/oxtemp.txt | 81 +
data/pneumo.R | 6 +
data/rainfall.R | 1000 +++++
data/usagrain.txt | 143 +
data/venice.R | 50 +
data/waitakere.txt | 580 +++
demo/00Index | 6 +
demo/binom2.or.R | 40 +
demo/cqo.R | 99 +
demo/distributions.R | 30 +
demo/lmsqreg.R | 32 +
demo/vgam.R | 20 +
demo/zipoisson.R | 20 +
man/AA.Aa.aa.Rd | 64 +
man/AB.Ab.aB.ab.Rd | 64 +
man/AB.Ab.aB.ab2.Rd | 69 +
man/ABO.Rd | 70 +
man/BratUC.Rd | 77 +
man/Coef.Rd | 79 +
man/Coef.qrrvglm-class.Rd | 123 +
man/Coef.qrrvglm.Rd | 131 +
man/Coef.rrvglm-class.Rd | 70 +
man/Coef.rrvglm.Rd | 54 +
man/Coef.vlm.Rd | 66 +
man/DagumUC.Rd | 58 +
man/FiskUC.Rd | 58 +
man/G1G2G3.Rd | 75 +
man/Inv.gaussian.Rd | 59 +
man/InvlomaxUC.Rd | 59 +
man/InvparalogisticUC.Rd | 59 +
man/Links.Rd | 182 +
man/LomaxUC.Rd | 58 +
man/MNSs.Rd | 72 +
man/Max.Rd | 88 +
man/MaxwellUC.Rd | 70 +
man/Opt.Rd | 90 +
man/ParalogisticUC.Rd | 58 +
man/Pareto.Rd | 68 +
man/ParetoIVUC.Rd | 104 +
man/RayleighUC.Rd | 65 +
man/SinmadUC.Rd | 58 +
man/Tol.Rd | 96 +
man/VGAM-package.Rd | 444 +++
man/acat.Rd | 113 +
man/auuc.Rd | 39 +
man/benini.Rd | 93 +
man/beniniUC.Rd | 71 +
man/betaII.Rd | 94 +
man/betabin.ab.Rd | 217 ++
man/betabinUC.Rd | 112 +
man/betabinomial.Rd | 200 +
man/betaff.Rd | 140 +
man/betageomUC.Rd | 76 +
man/betageometric.Rd | 123 +
man/betanormUC.Rd | 86 +
man/betaprime.Rd | 115 +
man/bilogis4UC.Rd | 70 +
man/bilogistic4.Rd | 128 +
man/binom2.or.Rd | 168 +
man/binom2.rho.Rd | 107 +
man/binomialff.Rd | 163 +
man/biplot-methods.Rd | 39 +
man/bisa.Rd | 140 +
man/bisaUC.Rd | 81 +
man/bminz.Rd | 48 +
man/brat.Rd | 124 +
man/bratt.Rd | 137 +
man/calibrate-methods.Rd | 36 +
man/calibrate.Rd | 81 +
man/calibrate.qrrvglm.Rd | 140 +
man/calibrate.qrrvglm.control.Rd | 106 +
man/cao.Rd | 311 ++
man/cao.control.Rd | 348 ++
man/cauchit.Rd | 161 +
man/cauchy1.Rd | 92 +
man/ccoef-methods.Rd | 44 +
man/ccoef.Rd | 92 +
man/cdf.lmscreg.Rd | 76 +
man/cexpon.Rd | 88 +
man/cgo.Rd | 58 +
man/cgumbel.Rd | 136 +
man/chest.Rd | 43 +
man/chisq.Rd | 55 +
man/clo.Rd | 61 +
man/cloglog.Rd | 138 +
man/cnormal1.Rd | 93 +
man/coalminers.Rd | 39 +
man/constraints.Rd | 109 +
man/cqo.Rd | 479 +++
man/cratio.Rd | 124 +
man/cumulative.Rd | 205 +
man/dagum.Rd | 109 +
man/dcnormal1.Rd | 98 +
man/deplot.lmscreg.Rd | 89 +
man/dirichlet.Rd | 119 +
man/dirmul.old.Rd | 146 +
man/dirmultinomial.Rd | 182 +
man/dzeta.Rd | 70 +
man/enzyme.Rd | 45 +
man/erf.Rd | 51 +
man/erlang.Rd | 95 +
man/expexp.Rd | 158 +
man/expexp1.Rd | 112 +
man/exponential.Rd | 91 +
man/fff.Rd | 93 +
man/fill.Rd | 233 ++
man/fisherz.Rd | 111 +
man/fisk.Rd | 98 +
man/fitted.vlm.Rd | 85 +
man/frank.Rd | 96 +
man/frankUC.Rd | 68 +
man/frechet.Rd | 161 +
man/frechetUC.Rd | 66 +
man/freund61.Rd | 181 +
man/gamma1.Rd | 75 +
man/gamma2.Rd | 164 +
man/gamma2.ab.Rd | 118 +
man/gammahyp.Rd | 85 +
man/garma.Rd | 178 +
man/gaussianff.Rd | 144 +
man/genbetaII.Rd | 123 +
man/genpoisson.Rd | 72 +
man/geometric.Rd | 77 +
man/get.smart.Rd | 64 +
man/get.smart.prediction.Rd | 44 +
man/gev.Rd | 274 ++
man/gevUC.Rd | 79 +
man/gew.Rd | 40 +
man/ggamma.Rd | 126 +
man/ggammaUC.Rd | 68 +
man/golf.Rd | 176 +
man/gpd.Rd | 237 ++
man/gpdUC.Rd | 75 +
man/grc.Rd | 132 +
man/gumbel.Rd | 238 ++
man/gumbelUC.Rd | 109 +
man/guplot.Rd | 78 +
man/hspider.Rd | 74 +
man/hunua.Rd | 66 +
man/hyper.Rd | 129 +
man/hzeta.Rd | 95 +
man/hzetaUC.Rd | 84 +
man/iam.Rd | 99 +
man/identity.Rd | 103 +
man/inv.gaussianff.Rd | 97 +
man/invlomax.Rd | 93 +
man/invparalogistic.Rd | 96 +
man/is.smart.Rd | 64 +
man/laplaceUC.Rd | 95 +
man/leipnik.Rd | 105 +
man/lerch.Rd | 108 +
man/levy.Rd | 118 +
man/lgammaUC.Rd | 79 +
man/lgammaff.Rd | 126 +
man/lino.Rd | 123 +
man/linoUC.Rd | 77 +
man/lirat.Rd | 67 +
man/lms.bcg.Rd | 187 +
man/lms.bcn.Rd | 170 +
man/lms.yjn.Rd | 206 +
man/logUC.Rd | 87 +
man/logc.Rd | 90 +
man/loge.Rd | 107 +
man/logff.Rd | 82 +
man/logistic.Rd | 132 +
man/logit.Rd | 207 +
man/loglinb2.Rd | 100 +
man/loglinb3.Rd | 84 +
man/loglog.Rd | 102 +
man/lognormal.Rd | 122 +
man/logoff.Rd | 91 +
man/lomax.Rd | 99 +
man/lv.Rd | 84 +
man/lvplot.Rd | 79 +
man/lvplot.qrrvglm.Rd | 324 ++
man/lvplot.rrvglm.Rd | 164 +
man/maxwell.Rd | 61 +
man/mccullagh89.Rd | 125 +
man/mckaygamma2.Rd | 106 +
man/meplot.Rd | 92 +
man/micmen.Rd | 123 +
man/mix2normal1.Rd | 174 +
man/mix2poisson.Rd | 138 +
man/model.framevlm.Rd | 70 +
man/model.matrixvlm.Rd | 81 +
man/multinomial.Rd | 230 ++
man/nakagami.Rd | 102 +
man/nakagamiUC.Rd | 86 +
man/nbolf.Rd | 154 +
man/negbinomial.Rd | 214 +
man/normal1.Rd | 84 +
man/notdocumentedyet.Rd | 425 ++
man/nzc.Rd | 47 +
man/oxtemp.Rd | 30 +
man/paralogistic.Rd | 95 +
man/pareto1.Rd | 170 +
man/paretoIV.Rd | 152 +
man/persp.qrrvglm.Rd | 214 +
man/plotdeplot.lmscreg.Rd | 101 +
man/plotqrrvglm.Rd | 75 +
man/plotqtplot.lmscreg.Rd | 110 +
man/plotvgam.Rd | 143 +
man/plotvgam.control.Rd | 90 +
man/pneumo.Rd | 40 +
man/poissonff.Rd | 144 +
man/polf.Rd | 161 +
man/polonoUC.Rd | 87 +
man/posbinomUC.Rd | 96 +
man/posbinomial.Rd | 79 +
man/posnegbinomial.Rd | 156 +
man/posnormUC.Rd | 64 +
man/posnormal1.Rd | 102 +
man/pospoisUC.Rd | 92 +
man/pospoisson.Rd | 84 +
man/predict.vglm.Rd | 139 +
man/prentice74.Rd | 108 +
man/probit.Rd | 116 +
man/put.smart.Rd | 71 +
man/qrrvglm.control.Rd | 484 +++
man/qtplot.gumbel.Rd | 112 +
man/qtplot.lmscreg.Rd | 82 +
man/quasibinomialff.Rd | 149 +
man/quasipoissonff.Rd | 129 +
man/rayleigh.Rd | 92 +
man/rcqo.Rd | 380 ++
man/rdiric.Rd | 61 +
man/recexp1.Rd | 77 +
man/reciprocal.Rd | 102 +
man/recnormal1.Rd | 97 +
man/rhobit.Rd | 111 +
man/rig.Rd | 55 +
man/rlplot.egev.Rd | 142 +
man/rposnegbin.Rd | 85 +
man/rrar.Rd | 105 +
man/rrvglm-class.Rd | 250 ++
man/rrvglm.Rd | 289 ++
man/rrvglm.control.Rd | 215 +
man/rrvglm.optim.control.Rd | 54 +
man/s.Rd | 107 +
man/setup.smart.Rd | 83 +
man/simplex.Rd | 74 +
man/sinmad.Rd | 102 +
man/skewnormal1.Rd | 111 +
man/smart.expression.Rd | 36 +
man/smart.mode.is.Rd | 57 +
man/smartpred.Rd | 214 +
man/snormUC.Rd | 81 +
man/sratio.Rd | 124 +
man/studentt.Rd | 75 +
man/tikuv.Rd | 114 +
man/tikuvUC.Rd | 87 +
man/tobit.Rd | 127 +
man/tparetoUC.Rd | 77 +
man/trplot.Rd | 81 +
man/trplot.qrrvglm.Rd | 167 +
man/undocumented-methods.Rd | 222 ++
man/uqo.Rd | 269 ++
man/uqo.control.Rd | 266 ++
man/usagrain.Rd | 42 +
man/venice.Rd | 72 +
man/vgam-class.Rd | 237 ++
man/vgam.Rd | 289 ++
man/vgam.control.Rd | 172 +
man/vglm-class.Rd | 217 ++
man/vglm.Rd | 370 ++
man/vglm.control.Rd | 153 +
man/vglmff-class.Rd | 233 ++
man/vonmises.Rd | 120 +
man/vsmooth.spline.Rd | 185 +
man/waitakere.Rd | 58 +
man/wald.Rd | 75 +
man/weibull.Rd | 148 +
man/weightsvglm.Rd | 134 +
man/wrapup.smart.Rd | 37 +
man/yeo.johnson.Rd | 79 +
man/yip88.Rd | 147 +
man/zanegbinomial.Rd | 156 +
man/zapoisson.Rd | 137 +
man/zero.Rd | 104 +
man/zeta.Rd | 118 +
man/zetaff.Rd | 101 +
man/zibinomUC.Rd | 83 +
man/zibinomial.Rd | 123 +
man/zipf.Rd | 89 +
man/zipfUC.Rd | 60 +
man/zipoisUC.Rd | 78 +
man/zipoisson.Rd | 132 +
src/cqof.f | 2057 ++++++++++
src/fgam.f | 642 +++
src/gautr.c | 334 ++
src/lerchphi.c | 457 +++
src/lms.f | 194 +
src/muxr.c | 453 +++
src/rgam.f | 554 +++
src/tyeepolygamma.f | 130 +
src/vcall2.f | 10 +
src/veigen.f | 779 ++++
src/vgam.f | 1341 +++++++
src/vlinpack1.f | 81 +
src/vlinpack2.f | 298 ++
src/vlinpack3.f | 662 ++++
src/vmux.f | 578 +++
src/zeta.f | 138 +
392 files changed, 85323 insertions(+)
diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100755
index 0000000..0cd64e2
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,18 @@
+Package: VGAM
+Version: 0.7-1
+Date: 2006-10-24
+Title: Vector Generalized Linear and Additive Models
+Author: Thomas W. Yee <t.yee at auckland.ac.nz>
+Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
+Depends: R (>= 2.4.0), splines, methods, stats, stats4
+Description: Vector generalized linear and additive models, and
+ associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
+ Reduced-Rank VGAMs). This package fits many models and
+ distribution by maximum likelihood estimation (MLE) or
+ penalized MLE. Also fits constrained ordination models in
+ ecology.
+License: GPL version 2
+URL: http://www.stat.auckland.ac.nz/~yee/VGAM
+LazyLoad: yes
+LazyData: yes
+Packaged: Tue Oct 24 17:49:40 2006; yee
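
For orientation, a minimal sketch of the kind of fit the Description
above refers to; the data frame mydata and its columns y and x are
hypothetical:

    library(VGAM)
    # Poisson regression as a one-parameter VGLM; poissonff is a
    # family function exported by the package (see NAMESPACE below).
    fit <- vglm(y ~ x, family = poissonff, data = mydata)
    coef(fit)
    summary(fit)
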
diff --git a/DISCLAIMER b/DISCLAIMER
new file mode 100755
index 0000000..c3c3f73
--- /dev/null
+++ b/DISCLAIMER
@@ -0,0 +1,9 @@
+The VGAM package for R is still in the development stage, meaning that
+new features are still being added and bugs are still being found on a
+regular basis. This product is available on a use-at-your-own-risk basis:
+the Author assumes no liability for loss or damage of any kind resulting
+from the use of this product. The code is distributed in the hope that
+it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..a714643
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,284 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+useDynLib(VGAM)
+
+
+
+
+
+
+export(
+procVec,
+rss.vgam,
+vcontrol.expression,
+vplot, vplot.default, vplot.factor, vplot.list,
+vplot.matrix, vplot.numeric, vvplot.factor)
+
+export(
+d2theta.deta2, Deviance.categorical.data.vgam,
+lm2qrrvlm.model.matrix,
+m2avglm,
+dimm)
+
+
+
+export(lm, glm, predict.lm, predict.mlm, predict.glm)
+
+export(is.smart, smart.mode.is, wrapup.smart, setup.smart, my1, my2)
+export(
+smart.expression,
+get.smart, get.smart.prediction,
+put.smart)
+
+
+export( bs, ns, scale.default, poly )
+
+
+
+export(iam,
+fill, fill1, fill2, fill3,
+freund61,
+frechet2, frechet3, dfrechet, pfrechet, qfrechet, rfrechet,
+frank, dfrank, pfrank, rfrank,
+benini, dbenini, pbenini, qbenini, rbenini,
+maxwell, dmaxwell, pmaxwell, qmaxwell, rmaxwell,
+erf, erfc, lerch,
+tpareto1, dtpareto, qtpareto, rtpareto, ptpareto,
+pareto1, dpareto, qpareto, rpareto, ppareto,
+paretoIV, dparetoIV, qparetoIV, rparetoIV, pparetoIV,
+paretoIII, dparetoIII, qparetoIII, rparetoIII, pparetoIII,
+paretoII, dparetoII, qparetoII, rparetoII, pparetoII,
+dparetoI, qparetoI, rparetoI, pparetoI,
+cgumbel, egumbel, gumbel,
+dgumbel, pgumbel, qgumbel, rgumbel,
+cnormal1, dcnormal1,
+recnormal1, recexp1,
+crayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh,
+dinv.gaussian, pinv.gaussian, wald, expexp1, expexp)
+
+
+export(A1A2A3, a2m, AAaa.nohw,
+anova.vgam,
+anova.vglm,
+beta4,
+bisa, dbisa, pbisa, qbisa, rbisa,
+betabin.ab, betabinomial,
+dbetabin, pbetabin, rbetabin, dbetabin.ab, pbetabin.ab, rbetabin.ab,
+biplot.qrrvglm,
+borel.tanner,
+cauchy1, ccoef.cao, ccoef.Coef.cao,
+ccoef.Coef.qrrvglm, ccoef.qrrvglm, cdf, cdf.lms.bcg, cdf.lms.bcn,
+cdf.lms.yjn, cdf.vglm,
+Coef.cao, Coefficients, coefqrrvglm,
+coefvlm,
+coefvsmooth.spline, coefvsmooth.spline.fit, constraints,
+constraints.vlm,
+deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
+deplot.lms.yjn, deplot.vglm,
+deviance.uqo, deviance.vglm, deviance.vlm, df.residual.vlm,
+dirmultinomial, dirmul.old,
+dnorm2,
+dtheta.deta)
+
+export(cloglog,cauchit,elogit,fisherz,logc,loge,logit,logoff,nreciprocal,
+ probit,reciprocal,rhobit,
+ golf,polf,nbolf,Cut)
+
+
+export(m2adefault,
+erlang,
+family.vglm,
+fitted.values.uqo, fitted.vlm, fittedvsmooth.spline, fsqrt,
+garma, gaussianff,
+hyper.secant,
+hyper,
+invbinomial, InverseBrat, inverse.gaussianff, inv.gaussianff,
+is.Numeric,
+mccullagh89, leipnik, levy,
+lms.bcg.control, lms.bcn.control, lmscreg.control, lms.yjn.control,
+dbilogis4, pbilogis4, rbilogis4, bilogistic4,
+logistic1, logistic2,
+logLik.vlm, lv.cao, lv.Coef.cao, lv.Coef.qrrvglm,
+lvplot.cao, lv.qrrvglm,
+Max.Coef.qrrvglm, Max.qrrvglm,
+is.bell.vlm, is.bell.rrvglm, is.bell.qrrvglm, is.bell.cao, is.bell,
+model.matrix.qrrvglm,
+model.matrixvlm,
+model.framevlm,
+nakagami, dnaka, pnaka, qnaka, rnaka,
+namesof,
+nlminbcontrol, nloge,
+Opt.Coef.qrrvglm, Opt.qrrvglm, persp.cao)
+
+export( micmen )
+
+export( plot.cao,
+plotpreplotvgam,
+plotvglm, plotvlm,
+plotvsmooth.spline, pnorm2, powl,
+predict.cao, predictcao,
+predictors, predictors.vglm,
+predict.qrrvglm, predict.rrvglm, predict.uqo, predict.vgam,
+predict.vglm, predict.vlm, predictvsmooth.spline,
+predictvsmooth.spline.fit,
+process.binomial2.data.vgam, process.categorical.data.vgam,
+qtplot,
+qtplot.default, qtplot.gumbel, qtplot.lms.bcg,
+qtplot.lms.bcn, qtplot.lms.yjn, qtplot.vextremes, qtplot.vglm,
+rlplot,
+rlplot.egev, rlplot.gev,
+rlplot.vextremes, rlplot.vglm,
+quasiff,
+rlplot, rlplot.vglm, rrar.control,
+rrvglm.control.Gaussian)
+
+export(
+cexpon,
+simple.exponential, simple.poisson, size.binomial,
+stdze1, stdze2,
+summary.cao, summary.grc, summary.lms, summary.qrrvglm,
+summary.rc.exponential, summary.rrvglm, summary.uqo, summaryvgam,
+summaryvglm, summaryvlm, s.vam, terms.vlm,
+theta2eta, Tol.Coef.qrrvglm, Tol.Coef.uqo, Tol.qrrvglm, Tol.uqo,
+vglm.garma.control, vglm.multinomial.control,
+vglm.multinomial.deviance.control, vglm.vcategorical.control,
+vlm, vlm.control,
+vnonlinear.control,
+wweights, yeo.johnson,
+zipf, dzipf, pzipf,
+zeta, zetaff,
+dzeta)
+
+
+export(lm2vlm.model.matrix)
+
+
+
+ importFrom(stats, model.matrix)
+ importFrom(stats, model.frame)
+ importFrom(stats, terms)
+
+
+
+
+
+
+export(ddagum, rdagum, qdagum, pdagum, dagum)
+export(dfisk, pfisk, qfisk, rfisk, fisk)
+export(dlomax, plomax, qlomax, rlomax, lomax)
+export(dinvlomax, pinvlomax, qinvlomax, rinvlomax, invlomax)
+export(dparalogistic, pparalogistic, qparalogistic, rparalogistic,
+ paralogistic)
+export(dinvparalogistic, pinvparalogistic, qinvparalogistic, rinvparalogistic,
+ invparalogistic)
+export(dsinmad, psinmad, qsinmad, rsinmad, sinmad)
+export(lognormal)
+export(dpolono, rpolono)
+export(dgpd, pgpd, qgpd, rgpd, gpd)
+export(dgev, pgev, qgev, rgev, gev, egev)
+export(dlaplace, plaplace, qlaplace, rlaplace)
+export(fff, fff.control,
+ vonmises)
+
+
+export(
+AA.Aa.aa, AB.Ab.aB.ab2, AB.Ab.aB.ab, ABO, acat,
+betaff, betaffqn,
+dbetageom, pbetageom, rbetageom, betageometric,
+betaprime,
+betaII, binom2.or, binom2.rho, binomialff, biplot.rrvglm, brat,
+bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
+calibrate, cao.control,
+cao, ccoef, cdf.lmscreg, cgo, chisq, clo,
+Coef.qrrvglm, Coef, Coef.rrvglm, Coef.vlm,
+cratio, cumulative, deplot.lmscreg, dirichlet,
+exponential, G1G2G3)
+
+export(
+lgammaff, lgamma3ff)
+export(
+mckaygamma2, gammahyp,
+ggamma, gamma1, gamma2, gamma2.ab, gammaff)
+export(dlgamma, plgamma, qlgamma, rlgamma)
+export(dggamma, pggamma, qggamma, rggamma)
+
+
+export(
+genbetaII, genpoisson, geometric,
+dlino, plino, qlino, rlino, lino,
+grc,
+dhzeta, phzeta, qhzeta, rhzeta, hzeta,
+nidentity, identity,
+prentice74,
+lms.bcg, lms.bcn,
+lms.yjn,
+logff, dlog, plog, rlog,
+loglinb2, loglinb3,
+loglog, lognormal3, lvplot.qrrvglm,
+lvplot, lvplot.rrvglm, lv, Max, MNSs, multinomial)
+
+
+export(
+meplot, meplot.default, meplot.vlm,
+guplot, guplot.default, guplot.vlm,
+negbinomial, normal1, tobit, Opt,
+persp.qrrvglm, plotdeplot.lmscreg, plotqrrvglm, plotqtplot.lmscreg,
+plotvgam.control, plotvgam,
+poissonff,
+dposnorm, pposnorm, qposnorm, rposnorm, posnormal1,
+dposbinom, pposbinom, qposbinom, rposbinom, posbinomial,
+posnegbinomial,
+dpospois, ppospois, qpospois, rpospois, pospoisson,
+qtplot.lmscreg, quasibinomialff, quasipoissonff, rdiric,
+rig, rposnegbin,
+rrar, rrvglm.control,
+rrvglm.optim.control)
+
+export(eta2theta,
+rrvglm, simplex,
+sratio, s, studentt, Tol, trplot.qrrvglm,
+trplot,
+rcqo,
+cqo,
+qrrvglm.control,
+uqo.control, uqo,
+vgam.control, vgam, vglm.control, vglm,
+vsmooth.spline,
+weibull, yip88, zanegbinomial, zapoisson,
+dzipois, pzipois, qzipois, rzipois, zipoisson,
+mix2normal1, mix2poisson,
+skewnormal1, dsnorm, rsnorm,
+tikuv, dtikuv, ptikuv, qtikuv, rtikuv,
+dzibinom, pzibinom, qzibinom, rzibinom, zibinomial)
+
+
+
+
+
+
+exportClasses("vglmff", "vlm", "vglm", "vgam", "summary.vgam",
+"summary.vglm","summary.vlm", "rrvglm", "qrrvglm", "grc",
+"vlmsmall", "uqo", "cao", "Coef.rrvglm",
+"Coef.uqo", "Coef.qrrvglm", "summary.qrrvglm",
+"vcov.qrrvglm", "summary.rrvglm",
+"vsmooth.spline.fit", "vsmooth.spline")
+
+
+
+exportMethods(
+"coef", "Coef", "coefficients", "constraints",
+"effects", "fitted", "fitted.values", "predict",
+"print", "resid", "residuals", "show",
+"summary", "terms", "model.frame", "model.matrix",
+"deviance", "logLik", "vcov",
+"calibrate", "cdf", "ccoef", "df.residual",
+"lv", "Max", "Opt", "Tol",
+"biplot", "deplot", "lvplot", "qtplot", "rlplot", "meplot",
+"plot", "trplot", "vplot",
+"weights",
+"persp")
+
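
The exported generics above (Coef, ccoef, lvplot, Tol, Max, Opt, ...)
make up the ordination interface. A hedged sketch of how they combine
for a constrained quadratic ordination fit; the data frame envdata and
all variable names in it are hypothetical:

    # CQO of two species' counts on two environmental gradients.
    fit <- cqo(cbind(sp1, sp2) ~ x1 + x2, family = poissonff,
               data = envdata)
    Coef(fit)     # canonical coefficients, optima, tolerances
    ccoef(fit)    # just the constrained (canonical) coefficients
    lvplot(fit)   # latent variable (ordination) plot
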
diff --git a/NEWS b/NEWS
new file mode 100755
index 0000000..2b9dad3
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,738 @@
+ **************************************************
+ * *
+ * 0.7 SERIES NEWS *
+ * *
+ **************************************************
+
+
+
+ CHANGES IN VGAM VERSION 0.7-1
+
+NEW FEATURES
+
+ o VGAM family functions now require full (name) specification
+ of parameter link functions. For example, binomialff(link=probit)
+ is ok, as is binomialff(link="probit"), but binomialff(link="pr")
+ isn't.
+ VGAM family functions no longer offer a fixed set of link
+ functions but the user can invoke any, as well as write their
+ own link function.
+ o Working residuals for vglm() objects are now the default.
+ They used to be deviance residuals, but deviance residuals
+ are not defined for most VGAM family functions.
+ In the future the default may become "pearson" residuals.
+ For safety, use the type argument, e.g.,
+ resid(vglmobject, type="response").
+ o ITolerances=TRUE is now the default for qrrvglm.control(),
+ consequently, equal-tolerances CQO models are fitted. The
+ rationale for this change is that setting ITolerances=TRUE
+ provides the fastest computational speed as well as the easiest
+ interpretation of the results. Also, rcqo() matches this by
+ having EqualTolerances=TRUE as its default. However, the
+ equal-tolerances assumption should be checked.
+ o New VGAM family functions: tikuv(dpqr), [dpqr]naka(), [dpr]log(),
+ [dpqr]tpareto1(), betabinomial().
+ o VGAM family functions which have been renamed (and often improved):
+
+ New name Old name
+ -------- --------
+ dirmultinomial() dirmul()
+ negbinomial() negbin.mu()
+ negbinomial.ab() negbin.mn()
+ posnegbinomial() posnegbin.mu()
+ zanegbinomial() zanegbin.mu()
+ rposnegbin() rposnegbin.mu()
+ gamma2() gamma2.ab()
+ gamma2mu() gamma2()
+
+
+ o New functions: lerch(), rcqo().
+ o In the smartpred package smart.mode.is(mode.arg) now requires
+ mode.arg, if given, to be exactly one of 3 character strings.
+ Also, is.smart(object) handles object being a smart function
+ or a fitted object.
+ o The VGAM package comes with modified lm, predict.lm, predict.glm,
+ predict.mlm, glm functions---these implement smart prediction, and
+ are current to R version 2.3.1 (2006-06-01).
+ o The order of the linear/additive predictors for expexp()
+ has been switched.
+ o weibull(zero=2) is the default now.
+ o negbin.mu(), posnegbin.mu(), zanegbin.mu: these have a few added
+ arguments for further flexibility, and some arguments have changed
+ names, e.g., 'k.init' has been changed to 'ik' and 'link.mu' to
+ 'lmu'.
+ o Negative binomial random variates are now generated using
+ rnbinom() in the stats package rather than rnegbin() in the MASS
+ package.
+ o binom2.or() and binom2.rho() have more choices for some arguments
+ such as lor and lrho.
+ o Initial values have been improved for logff(), zipf() and zetaff().
+ o This package should work for R 2.4.0 after additional tweaks to
+ handle changes in show().
+
+
+BUG FIXES
+
+ o pbetabin() had a bug.
+ o studentt() has a mean of 0 only if df > 1.
+ o garma() failed for link="loge". It now works for binary
+ data with the "logit" link.
+ o Internally, wz <- matrix(NA, ...) changed to
+ wz <- matrix(as.numeric(NA), ...).
+ Ditto for rep(NA, ...) to rep(as.numeric(NA), ...).
+ o tobit() had a bug in the initialize slot.
+ o rposnegbin.mu() now calls the MASS library function rnegbin()
+ explicitly.
+ o gammaff() now works.
+ o Working residuals for cao() objects were wrong.
+ o lvplot() for cao() objects has a whichSpecies argument
+ which allows selective plotting of the species' curves.
+ o gaussianff() did not work with rrvglm(). It now has a
+ loglikelihood slot, and returns deviance residuals for M>1.
+
+
+CHANGES
+
+ o gaussianff() and studentt() have the order of their arguments changed.
+ o eta2theta(), theta2eta(): if these have a matrix "theta" then it
+ no longer calls the VGAM link function one column at a time.
+ Hence VGAM link functions must handle matrix "theta" using one
+ value of "earg" argument.
+ o The earg argument has changed for many VGAM link functions. It
+ is now a list, with component names that are specific to each
+ link function.
+ See the online help files for the list component names.
+ Soon, every VGAM family function that allows a link function
+ will have an earg argument to match it, thus giving maximum
+ flexibility.
+
+
+
+
+ **************************************************
+ * *
+ * 0.6 SERIES NEWS *
+ * *
+ **************************************************
+
+
+
+ CHANGES IN VGAM VERSION 0.6-9
+
+
+NEW FEATURES
+
+ o New VGAM family functions: lino(dpqr), recexp1(),
+ posnormal1(dpqr), betageometric(dpr), [dr]polono(),
+ [dpr]betabin(), gamma2mu(), bisa(dpqr), zipf(dp).
+ There is a new dirmul() (the old one is renamed to dirmul.old())
+ but it hasn't yet been completed.
+ o Renamed VGAM family functions: beta2() changed to betaff().
+ o Renamed VGAM functions: is.a.number() changed to is.Numeric().
+ o The Windows crossbuild was done under R 2.3.0.
+
+BUG FIXES
+
+ o Family functions lognormal(), lognormal3() now include the
+ 1/sqrt(2*pi) constant in @loglikelihood because of their use of
+ dnorm(..., log=TRUE) and dlnorm(..., log=TRUE).
+ o [dpqr]lognormal() withdrawn as they exist in R already.
+ o Documentation for betaff() contained mistakes.
+ o summary() of a betabin.ab() object used to fail.
+ o The assign statement has been removed from some FORTRAN code.
+
+
+
+ CHANGES IN VGAM VERSION 0.6-8
+
+NEW FEATURES
+
+ o New VGAM family functions: recnormal1(), recexp1(),
+ paretoIV(dpqr), paretoIII(dpqr), paretoII(dpqr),
+ gammahyp(), benini(dpqr).
+ However, the fitted value (mean) for benini() may be faulty.
+ o Decommissioned VGAM family functions: gpdold(), ogev(),
+ zipoissonX().
+ o gumbel.block() renamed to gumbel(), and gumbel() renamed to egumbel().
+ o Argument names and defaults have changed for: gpd(), egev(), gev(),
+ ogev(), cgumbel(), egumbel(), gumbel(), and weibull().
+ Also, gpd(), gev() and egev() have some improvements done internally.
+ Also, rlplot() is new.
+ o Several family functions have been converted to a new convention
+ whereby ilocation, iscale, ishape arguments are used, and also
+ llocation, lscale, lshape arguments for the link functions etc.
+ o New link function: nidentity(theta) for negative-identity: -theta.
+ o New argument "untransform" in predict() and vcov() for VGLMs.
+ o For intercept-only models, Coef(fit) returns more user-friendly
+ labelled output.
+
+
+BUG FIXES
+
+ o ppareto() had a bug.
+ o gpd() had an incorrect second derivative.
+
+
+
+ CHANGES IN VGAM VERSION 0.6-7
+
+NEW FEATURES
+
+ o New VGAM family functions: bilogistic4(dpr), frechet2(),
+ frechet3(), freund61(), frank(dpr), mccullagh89().
+ o For cao(), df1.nl has a default of 2.5, changed from 2.0 before.
+ o For vglm(), vgam() etc., diagonal elements of the working weight
+ matrices that are less than .Machine$double.eps^0.75 are replaced
+ by this value. The arguments checkwz and wzepsilon support this
+ feature.
+ o More documentation on: fill() [re. the xij argument], cauchy1().
+ o logistic2() now uses Fisher scoring.
+ o Argument init.method changed to method.init in several family
+ functions.
+ o Any non-smart-prediction use of smartpredenv has been changed
+ to VGAMenv.
+
+
+BUG FIXES
+
+ o rayleigh() was not in NAMESPACE.
+ o logistic1() and logistic2() had wrong first derivatives and
+ loglikelihood function.
+ logistic1() offers some choice of link function for the location
+ parameter.
+
+
+
+ CHANGES IN VGAM VERSION 0.6-6
+
+NEW FEATURES
+
+ o New functions: zibinomial(), zibinom(dpqr), posbinom(dpqr),
+ mix2normal1(), mix2poisson(), dsnorm(), rsnorm(),
+ cexpon(), cgumbel(), cnormal1(), hyper().
+ o New generic functions: is.bell() works for RR-VGLMs, QRR-VGLMs
+ and RR-VGAMs (CLO, CQO and CAO, respectively).
+ o normal1() has a new (first) argument: lmean for the mean.
+ o Documentation for skewnormal1() and snorm(dr).
+
+
+BUG FIXES
+
+ o tobit() now implements Fisher scoring properly.
+ o Coef.vlm() needed to test for trivial constraints.
+ o skewnorm1() had a bug in it. It has been fixed and renamed to
+ skewnormal1().
+ o cao() had a problem with the variable "usethiseta" when it had
+ possible NAs.
+ o An appropriate error message is given if residuals=TRUE in the
+ call to @loglikelihood, for all VGAM family functions.
+ o Two unneeded lines in rgam.f have been removed.
+
+
+
+ CHANGES IN VGAM VERSION 0.6-5
+
+NEW FEATURES
+
+ o New functions: guplot(), meplot(), ggamma(dpqr), fff(),
+ vonmises(), lgamma3ff, lgamma(dpqr), prentice74, tobit,
+ zipoisson(dpqr), [dpqr]pospois(), laplace(dpqr) but there is
+ no laplace().
+ o cqo() has been largely rewritten. It now sports a new algorithm
+ for ITolerances=TRUE. It can handle large data sets (e.g., 1000
+ sites with 100 species). Compared to other cqo() options, it is
+ the fastest. There are a few things to learn though to take full
+ advantage of the new algorithm, e.g., centering the variables.
+ o Windows version is cross built with R 2.2.0. The Linux version
+ has been tested with R 2.2.0.
+ o cao() has been largely rewritten. It now should not hang in
+ the Windows cross-build version.
+ o .Init.Poisson.CQO() has been renamed .Init.Poisson.QO(), and also
+ improved (however, it uses more memory by default).
+ o Modelling functions such as vglm(), vgam() and cao() have
+ qr.arg=FALSE now. This means object sizes can be a lot smaller.
+ o The functions positive.poisson(), positive.binomial() etc. have
+ been renamed pospoisson(), posbinomial() etc.
+ o The functions [dpqr]gpd now have a location=0 argument.
+ o Some VGAM family functions will be adapted later to use the
+ BFGS quasi-Newton update for their working weight matrices.
+ o The link function logoff() now works, for constant offsets.
+ Link functions had the argument "extra"; now called "earg"
+ to avoid confusion with the argument "extra" used in vglm() etc.
+ Also, elogit() is new, which allows a parameter to lie between
+ two values (A,B), say.
+
+
+BUG FIXES
+
+ o plotvgam() was incorrect if one of the terms (but not the first)
+ was "x" or a function of "x" such as bs(x).
+ o smart.expression now handles multiple 'arguments' by choosing
+ the first, which is the smart function name.
+ o lv(rrvglm.object) failed.
+
+
+
+ CHANGES IN VGAM VERSION 0.6-4
+
+NEW FEATURES
+
+ o New family functions: betabin.ab(), betaprime(), dcnormal1(),
+ erlang(), expexp(), inv.gaussianff(), maxwell(), mckaygamma2(),
+ nakagami(), pareto1(), rayleigh(), wald().
+ Of these, Pareto, Rayleigh and Maxwell have random number
+ generation etc.
+ o If criter="coef" and trace=TRUE, then the number of
+ decimal places used to print the estimated coefficients at
+ each iteration is proportional to the control constant
+ epsilon.
+ o tanl() has been renamed to cauchit(), and appropriate
+ family functions reflect this change, i.e.,
+ link="cauchit" instead of link="tanl".
+ o size.binomial() has been improved.
+ o Documentation for gamma1(), gamma2().
+
+
+BUG FIXES
+
+ o The documentation for the reverse argument in cumulative(),
+ cratio(), etc. was incorrect.
+ o vcov() didn't work on the windows version.
+ o cao() still hangs under the windows version, so hopefully
+ this bug will be fixed soon!
+
+
+
+
+ CHANGES IN VGAM VERSION 0.6-3
+
+NEW FEATURES
+
+ o Built with R 2.1.0 for the .zip file (Windows version) and
+ deposited in the right directory at www.stat.auckland.ac.nz.
+ o More documentation, e.g., fitted(), yeo.johnson(),
+ dirmul().
+ o zeta() and zetaff() have been improved and/or corrected.
+ o The family functions binomial, poisson, quasibinomial,
+ quasipoisson, gaussian, inverse.gaussian, Gamma
+ have been withdrawn because of interference with glm().
+
+
+
+ CHANGES IN VGAM VERSION 0.6-2
+
+NEW FEATURES
+
+ o model.frame() and model.matrix() are roughly working for
+ objects that inherit from "vlm"s, e.g., "vglm" objects.
+ Both of these methods functions accept a "data"
+ argument etc.
+ Also, for these, smart prediction works.
+ o A methods function for the generic function weights() has
+ been written for VGLM objects. It returns
+ either the prior or working weights.
+
+
+BUG FIXES
+
+ o The Crow1positive argument in cao() did not function correctly.
+ o The family functions dagum, fisk, lomax, invlomax, paralogistic,
+ invparalogistic, lognormal were not exported in the NAMESPACE
+ file.
+ o Functions in gaut.c and mux.c used "long" to represent integers.
+ In R, these should be "int". Although these are equivalent on
+ 32-bit machines, they differ on 64-bit machines and crash.
+ The files are now renamed to gautr.c and muxr.c in R.
+ o summary(cao.object) failed.
+
+
+
+ CHANGES IN VGAM VERSION 0.6-1
+
+NEW FEATURES
+
+ o New functions: cao() for "constrained additive ordination",
+ and uqo() for "unconstrained quadratic ordination".
+ Both of these are unfinished but will hopefully be
+ completed in the foreseeable future.
+ o The function cgo() has been renamed to cqo(). Ouch!
+ CQO stands for "constrained quadratic ordination", and is
+ better than the old name cgo(), for
+ canonical Gaussian ordination.
+ o The inverse() link function has been renamed to reciprocal().
+ o More documentation: loglinb2() and loglinb3().
+ o zipbipp() renamed to zapoisson(), where "za" stands for
+ "zero-altered". This is more in line with the literature.
+ New families: zanegbin.mu, positive.negbin.mu.
+ New random variates: rposnegbin.mu, rpospois.
+ o negbin.mu() works now for cgo(). The subsequent methods
+ functions have been adapted to work on it too.
+ However, negbin.mu() is not recommended because maximum
+ likelihood estimation of the index parameter is fraught
+ numerically. It is better to use quasipoissonff().
+ o cgo() now uses the function .Init.Poisson.CGO() to obtain
+ initial values for the canonical coefficients, C.
+ The argument Use.Init.Poisson.CGO in qrrvglm.control()
+ now controls this feature.
+ o Lazy loading has been enabled for the VGAM package.
+ o Name spaces have been introduced into the VGAM package.
+ The consequences of this might be far-reaching for
+ code heavily based on the internals of the VGAM package.
+ o The application of name spaces means "ff" can be dropped
+ from certain family functions. In particular, poisson() can
+ be used instead of poissonff(), and binomial() instead
+ of binomialff(). Ditto for quasipoissonff() and
+ quasibinomialff().
+ o names.of() changed to namesof(). Many other function names
+ have been changed, particularly those of the S3 methods
+ such as coef.something, e.g., coef.vlm to coefvlm.
+ In general, S3 methods functions such as print.summary.vlm have
+ the first "." deleted, but classes such as "summary.vlm" retain the
+ ".", and the function is printsummary.vlm.
+
+BUG FIXES
+
+ o Some documentation regarding the negative binomial
+ distribution was wrong.
+ o The digamma function in FORTRAN was buggy.
+ o gumbel.block() now handles a vector response (equivalently,
+ a one column matrix) and the deviance has been decommissioned.
+ Instead, the log-likelihood is computed.
+
+
+
+
+ **************************************************
+ * *
+ * 0.5 SERIES NEWS *
+ * *
+ **************************************************
+
+
+
+ CHANGES IN VGAM VERSION 0.5-24
+
+NEW FEATURES
+
+ o zipbipp() and zipoissonX() are new alternatives to yip88().
+ They fit a zero-inflated Poisson distribution.
+ Both can handle covariates for both parameters (p0 or
+ phi, and lambda.)
+ zipbipp() is recommended over the others.
+ zipoissonX() is experimental at this stage
+ and should be used with caution.
+ rpospois() is new.
+ o More documentation: rhobit and binom2.rho.
+ o binom2.or() now has lp1 and lp2 arguments, which allow
+ a different link function for each of the two marginal
+ probabilities.
+ o bratt() is a new family function. It fits the Bradley Terry
+ model with ties.
+ o flush.console() is used if it exists. This will make
+ the Windows version nicer for large data sets and when
+ trace=TRUE is used.
+ o wweights() extracts the working weights of an object.
+ Used to be called vweights().
+
+
+
+
+ CHANGES IN VGAM VERSION 0.5-23
+
+
+NEW FEATURES
+
+ o The package works under the latest version, viz. 2.0.0.
+ There are fewer warning messages when checking :)
+ o persp() for CGO objects now handles Rank=1 models.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-22
+
+BUG FIXES
+
+ o plot.vgam(..., overlay=TRUE, which.cf=1:2) was incorrect.
+
+NEW FEATURES
+
+ o Demo files are now available for VGAM. These include lmsqreg,
+ distributions, and cgo. More will be added later.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-21
+
+BUG FIXES
+
+ o .Rd files adapted to reflect new changes in the library names.
+ o persp.qrrvglm() argument whichSpecies was faulty.
+ o gpd()@inverse returned erroneous centiles.
+ o Coef(cgo(..., FastAlgorithm=TRUE)) produced wrong results.
+
+NEW FEATURES
+
+ o cgo(..., FastAlgorithm=TRUE) has been fine-tuned to give
+ greater speed and accuracy.
+ o lms.yjn() uses FORTRAN code to implement the Gauss-Legendre
+ algorithm. This results in greater accuracy.
+ o More documentation, especially for family functions for
+ extreme values modelling.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-20
+
+BUG FIXES
+
+ o vglm(y ~ x, binomialff(link=tanl)) used to fail.
+ o The CHECK command failed previously, but now it only
+ gives 5 warnings.
+
+NEW FEATURES
+
+ o persp.qrrvglm() has been written to apply persp() to
+ a rank-2 CGO model.
+ o cgo(..., FastAlgorithm=TRUE) now has a logical argument
+ GradientFunction, which if TRUE (default), computes the
+ derivatives by using finite-difference approximations.
+ The default will cause the speed to generally increase.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-19
+
+BUG FIXES
+
+ o garma() did not coerce the model matrix into the correct class.
+ o fisherz() could not work out the inverse.
+
+NEW FEATURES
+
+ o trplot() is a new generic function, and for objects of
+ class "qrrvglm" (a cgo() object), it produces a trajectory plot
+ for species.
+ o vcov.qrrvglm() now computes standard errors and returns the
+ variance-covariance matrix for rank-1 QRR-VGLMs.
+ o A new fast algorithm is implemented for cgo(..., FastAlgorithm=TRUE)
+ which only works under windows. It is a new undocumented algorithm.
+ o New family functions: lognormal(), lognormal3(), weibull().
+ o New family functions: genbetaII(), betaII(), sinmad(), dagum(),
+ lomax(), invlomax(), fisk(), invparalogistic(), paralogistic().
+ Additionally, d*, r* p* and q* forms of the
+ density/random-generation etc. functions for all of these
+ except for betaII and genbetaII.
+ o New link function for (0,1) parameters: tanl() for tan link.
+ It has a heavier tail and corresponds to a Cauchy distribution
+ (cf. probit for normal).
+ o New family function: brat() for the Bradley Terry model
+ (intercept model only).
+
+
+
+ CHANGES IN VGAM VERSION 0.5-18
+
+NEW FEATURES
+
+ o I've changed deplot.lmscreg() so that the "at" argument is now
+ "y.arg", and the density is returned with name "density" instead
+ of "y". That is, "at" is now "y", and "y" is now "density".
+ o lvplot.rrvglm() and biplot.rrvglm() have been merged and are now
+ equivalent.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-17
+
+BUG FIXES
+
+ o Bestof argument in cgo() and rrvglm() was faulty.
+ o Bug in plot.vgam(type.resid) fixed.
+
+NEW FEATURES
+
+ o Updated to work under R 1.8.1
+ o logLik() and AIC() methods functions supported for many VGAM objects.
+ o lms.bcn.control(), lms.bcg.control(), lms.yjn.control() now
+ have trace=TRUE because monitoring LMS quantile regression models
+ is a good idea.
+ o lms.bcn(), lms.bcg(), lms.yjn() now improved.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-16
+
+BUG FIXES
+
+ o biplot.rrvglm() had an internal bug with @C.
+ o Runs under R 1.8.0 now, having a fix with "qr" slot.
+ o etastart, coefstart, mustart arguments were not functional in vgam().
+ o vchol() did not replace the correct elements; sometimes the index
+ was out of subscript range.
+ o residuals.vlm() tried to evaluate a deviance slot in a "vglmff" object
+ even when it was empty.
+ o Documentation links to functions in other packages now work.
+
+NEW FEATURES
+
+ o lvplot.qrrvglm() has been renamed biplot.qrrvglm().
+ Argument Equal.tolerances changed to EqualTolerances.
+ Argument Circular changed to ITolerances.
+ rrvglm.control() now split into qrrvglm.control() and itself.
+ o cgo() now performs canonical Gaussian ordination.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-15
+
+BUG FIXES
+
+ o Coef.qrrvglm() failed wrt Equal.tolerances and Circular when
+ Rank>2.
+
+NEW FEATURES
+
+ o gco() is now an easier interface for fitting Gaussian canonical
+ ordination models. gco(...) is essentially rrvglm(..., Quadratic=TRUE).
+ o Documentation for deplot.lmscreg(), qtplot.lmscreg(), cdf.lmscreg()
+ and related functions. Also for positive.poisson(),
+ positive.binomial() and yip88().
+ o lvplot.qrrvglm() improved to handle non-diagonal tolerance matrices,
+ and a new Rotate option is available for QRR-VGLMs.
+ o By default, QRR-VGLMs now have the constraint that the latent
+ variables are uncorrelated and have unit variances, i.e.,
+ their variance-covariance matrix is diag(Rank).
+ Also, the Crow1positive argument allows ordinations to be reflected
+ across axes.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-14
+
+BUG FIXES
+
+ o vgam() with s() terms and subset= used to give a bug because the
+ attributes of s() were lost.
+ o summary() of a gaussianff was faulty because control.gaussianff()
+ was called gaussianff.control().
+
+NEW FEATURES
+
+ o install.packages("VGAM", CRAN="http://www.stat.auckland.ac.nz/~yee")
+ now works for PC and Linux/Unix, i.e., the distribution of the
+ VGAM package allows for this type of download.
+ o poissonff(), quasipoissonff(), binomialff() and
+ quasibinomialff() now handle multiple dispersion parameters when
+ mv=TRUE and onedpar=FALSE.
+ o Generic function predictx(), with methods function for "qrrvglm"
+ objects. This solves (with limited functionality) the calibration
+ problem.
+ o predict.qrrvglm() and predict.rrvglm() written (but don't work 100%)
+ o Coef.rrvglm() now returns an S4 object, which can be printed nicely.
+ o summary.qrrvglm() has been improved.
+ o Documentation for poissonff(), quasipoissonff(), binomialff() and
+ quasibinomialff().
+
+
+
+ CHANGES IN VGAM VERSION 0.5-13
+
+BUG FIXES
+
+ o Code with T and F now uses TRUE and FALSE.
+
+NEW FEATURES
+
+ o Documentation for lms.bcn(), lms.bcg(), lms.yjn(), and bmi.
+ Additionally, the overall documentation has been improved
+ throughout.
+ o print.Coef.qrrvglm prints the contents of Coef(qrrvglm.object)
+ in a nicer format. It uses S4 features.
+
+
+ CHANGES IN VGAM VERSION 0.5-12
+
+BUG FIXES
+
+ o The package now works under R 1.7.1. This includes the smart
+ prediction library.
+ o dirichlet(), skewnorm1(), geometric(), gamma2() and erlang()
+ had a bug that has been fixed.
+
+NEW FEATURES
+
+ o Documentation for beta2() and dirichlet().
+ o Easier installation; use something like
+ "R CMD INSTALL -l ./myRlibs VGAM_0.5-12.tar.gz"
+ for a local library.
+
+
+ CHANGES IN VGAM VERSION 0.5-11
+
+BUG FIXES
+
+ o The code has been upgraded to work under R 1.7.0 because
+ of the calls to LAPACK and object oriented features.
+
+NEW FEATURES
+
+ o levy() added, plus grc() documentation.
+ o constraints added to binomialff() and poissonff() since
+ they both handle multivariate responses.
+
+
+ CHANGES IN VGAM VERSION 0.5-10
+
+BUG FIXES
+
+ o Many univariate family functions had a faulty loglikelihood slot.
+ o negbin.mu() was faulty, causing very slow convergence.
+ o Coef.vglm() had a bug due to "fit" rather than "object".
+
+NEW FEATURES
+
+ o logff() added.
+ o The undocumented backchat facility now works for Splus 6.x.
+ This should increase the efficiency of vglm() in particular.
+ Thanks to Insightful and Dr J. Chambers for helping to get it
+ going under the S4 engine.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-9
+
+BUG FIXES
+
+ o binomialff() had a bug in @weight.
+ o binomialff(mv=T) used to fail.
+ o gev(), ogev() and egev() had @loglikelihood that was faulty.
+
+NEW FEATURES
+
+ o .Rd documentation included for vglm(), vgam(), rrvglm(), and
+ associated control and class functions, plus smart prediction.
+
+
+
+ CHANGES IN VGAM VERSION 0.5-8
+
+NEW FEATURES
+
+ o rrvglm() now has a Quadratic argument to implement the class of
+ Quadratic Reduced-rank VGLMs, which gives maximum likelihood
+ solutions to Gaussian canonical ordination problems.
+ Documentation is in rrvglm.pdf
+
+
+
+ CHANGES IN VGAM VERSION 0.5-7
+
+NEW FEATURES
+
+ o vglm() now has an xij argument which implements eta-specific covariates.
+ Documentation is supplied on the VGAM website.
+ o grc() has been written for Goodman's RC association model for a
+ contingency table. Documentation is in rrvglm.pdf
+
+
+
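
Two of the 0.7-1 items above (full link-name specification, and
working residuals as the new default for vglm() objects) can be seen
together in a small sketch; dat, y and x are hypothetical:

    # Full link names are required: link = "probit" or link = probit
    # is accepted, but an abbreviation such as link = "pr" is not.
    fit <- vglm(y ~ x, family = binomialff(link = "probit"), data = dat)
    resid(fit)                     # working residuals, the new default
    resid(fit, type = "response")  # safer: name the type explicitly
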
diff --git a/R/aamethods.q b/R/aamethods.q
new file mode 100644
index 0000000..5b5b0f6
--- /dev/null
+++ b/R/aamethods.q
@@ -0,0 +1,546 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+if(!exists("is.R")) is.R <- function()
+ exists("version") && !is.null(version$language) && version$language=="R"
+
+is.Numeric <- function(x, allowable.length=Inf, integer.valued=FALSE, positive=FALSE)
+ if(all(is.numeric(x)) && all(is.finite(x)) &&
+ (if(is.finite(allowable.length)) length(x)==allowable.length else TRUE) &&
+ (if(integer.valued) all(x==round(x)) else TRUE) &&
+ (if(positive) all(x>0) else TRUE)) TRUE else FALSE
+
+
+if(is.R())
+ VGAMenv = new.env()
+
+
+
+
+
+if(is.R()) {
+ .onLoad <- function(lib, pkg) require(methods) # 25/1/05
+
+
+
+ if(!any(search()=="package:methods"))
+ library(methods)
+
+ if(!any(search()=="package:splines"))
+ require(splines)
+
+}
+
+
+
+
+
+
+
+
+.VGAM.prototype.list = if(is.R())
+list(
+ "constraints" = expression({}),
+ "fini" = expression({}),
+ "first" = expression({}),
+ "initialize" = expression({}),
+ "last" = expression({}),
+ "middle" = expression({}),
+ "middle2" = expression({}),
+ "deriv" = expression({}),
+ "weight" = expression({})) else
+list(
+ "blurb" = "",
+ "constraints" = expression({}),
+ "deviance" = function() {},
+ "fini" = expression({}),
+ "first" = expression({}),
+ "initialize" = expression({}),
+ "inverse" = function() {},
+ "last" = expression({}),
+ "link" = function() {},
+ "loglikelihood"= function() {},
+ "middle" = expression({}),
+ "middle2" = expression({}),
+ "summary.dispersion" = FALSE,
+ "vfamily" = "",
+ "deriv" = expression({}),
+ "weight" = expression({})) # Splus doesn't use prototypes
+
+
+if(is.R())
+setClass("vglmff", representation(
+ "blurb" = "character",
+ "constraints" = "expression",
+ "deviance" = "function",
+ "fini" = "expression",
+ "first" = "expression",
+ "initialize" = "expression",
+ "inverse" = "function",
+ "last" = "expression",
+ "link" = "function",
+ "loglikelihood"= "function", # problem: zz function() NULL if unspecified
+ "middle" = "expression",
+ "middle2" = "expression",
+ "summary.dispersion" = "logical",
+ "vfamily" = "character",
+ "deriv" = "expression",
+ "weight" = "expression"), # "call"
+prototype = .VGAM.prototype.list) else
+setClass("vglmff", representation(
+ "blurb" = "character",
+ "constraints" = "expression",
+ "deviance" = "function",
+ "fini" = "expression",
+ "first" = "expression",
+ "initialize" = "expression",
+ "inverse" = "function",
+ "last" = "expression",
+ "link" = "function",
+ "loglikelihood"= "function", # problem: zz function() NULL if unspecified
+ "middle" = "expression",
+ "middle2" = "expression",
+ "summary.dispersion" = "logical",
+ "vfamily" = "character",
+ "deriv" = "expression",
+ "weight" = "expression"))
+
+
+valid.vglmff = function(object) {
+
+ compulsory = c("initialize", "weight", "deriv", "inverse")
+ for(i in compulsory) {
+ if(!length(slot(object, i)))
+ stop(paste("slot \"", i, "\" is empty"))
+ }
+
+ if(length(as.list(object@inverse)) != 3)
+ stop("wrong number of arguments in object@inverse")
+}
+
+if(FALSE)
+setValidity("vglmff", valid.vglmff)
+
+
+
+print.vglmff <- function(x, ...)
+{
+ f <- x@vfamily
+ if(is.null(f))
+ stop("not a VGAM family function")
+
+ nn <- x@blurb
+ if(!length(nn))
+ invisible(return(x))
+
+ cat("Family: ", f[1], "\n")
+ if(length(f)>1) cat("Informal classes:", paste(f, collapse=", "), "\n")
+ cat("\n")
+
+ for(i in 1:length(nn))
+ cat(nn[i])
+ cat("\n")
+ invisible(return(x))
+}
+
+
+setMethod("print", "vglmff",
+ function(x, ...)
+ invisible(print.vglmff(x, ...)))
+
+setMethod("show", "vglmff",
+ function(object)
+ print.vglmff(x=object))
+
+
+
+
+
+
+
+
+
+if(is.R())
+setClass("vlm", representation(
+ "assign" = "list",
+ "call" = "call",
+ "coefficients" = if(is.R()) "numeric" else "named",
+ "constraints" = "list",
+ "contrasts" = "list",
+ "control" = "list",
+ "criterion" = "list",
+ "df.residual" = "numeric",
+ "df.total" = "numeric",
+ "dispersion" = "numeric",
+ "effects" = "numeric",
+ "fitted.values"= "matrix",
+ "misc" = "list",
+ "model" = "data.frame",
+ "na.action" = "list", # ' if(is.R()) "omit" else '
+ "offset" = "matrix",
+ "post" = "list",
+ "preplot" = "list",
+ "prior.weights"= if(is.R()) "numeric" else "named",
+ "qr" = "list",
+ "R" = if(is.R()) "matrix" else "upper",
+ "rank" = "integer",
+ "residuals" = "matrix",
+ "rss" = "numeric",
+ "smart.prediction" = "list",
+ "terms" = "list",
+ "weights" = "matrix",
+ "x" = if(is.R()) "matrix" else "model.matrix",
+ "xlevels" = "list",
+ "y" = "matrix")
+) else
+setClass("vlm", representation(
+ "assign" = "list",
+ "call" = "call",
+ "coefficients" = if(is.R()) "numeric" else "named",
+ "constraints" = "list",
+ "contrasts" = "list",
+ "control" = "list",
+ "criterion" = "list",
+ "df.residual" = "numeric",
+ "df.total" = "numeric",
+ "dispersion" = "numeric",
+ "effects" = "numeric",
+ "fitted.values"= "matrix",
+ "misc" = "list",
+ "model" = "data.frame",
+ "na.action" = "list", # ' if(is.R()) "omit" else '
+ "offset" = "matrix",
+ "post" = "list",
+ "preplot" = "list",
+ "prior.weights"= if(is.R()) "numeric" else "named",
+ "qr" = "qr",
+ "R" = if(is.R()) "matrix" else "upper",
+ "rank" = "integer",
+ "residuals" = "matrix",
+ "rss" = "numeric",
+ "smart.prediction" = "list",
+ "terms" = "list",
+ "weights" = "matrix",
+ "x" = if(is.R()) "matrix" else "model.matrix",
+ "xlevels" = "list",
+ "y" = "matrix")
+)
+
+
+setClass("vglm", representation("vlm",
+ "extra" = "list",
+ "family" = "vglmff",
+ "iter" = if(is.R()) "numeric" else "integer",
+ "predictors" = if(is.R()) "matrix" else "matrix"))
+
+
+setClass("vgam", representation("vglm",
+ "Bspline" = "list", # each [[i]] is a "vsmooth.spline.fit"
+ "nl.chisq" = if(is.R()) "numeric" else "named",
+ "nl.df" = if(is.R()) "numeric" else "named",
+ "spar" = if(is.R()) "numeric" else "named",
+ "s.xargument" = if(is.R()) "character" else "named",
+ "var" = "matrix"))
+
+
+
+
+
+if(is.R())
+ setClass("summary.vgam",
+ representation("vgam",
+ anova="data.frame",
+ cov.unscaled="matrix",
+ correlation="matrix",
+ df="numeric",
+ pearson.resid="matrix",
+ sigma="numeric"),
+prototype(anova=data.frame())) else
+ setClass("summary.vgam",
+ representation("vgam",
+ anova="data.frame",
+ cov.unscaled="matrix",
+ correlation="matrix",
+ df="numeric",
+ pearson.resid="matrix",
+ sigma="numeric"))
+
+
+ setClass("summary.vglm",
+ representation("vglm",
+ coef3="matrix",
+ cov.unscaled="matrix",
+ correlation="matrix",
+ df="numeric",
+ pearson.resid="matrix",
+ sigma="numeric"))
+
+ setClass("summary.vlm",
+ representation("vlm",
+ coef3="matrix",
+ cov.unscaled="matrix",
+ correlation="matrix",
+ df="numeric",
+ pearson.resid="matrix",
+ sigma="numeric"))
+
+
+
+ setClass( "rrvglm", representation("vglm"))
+
+ setClass("qrrvglm", representation(
+ "assign" = "list",
+ "call" = "call",
+ "coefficients" = if(is.R()) "numeric" else "named",
+ "constraints" = "list",
+ "contrasts" = "list",
+ "control" = "list",
+ "criterion" = "list",
+ "df.residual" = "numeric",
+ "df.total" = "numeric",
+ "dispersion" = "numeric",
+ "extra" = "list",
+ "family" = "vglmff",
+ "fitted.values"= "matrix",
+ "iter" = if(is.R()) "numeric" else "integer",
+ "misc" = "list",
+ "model" = "data.frame",
+ "na.action" = "list", # ' if(is.R()) "omit" else '
+ "offset" = "matrix",
+ "post" = "list",
+ "predictors" = if(is.R()) "matrix" else "matrix",
+ "preplot" = "list",
+ "prior.weights"= if(is.R()) "numeric" else "named",
+ "residuals" = "matrix",
+ "smart.prediction" = "list",
+ "terms" = "list",
+ "weights" = "matrix",
+ "x" = if(is.R()) "matrix" else "model.matrix",
+ "xlevels" = "list",
+ "y" = "matrix")
+)
+
+if(FALSE)
+setAs("qrrvglm", "vglm", function(from)
+new("vglm", "extra"=from at extra,
+ "family"=from at family,
+ "iter"=from at iter,
+ "predictors"=from at predictors,
+ "assign"=from at assign,
+ "call"=from at call,
+ "coefficients"=from at coefficients,
+ "constraints"=from at constraints,
+ "contrasts"=from at contrasts,
+ "control"=from at control,
+ "criterion"=from at criterion,
+ "df.residual"=from at df.residual,
+ "df.total"=from at df.total,
+ "dispersion"=from at dispersion,
+ "effects"=from at effects,
+ "fitted.values"=from at fitted.values,
+ "misc"=from at misc,
+ "model"=from at model,
+ "na.action"=from at na.action,
+ "offset"=from at offset,
+ "post"=from at post,
+ "preplot"=from at preplot,
+ "prior.weights"=from at prior.weights,
+ "qr"=from at qr,
+ "R"=from at R,
+ "rank"=from at rank,
+ "residuals"=from at residuals,
+ "rss"=from at rss,
+ "smart.prediction"=from at smart.prediction,
+ "terms"=from at terms,
+ "weights"=from at weights,
+ "x"=from at x,
+ "xlevels"=from at xlevels,
+ "y"=from at y))
+
+
+
+ setClass("grc", representation("rrvglm", not.needed="numeric"))
+
+
+setMethod("summary", "grc",
+ function(object, ...)
+ summary.grc(object, ...))
+
+
+
+if(FALSE) {
+setClass("vfamily",
+ representation("list"))
+}
+
+
+
+
+if(!isGeneric("Coef"))
+setGeneric("Coef", function(object, ...) standardGeneric("Coef"))
+if(!isGeneric("Coefficients"))
+setGeneric("Coefficients", function(object, ...)
+ standardGeneric("Coefficients"))
+
+
+
+
+
+
+
+if(FALSE) {
+
+if(!isGeneric("AIC"))
+ setGeneric("AIC", function(object, ..., k=2) standardGeneric("AIC"))
+
+AIC.vlm = function(object, ..., k=2) {
+ ed = object@misc$estimated.dispersion
+ no.dpar = if(length(ed) && is.logical(ed) && ed)
+ length(object@misc$dispersion) else 0
+ -2 * logLik(object, ...) + k * (length(coef(object)) + no.dpar)
+}
+
+AIC.vgam = function(object, ..., k=2) {
+ ed = object@misc$estimated.dispersion
+ no.dpar = if(length(ed) && is.logical(ed) && ed)
+ length(object@misc$dispersion) else 0
+ nldf = if(is.Numeric(object@nl.df)) sum(object@nl.df) else 0
+ -2 * logLik(object, ...) + k * (length(coef(object)) + no.dpar + nldf)
+}
+
+AIC.rrvglm = function(object, ..., k=2) {
+ ed = object@misc$estimated.dispersion
+ no.dpar = if(length(ed) && is.logical(ed) && ed)
+ length(object@misc$dispersion) else 0
+ elts.tildeA = (object@misc$M - object@control$Rank) * object@control$Rank
+ -2 * logLik(object, ...) + k * (length(coef(object)) + no.dpar + elts.tildeA)
+}
+
+AIC.qrrvglm = function(object, ..., k=2) {
+ stop("this function not written yet")
+}
+
+setMethod("AIC", "vlm",
+ function(object, ..., k=2)
+ AIC.vlm(object, ..., k=k))
+
+setMethod("AIC", "vglm",
+ function(object, ..., k=2)
+ AIC.vlm(object, ..., k=k))
+
+setMethod("AIC", "vgam",
+ function(object, ..., k=2)
+ AIC.vgam(object, ..., k=k))
+
+setMethod("AIC", "rrvglm",
+ function(object, ..., k=2)
+ AIC.rrvglm(object, ..., k=k))
+
+setMethod("AIC", "qrrvglm",
+ function(object, ..., k=2)
+ AIC.qrrvglm(object, ..., k=k))
+}
+
+if(!isGeneric("logLik"))
+ setGeneric("logLik", function(object, ...) standardGeneric("logLik"))
+
+if(!isGeneric("plot"))
+ setGeneric("plot", function(x, y, ...) standardGeneric("plot"))
+
+if(!isGeneric("vcov"))
+ setGeneric("vcov", function(object, ...) standardGeneric("vcov"))
+
+
+
+
+
+
+
+
+setClass("vlmsmall", representation(
+ "call" = "call",
+ "coefficients" = if(is.R()) "numeric" else "named",
+ "constraints" = "list",
+ "control" = "list",
+ "criterion" = "list",
+ "fitted.values"= "matrix",
+ "misc" = "list",
+ "model" = "data.frame",
+ "na.action" = "list", # ' if(is.R()) "omit" else '
+ "post" = "list",
+ "preplot" = "list",
+ "prior.weights"= if(is.R()) "numeric" else "named",
+ "residuals" = "matrix",
+ "weights" = "matrix",
+ "x" = if(is.R()) "matrix" else "model.matrix",
+ "y" = "matrix"),
+)
+
+setClass("uqo", representation("vlmsmall",
+ "lv" = "matrix",
+ "extra" = "list",
+ "family" = "vglmff",
+ "iter" = if(is.R()) "numeric" else "integer",
+ "predictors" = "matrix"))
+
+
+setClass(Class="cao", repr=representation("vgam", "uqo"))
+
+
+if(!isGeneric("lvplot"))
+setGeneric("lvplot", function(object, ...) standardGeneric("lvplot"))
+
+if(!isGeneric("ccoef"))
+ setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
+
+
+
+
+
+if(!isGeneric("coef"))
+ setGeneric("coef", function(object, ...) standardGeneric("coef"))
+
+if(!isGeneric("coefficients"))
+ setGeneric("coefficients", function(object, ...)
+ standardGeneric("coefficients"))
+
+if(!isGeneric("df.residual"))
+ setGeneric("df.residual", function(object, ...)
+ standardGeneric("df.residual"))
+
+if(!isGeneric("fitted"))
+ setGeneric("fitted", function(object, ...) standardGeneric("fitted"))
+
+ if(!isGeneric("fitted.values"))
+ setGeneric("fitted.values", function(object, ...)
+ standardGeneric("fitted.values"))
+
+if(!isGeneric("model.matrix"))
+ setGeneric("model.matrix", function(object, ...)
+ standardGeneric("model.matrix"))
+
+if(!isGeneric("model.frame"))
+ setGeneric("model.frame", function(formula, ...)
+ standardGeneric("model.frame"))
+
+
+if(!isGeneric("predict"))
+ setGeneric("predict", function(object, ...) standardGeneric("predict"))
+
+if(!isGeneric("resid"))
+ setGeneric("resid", function(object, ...) standardGeneric("resid"))
+
+if(!isGeneric("residuals"))
+ setGeneric("residuals", function(object, ...) standardGeneric("residuals"))
+
+if(!isGeneric("weights"))
+ setGeneric("weights", function(object, ...) standardGeneric("weights"))
+
+
+
+
+
+
+
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
new file mode 100644
index 0000000..035c323
--- /dev/null
+++ b/R/add1.vglm.q
@@ -0,0 +1,6 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
diff --git a/R/attrassign.R b/R/attrassign.R
new file mode 100644
index 0000000..fefece0
--- /dev/null
+++ b/R/attrassign.R
@@ -0,0 +1,37 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+attrassignlm <- function(lmobj) {
+ attrassign(model.matrix(lmobj),terms(lmobj))
+}
+
+attrassigndefault <- function(mmat,tt) {
+ if (!inherits(tt,"terms"))
+ stop("need terms object")
+ aa<-attr(mmat,"assign")
+ if (is.null(aa))
+ stop("argument is not really a model matrix")
+ ll<-attr(tt,"term.labels")
+ if (attr(tt,"intercept")>0)
+ ll<-c("(Intercept)",ll)
+ aaa<-factor(aa,labels=ll)
+ split(order(aa),aaa)
+}
+
+
+if(!isGeneric("attrassign"))
+ setGeneric("attrassign", function(object, ...)
+ standardGeneric("attrassign"))
+
+setMethod("attrassign", "lm",
+ function(object, ...)
+ attrassignlm(object, ...))
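+
+# An illustrative sketch (not run) of what attrassigndefault() returns:
+# a list mapping each term to the model-matrix columns it generated.
+# The toy data frame below is hypothetical.
+if(FALSE) {
+    dd <- data.frame(y = rnorm(10), x1 = runif(10), ff = gl(2, 5))
+    tt <- terms(y ~ x1 + ff, data = dd)
+    mm <- model.matrix(y ~ x1 + ff, data = dd)
+    attrassigndefault(mm, tt) # list of column indices, one entry per term
+}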
+
+
+
diff --git a/R/build.terms.s b/R/build.terms.s
new file mode 100644
index 0000000..cfb748b
--- /dev/null
+++ b/R/build.terms.s
@@ -0,0 +1,50 @@
+Build.terms <- function(x, coefs, cov = NULL, assign, collapse = TRUE)
+{
+
+ cov.true <- !is.null(cov)
+ if(collapse) {
+ fit <- drop(x %*% coefs)
+ if(cov.true) {
+ var <- ((x %*% cov) * x) %*% rep(1, length(coefs))
+ list(fitted.values = fit, se.fit = drop(sqrt(var)))
+ }
+ else fit
+ } else {
+ constant <- attr(x, "constant")
+ if(!is.null(constant))
+ constant <- sum(constant * coefs)
+ if(missing(assign))
+ assign <- attr(x, "assign")
+ if(is.null(assign))
+ stop("Need an 'assign' list")
+ fit <- array(0, c(nrow(x), length(assign)),
+ list(dimnames(x)[[1]], names(assign)))
+ if(cov.true)
+ se <- fit
+ TL <- sapply(assign, length)
+ simple <- TL == 1
+ complex <- TL > 1
+ if(any(simple)) {
+ asss <- unlist(assign[simple])
+ ones <- rep(1, nrow(x))
+ fit[, simple] <- x[, asss] * outer(ones, coefs[asss])
+ if(cov.true)
+ se[, simple] <- abs(x[, asss]) * outer(ones,
+ sqrt(diag(cov))[asss])
+ }
+ if(any(complex)) {
+ assign <- assign[complex]
+ for(term in names(assign)) {
+ TT <- assign[[term]]
+ xt <- x[, TT]
+ fit[, term] <- xt %*% coefs[TT]
+ if(cov.true)
+ se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) *
+ xt) %*% rep(1, length(TT))))
+ }
+ }
+ attr(fit, "constant") <- constant
+ if(is.null(cov)) fit else list(fitted.values = fit, se.fit = se)
+ }
+}
+
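+# An illustrative sketch (not run) of Build.terms() with collapse=TRUE:
+# it returns the fitted values x %*% coefs and, when a coefficient
+# covariance matrix is supplied, their pointwise standard errors.
+# All inputs below are hypothetical.
+if(FALSE) {
+    X <- cbind("(Intercept)" = 1, x = 1:5)
+    cfs <- c(0.5, 2)
+    covmat <- 0.01 * diag(2) # a pretend covariance matrix of cfs
+    Build.terms(X, cfs, cov = covmat, collapse = TRUE)
+}
+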
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
new file mode 100644
index 0000000..ee3d94c
--- /dev/null
+++ b/R/build.terms.vlm.q
@@ -0,0 +1,92 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+if(is.R()) {
+
+ if(!isGeneric("terms"))
+ setGeneric("terms", function(x, ...) standardGeneric("terms"))
+}
+
+terms.vlm = function(x, ...) {
+ v = x@terms
+ if(!length(v))
+ stop("terms slot is empty")
+ v = v$terms
+ if(!length(v))
+ stop("no terms component")
+ v
+}
+
+
+if(!isGeneric("terms"))
+ setGeneric("terms", function(x, ...)
+ standardGeneric("terms"))
+
+setMethod("terms", "vlm", function(x, ...) terms.vlm(x, ...))
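+
+# An illustrative sketch (not run): terms() on any "vlm"-derived object
+# (vglm, vgam, ...) recovers the terms object stored at fitting time.
+# 'fit' below is a hypothetical vglm() fit.
+if(FALSE) {
+    tt <- terms(fit)
+    attr(tt, "term.labels") # labels of the model terms
+}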
+
+
+
+Build.terms.vlm <- function(x, coefs, cov = NULL, assign, collapse = TRUE, M,
+ dimname=NULL, coefmat = NULL)
+{
+
+ cov.true <- !is.null(cov)
+ if(collapse) {
+ fit <- matrix(x %*% coefs, ncol=M, byrow=TRUE)
+ dimnames(fit) <- dimname
+ if(M==1)
+ fit <- c(fit)
+ if(cov.true) {
+ var <- ((x %*% cov) * x) %*% rep(1, length(coefs))
+ list(fitted.values = fit, se.fit = if(M==1) c(sqrt(var)) else
+ matrix(sqrt(var), ncol=M, byrow=TRUE, dimnames=dimname))
+ } else {
+ fit
+ }
+ } else {
+
+
+
+ constant <- attr(x, "constant")
+ if(!is.null(constant)) {
+ constant <- as.vector( t(coefmat) %*% constant )
+ }
+
+ if(missing(assign))
+ assign <- attr(x, "assign")
+ if(is.null(assign))
+ stop("Need an 'assign' list")
+ fit <- array(0, c(nrow(x), length(assign)),
+ list(dimnames(x)[[1]], names(assign)))
+ if(cov.true)
+ se <- fit
+ TL <- sapply(assign, length)
+ simple <- TL == 1
+ complex <- TL > 1
+ if(any(simple)) {
+ asss <- unlist(assign[simple])
+ ones <- rep(1, nrow(x))
+ fit[, simple] <- x[, asss] * outer(ones, coefs[asss])
+ if(cov.true)
+ se[, simple] <- abs(x[, asss]) * outer(ones,
+ sqrt(diag(cov))[asss])
+ }
+ if(any(complex)) {
+ assign <- assign[complex]
+ for(term in names(assign)) {
+ TT <- assign[[term]]
+ xt <- x[, TT]
+ fit[, term] <- xt %*% coefs[TT]
+ if(cov.true)
+ se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) *
+ xt) %*% rep(1, length(TT))))
+ }
+ }
+ attr(fit, "constant") <- constant
+
+ if(cov.true) list(fitted.values = fit, se.fit = se) else fit
+ }
+}
+
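+# An illustrative sketch (not run) of the M > 1 case: with collapse=TRUE
+# the stacked vector x %*% coefs is reshaped row-wise into an n x M
+# matrix of fitted values, one column per linear predictor.
+# The inputs below are hypothetical.
+if(FALSE) {
+    M <- 2; n <- 4
+    X <- kronecker(cbind(1, 1:n), diag(M)) # a VLM-style big model matrix
+    cfs <- c(-1, 1, 0.5, 2)
+    Build.terms.vlm(X, cfs, cov = NULL, collapse = TRUE, M = M) # 4 x 2
+}
+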
diff --git a/R/calibrate.q b/R/calibrate.q
new file mode 100644
index 0000000..27de56b
--- /dev/null
+++ b/R/calibrate.q
@@ -0,0 +1,287 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+calibrate.qrrvglm.control = function(object,
+ trace=FALSE, # passed into optim()
+ Method.optim="BFGS", # passed into optim(method=Method)
+ gridSize = if(Rank==1) 9 else 5,
+ varlvI = FALSE, ...) {
+ Rank = object@control$Rank
+ EqualTolerances = object@control$EqualTolerances
+ if(!is.Numeric(gridSize, positive=TRUE, integer=TRUE, allow=1))
+ stop("bad input for \"gridSize\"")
+ if(gridSize < 2)
+ stop("gridSize must be >= 2")
+ list(# maxit=Maxit.optim, # Note the name change
+ trace=as.numeric(trace)[1],
+ Method.optim=Method.optim,
+ gridSize=gridSize,
+ varlvI = as.logical(varlvI)[1])
+}
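+
+# An illustrative sketch (not run): arguments in the ... of calibrate()
+# are routed to this control function, so the grid density and optim()
+# method can be tuned per call. 'myfit' is a hypothetical cqo() fit.
+if(FALSE) {
+    calibrate(myfit, newdata = myfit@y[1:2, ], type = "lv",
+              gridSize = 17, Method.optim = "CG", trace = TRUE)
+}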
+
+if(!isGeneric("calibrate"))
+ setGeneric("calibrate", function(object, ...) standardGeneric("calibrate"))
+
+
+calibrate.qrrvglm = function(object,
+ newdata=NULL,
+ type=c("lv","predictors","response","vcov","all3or4"),
+ initial.vals=NULL, ...) {
+
+ Quadratic = if(is.logical(object@control$Quadratic))
+ object@control$Quadratic else FALSE # T if CQO, F if CAO
+
+ if(!length(newdata)) {
+ if(!length(object@y)) stop("no newdata") else
+ newdata = data.frame(object@y)
+ }
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("lv","predictors","response","vcov","all3or4"))[1]
+
+ if(!Quadratic && type=="vcov")
+ stop("cannot have type=\"vcov\" when object is a \"cao\" object")
+
+ if(is.vector(newdata))
+ newdata = rbind(newdata)
+ if(!is.matrix(newdata))
+ newdata = as.matrix(newdata)
+ newdata = newdata[,object@misc$ynames,drop=FALSE]
+
+ obfunct = slot(object@family, object@misc$criterion) # Objective function
+ minimize.obfunct = if(Quadratic) object@control$min.criterion else
+ TRUE # Logical; TRUE for CAO objects because deviance is minimized
+ if(!is.logical(minimize.obfunct))
+ stop("object@control$min.criterion is not a logical")
+ optim.control = calibrate.qrrvglm.control(object=object, ...) # For cao too
+
+ if((Rank <- object@control$Rank) > 2)
+ stop("currently can only handle Rank=1 and 2")
+ Coefobject = if(Quadratic) {
+ Coef(object, varlvI=optim.control$varlvI)
+ } else {
+ Coef(object)
+ }
+ if(!length(initial.vals)) {
+ L = apply(Coefobject@lv, 2, min)
+ U = apply(Coefobject@lv, 2, max)
+ initial.vals = if(Rank==1)
+ cbind(seq(L, U, length=optim.control$gridSize)) else
+ expand.grid(seq(L[1], U[1], length=optim.control$gridSize),
+ seq(L[2], U[2], length=optim.control$gridSize))
+ }
+ ok = length(object@control$colx1.index)==1 &&
+ names(object@control$colx1.index) == "(Intercept)"
+ if(!ok) stop("The x1 vector must be an intercept only")
+
+ nn = nrow(newdata)
+ BestOFpar = NULL # It may be more efficient not to append
+ BestOFvalues = NULL # Best OF objective function values
+ for(i1 in 1:nn) {
+ if(optim.control$trace)
+ cat("\nOptimizing for observation", i1, "-----------------\n")
+ OFvalues = OFpar = NULL # OF means objective function
+ for(ii in 1:nrow(initial.vals)) {
+ if(optim.control$trace) {
+ cat("Starting from grid-point", ii, ":")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ ans = if(is.R()) {
+ if(Quadratic)
+ optim(par=initial.vals[ii,],
+ fn=.my.calib.objfunction.qrrvglm,
+ method=optim.control$Method.optim, # "BFGS", or "CG" or ...
+ control=c(fnscale=ifelse(minimize.obfunct,1,-1),
+ optim.control),
+ y=newdata[i1,],
+ extra=object@extra,
+ objfun=obfunct,
+ Coefs=Coefobject,
+ misc.list = object@misc,
+ everything = FALSE,
+ mu.function = slot(object@family, "inverse")) else
+ optim(par=initial.vals[ii,],
+ fn=.my.calib.objfunction.cao,
+ method=optim.control$Method.optim, # "BFGS", or "CG" or ...
+ control=c(fnscale=ifelse(minimize.obfunct,1,-1),
+ optim.control),
+ y=newdata[i1,],
+ extra=object@extra,
+ objfun=obfunct,
+ object=object,
+ Coefs=Coefobject,
+ misc.list = object@misc,
+ everything = FALSE,
+ mu.function = slot(object@family, "inverse"))
+ } else
+ stop("not implemented in S-PLUS yet")
+
+ if(optim.control$trace) {
+ if(ans$convergence == 0)
+ cat("Successful convergence\n") else
+ cat("Unsuccessful convergence\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ if(ans$convergence == 0) {
+ OFvalues = c(OFvalues, ans$value)
+ OFpar = rbind(OFpar, ans$par)
+ }
+ }
+ if(length(OFpar)) {
+ index = if(minimize.obfunct)
+ (1:nrow(OFpar))[OFvalues==min(OFvalues)] else
+ (1:nrow(OFpar))[OFvalues==max(OFvalues)]
+ if(length(index) > 1) {
+ warning(paste("multiple solutions found for observation ", i1,
+ ". Choosing one randomly.", sep=""))
+ index = sample(index, size=1)
+ } else if(length(index) == 0)
+ stop("length(index) is zero")
+ BestOFpar = rbind(BestOFpar, OFpar[index,])
+ BestOFvalues = c(BestOFvalues, OFvalues[index])
+ } else {
+ BestOFpar = rbind(BestOFpar, rep(as.numeric(NA), len=Rank))
+ BestOFvalues = c(BestOFvalues, NA)
+ }
+ }
+
+ pretty = function(BestOFpar, newdata, Rank) {
+ if(Rank==1) {
+ BestOFpar = c(BestOFpar)
+ names(BestOFpar) = dimnames(newdata)[[1]]
+ } else
+ dimnames(BestOFpar) = list(dimnames(newdata)[[1]],
+ if(Rank==1) "lv" else paste("lv", 1:Rank, sep=""))
+ BestOFpar
+ }
+
+ if(type=="lv") {
+ BestOFpar = pretty(BestOFpar, newdata, Rank)
+ attr(BestOFpar,"objectiveFunction")=pretty(BestOFvalues,newdata,Rank=1)
+ BestOFpar
+ } else {
+ etaValues = muValues = NULL #
+ if(Quadratic)
+ vcValues = array(0, c(Rank,Rank,nn))
+ for(i1 in 1:nn) {
+ ans = if(Quadratic) .my.calib.objfunction.qrrvglm(BestOFpar[i1, ],
+ y=newdata[i1,],
+ extra=object@extra,
+ objfun=obfunct,
+ Coefs=Coefobject,
+ misc.list = object@misc,
+ everything = TRUE,
+ mu.function = slot(object@family, "inverse")) else
+ .my.calib.objfunction.cao(BestOFpar[i1, ],
+ y=newdata[i1,],
+ extra=object@extra,
+ objfun=obfunct,
+ object=object,
+ Coefs=Coefobject,
+ misc.list = object@misc,
+ everything = TRUE,
+ mu.function = slot(object@family, "inverse"))
+ muValues = rbind(muValues, matrix(ans$mu, nrow=1))
+ etaValues = rbind(etaValues, matrix(ans$eta, nrow=1))
+ if(Quadratic)
+ vcValues[,,i1] = ans$vcmat # Can be NULL for "cao" objects
+ }
+ if(type=="response") {
+ dimnames(muValues) = dimnames(newdata)
+ muValues
+ } else if(type=="predictors") {
+ dimnames(etaValues) = list(dimnames(newdata)[[1]],
+ dimnames(object@predictors)[[2]])
+ etaValues
+ } else if(type=="vcov") {
+ if(Quadratic)
+ dimnames(vcValues) = list(as.character(1:Rank),
+ as.character(1:Rank),
+ dimnames(newdata)[[1]])
+ vcValues
+ } else if(type=="all3or4") {
+ if(Quadratic)
+ dimnames(vcValues) = list(as.character(1:Rank),
+ as.character(1:Rank),
+ dimnames(newdata)[[1]])
+ dimnames(muValues) = dimnames(newdata)
+ dimnames(etaValues) = list(dimnames(newdata)[[1]],
+ dimnames(object@predictors)[[2]])
+ BestOFpar = pretty(BestOFpar, newdata, Rank)
+ attr(BestOFpar,"objectiveFunction") =
+ pretty(BestOFvalues,newdata,Rank=1)
+ list(lv=BestOFpar,
+ predictors=etaValues,
+ response=muValues,
+ vcov=if(Quadratic) vcValues else NULL)
+ } else stop("type not matched")
+ }
+}
+
+.my.calib.objfunction.qrrvglm = function(bnu, y, extra=NULL,
+ objfun, Coefs,
+ misc.list,
+ everything=TRUE,
+ mu.function) {
+
+ bnumat = cbind(bnu)
+ Rank = length(bnu)
+ eta = cbind(c(Coefs@B1)) + Coefs@A %*% bnumat # bix1 = intercept only
+ M = misc.list$M
+ for(s in 1:M) {
+ temp = Coefs@D[,,s,drop=FALSE]
+ dim(temp) = dim(temp)[1:2] # c(Rank, Rank)
+ eta[s,1] = eta[s,1] + t(bnumat) %*% temp %*% bnumat
+ }
+ eta = matrix(eta, 1, M, byrow=TRUE)
+ mu = rbind(mu.function(eta, extra)) # Make sure it has one row
+ value = objfun(mu=mu, y=y,
+ w=1, # ignore prior.weights on the object
+ residuals=FALSE, eta=eta, extra=extra)
+ if(everything) {
+ vcmat = matrix(0, Rank, Rank)
+ for(s in 1:M) {
+ vec1 = cbind(Coefs@A[s,]) + 2 *
+ matrix(Coefs@D[,,s], Rank, Rank) %*% bnumat
+ vcmat = vcmat + mu[1,s] * vec1 %*% t(vec1)
+ }
+ vcmat = solve(vcmat)
+ } else vcmat = NULL
+ if(everything) list(eta=eta, mu=mu, value=value, vcmat=vcmat) else value
+}
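+
+# For reference, the quadratic predictor evaluated above is, for the
+# s-th linear predictor and site score nu,
+#     eta_s(nu) = b1_s + a_s' nu + nu' D_s nu,
+# and the vcmat block inverts sum_s mu_s * v_s v_s', where
+# v_s = a_s + 2 D_s nu is the gradient d eta_s / d nu.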
+
+.my.calib.objfunction.cao = function(bnu, y, extra=NULL,
+ objfun, object, Coefs,
+ misc.list,
+ everything=TRUE,
+ mu.function) {
+ Rank = length(bnu)
+ NOS = Coefs@NOS
+ eta = matrix(as.numeric(NA), 1, NOS)
+ for(j in 1:NOS) {
+ eta[1,j] = predictcao(object, grid=bnu, sppno=j,
+ Rank=Rank, deriv=0)$yvals
+ }
+ mu = rbind(mu.function(eta, extra)) # Make sure it has one row
+ value = objfun(mu=mu, y=y,
+ w=1, # ignore prior.weights on the object
+ residuals=FALSE, eta=eta, extra=extra)
+ vcmat = NULL # No theory as of yet to compute the vcmat
+ if(everything) list(eta=eta, mu=mu, value=value, vcmat=vcmat) else value
+}
+
+
+setMethod("calibrate", "qrrvglm", function(object, ...)
+ calibrate.qrrvglm(object, ...))
+
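+# An illustrative sketch (not run): calibration estimates site scores
+# (latent variables) from new response data. 'myfit' is a hypothetical
+# rank-1 object returned by cqo().
+if(FALSE) {
+    newy <- myfit@y[1:3, , drop = FALSE] # pretend these are new sites
+    calibrate(myfit, newdata = newy, type = "lv") # estimated site scores
+    calibrate(myfit, newdata = newy, type = "all3or4") # lv, eta, mu, vcov
+}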
+
diff --git a/R/cao.R b/R/cao.R
new file mode 100644
index 0000000..8229906
--- /dev/null
+++ b/R/cao.R
@@ -0,0 +1,177 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+cao <- function(formula,
+ family, data=list(),
+ weights=NULL, subset=NULL, na.action=na.fail,
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ control=cao.control(...),
+ offset=NULL,
+ method="cao.fit",
+ model=FALSE, x.arg=TRUE, y.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=NULL,
+ qr.arg=FALSE, smart=TRUE, ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "cao"
+
+ ocall <- match.call()
+
+ if(smart)
+ setup.smart("write")
+
+ mt <- terms(formula, data = data)
+ if(missing(data))
+ data <- environment(formula)
+
+ mf <- match.call(expand=FALSE)
+ mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
+ mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
+ mf$coefstart <- mf$etastart <- mf$... <- NULL
+ mf$smart <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ if(method == "model.frame")
+ return(mf)
+ na.act <- attr(mf, "na.action")
+
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ y <- model.response(mf, "numeric") # model.extract(mf, "response")
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+ w <- model.weights(mf)
+ if(!length(w))
+ w <- rep(1, nrow(mf))
+ else if(ncol(as.matrix(w))==1 && any(w < 0))
+ stop("negative weights not allowed")
+
+ if(is.character(family))
+ family <- get(family)
+ if(is.function(family))
+ family <- family()
+ if(!inherits(family, "vglmff")) {
+ stop("'family' is not a VGAM family function")
+ }
+
+ eval(vcontrol.expression)
+
+ if(!is.null(family@first))
+ eval(family@first)
+
+
+ cao.fitter <- get(method)
+
+
+ deviance.Bestof = rep(as.numeric(NA), len=control$Bestof)
+ for(tries in 1:control$Bestof) {
+ if(control$trace && (control$Bestof>1)) {
+ cat(paste("\n========================= Fitting model",
+ tries, "=========================\n"))
+ if(exists("flush.console"))
+ flush.console()
+ }
+ it <- cao.fitter(x=x, y=y, w=w, offset=offset,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ family=family,
+ control=control,
+ constraints=constraints,
+ criterion=control$criterion,
+ extra=extra,
+ qr.arg = qr.arg,
+ Terms=mt, function.name=function.name, ...)
+ deviance.Bestof[tries] = it$crit.list$deviance
+ if(tries==1 || min(deviance.Bestof[1:(tries-1)])>deviance.Bestof[tries])
+ fit = it
+ }
+ fit$misc$deviance.Bestof = deviance.Bestof
+
+ fit$misc$dataname <- dataname
+
+ if(smart) {
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ }
+
+ answer <-
+ new("cao",
+ "assign" = attr(x, "assign"),
+ "Bspline" = fit$Bspline,
+ "call" = ocall,
+ "coefficients" = fit$coefficients,
+ "criterion" = fit$crit.list,
+ "family" = fit$family,
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "residuals" = as.matrix(fit$wresiduals),
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=mt))
+
+ if(!smart) answer@smart.prediction <- list(smart.arg=FALSE)
+
+ if(qr.arg) {
+ class(fit$qr) = "list"
+ slot(answer, "qr") = fit$qr
+ }
+ if(length(attr(x, "contrasts")))
+ slot(answer, "contrasts") = attr(x, "contrasts")
+ if(length(fit$fitted.values))
+ slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
+ slot(answer, "na.action") = if(length(na.act)) list(na.act) else list()
+ if(length(offset))
+ slot(answer, "offset") = as.matrix(offset)
+ if(length(fit$weights))
+ slot(answer, "weights") = as.matrix(fit$weights)
+ if(x.arg)
+ slot(answer, "x") = fit$x # The 'small' design matrix
+ if(length(xlev))
+ slot(answer, "xlevels") = xlev
+ if(y.arg)
+ slot(answer, "y") = as.matrix(fit$y)
+
+
+ slot(answer, "control") = fit$control
+ slot(answer, "extra") = if(length(fit$extra)) {
+ if(is.list(fit$extra)) fit$extra else {
+ warning(paste("\"extra\" is not a list, therefore",
+ "placing \"extra\" into a list"))
+ list(fit$extra)
+ }
+ } else list() # R-1.5.0
+
+ slot(answer, "iter") = fit$iter
+ fit$predictors = as.matrix(fit$predictors) # Must be a matrix
+ dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
+ fit$misc$predictors.names)
+ slot(answer, "predictors") = fit$predictors
+ if(length(fit$prior.weights))
+ slot(answer, "prior.weights") = fit$prior.weights
+
+
+
+
+
+ answer
+}
+attr(cao, "smart") <- TRUE
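+
+# An illustrative sketch (not run) of a typical cao() call: a rank-1
+# constrained additive ordination of a site-by-species count matrix.
+# The response and environmental variables below are hypothetical
+# columns of a data frame 'envdata'.
+if(FALSE) {
+    fit <- cao(cbind(spp1, spp2, spp3) ~ pH + moisture + elevation,
+               family = poissonff, data = envdata,
+               Rank = 1, Bestof = 5, df1.nl = 2.5)
+    Coef(fit) # optima, maxima, and the C matrix
+}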
+
+
+
diff --git a/R/cao.fit.q b/R/cao.fit.q
new file mode 100644
index 0000000..4a6736a
--- /dev/null
+++ b/R/cao.fit.q
@@ -0,0 +1,1817 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ offset=0, family,
+ control=cao.control(...), criterion="coefficients",
+ qr.arg=FALSE, constraints=NULL, extra=NULL,
+ Terms=Terms, function.name="cao", ...)
+{
+ post = list()
+ check.rank = TRUE #
+ nonparametric <- TRUE
+ optim.maxit <- control$optim.maxit
+ backchat <- FALSE # control$backchat && !control$Quadratic # rrr;
+ save.weight <- control$save.weight
+ trace <- control$trace
+ minimize.criterion <- control$min.criterion
+
+ n <- dim(x)[1]
+
+
+ copyxbig <- FALSE # May be overwritten in @initialize
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
+
+
+ n.save <- n
+
+
+ Rank <- control$Rank
+ rrcontrol <- control #
+
+ if(length(family@initialize))
+ eval(family@initialize) # Initialize mu and M (and optionally w)
+ n <- n.save
+
+ modelno = switch(family@vfamily[1], "poissonff"=2,
+ "binomialff"=1, "quasipoissonff"=0, "quasibinomialff"=0,
+ "negbinomial"=3,
+ "gamma2"=5, "gaussianff"=8,
+ 0) # stop("can't fit this model using fast algorithm")
+ if(!modelno) stop("the family function does not work with cao()")
+ if(modelno == 1) modelno = get("modelno", envir = VGAMenv)
+
+ eval(rrr.init.expression)
+
+ if(length(etastart)) {
+ eta <- etastart
+ mu <- if(length(mustart)) mustart else family@inverse(eta, extra)
+ } else {
+ if(length(mustart))
+ mu <- mustart
+ eta <- family@link(mu, extra)
+ }
+
+ M <- if(is.matrix(eta)) ncol(eta) else 1
+
+
+
+ if(length(family@constraints))
+ eval(family@constraints)
+
+
+ special.matrix = matrix(-34956.125, M, M) # An unlikely used matrix
+ just.testing <- cm.vgam(special.matrix, x, rrcontrol$Norrr, constraints)
+ findex = trivial.constraints(just.testing, special.matrix)
+ tc1 = trivial.constraints(constraints)
+
+
+ if(all(findex))
+ stop("No covariates to form latent variables from.")
+ colx1.index = names.colx1.index = NULL
+ dx2 = dimnames(x)[[2]]
+ if(sum(findex)) {
+ asx = attr(x, "assign")
+ for(ii in names(findex))
+ if(findex[ii]) {
+ names.colx1.index = c(names.colx1.index, dx2[asx[[ii]]])
+ colx1.index = c(colx1.index, asx[[ii]])
+ }
+ names(colx1.index) = names.colx1.index
+ }
+ rrcontrol$colx1.index=control$colx1.index=colx1.index #Save it on the object
+ colx2.index = 1:ncol(x)
+ names(colx2.index) = dx2
+ colx2.index = colx2.index[-colx1.index]
+ p1 = length(colx1.index); p2 = length(colx2.index)
+ rrcontrol$colx2.index=control$colx2.index=colx2.index #Save it on the object
+
+
+
+ Cmat = if(length(rrcontrol$Cinit)) matrix(rrcontrol$Cinit,p2,Rank) else {
+ if(!rrcontrol$Use.Init.Poisson.QO) {
+ matrix(rnorm(p2 * Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
+ } else {
+ .Init.Poisson.QO(ymat=as.matrix(y),
+ X1=x[,colx1.index,drop=FALSE],
+ X2=x[,colx2.index,drop=FALSE],
+ Rank=rrcontrol$Rank, trace=rrcontrol$trace,
+ max.ncol.etamat = rrcontrol$Etamat.colmax,
+ Crow1positive=rrcontrol$Crow1positive,
+ constwt= any(family@vfamily[1] ==
+ c("negbinomial","gamma2","gaussianff")),
+ takelog= any(family@vfamily[1] != c("gaussianff")))
+ }
+ }
+
+
+ rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+
+ Blist <- process.constraints(constraints, x, M)
+
+ nice31 = checkCMCO(Blist, control=control, modelno=modelno)
+ if(nice31 != 1) stop("not nice")
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ lv.mat = x[,colx2.index,drop=FALSE] %*% Cmat
+
+
+ rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.CAO.")
+
+ Nice21 = length(names.colx1.index)==1 && names.colx1.index == "(Intercept)"
+ if(!Nice21) stop("only Norrr = ~ 1 (without constraints) is supported")
+ NOS = ifelse(modelno==3 || modelno==5, M/2, M)
+ p1star. = if(Nice21) ifelse(modelno==3 || modelno==5,2,1) else M
+ p2star. = if(Nice21) Rank else stop("not Nice21")
+ pstar. = p1star. + p2star.
+ nstar = if(Nice21) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ lenbeta = pstar. * ifelse(Nice21, NOS, 1)
+
+ othint = c(Rank,control$EqualTol, pstar. , dimw=1, inited=0, # w(,dimw) cols
+ modelno, maxitl=control$maxitl, actnits=0, twice=0, p1star. ,
+ p2star. , Nice21, lenbeta, controlITolerances=0, control$trace,
+ p1, p2=p2, imethod=control$method.init, bchat=0)
+ othdbl = c(small=control$SmallNo, fseps=control$epsilon,
+ .Machine$double.eps,
+ iKvector=rep(control$iKvector, len=NOS),
+ iShape=rep(control$iShape, len=NOS),
+ resss=0, bfeps=control$bf.epsilon, hstep=0.1)
+
+ for(iter in 1:optim.maxit) {
+ if(control$trace) {
+ cat("\nIteration", iter, "\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ if(exists("flush.console")) flush.console()
+
+ conjgrad = optim(par=c(Cmat), fn=callcaof,
+ gr=if(control$GradientFunction) calldcaof else NULL,
+ method="BFGS",
+ control=list(fnscale=1, trace=as.integer(control$trace),
+ maxit=control$Maxit.optim, REPORT=10),
+ etamat=eta, xmat=x, ymat=y, # as.matrix(y),
+ wvec=w, modelno=modelno,
+ Control=control,
+ Nice21=Nice21,
+ p1star. = p1star. , p2star. = p2star. ,
+ n=n, M=M,
+ othint=othint, othdbl=othdbl,
+ alldump=FALSE)
+
+
+ Cmat = matrix(conjgrad$par, p2, Rank) # old because of scale(cmatrix)
+
+ # Cmat <- Cmat %*% Ut # Normalized
+
+ if(converged <- (conjgrad$convergence == 0)) break
+ }
+
+ if(!converged) {
+ if(optim.maxit > 1) {
+ warning(paste("convergence not obtained in", optim.maxit,
+ "iterations."))
+ } else {
+ warning(paste("convergence not obtained"))
+ }
+ }
+ Cmat = crow1C(Cmat, control$Crow1positive) # Make sure the signs are right
+
+ if(exists("flush.console")) flush.console()
+ temp9 =
+ callcaof(cmatrix=Cmat,
+ etamat=eta, xmat=x, ymat=y, wvec=w, modelno=modelno,
+ Control=control,
+ Nice21=Nice21,
+ p1star. = p1star. , p2star. = p2star. ,
+ n=n, M=M,
+ othint=othint, othdbl=othdbl,
+ alldump=TRUE)
+ if(!is.list(extra))
+ extra = list()
+ extra$Cmat = temp9$Cmat
+ ynames = dimnames(y)[[2]]
+ extra$df1.nl = temp9$df1.nl
+ extra$spar1 = temp9$spar1
+ names(extra$spar1) = ynames
+ names(extra$df1.nl) = ynames
+ if(Rank == 2) {
+ extra$spar2 = temp9$spar2
+ extra$df2.nl = temp9$df2.nl
+ names(extra$spar2) = ynames
+ names(extra$df2.nl) = ynames
+ }
+
+ mu = matrix(temp9$fitted, n, NOS, byrow=TRUE)
+
+
+
+
+
+
+
+
+
+
+
+ dn <- labels(x)
+ yn <- dn[[1]]
+
+
+ if(is.matrix(mu)) {
+ if(length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ if(length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
+ } else {
+ names(mu) <- names(fv)
+ }
+
+
+ fit <- list(
+ fitted.values=mu,
+ Cmatrix = Cmat,
+ terms=Terms) # terms: This used to be done in vglm()
+
+
+
+
+ misc <- list(
+ criterion = criterion,
+ predictors.names = predictors.names,
+ M = M,
+ n = n,
+ nonparametric = nonparametric,
+ p = ncol(x),
+ ynames = ynames)
+
+ crit.list <- list()
+ crit.list$deviance = temp9$deviance
+
+
+
+
+
+ if(w[1] != 1 || any(w != w[1]))
+ fit$prior.weights <- w
+
+ if(length(family@last))
+ eval(family@last)
+
+ structure(c(fit,
+ temp9,
+ list(
+ contrasts=attr(x, "contrasts"),
+ control=control,
+ crit.list=crit.list,
+ extra=extra,
+ family=family,
+ iter=iter,
+ misc=misc,
+ post = post,
+ x=x,
+ y=y)),
+ vclass=family@vfamily)
+}
+
+
+
+
+
+cao.control = function(Rank=1,
+ all.knots = FALSE,
+ criterion="deviance",
+ Cinit=NULL,
+ Crow1positive=TRUE,
+ epsilon = 1.0e-05,
+ Etamat.colmax = 10,
+ GradientFunction=FALSE, # For now 24/12/04
+ iKvector = 0.1,
+ iShape = 0.1,
+ Norrr = ~ 1,
+ SmallNo = 5.0e-13,
+ Use.Init.Poisson.QO=TRUE,
+
+ Bestof = if(length(Cinit)) 1 else 10,
+ maxitl = 40,
+ method.init = 1,
+ bf.epsilon = 1.0e-7,
+ bf.maxit = 40,
+ Maxit.optim = 250,
+ optim.maxit = 20,
+ SD.sitescores = 1.0,
+ SD.Cinit = 0.02,
+ trace = TRUE,
+ df1.nl = 2.5, # About 1.5-2.5 gives the flexibility of a quadratic
+ df2.nl = 2.5, # About 1.5-2.5 gives the flexibility of a quadratic
+ spar1 = 0, # 0 means df1.nl is used
+ spar2 = 0, # 0 means df2.nl is used
+ ...)
+{
+ if(!is.Numeric(iShape, posit=TRUE)) stop("bad input for \"iShape\"")
+ if(!is.Numeric(iKvector, posit=TRUE)) stop("bad input for \"iKvector\"")
+ if(!is.Numeric(method.init, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"method.init\"")
+ if(criterion != "deviance") stop("\"criterion\" must be \"deviance\"")
+ if(GradientFunction) stop("14/1/05; GradientFunction=TRUE not working yet")
+ se.fit = as.logical(FALSE)
+ if(se.fit) stop("se.fit = FALSE handled only")
+
+ if(length(Cinit) && !is.Numeric(Cinit))
+ stop("Bad input for \"Cinit\"")
+ if(!is.Numeric(Bestof, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"Bestof\"")
+ if(!is.Numeric(maxitl, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"maxitl\"")
+ if(!is.Numeric(bf.epsilon, allow=1, posit=TRUE))
+ stop("Bad input for \"bf.epsilon\"")
+ if(!is.Numeric(bf.maxit, integ=TRUE, posit=TRUE, allow=1))
+ stop("Bad input for \"bf.maxit\"")
+ if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
+ stop("bad input for \"Etamat.colmax\"")
+ if(!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE, allow=1))
+ stop("Bad input for \"Maxit.optim\"")
+ if(!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"optim.maxit\"")
+ if(!is.Numeric(SD.sitescores, allow=1, posit=TRUE))
+ stop("Bad input for \"SD.sitescores\"")
+ if(!is.Numeric(SD.Cinit, allow=1, posit=TRUE))
+ stop("Bad input for \"SD.Cinit\"")
+ if(!is.Numeric(df1.nl) || any(df1.nl < 0))
+ stop("Bad input for \"df1.nl\"")
+ if(any(df1.nl >= 0 & df1.nl < 0.05)) {
+ warning("df1.nl values between 0 and 0.05 converted to 0.05")
+ df1.nl[df1.nl < 0.05] = 0.05
+ }
+ if(!is.Numeric(df2.nl) || any(df2.nl < 0))
+ stop("Bad input for \"df2.nl\"")
+ if(any(df2.nl >= 0 & df2.nl < 0.05)) {
+ warning("df2.nl values between 0 and 0.05 converted to 0.05")
+ df2.nl[df2.nl < 0.05] = 0.05
+ }
+ if(!is.Numeric(spar1) || any(spar1 < 0))
+ stop("Bad input for \"spar1\"")
+ if(!is.Numeric(spar2) || any(spar2 < 0))
+ stop("Bad input for \"spar2\"")
+ if(!is.Numeric(epsilon, posit=TRUE, allow=1))
+ stop("Bad input for \"epsilon\"")
+
+ if(!is.Numeric(SmallNo, posit=TRUE, allow=1))
+ stop("Bad input for \"SmallNo\"")
+ if((SmallNo < .Machine$double.eps) ||
+ (SmallNo > .0001)) stop("SmallNo is out of range")
+
+ ans = list(
+ Corner=FALSE, # A constant, not a control parameter; unneeded?
+ EqualTolerances=FALSE, # A constant, not a control parameter; needed
+ ITolerances=FALSE, # A constant, not a control parameter; unneeded?
+ Quadratic=FALSE, # A constant, not a control parameter; unneeded?
+ all.knots = as.logical(all.knots)[1],
+ Bestof = Bestof,
+ Cinit=Cinit,
+ ConstrainedO = TRUE, # A constant, not a control parameter
+ criterion=criterion,
+ Crow1positive=as.logical(rep(Crow1positive, len=Rank)),
+ epsilon = epsilon,
+ Etamat.colmax = Etamat.colmax,
+ FastAlgorithm = TRUE, # A constant, not a control parameter
+ GradientFunction = as.logical(GradientFunction),
+ maxitl = maxitl,
+ bf.epsilon = bf.epsilon,
+ bf.maxit = bf.maxit,
+ method.init = method.init,
+ Maxit.optim = Maxit.optim,
+ optim.maxit = optim.maxit,
+ Norrr=Norrr,
+ Rank = Rank,
+ SD.sitescores = SD.sitescores,
+ SD.Cinit = SD.Cinit,
+ se.fit = se.fit, # If TRUE, then would need storage for S QR fits
+ SmallNo = SmallNo,
+ trace = as.integer(trace),
+ Use.Init.Poisson.QO=Use.Init.Poisson.QO,
+ iKvector = as.numeric(iKvector),
+ iShape = as.numeric(iShape),
+ DF1 = 2.5, # Used as Default value if df1.nl has no default
+ DF2 = 2.5, # Used as Default value if df2.nl has no default
+ SPAR1 = 0, # Used as Default value if spar1 has no default
+ SPAR2 = 0, # Used as Default value if spar2 has no default
+ df1.nl = df1.nl,
+ df2.nl = df2.nl,
+ spar1 = spar1,
+ spar2 = spar2)
+ ans
+}
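+
+# An illustrative sketch (not run): df1.nl and spar1 are later resolved
+# per species via procVec() against the response column names, which
+# suggests a named vector can give individual species their own value,
+# with the unnamed element acting as the default. The species names and
+# the data frame 'envdata' below are hypothetical.
+if(FALSE) {
+    fit <- cao(cbind(spp1, spp2, spp3) ~ pH, family = poissonff,
+               data = envdata, df1.nl = c(spp2 = 4, 2.5))
+}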
+
+
+create.cms <- function(Rank=1, M, MSratio=1, which, p1=1) {
+ if(!is.Numeric(p1, allow=1, integ=TRUE, pos=TRUE)) stop("bad input for p1")
+ Blist. = vector("list", p1+Rank)
+ for(r in 1:(p1+Rank))
+ Blist.[[r]] = diag( M )
+ names(Blist.) = if(p1 == 1) c("(Intercept)", names(which)) else
+ stop("p1 must equal 1")
+ if(MSratio == 2) {
+ for(r in 1:Rank)
+ Blist.[[p1+r]] = eij(1, M)
+ }
+ Blist.
+}
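+
+# An illustrative sketch (not run): for M = 2 statistical parameters per
+# response (MSratio == 2, e.g. negbinomial), create.cms() returns an
+# identity constraint matrix for the intercept block and first-column-only
+# constraints (via eij()) for each latent-variable smooth.
+if(FALSE) {
+    create.cms(Rank = 1, M = 2, MSratio = 2, which = c(lv = 2))
+}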
+
+
+callcaof = function(cmatrix,
+ etamat, xmat, ymat, wvec, modelno,
+ Control, Nice21=TRUE,
+ p1star. = if(any(modelno==c(3,5))) 2 else 1, p2star. =Rank,
+ n, M,
+ othint, othdbl,
+ alldump=FALSE) {
+if(exists("flush.console")) flush.console()
+
+ control = Control
+ Rank = control$Rank
+ p1 = length(control$colx1.index)
+ p2 = length(control$colx2.index)
+ yn = dimnames(ymat)[[2]]
+ if(length(yn) != ncol(ymat)) stop("the column names of ymat must be given")
+ queue = qbig = Rank # 19/10/05; number of smooths per species
+ NOS = if(any(modelno==c(3,5))) M/2 else M
+ df1.nl = procVec(control$df1.nl, yn= yn , Def=control$DF1)
+ spar1 = procVec(control$spar1, yn= yn , Def= control$SPAR1)
+ df2.nl = procVec(control$df2.nl, yn= yn , Def=control$DF2)
+ spar2 = procVec(control$spar2, yn= yn , Def= control$SPAR2)
+ if(any(c(length(spar1),length(spar2),length(df1.nl),length(df2.nl)) != NOS))
+ stop("wrong length in at least one of df1.nl, df2.nl, spar1, spar2")
+
+ cmatrix = matrix(cmatrix, p2, Rank) # crow1C() needs a matrix as input
+ cmatrix = crow1C(cmatrix, crow=control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ evnu = eigen(var(numat))
+ temp7 = if(Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ evnu$vector %*% evnu$value^(-0.5)
+ cmatrix = cmatrix %*% temp7
+ cmatrix = crow1C(cmatrix, crow=control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+
+
+ dim(numat) = c(n, Rank)
+ mynames5 = if(Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ nu1mat = cbind("(Intercept)"=1, lv=numat)
+ dimnames(nu1mat) = list(dimnames(xmat)[[1]], c("(Intercept)", mynames5))
+
+ temp.smooth.frame = vector("list", p1+Rank) # A temporary makeshift frame
+ names(temp.smooth.frame) = c(names(control$colx1.index), mynames5)
+ temp.smooth.frame[[1]] = rep(1, len=n) # Ideally should pass in x1mat
+ for(uu in 1:(p1+Rank)) {
+ temp.smooth.frame[[uu]] = nu1mat[,uu]
+ }
+ temp.smooth.frame = data.frame(temp.smooth.frame)
+ for(uu in 1:Rank) {
+ attr(temp.smooth.frame[,uu+p1], "spar") = 0 # this value unused
+ attr(temp.smooth.frame[,uu+p1], "df") = 4 # this value unused
+ }
+
+ pstar. = p1star. + p2star. # Mdot + Rank
+ nstar = if(Nice21) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ lenbeta = pstar. * ifelse(Nice21, NOS, 1) # Holds the linear coeffs
+
+ inited = if(is.R()) {
+ if(exists(".VGAM.CAO.etamat", envir=VGAMenv)) 1 else 0
+ } else 0
+ usethiseta = if(inited==1)
+ getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else t(etamat)
+
+ if(any(is.na(usethiseta))) {
+ usethiseta = t(etamat) # So that dim(usethiseta)==c(M,n)
+ rmfromVGAMenv("etamat", prefix=".VGAM.CAO.")
+ }
+
+ usethisbeta = if(inited==2)
+ getfromVGAMenv("beta", prefix = ".VGAM.CAO.") else double(lenbeta)
+ othint[5] = inited # Refine initialization within FORTRAN
+ pstar = NOS * pstar.
+ bnumat=if(Nice21) matrix(0,nstar,pstar.) else stop("code not written here")
+
+ M. = MSratio = M / NOS # 1 or 2 usually
+ which = p1 + (1:Rank) # These columns are smoothed
+ nwhich = names(which) = mynames5
+
+ origBlist = Blist. = create.cms(Rank=Rank, M=M., MSratio=MSratio,
+ which=which,p1=p1) # For 1 species
+ ncolBlist. <- unlist(lapply(Blist. , ncol))
+ smooth.frame = s.vam(x=nu1mat, z=NULL, wz=NULL, s=NULL,
+ which=which,
+ smooth.frame=temp.smooth.frame,
+ bf.maxit=control$bf.maxit,
+ bf.epsilon=control$bf.epsilon,
+ trace=FALSE, se.fit=control$se.fit,
+ xbig.save=bnumat, Blist=Blist. ,
+ ncolBlist=ncolBlist. ,
+ M= M. , qbig=NULL, U=NULL, # NULL implies not needed
+ backchat=FALSE,
+ all.knots=control$all.knots, nk=NULL,
+ sf.only=TRUE)
+
+ ldk <- 4 * max(ncolBlist.[nwhich]) # was M; # Prior to 11/7/02
+ ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
+
+
+ dimw. = M. # Smoothing one spp. at a time
+ dimu. = M.
+ wz. = matrix(0, n, dimw. )
+ U. = matrix(0, dimu. , n)
+ if(names(Blist.)[1] != "(Intercept)") stop("something wrong here")
+ Blist.[[1]] <- NULL
+
+ trivc = rep(2 - M. , len=queue) # All of queue smooths are basic smooths
+ ncbvec <- ncolBlist.[nwhich]
+ ncolb <- max(ncbvec)
+ pmax.mwk <- rep( dimw. , length(trivc))
+ pmax.mwk <- pmax(ncbvec*(ncbvec+1)/2, dimw. )
+ size.twk <- max((4+4*smooth.frame$nef)*ncbvec + dimu. * smooth.frame$nef)
+ size.twk <- max(size.twk, M*smooth.frame$n)
+
+ qbig. = NOS * qbig # == NOS * Rank; holds all the smooths
+ if(!isTRUE(all.equal(as.vector(ncbvec), rep(1, len=queue))))
+ stop("ncbvec not right---should be a queue-vector of ones")
+ pbig = pstar. #
+ backchat = FALSE
+
+
+ npetc = c(n=nrow(nu1mat), p. =ncol(nu1mat), q=Rank, # q=length(which),
+ se.fit=control$se.fit, 0,
+ control$bf.maxit, qrank=0, M= M. , nbig=nstar, pbig=pbig,
+ qbig=qbig, dimw= dimw. , dimu= dimu. , ier=0, ldk=ldk)
+
+
+
+
+
+ if(Rank == 2) {
+ spardf = (c(spar1,1+df1.nl,spar2,1+df2.nl))[interleave.VGAM(4*NOS,M=2)]
+ } else {
+ spardf = c(spar1, 1.0+df1.nl)
+ }
+
+ ans1 <- dotFortran(name = "vcao6f",
+ numat=as.double(numat),
+ ymat=as.double(ymat), wvec=as.double(wvec),
+ etamat=as.double(usethiseta),
+ fv=double(NOS*n), z=double(n*M), wz=double(n*M),
+ U=double(M*n), # bnumat=as.double(bnumat),
+ qr=double(nstar*pstar.), qraux=double(pstar.), qpivot=integer(pstar.),
+ n=as.integer(n), M=as.integer(M), NOS=as.integer(NOS),
+ nstar=as.integer(nstar), dimu=as.integer( M ), # for U, not U.
+ errcode=integer(1), othint=as.integer(othint),
+ deviance=double(1), beta=as.double(usethisbeta),
+ twk=double(if(Nice21) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
+ othdbl=as.double(othdbl),
+ npetc = as.integer(npetc), M. = as.integer( M. ),
+ spardf = as.double(spardf),
+ match=as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
+ which=as.integer(which),
+ etal = double( M. * n ),
+ smomat = as.double(matrix(0, n, qbig. )),
+ s0 = double((2* M. )*(2* M. )*2),
+ U. = as.double( U. ), etapmat = as.double( U. ),
+ nu1mat=as.double(nu1mat),
+ blist=as.double(unlist( Blist. )), as.integer(ncbvec),
+ smap=as.integer(1:(Rank+1)), #
+ rcind = integer( M. *( M. +1)), trivc = as.integer(trivc),
+ work1 = double(3*qbig + (9+2*4+max(smooth.frame$nknots))*
+ max(smooth.frame$nknots)),
+ wk2 = double(n* M. *3),
+ wwkmm = double( M. * M. *16 + M. * pbig),
+ work3 = double(max(max(2 * smooth.frame$nef * ncbvec^2),
+ max(smooth.frame$nknots * ncbvec * (4*ncbvec+1)))),
+ sgdub = double(max(smooth.frame$nknots) * max(4,ncolb)),
+ bmb = double( M. * M. ),
+ lev = double(NOS * max(smooth.frame$nef * ncbvec)),
+ mwk = double(max(smooth.frame$nef * (1 + 2* M. + pmax.mwk)) ),
+ ttwk = double(size.twk),
+ bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
+ knots = as.double(unlist(smooth.frame$knots)),
+ bindex = as.integer(smooth.frame$bindex),
+ nknots = as.integer(smooth.frame$nknots),
+ itwk = integer(2 * M. ),
+ kindex = as.integer(smooth.frame$kindex))
+if(exists("flush.console")) flush.console()
+
+
+ if(ans1$errcode == 0) {
+ assign2VGAMenv(c("etamat", "beta"), ans1, prefix=".VGAM.CAO.")
+ if(is.R()) {
+ assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAMenv)
+ } else {
+ .VGAM.CAO.cmatrix <<- matrix(cmatrix,p2,Rank) # matrix reqd for R=2
+ }
+
+ } else {
+ cat("warning in callcaof: error code =", ans1$errcode, "\n")
+ cat("warning in callcaof: npetc[14] =", ans1$npetc[14], "\n")
+ if(exists("flush.console")) flush.console()
+ rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.CAO.")
+ }
+
+ returnans = if(alldump) {
+ bindex = ans1$bindex
+ ncolBlist = ncbvec
+ Bspline2 <- vector("list", NOS)
+ names(Bspline2) <- dimnames(ymat)[[2]]
+ Bspline <- vector("list", length(nwhich))
+ names(Bspline) <- nwhich
+ ind9 = 0 # moving index
+ for(sppno in 1:NOS) {
+ for(ii in 1:length(nwhich)) {
+ ind7 = (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
+ ans = ans1$bcoeff[ind9+ind7]
+ ans = matrix(ans, ncol=ncolBlist[nwhich[ii]])
+ Bspline[[ii]] = new("vsmooth.spline.fit",
+ "Bcoefficients" = ans,
+ "xmax" = smooth.frame$xmax[ii],
+ "xmin" = smooth.frame$xmin[ii],
+ "knots" = as.vector(smooth.frame$knots[[ii]]))
+ }
+ ind9 = ind9 + smooth.frame$bindex[length(nwhich)+1]-1
+ Bspline2[[sppno]] = Bspline
+ }
+
+ qrank = ans1$npetc[7] # Assume all species have the same qrank value
+ dim(ans1$etamat) = c(M,n) # was c(n,M) prior to 22/8/06
+ if(Rank == 2) {
+ spardf = array(ans1$spardf, c(Rank,NOS,2))
+ df1.nl = spardf[1,,2] - 1
+ df2.nl = spardf[2,,2] - 1
+ spar1 = spardf[1,,1]
+ spar2 = spardf[2,,1]
+ } else {
+ df1.nl = ans1$spardf[ NOS+(1:NOS)] - 1
+ spar1 = ans1$spardf[1:NOS]
+ }
+ list(deviance=ans1$deviance,
+ bcoefficients = ans1$bcoefficients,
+ bindex = ans1$bindex,
+ Bspline = Bspline2,
+ Cmat = matrix(cmatrix, p2, Rank, dimnames=list(
+ names(control$colx2.index), mynames5)),
+ coefficients = ans1$beta,
+ df1.nl = df1.nl,
+ df2.nl = if(Rank == 2) df2.nl else NULL,
+ df.residual = n*M - qrank - sum(ans1$df - 1), # zz ??
+ fitted = ans1$fv, # NOS x n
+ kindex = ans1$kindex,
+ predictors = matrix(ans1$etamat, n, M, byrow=TRUE),
+ wresiduals = ans1$z - t(ans1$etamat), # n x M
+ spar1=spar1,
+ spar2=if(Rank == 2) spar2 else NULL)
+ } else
+ ans1$deviance
+ if(exists("flush.console")) flush.console()
+ returnans
+}
+
+
+
+calldcaof = function(cmatrix,
+ etamat, xmat, ymat, wvec, modelno,
+ Control, Nice21=TRUE,
+ p1star. = if(any(modelno==c(3,5))) 2 else 1, p2star. =Rank,
+ n, M,
+ othint, othdbl,
+ alldump=FALSE) {
+ if(alldump) stop("zz really used?")
+if(exists("flush.console")) flush.console()
+
+ if(!Nice21) stop("Nice21 must be TRUE")
+ control = Control
+ Rank = control$Rank
+ p1 = length(control$colx1.index)
+ p2 = length(control$colx2.index)
+ queue = qbig = Rank # Number of smooths per species, as in callcaof()
+ yn = dimnames(ymat)[[2]]
+ if(!length( yn )) yn = paste("Y", 1:ncol(ymat), sep="")
+
+
+ cmatrix = scale(cmatrix)
+
+ xmat2 <- xmat[,control$colx2.index,drop=FALSE] #ccc
+ numat <- xmat2 %*% matrix(cmatrix, p2, Rank)
+ dim(numat) <- c(nrow(xmat), Rank)
+ temp.smooth.frame = vector("list", 1+Rank) # A temporary makeshift frame
+ mynames5 = if(Rank==1) "lv" else paste("lv",1:Rank,sep="")
+ names(temp.smooth.frame) = c("(Intercept)", mynames5)
+ temp.smooth.frame[[1]] = rep(1, len=n)
+ for(uu in 1:Rank) {
+ temp.smooth.frame[[uu+1]] = numat[,uu]
+ }
+ temp.smooth.frame = data.frame(temp.smooth.frame)
+ for(uu in 1:Rank) {
+ attr(temp.smooth.frame[,uu+1], "spar") = 0 # any old value
+ attr(temp.smooth.frame[,uu+1], "df") = 4 # any old value
+ }
+ pstar. = p1star. + p2star.
+ nstar = if(Nice21) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
+ lenbeta = pstar. * ifelse(Nice21, NOS, 1)
+
+ if(TRUE) {
+ inited = if(is.R()) {
+ if(exists(".VGAM.CAO.etamat", envir = VGAMenv)) 1 else 0
+ } else 0
+ usethiseta = if(inited==1) {if(is.R()) get(".VGAM.CAO.etamat",
+ envir = VGAMenv) else .VGAM.CAO.etamat} else t(etamat)
+ }
+ usethisbeta = if(inited==2) {if(is.R()) get(".VGAM.CAO.beta",
+ envir = VGAMenv) else .VGAM.CAO.beta} else double(lenbeta)
+
+
+
+
+
+ pstar = NOS * pstar.
+ bnumat = if(Nice21) matrix(0,nstar,pstar) else stop("need Nice21")
+
+ M. = MSratio = M / NOS # 1 or 2 usually
+ which = p1 + (1:Rank) # The first 1 is the intercept term
+ nwhich = names(which) = mynames5
+
+ origBlist = Blist. = create.cms(Rank=Rank, M=M., MSratio=MSratio,
+ which=which,p1=p1) # For 1 species
+ ncolBlist. <- unlist(lapply(Blist. , ncol))
+ nu1mat = cbind("(Intercept)"=1, lv=numat)
+ dimnames(nu1mat) = list(dimnames(xmat)[[1]], c("(Intercept)", mynames5))
+
+ smooth.frame = s.vam(x=nu1mat, z=NULL, wz=NULL, s=NULL,
+ which=which,
+ smooth.frame=temp.smooth.frame,
+ bf.maxit=control$bf.maxit,
+ bf.epsilon=control$bf.epsilon,
+ trace=FALSE, se.fit=control$se.fit,
+ xbig.save=bnumat, Blist=Blist.,
+ ncolBlist=ncolBlist. ,
+ M= M. , qbig=NULL, U=NULL, # NULL value implies not needed
+ backchat=FALSE,
+ all.knots=control$all.knots, nk=NULL,
+ sf.only=TRUE)
+
+ ldk <- 4 * max(ncolBlist.[nwhich]) # was M; # Prior to 11/7/02
+ ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
+
+
+
+ wz. = matrix(0, n, M. ) # not sure
+ U. = matrix(0, M. , n)
+ dimw. = if(is.matrix( wz. )) ncol( wz. ) else 1
+ dimu. <- if(is.matrix( U. )) nrow( U. ) else 1
+ Blist.[[1]] <- NULL
+ trivc = rep(2 - M. , len=queue) # All of queue smooths are basic smooths
+ ncbvec <- ncolBlist.[nwhich]
+ ncolb <- max(ncbvec)
+ pmax.mwk <- rep( dimw. , length(trivc))
+ pmax.mwk <- pmax(ncbvec*(ncbvec+1)/2, dimw. )
+ size.twk <- max((4+4*smooth.frame$nef)*ncbvec + dimu. *smooth.frame$nef)
+ size.twk <- max(size.twk, M*smooth.frame$n)
+
+
+ qbig. = NOS * qbig # == NOS * Rank
+ pbig = pstar. # Not sure
+ df1.nl = procVec(control$df1.nl, yn= yn , Def=control$DF1)
+ spar1 = procVec(control$spar1, yn= yn , Def= control$SPAR1)
+ # df2.nl and spar2 are needed below (spardf) even when Rank == 1
+ df2.nl = procVec(control$df2.nl, yn= yn , Def=control$DF2)
+ spar2 = procVec(control$spar2, yn= yn , Def= control$SPAR2)
+ backchat = FALSE
+
+
+ npetc = c(n=n, p=1+Rank, length(which), se.fit=control$se.fit, 0,
+ maxitl=control$maxitl, qrank=0, M= M. , n.M = n* M. ,
+ pbig=sum( ncolBlist.),
+ qbig=qbig, dimw= dimw. , dimu= dimu. , ier=0, ldk=ldk)
+
+if(exists("flush.console")) flush.console()
+
+ ans1 <-
+ dotFortran(name = if(Nice21) "vdcaof" else stop("need Nice21"),
+ numat=as.double(numat),
+ as.double(ymat), as.double(wvec),
+ etamat=as.double(usethiseta),
+ fv=double(NOS*n), z=double(n*M), wz=double(n*M),
+ U=double(M*n), # bnumat=as.double(bnumat),
+ qr=double(nstar*pstar.), qraux=double(pstar.), qpivot=integer(pstar.),
+ as.integer(n), as.integer(M), NOS=as.integer(NOS),
+ as.integer(nstar), dimu=as.integer(M),
+ errcode=integer(1), othint=as.integer(othint),
+ deviance=double(1), beta=as.double(usethisbeta),
+ twk=double(if(Nice21) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
+ othdbl=as.double(othdbl),
+ xmat2=as.double(xmat2), onumat=as.double(numat), cmat=as.double(cmatrix),
+ p2=as.integer(p2), deriv=double(p2*Rank),
+ betasave=double(lenbeta),
+ npetc = as.integer(npetc), M. = as.integer( M. ),
+ spardf = as.double(c(spar1, 1.0+df1.nl, spar2, 1.0+df2.nl)),
+ match=as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
+ as.integer(which),
+ etal = double( M. * n ),
+ smomat = as.double(matrix(0, n, qbig. )),
+ s0 = double((2* M. )*(2* M. )*2),
+ U. = as.double( U. ), etapmat = as.double( U. ),
+ nu1mat=as.double(nu1mat),
+ as.double(unlist( Blist. )),
+ as.integer(ncbvec), smap=as.integer(1:(Rank+1)),
+ rcind = integer( M. *( M. +1)), trivc = as.integer(trivc),
+ work1 = double(3*qbig + (9+2*4+max(smooth.frame$nknots))*
+ max(smooth.frame$nknots)),
+ wk2 = double(n* M. *3),
+ wwkmm = double( M. * M. *16 + M. *pbig),
+ work3 = double(max(max(2 * smooth.frame$nef * ncbvec^2),
+ max(smooth.frame$nknots * ncbvec * (4*ncbvec+1)))),
+ sgdub = double(max(smooth.frame$nknots) * max(4,ncolb)),
+ bmb = double( M. * M. ),
+ lev = double(NOS * max(smooth.frame$nef * ncbvec)),
+ mwk = double(max(smooth.frame$nef * (1 + 2* M. + pmax.mwk)) ),
+ ttwk = double(size.twk),
+ bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
+ knots = as.double(unlist(smooth.frame$knots)),
+ bindex = as.integer(smooth.frame$bindex),
+ nknots = as.integer(smooth.frame$nknots),
+ itwk = integer(2* M. ),
+ kindex = as.integer(smooth.frame$kindex))
+if(exists("flush.console")) flush.console()
+
+ if(is.R()) {
+ assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAMenv)
+ assign(".VGAM.CAO.z",ans1$z,envir=VGAMenv)# z; minus any offset
+ assign(".VGAM.CAO.U", ans1$U, envir=VGAMenv) # U
+ } else {
+ .VGAM.CAO.etamat <<- ans1$etamat
+ .VGAM.CAO.z <<- ans1$z
+ .VGAM.CAO.U <<- ans1$U
+ }
+ if(ans1$errcode == 0) {
+ } else {
+ cat("warning in calldcaof: error code =", ans1$errcode, "\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+
+ returnans = if(alldump) {
+ bindex = ans1$bindex
+ ncolBlist = ncbvec
+ Bspline2 <- vector("list", NOS)
+ names(Bspline2) <- dimnames(ymat)[[2]]
+ Bspline <- vector("list", length(nwhich))
+ names(Bspline) <- nwhich
+ ind9 = 0 # moving index
+ for(j in 1:NOS) {
+ for(i in 1:length(nwhich)) {
+ ind9 = ind9[length(ind9)] + (bindex[i]):(bindex[i+1]-1)
+ ans = ans1$bcoeff[ind9]
+ ans = matrix(ans, ncol=ncolBlist[nwhich[i]])
+ Bspline[[i]] = new("vsmooth.spline.fit",
+ "Bcoefficients" = ans,
+ "xmax" = smooth.frame$xmax[i],
+ "xmin" = smooth.frame$xmin[i],
+ "knots" = as.vector(smooth.frame$knots[[i]]))
+ }
+ Bspline2[[j]] = Bspline
+ }
+
+ qrank = ans1$npetc[7] # Assume all species have the same qrank value
+ dim(ans1$etamat) = c(M,n) # bug: was c(n,M) prior to 22/8/06
+ list(deviance=ans1$deviance,
+ bcoefficients = ans1$bcoefficients,
+ bindex = ans1$bindex,
+ Bspline = Bspline2,
+ Cmat=matrix(cmatrix, p2, Rank, dimnames=list(
+ names(control$colx2.index), mynames5)),
+ coefficients=ans1$beta,
+ df1.nl=ans1$spardf[ NOS+(1:NOS)] - 1,
+ df2.nl=ans1$spardf[3*NOS+(1:NOS)] - 1,
+ df.residual = n*M - qrank - sum(ans1$df - 1), # zz ??
+ fitted=ans1$fv,
+ kindex = ans1$kindex,
+ predictors=matrix(ans1$etamat, n, M, byrow=TRUE),
+ wresiduals = ans1$z - t(ans1$etamat), # n x M
+ spar1=ans1$spardf[1:NOS],
+ spar2=ans1$spardf[2*NOS+(1:NOS)])
+ } else
+ ans1$deriv
+ if(exists("flush.console")) flush.console()
+ returnans
+}
+
+
+
+
+
+Coef.cao = function(object,
+ epsOptimum = 0.00001, # determines how accurately Optimum is estimated
+ gridlen = 40, # Number of points on the grid (one level at a time)
+ maxgriditer = 10, # Maximum number of iterations allowed for grid search
+ smallno = 0.05,
+ ...) {
+
+ if(!is.Numeric(epsOptimum, posit=TRUE, allow=1))
+ stop("bad input for argument 'epsOptimum'")
+ if(!is.Numeric(gridlen, posit=TRUE, integer=TRUE) || gridlen < 5)
+ stop("bad input for argument 'gridlen'")
+ if(!is.Numeric(maxgriditer, posit=TRUE, allow=1, int=TRUE) || maxgriditer<3)
+ stop("bad input for argument 'maxgriditer'")
+ if(!is.logical(ConstrainedO <- object@control$ConstrainedO))
+ stop("can't determine whether the model is constrained or not")
+ if(!is.Numeric(smallno, posit=TRUE, allow=1) ||
+ smallno > 0.5 || smallno < 0.0001)
+ stop("bad input for argument 'smallno'")
+ ocontrol = object@control
+ if((Rank <- ocontrol$Rank) > 2) stop("Rank must be 1 or 2")
+ gridlen = rep(gridlen, length=Rank)
+ M = if(any(slotNames(object) == "predictors") &&
+ is.matrix(object@predictors)) ncol(object@predictors) else
+ object@misc$M
+ NOS = if(length(object@y)) ncol(object@y) else M
+ MSratio = M / NOS # 1 or 2; First value is g(mean) = quadratic form in lv
+ nice21 = (length(ocontrol$colx1.index) == 1) &&
+ (names(ocontrol$colx1.index) == "(Intercept)")
+ if(!nice21) stop("Can only handle Norrr = ~ 1")
+
+ p1 = length(ocontrol$colx1.index)
+ p2 = length(ocontrol$colx2.index)
+ modelno = object@control$modelno # 1,2,3,... or 0
+ ynames = object@misc$ynames
+ if(!length(ynames)) ynames = object@misc$predictors.names
+ if(!length(ynames)) ynames = paste("Y", 1:NOS, sep="")
+ lp.names = object@misc$predictors.names
+ if(!length(lp.names)) lp.names = NULL
+
+ lv.names = if(Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ Cmat = object@extra$Cmat # p2 x Rank (provided maxitl > 1)
+ if(ConstrainedO)
+ dimnames(Cmat) = list(names(ocontrol$colx2.index), lv.names)
+ lv.mat = if(ConstrainedO) {
+ object@x[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ } else {
+ object@lv
+ }
+
+ optimum = matrix(as.numeric(NA), Rank, NOS, dimnames=list(lv.names, ynames))
+ extents = apply(lv.mat, 2, range) # 2 by R
+
+ maximum = rep(as.numeric(NA), len=NOS)
+
+ whichSpecies = 1:NOS # Do it for all species
+ if(Rank == 1) {
+ gridd = cbind(seq(extents[1,1], extents[2,1], len=gridlen))
+ } else {
+ gridd = expand.grid(seq(extents[1,1], extents[2,1], len=gridlen[1]),
+ seq(extents[1,2], extents[2,2], len=gridlen[2]))
+ eta2matrix = matrix(0, NOS, 1)
+ }
+ gridd.orig = gridd
+ # if(Rank == 2) then this is for initial values
+ for(sppno in 1:length(whichSpecies)) {
+ gridd = gridd.orig
+ gridres1 = gridd[2,1] - gridd[1,1]
+ gridres2 = if(Rank==2) gridd[2,2] - gridd[1,2] else 0
+ griditer = 1
+
+ thisSpecies = whichSpecies[sppno]
+ indexSpecies = if(is.character(whichSpecies))
+ match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
+ if(is.na(indexSpecies))
+ stop("mismatch found in \"whichSpecies\"")
+
+ while(griditer == 1 ||
+ ((griditer <= maxgriditer) &&
+ ((gridres1 > epsOptimum) || (gridres2 > epsOptimum)))) {
+ temp = predictcao(object, grid=gridd, sppno=thisSpecies,
+ Rank=Rank, deriv=0, MSratio=MSratio)
+ yvals = temp$yvals # gridlen-vector
+ xvals = temp$xvals # gridlen x Rank; gridd
+ if(length(temp$eta2)) eta2matrix[sppno,1] = temp$eta2
+
+ nnn = length(yvals)
+ index = (1:nnn)[yvals==max(yvals)]
+ if(length(index)!=1) warning("could not find a single maximum")
+ if(Rank == 2) {
+ initvalue = rep(xvals[index,], length=Rank) # for optim()
+ # Make sure initvalue is in the interior
+ if(abs(initvalue[1] - extents[1,1]) < smallno)
+ initvalue[1] = extents[1,1] + smallno
+ if(abs(initvalue[1] - extents[2,1]) < smallno)
+ initvalue[1] = extents[2,1] - smallno
+ if(abs(initvalue[2] - extents[1,2]) < smallno)
+ initvalue[2] = extents[1,2] + smallno
+ if(abs(initvalue[2] - extents[2,2]) < smallno)
+ initvalue[2] = extents[2,2] - smallno
+ break
+ }
+ if(index == 1 || index == nnn) {
+ maximum[sppno] = optimum[1,sppno] = NA
+ gridres1 = epsOptimum + 1 # equivalent to a break
+ break # just in case
+ } else {
+ maximum[sppno] = yvals[index] # on the eta scale
+ optimum[1,sppno] = xvals[index,1]
+ gridd[,1] = seq(
+ max(extents[1,1], optimum[1,sppno]-gridres1),
+ min(extents[2,1], optimum[1,sppno]+gridres1),
+ len=gridlen)
+ gridres1 = gridd[2,1] - gridd[1,1]
+ griditer = griditer + 1
+ }
+ } # of while
+
+ if(Rank == 2) {
+ # Rank = 2, so use optim(). The above was to get initial values.
+ myfun = function(x, object, sppno, Rank=1, deriv=0, MSratio=1) {
+ # x is a 2-vector
+ x = matrix(x, 1, length(x))
+ temp = predictcao(object, grid=x, sppno=sppno,
+ Rank=Rank, deriv=deriv, MSratio=MSratio)
+ temp$yvals
+ }
+ answer = optim(initvalue, myfun, gr=NULL, method="L-BFGS-B",
+ lower=extents[1,], upper=extents[2,],
+ control=list(fnscale = -1), # maximize!
+ object=object, sppno=sppno, Rank=Rank,
+ deriv=0, MSratio=MSratio)
+ # Check to see if the soln is at the boundary. If not, assign it.
+ for(rindex in 1:Rank)
+ if(abs(answer$par[rindex] - extents[1,rindex]) > smallno &&
+ abs(answer$par[rindex] - extents[2,rindex]) > smallno) {
+ optimum[rindex,sppno] = answer$par[rindex]
+ maximum[sppno] = answer$value
+ }
+ } # end of Rank=2
+ } # end of sppno
+ myetamat = rbind(maximum)
+ if(MSratio == 2) myetamat = kronecker(myetamat, matrix(1:0, 1, 2))
+ maximum = object@family@inverse(eta=myetamat, extra=object@extra)
+ maximum = c(maximum) # Convert from matrix to vector
+ names(maximum) = ynames
+
+ ans = new(Class="Coef.cao",
+ Bspline = object@Bspline,
+ Constrained=ConstrainedO,
+ df1.nl = object@extra$df1.nl,
+ lv = lv.mat,
+ lvOrder = lv.mat,
+ Maximum = maximum,
+ M = M,
+ NOS = NOS,
+ Optimum=optimum,
+ OptimumOrder=optimum,
+ Rank = Rank,
+ spar1 = object@extra$spar1)
+ if(ConstrainedO) {ans@C = Cmat} else {Cmat = NULL}
+ if(Rank == 2) {
+ dimnames(eta2matrix) = list(
+ object@misc$predictors.names[c(FALSE,TRUE)], " ")
+ ans@eta2 = eta2matrix
+ ans@df2.nl = object@extra$df2.nl
+ ans@spar2 = object@extra$spar2
+ }
+
+ for(rindex in 1:Rank) {
+ ans@OptimumOrder[rindex,] = order(ans@Optimum[rindex,])
+ ans@lvOrder[,rindex] = order(ans@lv[,rindex])
+ }
+
+ if(length(object@misc$estimated.dispersion) &&
+ object@misc$estimated.dispersion) {
+ p = length(object@coefficients)
+ n = object@misc$n
+ M = object@misc$M
+ NOS = if(length(object@y)) ncol(object@y) else M
+ pstar = p + length(Cmat) # Adjustment
+ adjusted.dispersion = object@misc$dispersion * (n*M - p) /
+ (n*M - pstar)
+ ans@dispersion = adjusted.dispersion
+ }
+ if(MSratio == 2) {
+ lcoef = object@coefficients
+ temp = lcoef[((1:NOS)-1)*(2+Rank)+2]
+ names(temp) = object@misc$predictors.names[2*(1:NOS)]
+ ans@dispersion = temp
+ }
+ dimnames(ans@Optimum) = list(lv.names, ynames)
+ ans
+}
+
+
+setClass("Coef.cao", representation(
+ "Bspline" = "list",
+ "C" = "matrix",
+ "Constrained" = "logical",
+ "df1.nl" = "numeric",
+ "df2.nl" = "numeric",
+ "dispersion" = "numeric",
+ "eta2" = "matrix",
+ "lv" = "matrix",
+ "lvOrder" = "matrix",
+ "M" = "numeric",
+ "Maximum" = "numeric",
+ "NOS" = "numeric",
+ "Optimum" = "matrix",
+ "OptimumOrder" = "matrix",
+ "Rank" = "numeric",
+ "spar1" = "numeric",
+ "spar2" = "numeric"))
+
+
+printCoef.cao = function(object, digits = max(2, options()$digits-2), ...) {
+ Rank = object@Rank
+ NOS = object@NOS
+ M = object@M
+
+ Maximum = if(length(object@Maximum)) cbind(Maximum=object@Maximum) else NULL
+ optmat = cbind(t(object@Optimum))
+ dimnames(optmat) = list(dimnames(optmat)[[1]],
+ if(Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep=".")
+ else "Optimum")
+
+ if( object@Constrained ) {
+ cat("\nC matrix (constrained/canonical coefficients)\n")
+ print(object@C, digits=digits, ...)
+ }
+ cat("\nOptima and maxima\n")
+ print(cbind(Optimum=optmat,
+ Maximum), digits = max(1, digits-1))
+ cat("\nNonlinear degrees of freedom\n")
+ if(Rank == 1) {
+ print(cbind(df1.nl = object@df1.nl), digits=max(2, digits-1), ...)
+ } else {
+ print(cbind(df1.nl = object@df1.nl,
+ df2.nl = object@df2.nl), digits=max(2, digits-1), ...)
+ }
+ invisible(object)
+}
+
+
+
+
+
+ setMethod("show", "Coef.cao", function(object)
+ printCoef.cao(object))
+ setMethod("print", "Coef.cao", function(x, ...)
+ printCoef.cao(object=x, ...))
+
+
+setMethod("coef", "cao", function(object, ...) Coef.cao(object, ...))
+setMethod("coefficients", "cao", function(object, ...) Coef.cao(object, ...))
+setMethod("Coef", "cao", function(object, ...) Coef.cao(object, ...))
+
+
+
+
+lvplot.cao = function(object,
+ add= FALSE, plot.it= TRUE, rugplot = TRUE, y = FALSE,
+ type=c("fitted.values", "predictors"),
+ xlab=paste("Latent Variable", if(Rank==1) "" else " 1", sep=""),
+ ylab=if(Rank==1) switch(type, predictors="Predictors",
+ fitted.values="Fitted values") else "Latent Variable 2",
+ pcex=par()$cex, pcol=par()$col, pch=par()$pch,
+ llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
+ label.arg= FALSE, adj.arg=-0.5,
+ sites= FALSE, spch=NULL, scol=par()$col, scex=par()$cex,
+ sfont=par()$font,
+ whichSpecies = NULL,
+ check.ok = TRUE, ...)
+{
+ type <- match.arg(type, c("fitted.values", "predictors"))[1]
+
+ if((Rank <- object@control$Rank) > 2)
+ stop("can only handle rank 1 or 2 models")
+ M = if(any(slotNames(object) == "predictors") &&
+ is.matrix(object@predictors)) ncol(object@predictors) else
+ object@misc$M
+ NOS = ncol(object@y)
+ MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ n = object@misc$n
+ colx2.index = object@control$colx2.index
+ cx1i = object@control$colx1.index
+ if(!length(whichSpecies)) whichSpecies = 1:NOS
+ if(check.ok)
+ if(!(length(cx1i)==1 && names(cx1i)=="(Intercept)"))
+ stop("latent variable plots allowable only for Norrr = ~ 1 models")
+
+ Coeflist = Coef(object)
+ Cmat = Coeflist@C
+ lvmat = Coeflist@lv # n x Rank
+
+ if(!plot.it) return(lvmat)
+
+ r.curves = slot(object, type) # n times (M or S) (\boldeta or \boldmu)
+ if(MSratio != 1 && type == "predictors")
+ stop("can only plot the predictors if M = S")
+ MorS = ncol(r.curves) # Actually, here, the value is S always.
+ if(!add) {
+ if(Rank==1) {
+ matplot(lvmat,
+ if( y && type=="fitted.values")
+ object@y[,whichSpecies,drop=FALSE] else
+ r.curves[,whichSpecies,drop=FALSE],
+ type="n", xlab=xlab, ylab=ylab, ...)
+ } else { # Rank==2
+ matplot(c(Coeflist@Optimum[1,whichSpecies], lvmat[,1]),
+ c(Coeflist@Optimum[2,whichSpecies], lvmat[,2]),
+ type="n", xlab=xlab, ylab=ylab, ...)
+ }
+ }
+
+
+ pch <- rep(pch, leng=length(whichSpecies))
+ pcol <- rep(pcol, leng=length(whichSpecies))
+ pcex <- rep(pcex, leng=length(whichSpecies))
+ llty <- rep(llty, leng=length(whichSpecies))
+ lcol <- rep(lcol, leng=length(whichSpecies))
+ llwd <- rep(llwd, leng=length(whichSpecies))
+ adj.arg <- rep(adj.arg, leng=length(whichSpecies))
+
+ sppnames = if(type=="predictors") dimnames(r.curves)[[2]] else
+ dimnames(object@y)[[2]]
+ if(Rank==1) {
+ for(sppno in 1:length(whichSpecies)) {
+ thisSpecies = whichSpecies[sppno]
+ indexSpecies = if(is.character(whichSpecies))
+ match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
+ if(is.na(indexSpecies))
+ stop("mismatch found in \"whichSpecies\"")
+ xx = lvmat
+ yy = r.curves[,indexSpecies]
+ o = sort.list(xx)
+ xx = xx[ o ]
+ yy = yy[ o ]
+ lines(xx, yy, col=lcol[sppno], lwd=llwd[sppno], lty=llty[sppno])
+ if( y && type=="fitted.values") {
+ ypts = object@y
+ if(ncol(as.matrix(ypts)) == ncol(r.curves))
+ points(xx, ypts[o,sppno], col=pcol[sppno],
+ cex=pcex[sppno], pch=pch[sppno])
+ }
+ }
+ if(rugplot) rug(xx)
+ } else {
+ if(sites) {
+ text(lvmat[,1], lvmat[,2], adj=0.5,
+ labels=if(is.null(spch)) dimnames(lvmat)[[1]] else
+ rep(spch, length=nrow(lvmat)), col=scol, cex=scex, font=sfont)
+ }
+ for(sppno in 1:length(whichSpecies)) {
+ thisSpecies = whichSpecies[sppno]
+ indexSpecies = if(is.character(whichSpecies))
+ match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
+ if(is.na(indexSpecies))
+ stop("mismatch found in \"whichSpecies\"")
+ points(Coeflist@Optimum[1,indexSpecies],
+ Coeflist@Optimum[2,indexSpecies],
+ col=pcol[sppno], cex=pcex[sppno], pch=pch[sppno])
+ }
+ if(label.arg) {
+ for(sppno in 1:length(whichSpecies)) {
+ thisSpecies = whichSpecies[sppno]
+ indexSpecies = if(is.character(whichSpecies))
+ match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
+ text(Coeflist@Optimum[1,indexSpecies],
+ Coeflist@Optimum[2,indexSpecies],
+ labels=(dimnames(Coeflist@Optimum)[[2]])[indexSpecies],
+ adj=adj.arg[sppno], col=pcol[sppno], cex=pcex[sppno])
+ }
+ }
+ }
+ invisible(lvmat)
+}
+
+
+setMethod("lvplot", "cao",
+ function(object, ...) {
+ invisible(lvplot.cao(object, ...))})
+
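+# A usage sketch (hypothetical fitted rank-1 object 'myfit'):
+#   lvplot(myfit)                      # fitted curves against the latent variable
+#   lvplot(myfit, y = TRUE, pch = 1:3) # also overlay the observed responses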
+
+
+predict.cao <- function (object, newdata=NULL,
+ type = c("link", "response", "terms"),
+ deriv = 0, ...) {
+ type <- match.arg(type, c("link", "response", "terms"))[1]
+ if(type != "link" && deriv != 0)
+ stop("Setting deriv=<positive integer> requires type=\"link\"")
+ na.act = object@na.action
+ object@na.action = list()
+ ocontrol = object@control
+ nice21 = (length(ocontrol$colx1.index) == 1) &&
+ (names(ocontrol$colx1.index) == "(Intercept)")
+ if(!nice21) stop("Can only handle Norrr = ~ 1")
+
+ if(!length(newdata) && type=="response" && length(object@fitted.values)) {
+ if(length(na.act)) {
+ return(napredict(na.act[[1]], object@fitted.values))
+ } else {
+ return(object@fitted.values)
+ }
+ }
+
+ attrassignlm <- function(object, ...)
+ attrassigndefault(model.matrix(object), object@terms)
+
+ attrassigndefault <- function(mmat, tt) {
+ if (!inherits(tt, "terms"))
+ stop("need terms object")
+ aa <- attr(mmat, "assign")
+ if (is.null(aa))
+ stop("argument is not really a model matrix")
+ ll <- attr(tt, "term.labels")
+ if (attr(tt, "intercept") > 0)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
+ split(order(aa), aaa)
+ }
+
+ if(!length(newdata)) {
+ X <- model.matrixvlm(object, type="lm", ...)
+ offset <- object@offset
+ tt <- terms(object)
+ if(is.R() && !length(object@x))
+ attr(X, "assign") <- attrassignlm(X, tt)
+ } else {
+ if(is.smart(object) && length(object@smart.prediction)) {
+ setup.smart("read", smart.prediction=object@smart.prediction)
+ }
+
+ tt <- terms(object) # 11/8/03; object@terms$terms
+ X <- model.matrix(delete.response(tt), newdata, contrasts =
+ if(length(object@contrasts)) object@contrasts else NULL,
+ xlev = object@xlevels)
+
+ if(is.R() && nice21 && nrow(X)!=nrow(newdata)) {
+ as.save = attr(X, "assign")
+ X = X[rep(1, nrow(newdata)),,drop=FALSE]
+ dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
+ attr(X, "assign") = as.save # Restored
+ }
+
+ offset <- if (!is.null(off.num<-attr(tt,"offset")))
+ eval(attr(tt,"variables")[[off.num+1]], newdata)
+ else if (!is.null(object@offset))
+ eval(object@call$offset, newdata)
+
+ if(is.smart(object) && length(object@smart.prediction)) {
+ wrapup.smart()
+ }
+
+ if(is.R())
+ attr(X, "assign") <- attrassigndefault(X, tt)
+ }
+
+ # zz 30/8/06; use object@extra$Cmat instead? # Not 100% sure here
+ cancoefs = ccoef(object)
+
+ lvmat = X[,ocontrol$colx2.index,drop=FALSE] %*% cancoefs # n x Rank
+
+ Rank = ocontrol$Rank
+ NOS = ncol(object@y)
+ sppnames = dimnames(object@y)[[2]]
+ modelno = ocontrol$modelno # 1,2,3,5 or 0
+ M = if(any(slotNames(object) == "predictors") &&
+ is.matrix(object@predictors)) ncol(object@predictors) else
+ object@misc$M
+ MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ if(type == "terms") {
+ terms.mat = matrix(0, nrow(X), Rank*NOS) # 1st R colns for spp.1, etc.
+ interceptvector = rep(0, len=NOS)
+ } else {
+ etamat = matrix(0, nrow(X), M) # Could contain derivatives
+ }
+ ind8 = 1:Rank
+ whichSpecies = 1:NOS # Do it all for all species
+ for(sppno in 1:length(whichSpecies)) {
+ thisSpecies = whichSpecies[sppno]
+ indexSpecies = if(is.character(whichSpecies))
+ match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
+ if(is.na(indexSpecies))
+ stop("mismatch found in \"whichSpecies\"")
+
+ temp345 = predictcao(object, grid=lvmat, sppno=thisSpecies,
+ Rank=Rank, deriv=deriv, MSratio=MSratio,
+ type=ifelse(type=="response", "link", type))
+ if(MSratio == 2) {
+ if(any(type == c("link", "response"))) {
+ etamat[,2*sppno-1] = temp345$yvals
+ etamat[,2*sppno ] = temp345$eta2
+ } else {
+ terms.mat[,ind8] = temp345
+ interceptvector[sppno] = attr(temp345, "constant")
+ }
+ } else {
+ if(any(type == c("link", "response"))) {
+ etamat[,sppno] = temp345$yvals
+ } else {
+ terms.mat[,ind8] = temp345
+ interceptvector[sppno] = attr(temp345, "constant")
+ }
+ }
+ ind8 = ind8 + Rank
+ }
+
+ if(length(offset) && any(offset != 0))
+ etamat <- etamat + offset
+
+ if(type == "link") {
+ dimnames(etamat) = list(dimnames(X)[[1]], if(deriv == 0)
+ object@misc$predictors.names else NULL)
+ return(etamat)
+ } else if(type == "response") {
+ fv <- object@family@inverse(etamat, extra=object@extra)
+ dimnames(fv) = list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ return(fv)
+ } else {
+ attr(terms.mat, "constant") = interceptvector
+ terms.mat
+ }
+}
+
+
+
+setMethod("predict", "cao", function(object, ...)
+ predict.cao(object, ...))
+
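+# A usage sketch (hypothetical objects; 'newdat' must contain the same
+# non-Norrr predictor columns used in the original fit):
+#   predict(myfit, type = "response")               # fitted values
+#   predict(myfit, newdata = newdat, type = "link") # predictors at new sites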
+
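+# predictcao() evaluates one species' additive predictor on 'grid': the
+# fitted B-spline contribution for each latent variable is added to the
+# linear (intercept plus slope) part; with type="terms" each column is
+# centered about the mean latent variable and the intercept is carried in
+# the "constant" attribute.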
+predictcao <- function(object, grid, sppno, Rank=1, deriv=0, MSratio=1,
+ type="link") {
+ if(type != "link" && type != "terms")
+ stop("'link' must be \"link\" or \"terms\"")
+ if(ncol(grid <- as.matrix(grid)) != Rank)
+ stop(paste("'grid' must have", Rank, "columns"))
+ if(!is.Numeric(1+deriv, allow=1, positive=TRUE, integ=TRUE))
+ stop("'deriv' must be a non-negative integer")
+ if(type == "terms" && deriv != 0)
+ stop("'deriv' must be 0 when type=\"terms\"")
+
+ temp.b = object@Bspline[[sppno]]
+ if(type == "terms") {
+ meanlv = apply(grid, 2, mean)
+ answer = matrix(0, nrow(grid), Rank)
+ } else {
+ nlfunvalues = 0
+ }
+ for(rindex in 1:Rank) {
+ temp = temp.b[[rindex]] # temp is of class "vsmooth.spline.fit"
+ nlpart = predict(temp, grid[,rindex], deriv=deriv)
+ yvals = nlpart$y
+ if(type == "terms") {
+ answer[,rindex] = yvals
+ } else {
+ nlfunvalues = nlfunvalues + yvals
+ }
+ }
+
+ # Get the linear part of the additive predictor (intercept and slopes)
+ lcoef = object@coefficients # linear coeffs; don't use coef() (==Coef)
+ llcoef = lcoef[(1+(sppno-1)*(MSratio+Rank)):(sppno*(MSratio+Rank))]
+ if(type == "terms") {
+ interceptvector = llcoef[1]
+ for(rindex in 1:Rank) {
+ answer[,rindex] = answer[,rindex] +
+ (grid[,rindex] - meanlv[rindex]) * llcoef[MSratio+rindex]
+ interceptvector = interceptvector +
+ meanlv[rindex] * llcoef[MSratio+rindex]
+ }
+ } else {
+ linpar = if(deriv==0) {llcoef[1]+grid %*% llcoef[-(1:MSratio)]} else
+ {if(deriv==1) llcoef[MSratio+rindex] else 0}
+ nlfunvalues = nlfunvalues + linpar # Now complete
+ }
+ if(type == "terms") {
+ attr(answer, "constant") = interceptvector
+ answer
+ } else {
+ list(xvals = grid,
+ yvals = c(nlfunvalues),
+ eta2 = if(MSratio == 2) llcoef[MSratio] else NULL)
+ }
+}
+
+
+
+
+plot.cao = function(x,
+ xlab=if(Rank==1) "Latent Variable" else
+ paste("Latent Variable", 1:Rank),
+ ylab=NULL, residuals.arg=FALSE,
+ pcol=par()$col, pcex=par()$cex, pch=par()$pch,
+ lcol=par()$col, lwd=par()$lwd, lty=par()$lty,
+ add=FALSE,
+ main=NULL,
+ center.cf = Rank > 1,
+ WhichRank = 1:Rank,
+ whichSpecies = NULL, # a numeric or character vector
+ rugplot=TRUE, se.arg=FALSE, deriv=0,
+ scale=0, ylim=NULL,
+ overlay = FALSE, ...)
+{
+ Rank = x@control$Rank
+ if(!is.logical(center.cf) || length(center.cf) != 1)
+ stop("bad input for argument 'center.cf'")
+ if(Rank > 1 && !center.cf)
+ stop("center.cf=TRUE is needed for models with Rank>1")
+ NOS = ncol(x@y)
+ sppnames = dimnames(x@y)[[2]]
+ modelno = x@control$modelno # 1,2,3, or 0
+ M = if(any(slotNames(x) == "predictors") &&
+ is.matrix(x@predictors)) ncol(x@predictors) else x@misc$M
+ if(all((MSratio <- M / NOS) != c(1,2))) stop("bad value for 'MSratio'")
+ pcol = rep(pcol, length=Rank*NOS)
+ pcex = rep(pcex, length=Rank*NOS)
+ pch = rep(pch, length=Rank*NOS)
+ lcol = rep(lcol, length=Rank*NOS)
+ lwd = rep(lwd, length=Rank*NOS)
+ lty = rep(lty, length=Rank*NOS)
+ xlab = rep(xlab, length=Rank)
+ if(!length(whichSpecies)) whichSpecies = 1:NOS
+ if(length(ylab))
+ ylab = rep(ylab, len=length(whichSpecies)) # Too long if overlay
+ if(length(main))
+ main = rep(main, len=length(whichSpecies)) # Too long if overlay
+ lvmat = lv(x)
+ nice21 = length(x@control$colx1.index) == 1 &&
+ names(x@control$colx1.index) == "(Intercept)"
+ if(!nice21)
+ stop("can only handle intercept-only models")
+ counter = 0
+ for(sppno in 1:length(whichSpecies)) {
+ thisSpecies = whichSpecies[sppno]
+ indexSpecies = if(is.character(whichSpecies))
+ match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
+ if(is.na(indexSpecies))
+ stop("mismatch found in \"whichSpecies\"")
+ terms.mat = predictcao(object=x, grid=lvmat, type="terms",
+ sppno=indexSpecies, Rank=Rank,
+ deriv=deriv, MSratio=MSratio)
+ for(rindex in WhichRank) {
+ xvals = lvmat[,rindex]
+ yvals = terms.mat[,rindex]
+ o = sort.list(xvals)
+ xvals = xvals[ o ]
+ yvals = yvals[ o ]
+ if(!center.cf) yvals = yvals + attr(terms.mat, "constant")
+ if(!add)
+ if(sppno==1 || !overlay) {
+ ylim.use = if(length(ylim)) ylim else
+ ylim.scale(range(yvals), scale)
+ matplot(xvals, yvals, type="n",
+ xlab=xlab[rindex],
+ ylab=if(length(ylab)) ylab[sppno] else
+ ifelse(overlay, "Fitted functions", "Fitted function"),
+ main=if(length(main)) main[sppno] else
+ ifelse(overlay, "", sppnames[thisSpecies]),
+ ylim=ylim.use,
+ ...)
+ }
+ if(residuals.arg) {
+ stop("can't handle residuals=TRUE yet")
+ points(x@x, yymat[,i], col=pcol[i], pch=pch[i], cex=pcex[i]) # unreachable placeholder
+ }
+ counter = counter + 1
+ lines(xvals, yvals,
+ col=lcol[counter], lwd=lwd[counter], lty=lty[counter])
+ if(rugplot) rug(xvals)
+ }
+ }
+ invisible(x)
+}
+
+
+
+
+setMethod("plot", "cao",
+ function(x, y, ...) {
+ if(!missing(y)) stop("can't process the \"y\" argument")
+ invisible(plot.cao(x, ...))})
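+
+# A usage sketch (hypothetical fitted object 'myfit'):
+#   plot(myfit, lcol = "blue")              # one panel per species
+#   plot(myfit, overlay = TRUE, lcol = 1:3) # all fitted functions on one plot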
+
+
+
+persp.cao = function(x,
+ plot.it=TRUE,
+ xlim=NULL, ylim=NULL, zlim=NULL, # zlim ignored if Rank==1
+ gridlength=if(Rank==1) 301 else c(51,51),
+ whichSpecies = NULL,
+ xlab=if(Rank==1) "Latent Variable" else "Latent Variable 1",
+ ylab=if(Rank==1) "Expected Value" else "Latent Variable 2",
+ zlab="Expected value",
+ labelSpecies = FALSE, # For Rank==1 only
+ stretch = 1.05, # quick and dirty, Rank==1 only
+ main="",
+ ticktype = "detailed",
+ col = if(Rank==1) par()$col else "white",
+ lty=par()$lty,
+ lwd=par()$lwd,
+ rugplot=FALSE,
+ ...) {
+ object = x # don't like x as the primary argument
+ coefobj = Coef(object) # (Rotate=FALSE, ITolerances=TRUE) # zzzz
+ if((Rank <- coefobj@Rank) > 2)
+ stop("object must be a rank-1 or rank-2 model")
+ fvmat = fitted(object)
+ NOS = ncol(fvmat) # Number of species
+ M = if(any(slotNames(object) == "predictors") &&
+ is.matrix(object@predictors)) ncol(object@predictors) else
+ object@misc$M
+ MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+
+ xlim = if(length(xlim)) xlim else range(coefobj@lv[,1])
+ if(!length(ylim)) {
+ ylim = if(Rank==1) c(0, max(fvmat)*stretch) else range(coefobj@lv[,2])
+ }
+ xlim = rep(xlim, length=2)
+ ylim = rep(ylim, length=2)
+ gridlength = rep(gridlength, length=Rank)
+ lv1 = seq(xlim[1], xlim[2], length=gridlength[1])
+ lv2 = if(Rank == 2) seq(ylim[1], ylim[2], len=gridlength[2]) else NULL
+ lvmat = if(Rank == 2) expand.grid(lv1, lv2) else cbind(lv1)
+
+ sppNames = dimnames(object@y)[[2]]
+ if(!length(whichSpecies)) {
+ whichSpecies = sppNames[1:NOS]
+ whichSpecies.numer = 1:NOS
+ } else
+ if(is.numeric(whichSpecies)) {
+ whichSpecies.numer = whichSpecies
+ whichSpecies = sppNames[whichSpecies.numer] # Convert to character
+ } else
+ whichSpecies.numer = match(whichSpecies, sppNames)
+
+ LP = matrix(as.numeric(NA), nrow(lvmat), NOS) # For first eta for each spp.
+ for(sppno in 1:NOS) {
+ temp = predictcao(object=object, grid=lvmat, sppno=sppno,
+ Rank=Rank, deriv=0, MSratio=MSratio)
+ LP[,sppno] = temp$yvals
+ }
+ if(MSratio == 2) {
+ LP = kronecker(LP, matrix(1:0, 1, 2)) # n x M
+ }
+ fitvals = object@family@inverse(LP, extra=object@extra) # n by NOS
+ dimnames(fitvals) = list(NULL, dimnames(fvmat)[[2]])
+
+ if(Rank==1) {
+ if(plot.it) {
+ ylim = c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
+ col = rep(col, len=length(whichSpecies.numer))
+ lty = rep(lty, len=length(whichSpecies.numer))
+ lwd = rep(lwd, len=length(whichSpecies.numer))
+ matplot(lv1, fitvals, xlab=xlab, ylab=ylab,
+ type="n", main=main, xlim=xlim, ylim=ylim, ...)
+ if(rugplot) rug(lv(object))
+ for(sppno in 1:length(whichSpecies.numer)) {
+ ptr2 = whichSpecies.numer[sppno] # points to species column
+ lines(lv1, fitvals[,ptr2], col=col[sppno],
+ lty=lty[sppno], lwd=lwd[sppno], ...)
+ if(labelSpecies) {
+ ptr1=(1:nrow(fitvals))[max(fitvals[,ptr2])==fitvals[,ptr2]]
+ ptr1 = ptr1[1]
+ text(lv1[ptr1], fitvals[ptr1,ptr2]+(stretch-1) *
+ diff(range(ylim)), label=sppNames[sppno],
+ col=col[sppno], ...)
+ }
+ }
+ }
+ } else {
+ maxfitted = matrix(fitvals[,whichSpecies[1]], length(lv1), length(lv2))
+ if(length(whichSpecies) > 1)
+ for(sppno in whichSpecies[-1]) {
+ maxfitted = pmax(maxfitted, matrix(fitvals[,sppno],
+ length(lv1), length(lv2)))
+ }
+ if(!length(zlim))
+ zlim = range(maxfitted, na.rm = TRUE)
+ if(plot.it)
+ graphics:::persp.default(lv1, lv2, maxfitted,
+ zlim=zlim,
+ xlab=xlab, ylab=ylab, zlab=zlab,
+ ticktype = ticktype, col = col, main=main, ...)
+ }
+
+ invisible(list(fitted=fitvals,
+ lv1grid=lv1,
+ lv2grid=if(Rank==2) lv2 else NULL,
+ maxfitted=if(Rank==2) maxfitted else NULL))
+}
+
+
+if(!isGeneric("persp"))
+ setGeneric("persp", function(x, ...) standardGeneric("persp"))
+setMethod("persp", "cao", function(x, ...) persp.cao(x=x, ...))
+
+
+
+lv.cao = function(object, ...) {
+ Coef(object, ...)@lv
+}
+lv.Coef.cao = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ object@lv
+}
+
+
+if(!isGeneric("lv"))
+ setGeneric("lv", function(object, ...) standardGeneric("lv"))
+setMethod("lv", "cao", function(object, ...) lv.cao(object, ...))
+setMethod("lv", "Coef.cao", function(object, ...) lv.Coef.cao(object, ...))
+
+
+
+
+
+setClass("summary.cao", representation("Coef.cao",
+ "misc" = "list",
+ "call" = "call"))
+
+summary.cao = function(object, ...) {
+ answer = Coef(object, ...)
+ class(answer) = "summary.cao"
+ answer@misc = object@misc
+ answer@call = object@call
+ answer
+}
+
+printsummary.cao = function(x, ...) {
+ cat("\nCall:\n")
+ dput(x@call)
+
+ printCoef.cao(x, ...)
+
+ cat("\nNumber of species: ", x at NOS, "\n")
+
+ if(length(x at misc$dispersion) == 1) {
+ cat("\nDispersion parameter(s): ", x at misc$dispersion, "\n")
+ } else if(is.Numeric(x at dispersion)) {
+ cat("\nDispersion parameter(s)\n")
+ print( x at dispersion, ... )
+ }
+ invisible(x)
+}
+
+setMethod("summary", "cao", function(object, ...)
+ summary.cao(object, ...))
+
+setMethod("print", "summary.cao",
+ function(x, ...)
+ invisible(printsummary.cao(x, ...)))
+
+setMethod("show", "summary.cao",
+ function(object)
+ invisible(printsummary.cao(object)))
+
+
+
+
+ccoef.cao = function(object, ...) {
+ Coef(object, ...)@C
+}
+
+ccoef.Coef.cao = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ object@C
+}
+
+
+if(!isGeneric("ccoef"))
+ setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
+setMethod("ccoef", "cao", function(object, ...) ccoef.cao(object, ...))
+setMethod("ccoef", "Coef.cao", function(object, ...) ccoef.Coef.cao(object, ...))
+
+
+if(!isGeneric("calibrate"))
+ setGeneric("calibrate", function(object, ...) standardGeneric("calibrate"))
+setMethod("calibrate", "cao", function(object, ...)
+ calibrate.qrrvglm(object, ...))
+
+
+setMethod("calibrate", "qrrvglm", function(object, ...)
+ calibrate.qrrvglm(object, ...))
+
+
+Tol.cao = function(object, ...) {
+ stop("The tolerance for a \"cao\" object is undefined")
+}
+
+if(!isGeneric("Tol"))
+ setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
+setMethod("Tol", "cao", function(object, ...)
+ Tol.cao(object, ...))
+
+
+
+
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
new file mode 100644
index 0000000..c416fcc
--- /dev/null
+++ b/R/coef.vlm.q
@@ -0,0 +1,211 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+coefvlm <- function(object, matrix.out=FALSE, label=TRUE, compress=TRUE)
+{
+
+ ans <- object@coefficients
+ if(!label)
+ names(ans) <- NULL
+ if(!matrix.out && compress)
+ return(ans)
+
+
+ ncolx <- object@misc$p # = length(object@constraints)
+ M <- object@misc$M
+
+ xij <- object@control$xij
+ Blist <- object@constraints
+ if(!length(xij) && all(trivial.constraints(Blist))) {
+ B <- matrix(ans, nrow=ncolx, ncol=M, byrow=TRUE)
+ } else {
+ B <- matrix(as.numeric(NA), nrow=ncolx, ncol=M)
+
+ if(length(xij)) {
+
+ Xmat = object@x # model.matrix(object)
+ atx = attr(Xmat, "assign")
+ tmp9=attributes(lm2vlm.model.matrix(Xmat,object@constraints,xij=xij))
+ nasgn = tmp9$assign
+ vasgn = tmp9$vassign
+ if(!length(atx) || !length(nasgn) || !length(vasgn))
+ stop("can't get atx, nasgn and/or vasgn")
+
+ if(inherits(xij, "formula"))
+ xij = list(xij)
+
+ for(i in 1:length(xij)) {
+ tform = terms(xij[[i]])
+ atform = attr(tform, "term.labels")
+ for(k in 1:length(atform)) {
+ for(s in atx[[(atform[[k]])]]) {
+ if(length(Blist[[s]])) {
+ Blist[[s]] = ei(k, M) # changed
+ }
+ }
+ }
+ }
+
+
+ ncolBlist <- unlist(lapply(Blist, ncol)) # Modified
+ ans.save = ans # small
+ ans = rep(as.numeric(NA), len=sum(ncolBlist)) # big
+ ans.copied = rep(FALSE, len=length(ans))
+ ans.save.copied = rep(FALSE, len=length(ans.save))
+
+ ptr0 = rep(as.numeric(NA), len=length(xij))
+ for(i in 1:length(xij)) {
+ tform = terms(xij[[i]])
+ atform = attr(tform, "term.labels")
+ response.name = (dimnames(attr(tform, "factors"))[[1]])[1]
+
+ for(s in 1:M)
+ if(length(nasgn[[atform[s]]])) {
+ ptr0[i] = s
+ break
+ }
+ dest.col.index = nasgn[[atform[ptr0[i]]]]
+ rindex = vlabel(response.name,
+ length(dest.col.index), M=M, separator="")
+ dummy = ans.save*0 + 1:length(ans.save) # names retained
+ dest.col.index = dummy[rindex]
+
+ for(k in ((1:M))) {
+ from.col.index = nasgn[[atform[k]]] # May be NULL
+ if(length(from.col.index)) {
+ ans[from.col.index] = ans.save[dest.col.index]
+ ans.copied[from.col.index] = TRUE
+ ans.save.copied[dest.col.index] = TRUE
+ }
+ }
+ }
+
+ if(any(!ans.copied)) {
+ ans[!ans.copied] = ans.save[!ans.save.copied]
+ }
+ names(ans) = vlabel(names(ncolBlist), ncolBlist,
+ M=M, separator="")
+
+ }
+
+ if(!matrix.out && !compress)
+ return(ans)
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ nasgn <- names(Blist)
+ temp <- c(0, cumsum(ncolBlist))
+ for(i in 1:length(nasgn)) {
+ index <- (temp[i]+1):temp[i+1]
+ cm <- Blist[[nasgn[i]]]
+ B[i,] <- cm %*% ans[index]
+ }
+ }
+
+ if(label) {
+ d1 <- object@misc$colnames.x
+ d2 = object@misc$predictors.names # Could be NULL
+ dimnames(B) <- list(d1, d2)
+ }
+
+ if(compress && length(xij)) {
+ ci2 = NULL
+ for(i in 1:length(xij)) {
+ tform = terms(xij[[i]])
+ atform = attr(tform, "term.labels")
+ response.name = (dimnames(attr(tform, "factors"))[[1]])[1]
+
+ dest.col.index = atx[[atform[ptr0[i]]]]
+
+
+ for(k in ((1:M)[-ptr0[i]])) {
+ from.col.index = atx[[atform[k]]] # May be NULL
+ if(length(from.col.index)) {
+ B[dest.col.index,] = B[dest.col.index,] + B[from.col.index,]
+ tmp5 = dimnames(B)[[1]]
+ tmp5[dest.col.index] = vlabel(response.name,
+ length(dest.col.index), M=M, separator="")
+ dimnames(B) = list(tmp5, dimnames(B)[[2]])
+ ci2 = c(ci2, from.col.index)
+ }
+ }
+ }
+ B = B[-ci2,,drop=FALSE] # Delete rows not wanted
+ }
+
+ B
+} # end of coefvlm
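+
+# A minimal sketch (assuming 'fit' is a fitted "vglm" or "vlm" object):
+#   coef(fit)                     # named vector of coefficients
+#   coef(fit, matrix.out = TRUE)  # p x M matrix, one column per eta_j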
+
+
+
+
+ setMethod("coefficients", "vlm", function(object, ...)
+ coefvlm(object, ...))
+ setMethod("coef", "vlm", function(object, ...)
+ coefvlm(object, ...))
+ setMethod("coefficients", "vglm", function(object, ...)
+ coefvlm(object, ...))
+ setMethod("coef", "vglm", function(object, ...)
+ coefvlm(object, ...))
+
+
+
+
+
+Coef.vlm <- function(object, ...) {
+ LL <- length(object@family@vfamily)
+ funname = paste("Coef.", object@family@vfamily[LL], sep="")
+ if(exists(funname)) {
+ newcall = paste("Coef.", object@family@vfamily[LL],
+ "(object, ...)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ eval(newcall)
+ } else
+ if(length(tmp2 <- object@misc$link) &&
+ object@misc$intercept.only &&
+ trivial.constraints(object@constraints)) {
+
+ answer = eta2theta(rbind(coef(object)),
+ link=object@misc$link,
+ earg=object@misc$earg)
+ answer = c(answer)
+ if(length(ntmp2 <- names(tmp2)) == object@misc$M)
+ names(answer) = ntmp2
+ answer
+ } else {
+ coef(object, ... )
+ }
+}
+
+setMethod("Coefficients", "vlm", function(object, ...)
+ Coef.vlm(object, ...))
+setMethod("Coef", "vlm", function(object, ...)
+ Coef.vlm(object, ...))
+
+
+if(!is.R()) {
+setMethod("Coefficients", "vglm", function(object, ...)
+ Coef.vlm(object, ...))
+setMethod("Coef", "vglm", function(object, ...)
+ Coef.vlm(object, ...))
+setMethod("Coefficients", "vgam", function(object, ...)
+ Coef.vlm(object, ...))
+setMethod("Coef", "vgam", function(object, ...)
+ Coef.vlm(object, ...))
+setMethod("Coefficients", "rrvglm", function(object, ...)
+ Coef.vlm(object, ...))
+setMethod("Coef", "rrvglm", function(object, ...)
+ Coef.vlm(object, ...))
+}
+
+
+
+if(FALSE)
+coef.rrvglm = function(object,
+ type = c("all", "vlm"),
+ matrix.out=FALSE, label=TRUE) {
+
+}
+
+
diff --git a/R/cqo.R b/R/cqo.R
new file mode 100644
index 0000000..108ec03
--- /dev/null
+++ b/R/cqo.R
@@ -0,0 +1,162 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+cqo <- function(formula,
+ family, data=list(),
+ weights=NULL, subset=NULL, na.action=na.fail,
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ control=qrrvglm.control(...),
+ offset=NULL,
+ method="cqo.fit",
+ model=FALSE, x.arg=TRUE, y.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=NULL,
+ smart=TRUE, ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "cqo"
+
+ ocall <- match.call()
+
+ if(smart)
+ setup.smart("write")
+
+ mt <- terms(formula, data = data)
+ if(missing(data))
+ data <- environment(formula)
+
+ mf <- match.call(expand=FALSE)
+ mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
+ mf$contrasts <- mf$constraints <- mf$extra <- NULL
+ mf$coefstart <- mf$etastart <- mf$... <- NULL
+ mf$smart <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ if(method == "model.frame")
+ return(mf)
+ na.act <- attr(mf, "na.action")
+
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ y <- model.response(mf, "numeric") # model.extract(mf, "response")
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+ w <- model.weights(mf)
+ if(!length(w))
+ w <- rep(1, nrow(mf))
+ else if(ncol(as.matrix(w))==1 && any(w < 0))
+ stop("negative weights not allowed")
+
+ if(is.character(family))
+ family <- get(family)
+ if(is.function(family))
+ family <- family()
+ if(!inherits(family, "vglmff")) {
+ stop(paste("family=", family, "is not a VGAM family function"))
+ }
+
+ control$criterion = "coefficients" # Specifically for vcontrol.expression
+ eval(vcontrol.expression)
+
+ if(!is.null(family@first))
+ eval(family@first)
+
+
+ cqo.fitter <- get(method)
+
+
+ deviance.Bestof = rep(as.numeric(NA), len=control$Bestof)
+ for(tries in 1:control$Bestof) {
+ if(control$trace && (control$Bestof>1))
+ cat(paste("\n========================= Fitting model", tries,
+ "=========================\n"))
+ it <- cqo.fitter(x=x, y=y, w=w, offset=offset,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ family=family, control=control, constraints=constraints,
+ extra=extra, Terms=mt, function.name=function.name, ...)
+ deviance.Bestof[tries] = if(length(it$crit.list$deviance))
+ it$crit.list$deviance else it$crit.list$loglikelihood
+ if(tries==1 || min(deviance.Bestof[1:(tries-1)])>deviance.Bestof[tries])
+ fit = it
+ }
+ fit$misc$deviance.Bestof = deviance.Bestof
+
+
+ fit$misc$dataname <- dataname
+
+ if(smart) {
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ }
+
+ answer <-
+ new(Class="qrrvglm",
+ "assign" = attr(x, "assign"),
+ "call" = ocall,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "criterion" = list("deviance"=min(deviance.Bestof)),
+ "dispersion" = 1,
+ "family" = fit$family,
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "residuals" = as.matrix(fit$residuals),
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=mt))
+
+ if(!smart) answer@smart.prediction <- list(smart.arg=FALSE)
+
+ if(length(attr(x, "contrasts")))
+ slot(answer, "contrasts") = attr(x, "contrasts")
+ if(length(fit$fitted.values))
+ slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
+ slot(answer, "na.action") = if(length(na.act)) list(na.act) else list()
+ if(length(offset))
+ slot(answer, "offset") = as.matrix(offset)
+ if(length(fit$weights))
+ slot(answer, "weights") = as.matrix(fit$weights)
+ if(x.arg)
+ slot(answer, "x") = fit$x # The 'small' design matrix
+ if(length(xlev))
+ slot(answer, "xlevels") = xlev
+ if(y.arg)
+ slot(answer, "y") = as.matrix(fit$y)
+
+ fit$control$min.criterion = TRUE # needed for calibrate; a special case
+
+
+ slot(answer, "control") = fit$control
+ slot(answer, "extra") = if(length(fit$extra)) {
+ if(is.list(fit$extra)) fit$extra else {
+ warning("\"extra\" is not a list, therefore placing \"extra\" into a list")
+ list(fit$extra)
+ }
+ } else list() # R-1.5.0
+ slot(answer, "iter") = fit$iter
+ fit$predictors = as.matrix(fit$predictors) # Must be a matrix
+ dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
+ fit$misc$predictors.names)
+ slot(answer, "predictors") = fit$predictors
+ if(length(fit$prior.weights))
+ slot(answer, "prior.weights") = fit$prior.weights
+
+ answer
+}
+attr(cqo, "smart") <- TRUE
+
+
+
+
+
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
new file mode 100644
index 0000000..f8e4533
--- /dev/null
+++ b/R/cqo.fit.q
@@ -0,0 +1,859 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+callcqof = function(cmatrix, etamat, xmat, ymat, wvec,
+ xbig.save1, modelno, Control,
+ n, M, p1star, p2star, nice31, allofit=FALSE) {
+ ocmatrix = cmatrix
+ control = Control
+ Rank = control$Rank
+ p1 = length(control$colx1.index); p2 = length(control$colx2.index)
+ dim(cmatrix) = c(p2, Rank) # for crow1C
+ pstar = p1star + p2star
+ maxMr = max(M, Rank)
+ nstar = if(nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
+ lenbeta = pstar * ifelse(nice31, NOS, 1)
+
+ if(itol <- control$ITolerances) {
+ if(Rank > 1) {
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ evnu = eigen(var(numat))
+ cmatrix = cmatrix %*% evnu$vector
+ }
+
+ cmatrix = crow1C(cmatrix, control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ sdnumat = sd(numat)
+ for(lookat in 1:Rank)
+ if(sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
+ muxer = control$isdlv[lookat] * control$MUXfactor[lookat] /
+ sdnumat[lookat]
+ numat[,lookat] = numat[,lookat] * muxer
+ cmatrix[,lookat] = cmatrix[,lookat]*muxer # unneeded in callcqof
+ if(control$trace) {
+ cat(paste("Taking evasive action for latent variable ",
+ lookat, ".\n", sep=""))
+ if(exists("flush.console")) flush.console()
+ }
+ rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+ "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ }
+ } else {
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ evnu = eigen(var(numat))
+ temp7 = if(Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ evnu$vector %*% evnu$value^(-0.5)
+ cmatrix = cmatrix %*% temp7
+ cmatrix = crow1C(cmatrix, control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ }
+
+ inited = if(is.R()) {
+ if(exists(".VGAM.CQO.etamat", envir = VGAMenv)) 1 else 0
+ } else 0
+
+
+ usethiseta = if(inited==1)
+ getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
+ usethisbeta = if(inited==2)
+ getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
+
+ othint = c(Rank, control$EqualTol, pstar, dimw=1, inited=inited,
+ modelno, maxitl=control$maxitl, actnits=0, twice=0,
+ p1star=p1star, p2star=p2star, nice31=nice31, lenbeta,
+ itol=itol, control$trace, p1, p2, control$method.init)
+ bnumat = if(nice31) matrix(0,nstar,pstar) else
+ cbind(matrix(0,nstar,p2star), xbig.save1)
+if(TRUE) {
+}
+
+ ans1 <-
+ dotFortran(name=if(nice31) "cqo1f" else "cqo2f",
+ numat=as.double(numat), as.double(ymat),
+ as.double(if(p1) xmat[,control$colx1.index] else 999),
+ as.double(wvec), etamat=as.double(usethiseta),
+ moff=double(if(itol) n else 1),
+ fv=double(NOS*n), z=double(n*M), wz=double(n*M),
+ U=double(M*n), bnumat=as.double(bnumat),
+ qr=double(nstar*pstar), qraux=double(pstar), qpivot=integer(pstar),
+ as.integer(n), as.integer(M), NOS=as.integer(NOS),
+ as.integer(nstar), dimu=as.integer(M),
+ errcode=integer(1), othint=as.integer(othint),
+ rowind=integer(maxMr*(maxMr+1)/2), colind=integer(maxMr*(maxMr+1)/2),
+ deviance=double(1), beta=as.double(usethisbeta),
+ twk=double(if(nice31) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
+ othdbl=as.double(c(small=control$SmallNo, epsilon=control$epsilon,
+ .Machine$double.eps,
+ iKvector=rep(control$iKvector, len=NOS),
+ iShape=rep(control$iShape, len=NOS))))
+
+
+ if(ans1$errcode == 0) {
+ assign2VGAMenv(c("etamat", "z", "U", "beta", "deviance"),
+ ans1, prefix=".VGAM.CQO.")
+ if(is.R()) {
+ assign(".VGAM.CQO.cmatrix", cmatrix, envir = VGAMenv)
+ assign(".VGAM.CQO.ocmatrix", ocmatrix, envir = VGAMenv)
+ } else {
+ .VGAM.CQO.cmatrix <<- cmatrix
+ .VGAM.CQO.ocmatrix <<- ocmatrix
+ }
+ } else {
+ warning(paste("error code in callcqof =", ans1$errcode))
+ rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+ "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ }
+ if(control$trace && exists("flush.console"))
+ flush.console()
+ if(allofit) list(deviance=ans1$deviance, coefficients=ans1$beta) else
+ ans1$deviance
+}
+
+calldcqof = function(cmatrix, etamat, xmat, ymat, wvec,
+ xbig.save1, modelno, Control,
+ n, M, p1star, p2star, nice31, allofit=FALSE) {
+ control = Control
+ Rank = control$Rank
+ p1 = length(control$colx1.index); p2 = length(control$colx2.index)
+ dim(cmatrix) = c(p2, Rank) # for crow1C
+
+ xmat2 <- xmat[,control$colx2.index,drop=FALSE] #ccc
+ numat <- double(n*Rank) #ccc
+ pstar = p1star + p2star
+ maxMr = max(M, Rank)
+ nstar = if(nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
+ lenbeta = pstar * ifelse(nice31, NOS, 1)
+
+ if(itol <- control$ITolerances) {
+ if(Rank > 1) {
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ evnu = eigen(var(numat))
+ cmatrix = cmatrix %*% evnu$vector
+ }
+
+ cmatrix = crow1C(cmatrix, control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ sdnumat = sd(numat)
+ for(lookat in 1:Rank)
+ if(sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
+ muxer = control$isdlv[lookat] * control$MUXfactor[lookat] /
+ sdnumat[lookat]
+ cmatrix[,lookat] = cmatrix[,lookat] * muxer
+ if(control$trace) {
+ cat(paste("Taking evasive action for latent variable ",
+ lookat, ".\n", sep=""))
+ if(exists("flush.console")) flush.console()
+ }
+ rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+ "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+ }
+ } else {
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ evnu = eigen(var(numat))
+ temp7 = if(Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ evnu$vector %*% evnu$value^(-0.5)
+ cmatrix = cmatrix %*% temp7
+ cmatrix = crow1C(cmatrix, control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ }
+
+ inited = if(is.R()) {
+ if(exists(".VGAM.CQO.etamat", envir = VGAMenv)) 1 else 0
+ } else 0
+
+
+ usethiseta = if(inited==1)
+ getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
+ usethisbeta = if(inited==2)
+ getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
+
+ othint = c(Rank, control$EqualTol, pstar, dimw=1, inited=inited,
+ modelno, maxitl=control$maxitl, actnits=0, twice=0,
+ p1star=p1star, p2star=p2star, nice31=nice31, lenbeta,
+ itol=itol, control$trace,
+ p1, p2, control$method.init) # other ints
+ bnumat = if(nice31) matrix(0,nstar,pstar) else
+ cbind(matrix(0,nstar,p2star), xbig.save1)
+ if(exists("flush.console"))
+ flush.console()
+
+ ans1 <-
+ dotFortran(name="dcqof", numat=as.double(numat), as.double(ymat),
+ as.double(if(p1) xmat[,control$colx1.index] else 999),
+ as.double(wvec), etamat=as.double(usethiseta),
+ moff=double(if(itol) n else 1),
+ fv=double(NOS*n), z=double(n*M), wz=double(n*M),
+ U=double(M*n), bnumat=as.double(bnumat),
+ qr=double(nstar*pstar), qraux=double(pstar), qpivot=integer(pstar),
+ as.integer(n), as.integer(M), NOS=as.integer(NOS),
+ as.integer(nstar), dimu=as.integer(M),
+ errcode=integer(1), othint=as.integer(othint),
+ rowind=integer(maxMr*(maxMr+1)/2), colind=integer(maxMr*(maxMr+1)/2),
+ deviance=double(1), beta=as.double(usethisbeta),
+ twk=double(if(nice31) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
+ othdbl=as.double(c(small=control$SmallNo, epsilon=control$epsilon,
+ .Machine$double.eps,
+ iKvector=rep(control$iKvector, len=NOS),
+ iShape=rep(control$iShape, len=NOS))),
+ xmat2=as.double(xmat2), onumat=as.double(numat), cmat=as.double(cmatrix),
+ p2=as.integer(p2), deriv=double(p2*Rank), hstep=as.double(control$Hstep),
+ betasave=double(lenbeta))
+
+ if(ans1$errcode != 0) {
+ warning(paste("error code in calldcqof =", ans1$errcode))
+ }
+
+ if(exists("flush.console"))
+ flush.console()
+ ans1$deriv
+}
+
+
+checkCMCO <- function(Blist, control, modelno) {
+
+ p1 = length(colx1.index <- control$colx1.index)
+ p2 = length(colx2.index <- control$colx2.index)
+ if(p1 + p2 != length(Blist))
+ stop("'Blist' is the wrong length")
+ if(p1 == 0 || p2 == 0)
+ stop("Some variables are needed in Norrr and non-Norrr arguments")
+ if(all(names(colx1.index) != "(Intercept)"))
+ stop("an intercept term must be in the argument 'Norrr' formula")
+ Blist1 = vector("list", p1)
+ Blist2 = vector("list", p2)
+ for(k in 1:p1)
+ Blist1[[k]] = Blist[[(colx1.index[k])]]
+ for(k in 1:p2)
+ Blist2[[k]] = Blist[[(colx2.index[k])]]
+
+ if(modelno == 3 || modelno == 5) {
+ if(p1 > 1)
+ for(k in 2:p1)
+ Blist1[[k]] = (Blist1[[k]])[c(TRUE,FALSE),,drop=FALSE]
+ for(k in 1:p2)
+ Blist2[[k]] = (Blist2[[k]])[c(TRUE,FALSE),,drop=FALSE]
+ }
+
+ if(!all(trivial.constraints(Blist2)))
+ stop("the constraint matrices for the non-Norrr terms are not trivial")
+ if(!trivial.constraints(Blist1[[1]]))
+ stop("the constraint matrices for intercept term is not trivial")
+ if(p1 > 1)
+ for(k in 2:p1)
+ if(!trivial.constraints(list(Blist1[[k]])))
+ stop("the constraint matrices for some Norrr terms is not trivial")
+
+ nice31 = if(control$Quadratic)
+ (!control$EqualTol || control$ITolerances) else TRUE
+ as.numeric(nice31)
+}
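+
+# checkCMCO() returns 1 (nice31) when the constraints are trivial and the
+# quadratic model is a "nice" one (unequal tolerances, or ITolerances=TRUE),
+# in which case the faster Fortran routine cqo1f is used instead of cqo2f.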
+
+
+
+
+
+
+cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ offset=0, family,
+ control=qrrvglm.control(...),
+ constraints=NULL,
+ extra=NULL,
+ Terms=Terms, function.name="cqo", ...)
+{
+ if(!all(offset == 0)) stop("cqo.fit() cannot handle offsets")
+ post = list()
+ nonparametric <- FALSE
+ epsilon <- control$epsilon
+ maxitl <- control$maxitl
+ save.weight <- control$save.weight
+ trace <- control$trace
+ orig.stepsize <- control$stepsize
+
+
+ n <- dim(x)[1]
+
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
+
+
+ n.save <- n
+
+
+
+ Rank <- control$Rank
+ rrcontrol <- control #
+ backchat = FALSE # Specifically for rrr.init.expression
+
+ if(length(family@initialize))
+ eval(family@initialize) # Initialize mu and M (and optionally w)
+ n <- n.save
+
+ eval(rrr.init.expression)
+
+ if(length(etastart)) {
+ eta <- etastart
+ mu <- if(length(mustart)) mustart else family@inverse(eta, extra)
+ } else {
+ if(length(mustart))
+ mu <- mustart
+ eta <- family@link(mu, extra)
+ }
+
+ M <- if(is.matrix(eta)) ncol(eta) else 1
+
+ if(is.character(rrcontrol$Dzero)) {
+ index = match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
+ if(any(is.na(index)))
+ stop("Dzero argument didn't fully match y-names")
+ if(length(index) == M)
+ stop(paste("all linear predictors are linear in the",
+ "latent variable(s); so set Quadratic=FALSE"))
+ rrcontrol$Dzero = control$Dzero = index
+ }
+
+ if(length(family@constraints))
+ eval(family@constraints)
+
+
+ special.matrix = matrix(-34956.125, M, M) # An unlikely used matrix
+ just.testing <- cm.vgam(special.matrix, x, rrcontrol$Norrr, constraints)
+ findex = trivial.constraints(just.testing, special.matrix)
+ tc1 = trivial.constraints(constraints)
+
+ if(!control$Quadratic && sum(!tc1)) {
+ for(ii in names(tc1))
+ if(!tc1[ii] && !any(ii == names(findex)[findex==1]))
+ warning(paste("\"", ii, "\"", " is a non-trivial constraint",
+ " that will be overwritten by reduced-rank regression", sep=""))
+ }
+
+ if(all(findex))
+ stop("use vglm(), not rrvglm()!")
+ colx1.index = names.colx1.index = NULL
+ dx2 = dimnames(x)[[2]]
+ if(sum(findex)) {
+ asx = attr(x, "assign")
+ for(ii in names(findex))
+ if(findex[ii]) {
+ names.colx1.index = c(names.colx1.index, dx2[asx[[ii]]])
+ colx1.index = c(colx1.index, asx[[ii]])
+ }
+ names(colx1.index) = names.colx1.index
+ }
+ rrcontrol$colx1.index=control$colx1.index=colx1.index #Save it on the object
+ colx2.index = 1:ncol(x)
+ names(colx2.index) = dx2
+ colx2.index = colx2.index[-colx1.index]
+ p1 = length(colx1.index); p2 = length(colx2.index)
+ rrcontrol$colx2.index=control$colx2.index=colx2.index #Save it on the object
+
+
+
+
+ Amat <- if(length(rrcontrol$Ainit)) rrcontrol$Ainit else
+ matrix(rnorm(M * Rank, sd=rrcontrol$SD.Cinit), M, Rank)
+
+ Cmat = if(length(rrcontrol$Cinit)) matrix(rrcontrol$Cinit, p2, Rank) else {
+ if(!rrcontrol$Use.Init.Poisson.QO) {
+ matrix(rnorm(p2 * Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
+ } else
+ .Init.Poisson.QO(ymat=as.matrix(y),
+ X1=x[,colx1.index,drop=FALSE],
+ X2=x[,colx2.index,drop=FALSE],
+ Rank=rrcontrol$Rank, trace=rrcontrol$trace,
+ max.ncol.etamat = rrcontrol$Etamat.colmax,
+ Crow1positive=rrcontrol$Crow1positive,
+ isdlv=rrcontrol$isdlv,
+ constwt= any(family@vfamily[1] ==
+ c("negbinomial","gamma2","gaussianff")),
+ takelog= any(family@vfamily[1] != c("gaussianff")))
+ }
+
+ if(rrcontrol$ITolerances) {
+ lvmat = x[,rrcontrol$colx2.index,drop=FALSE] %*% Cmat
+ lvmatmeans = t(lvmat) %*% matrix(1/n, n, 1)
+ if(!all(abs(lvmatmeans) < 4))
+ warning(paste("ITolerances=TRUE but the variables making up the",
+ "latent variable(s) do not appear to be centered"))
+ }
+ if(modelno==3 || modelno==5)
+ Amat[c(FALSE,TRUE),] <- 0 # Intercept only for log(k)
+
+
+ if(length(control$Structural.zero))
+ Amat[control$Structural.zero,] = 0
+
+ rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
+ rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+
+ Blist <- process.constraints(constraints, x, M)
+ nice31 = checkCMCO(Blist, control=control, modelno=modelno)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ dimB <- sum(ncolBlist)
+
+ xbig.save <- if(nice31) {
+ NULL
+ } else {
+ tmp500=lm2qrrvlm.model.matrix(x=x,Blist=Blist,C=Cmat,control=control)
+ xsmall.qrr = tmp500$new.lv.model.matrix
+ B.list = tmp500$constraints # Doesn't change or contain \bI_{Rank} \bnu
+ lv.mat = tmp500$lv.mat
+ if(length(tmp500$offset)) {
+ offset = tmp500$offset
+ }
+ lm2vlm.model.matrix(xsmall.qrr, B.list, xij=control$xij)
+ }
+
+ if(length(coefstart) && length(xbig.save)) {
+ eta <- if(ncol(xbig.save) > 1) xbig.save %*% coefstart +
+ offset else xbig.save * coefstart + offset
+ eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
+ mu <- family@inverse(eta, extra)
+ }
+
+ rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
+ "cmatrix", "ocmatrix"), prefix=".VGAM.CQO.")
+
+ eval(cqo.init.derivative.expression)
+ for(iter in 1:control$optim.maxit) {
+ eval(cqo.derivative.expression)
+ if(!quasi.newton$convergence) break
+ }
+ if(maxitl>1 && iter>=maxitl && quasi.newton$convergence)
+ warning(paste("convergence not obtained in", maxitl, "iterations."))
+
+ if(length(family@fini))
+ eval(family@fini)
+
+ asgn <- attr(x, "assign")
+ coefs = getfromVGAMenv("beta", prefix = ".VGAM.CQO.")
+ if(control$ITolerances) {
+ if(NOS == M) {
+ coefs = c(t(matrix(coefs, ncol=M))) # Get into right order
+ } else {
+ coefs = coefs
+ }
+ }
+
+ dn <- labels(x)
+ yn <- dn[[1]]
+ xn <- dn[[2]]
+ residuals <- z - fv # zz - offset ?? # not sure 3/10/05
+ if(M==1) {
+ residuals <- as.vector(residuals)
+ names(residuals) <- yn
+ } else {
+ dimnames(residuals) <- list(yn, predictors.names)
+ }
+
+ if(is.matrix(mu)) {
+ if(length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ if(length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
+ } else {
+ names(mu) <- names(fv)
+ }
+
+ df.residual <- 55 - 8 - Rank*p2 # zz n.big==55, rank==8
+ fit <- list(assign=asgn,
+ coefficients=coefs,
+ constraints=Blist, # B.list? zz
+ df.residual=df.residual,
+ df.total=n*M,
+ fitted.values=mu,
+ offset=offset,
+ residuals=residuals,
+ terms=Terms) # terms: This used to be done in vglm()
+
+ if(M==1) {
+ wz <- as.vector(wz) # Convert wz into a vector
+ }
+ fit$weights <- if(save.weight) wz else NULL
+
+ misc <- list(
+ colnames.x = xn,
+ criterion = "deviance",
+ function.name = function.name,
+ intercept.only=intercept.only,
+ predictors.names = predictors.names,
+ M = M,
+ n = n,
+ nonparametric = nonparametric,
+ orig.assign = attr(x, "assign"),
+ p = ncol(x),
+ ynames = dimnames(y)[[2]])
+
+ if(w[1] != 1 || any(w != w[1]))
+ fit$prior.weights <- w
+
+ if(length(family@last))
+ eval(family@last)
+
+ deviance = getfromVGAMenv("deviance", prefix = ".VGAM.CQO.")
+ crit.list = list(deviance = deviance)
+ structure(c(fit, list(predictors=matrix(eta,n,M),
+ contrasts=attr(x, "contrasts"),
+ control=control,
+ crit.list = crit.list,
+ extra=extra,
+ family=family,
+ iter=iter,
+ misc=misc,
+ post = post,
+ rss=000,
+ x=x,
+ y=y)),
+ vclass=family@vfamily)
+}
+
+
+
+
+.Init.Poisson.QO = function(ymat, X1, X2, Rank=1, epsilon=1/32,
+ max.ncol.etamat = 10,
+ trace=FALSE, Crow1positive=rep(TRUE, len=Rank),
+ isdlv = rep(1, len=Rank),
+ constwt=FALSE, takelog=TRUE) {
+
+
+ print.expression = expression({
+ if(trace && length(X2)) {
+ cat("\nUsing initial values\n")
+ dimnames(ans) = list(dimnames(X2)[[2]],
+ if(Rank==1) "lv" else paste("lv", 1:Rank, sep=""))
+ if(p2>5) print(ans, dig=3) else print(t(ans), dig=3)
+ }
+ if(exists("flush.console"))
+ flush.console()
+ })
+ sd.scale.X2.expression = expression({
+ if(length(isdlv)) {
+ actualSD = c( sqrt(diag(var(X2 %*% ans))) )
+ for(ii in 1:Rank)
+ ans[,ii] = ans[,ii] * isdlv[ii] / actualSD[ii]
+ }
+ })
+
+ Crow1positive = if(length(Crow1positive)) rep(Crow1positive, len=Rank) else
+ rep(TRUE, len=Rank) # Not nice. Because $Crow1positive==NULL for RRVGLMs
+ if(epsilon <= 0)
+ stop("epsilon > 0 is required")
+ ymat = cbind(ymat) + epsilon # ymat==0 causes problems
+ NOS = ncol(ymat)
+ p2 = ncol(X2)
+ if(NOS < 2*Rank) {
+ ans = crow1C(matrix(rnorm(p2*Rank, sd=0.02), p2, Rank), Crow1positive)
+ eval(sd.scale.X2.expression)
+ if(NOS == 1) {
+ eval(print.expression)
+ return(ans)
+ } else ans.save = ans; # ans.save contains scaled guesses
+ }
+
+ calS = 1:NOS # Set of all species available for the approximation
+ effrank = min(Rank, floor(NOS/2)) # effective rank
+ ncol.etamat = min(if(length(X2)) floor(NOS/2) else effrank, max.ncol.etamat)
+ etamat = wts = matrix(0, nrow=nrow(ymat), ncol=ncol.etamat) # has >=1 coln
+ rr = 1
+ for(ii in 1:floor(NOS/2)) {
+ if(length(calS) < 2) break
+ index = sample(calS, size=2) # Randomness here
+ etamat[,rr] = etamat[,rr] + (if(takelog)
+ log(ymat[,index[1]] / ymat[,index[2]]) else
+ ymat[,index[1]] - ymat[,index[2]])
+ wts[,rr]=wts[,rr]+(if(constwt) 1 else ymat[,index[1]]+ymat[,index[2]])
+ calS = setdiff(calS, index)
+ rr = (rr %% ncol.etamat) + 1
+ }
+ if(trace)
+ cat("\nObtaining initial values\n")
+
+ if(length(X2)) {
+ alt = valt(x=cbind(X1, X2), z=etamat, U=sqrt(t(wts)), Rank=effrank,
+ Blist=NULL, Cinit=NULL, trace=FALSE, colx1.index=1:ncol(X1),
+ Criterion="rss")
+ temp.control = list(Rank=effrank, colx1.index = 1:ncol(X1), Alpha=0.5,
+ colx2.index = (ncol(X1)+1):(ncol(X1) + ncol(X2)),
+ Corner=FALSE, Svd.arg=TRUE, Uncor=TRUE, Quadratic=FALSE)
+
+ ans2 = if(Rank > 1) rrr.normalize(rrcontrol=temp.control, A=alt$A,
+ C=alt$C, x=cbind(X1, X2)) else alt
+ ans = crow1C(ans2$C, rep(Crow1positive, len=effrank))
+
+ Rank.save = Rank
+ Rank = effrank
+ eval(sd.scale.X2.expression)
+ Rank = Rank.save
+
+ if(effrank < Rank) {
+ ans = cbind(ans, ans.save[,-(1:effrank)]) # ans is better
+ }
+ eval(print.expression)
+ } else {
+ xij = NULL # temporary measure
+ U = t(sqrt(wts))
+ tmp = vlm.wfit(x=X1, z=etamat, Blist=NULL, U=U, matrix.out=TRUE,
+ XBIG=FALSE, rss=TRUE, qr=FALSE, xij=xij)
+ ans = crow1C(as.matrix(tmp$resid), rep(Crow1positive, len=effrank))
+ if(effrank < Rank) {
+ ans = cbind(ans, ans.save[,-(1:effrank)]) # ans is better
+ }
+
+ if(Rank > 1) {
+ evnu = eigen(var(ans))
+ ans = ans %*% evnu$vector
+ }
+
+ if(length(isdlv)) {
+ actualSD = sd(ans)
+ for(ii in 1:Rank)
+ ans[,ii] = ans[,ii] * isdlv[ii] / actualSD[ii]
+ }
+ ans = crow1C(ans, rep(Crow1positive, len=Rank))
+ dimnames(ans) = list(dimnames(X1)[[1]],
+ if(Rank==1) "lv" else paste("lv", 1:Rank, sep=""))
+ if(trace)
+ {if(nrow(ans) > 10) print(t(ans), dig=3) else print(ans, dig=3)}
+ }
+ ans
+}
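+
+# .Init.Poisson.QO() forms rough starting values for C: random pairs of
+# species are contrasted via log(y_i/y_j), which under a Poisson log link
+# approximates a difference of linear predictors, and a reduced-rank fit
+# of these contrasts on (X1, X2) yields initial canonical coefficients.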
+
+
+
+
+cqo.init.derivative.expression <- expression({
+ which.optimizer = if(is.R()) {
+ if(control$Quadratic && control$FastAlgorithm) {
+ "BFGS"
+ } else {
+ if(iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
+ }
+ } else "Quasi-Newton"
+ if(trace && control$OptimizeWrtC) {
+ cat("\nUsing", which.optimizer, "algorithm\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+
+
+if(FALSE) {
+ constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
+
+ nice31 = (!control$EqualTol || control$ITolerances) &&
+ all(trivial.constraints(constraints))
+}
+
+ NOS = ifelse(modelno==3 || modelno==5, M/2, M)
+ canfitok = if(is.R())
+ (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAMenv)) else
+ (exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
+ if(!canfitok)
+ stop("can't fit this model using fast algorithm")
+
+ p2star = if(nice31)
+ ifelse(control$ITolerances, Rank, Rank+0.5*Rank*(Rank+1)) else
+ (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
+ p1star = if(nice31) ifelse(modelno==3 || modelno==5,1+p1,p1) else
+ (ncol(xbig.save)-p2star)
+ xbig.save1 = if(p1star > 0) xbig.save[,-(1:p2star)] else NULL
+})
+
+
+
+
+cqo.derivative.expression <- expression({
+
+
+if(is.R()) {
+ if(iter == 1 || quasi.newton$convergence) {
+ quasi.newton = optim(par=Cmat, fn=callcqof,
+ gr=if(control$GradientFunction) calldcqof else NULL,
+ method=which.optimizer,
+ control=list(fnscale=1,trace=as.integer(control$trace),
+ parscale=rep(control$Parscale, len=length(Cmat)),
+ maxit=control$Maxit.optim),
+ etamat=eta, xmat=x, ymat=y, wvec=w,
+ xbig.save1 = xbig.save1,
+ modelno=modelno, Control=control,
+ n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
+
+ z = matrix(getfromVGAMenv("z", prefix=".VGAM.CQO."), n, M)
+ U = matrix(getfromVGAMenv("U", prefix=".VGAM.CQO."), M, n)
+ }
+} else {
+ stop("not written for Splus yet")
+}
+
+
+ocmatrix = getfromVGAMenv("ocmatrix", prefix = ".VGAM.CQO.")
+maxdiff = max(abs(c(ocmatrix) - c(quasi.newton$par)) / (1+abs(c(ocmatrix))))
+if(maxdiff < 1.0e-4) {
+ Cmat = getfromVGAMenv("cmatrix", prefix = ".VGAM.CQO.")
+} else {
+ warning("solution does not correspond to .VGAM.CQO.cmatrix")
+}
+
+
+alt = valt.1iter(x=x, z=z, U=U, Blist=Blist, C=Cmat, nice31=nice31,
+ control=rrcontrol, lp.names=predictors.names,
+ MSratio=M/NOS)
+
+if(length(alt$offset))
+ offset = alt$offset
+
+B1.save = alt$B1 # Put later into extra
+tmp.fitted = alt$fitted # contains \bI_{Rank} \bnu if Corner
+
+if(trace && control$OptimizeWrtC) {
+ cat("\n")
+ cat(which.optimizer, "using", if(is.R()) "optim():" else "nlminb():", "\n")
+ cat("Objective =", if(is.R())
+ quasi.newton$value else format(quasi.newton$objective), "\n")
+ cat("Parameters (= c(C)) = ", if(length(quasi.newton$par) < 5) "" else "\n")
+ cat(if(is.R()) alt$Cmat else format(alt$Cmat), fill=TRUE)
+ cat("\n")
+ if(!is.R()) {
+ cat("Gradient norm =", format(quasi.newton$grad.norm), "\n")
+ cat("Number of gradient evaluations =", quasi.newton$g.evals, "\n")
+ }
+ cat("Number of function evaluations =", if(is.R())
+ quasi.newton$count[1] else quasi.newton$f.evals, "\n")
+ if(length(quasi.newton$message))
+ cat("Message =", quasi.newton$message, "\n")
+ cat("\n")
+ if(exists("flush.console"))
+ flush.console()
+}
+
+Amat = alt$Amat #
+Cmat = alt$Cmat #
+Dmat = alt$Dmat #
+
+eval(cqo.end.expression) #
+})
+
+
+cqo.end.expression = expression({
+
+ rmfromVGAMenv(c("etamat"), prefix=".VGAM.CQO.")
+
+
+ if(control$Quadratic) {
+ if(!length(extra)) extra=list()
+ extra$Amat = Amat # Not the latest iteration ??
+ extra$Cmat = Cmat # Saves the latest iteration
+ extra$Dmat = Dmat # Not the latest iteration
+ extra$B1 = B1.save # Not the latest iteration (not good)
+ } else {
+ Blist = replace.constraints(Blist.save, Amat, colx2.index)
+ }
+
+
+ fv <- tmp.fitted # Contains \bI \bnu
+ eta <- fv + offset
+ mu <- family@inverse(eta, extra)
+
+ if(any(is.na(mu)))
+ warning("there are NAs in mu")
+
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset # Contains \bI \bnu
+
+
+})
+
+crow1C = function(cmat, crow1positive=rep(TRUE, len=ncol(cmat)), amat=NULL) {
+ if(!is.logical(crow1positive) || length(crow1positive) != ncol(cmat))
+ stop("bad input in crow1C")
+ for(LV in 1:ncol(cmat))
+ if(( crow1positive[LV] && cmat[1,LV] < 0) ||
+ (!crow1positive[LV] && cmat[1,LV] > 0)) {
+ cmat[,LV] = -cmat[,LV]
+ if(length(amat)) amat[,LV] = -amat[,LV]
+ }
+ if(length(amat)) list(cmat=cmat, amat=amat) else cmat
+}
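+
+# crow1C() resolves the sign indeterminacy in each latent variable: the
+# columns of C (and optionally the matching columns of A) are negated so
+# that the first row of C has the signs requested in 'crow1positive', e.g.
+#   crow1C(matrix(c(-1, 2), 2, 1))   # column is flipped to c(1, -2)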
+
+
+
+
+
+printqrrvglm <- function(x, ...)
+{
+ if(!is.null(cl <- x@call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ if(FALSE) {
+ Rank <- x@Rank
+ if(!length(Rank))
+ Rank <- sum(!nas)
+ }
+
+ if(FALSE) {
+ nobs <- if(length(x@df.total)) x@df.total else length(x@residuals)
+ rdf <- x@df.residual
+ if(!length(rdf))
+ rdf <- nobs - Rank
+ }
+ cat("\n")
+
+ if(length(deviance(x)))
+ cat("Residual Deviance:", format(deviance(x)), "\n")
+
+ if(length(x@criterion)) {
+ ncrit <- names(x@criterion)
+ for(i in ncrit)
+ if(i!="loglikelihood" && i!="deviance")
+ cat(paste(i, ":", sep=""), format(x@criterion[[i]]), "\n")
+ }
+
+ invisible(x)
+}
+
+
+setMethod("Coef", "qrrvglm", function(object, ...) Coef.qrrvglm(object, ...))
+
+setMethod("coef", "qrrvglm", function(object, ...)
+ Coef.qrrvglm(object, ...))
+setMethod("coefficients", "qrrvglm", function(object, ...)
+ Coef.qrrvglm(object, ...))
+
+if(!isGeneric("deviance"))
+ setGeneric("deviance", function(object, ...) standardGeneric("deviance"))
+setMethod("deviance", "qrrvglm", function(object,...) object at criterion$deviance)
+
+setMethod("fitted", "qrrvglm", function(object, ...) fitted.vlm(object))
+setMethod("fitted.values", "qrrvglm", function(object, ...) fitted.vlm(object))
+
+
+
+setMethod("print", "qrrvglm", function(x, ...) printqrrvglm(x, ...))
+
+ setMethod("show", "qrrvglm", function(object) printqrrvglm(object))
+
+
+
+
+
+
+
+
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
new file mode 100644
index 0000000..74cb0cc
--- /dev/null
+++ b/R/deviance.vlm.q
@@ -0,0 +1,54 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+deviance.vlm <- function(object, ...)
+ object@criterion$deviance
+
+deviance.vglm <- function(object, ...)
+ object@criterion$deviance
+
+
+
+
+if(!isGeneric("deviance"))
+ setGeneric("deviance", function(object, ...) standardGeneric("deviance"))
+
+
+setMethod("deviance", "vlm", function(object, ...)
+ deviance.vlm(object, ...))
+
+if(is.R()) {
+
+
+ setMethod("deviance", "vglm", function(object, ...)
+ deviance.vglm(object, ...))
+} else {
+ setMethod("deviance", "vglm", function(object, ...)
+ deviance.vglm(object, ...))
+}
+
+
+
+
+df.residual.vlm <- function(object, ...)
+ object@df.residual
+
+if(is.R()) {
+
+
+ setMethod("df.residual", "vlm", function(object, ...)
+ df.residual.vlm(object, ...))
+} else {
+ if(!isGeneric("df.residual"))
+ setGeneric("df.residual", function(object, ...)
+ standardGeneric("df.residual"))
+ setMethod("df.residual", "vlm", function(object, ...)
+ df.residual.vlm(object, ...))
+}
+
+
+
+
+
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
new file mode 100644
index 0000000..1287a63
--- /dev/null
+++ b/R/effects.vglm.q
@@ -0,0 +1,25 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+effects.vlm <- function(object, ...)
+{
+ cat("Sorry, this function has not been written yet. Returning a NULL.\n")
+ invisible(NULL)
+}
+
+if(!isGeneric("effects"))
+ setGeneric("effects", function(object, ...) standardGeneric("effects"))
+
+if(is.R()) {
+ setMethod("effects", "vlm", function(object, ...)
+ effects.vlm(object, ...))
+} else {
+ setMethod("effects", "vlm", function(object, ...)
+ effects.vlm(object, ...))
+}
+
+
diff --git a/R/family.basics.q b/R/family.basics.q
new file mode 100644
index 0000000..461cbbf
--- /dev/null
+++ b/R/family.basics.q
@@ -0,0 +1,839 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+getind <- function(constraints, M, ncolx) {
+
+
+
+ if(!length(constraints)) {
+
+ constraints = vector("list", ncolx)
+ for(i in 1:ncolx)
+ constraints[[i]] <- diag(M)
+ }
+
+ ans <- vector("list", M+1)
+ names(ans) <- c(paste("eta", 1:M, sep=""), "ncolxbig")
+
+ temp2 <- matrix(unlist(constraints), nrow=M)
+ for(k in 1:M) {
+ ansx <- NULL
+ for(i in 1:length(constraints)) {
+ temp <- constraints[[i]]
+ isfox <- any(temp[k,] != 0)
+ if(isfox) {
+ ansx <- c(ansx, i)
+ }
+ }
+ ans[[k]] <- list(xindex=ansx,
+ xbigindex=(1:ncol(temp2))[temp2[k,] != 0])
+ }
+ ans[[M+1]] <- ncol(temp2)
+
+ ans
+}
+
+
+
+cm.vgam <- function(cm, x, bool, constraints,
+ intercept.apply=FALSE, overwrite=FALSE)
+{
+
+
+
+ M <- nrow(cm)
+ asgn <- attr(x, "assign")
+ nasgn <- names(asgn)
+ ninasgn <- nasgn[nasgn != "(Intercept)"]
+
+ if(!length(constraints)) {
+ constraints <- vector("list", length(nasgn))
+ for(i in 1:length(nasgn)) {
+ constraints[[i]] <- diag(M)
+ }
+ names(constraints) <- nasgn
+ }
+ if(!is.list(constraints))
+ stop("\"constraints\" must be a list")
+
+ if(length(constraints) != length(nasgn) ||
+ any(sort(names(constraints)) != sort(nasgn))) {
+ cat("names(constraints)\n")
+ cat("The above don't match;\n")
+ stop("\"constraints\" is half-pie")
+ }
+
+ if(is.logical(bool)) {
+ if(bool) {
+ if(intercept.apply && any(nasgn=="(Intercept)"))
+ constraints[["(Intercept)"]] <- cm
+ if(length(ninasgn))
+ for(i in ninasgn)
+ constraints[[i]] <- cm
+ } else {
+ return(constraints)
+ }
+ } else {
+ if(!is.R()) {
+ warn.save <- options()$warn
+ options(warn=-1)
+ tbool <- terms(bool) # Squawks if FALSE or TRUE is the response
+ options(warn=warn.save) # Restore the warnings
+ } else
+ tbool <- terms(bool)
+ if(attr(tbool, "response")) {
+ i <- attr(tbool, "factors")
+ default <- dimnames(i)[[1]]
+ default <- default[1]
+ default <- parse(text=default[1])[[1]]
+ default <- as.logical(eval(default))
+ } else {
+ default <- TRUE
+ }
+ tl <- attr(tbool, "term.labels")
+ if(attr(tbool, "intercept"))
+ tl <- c("(Intercept)", tl)
+
+ for(i in nasgn) {
+ if(default && any(tl==i))
+ constraints[[i]] <- cm
+ if(!default && !any(tl==i))
+ constraints[[i]] <- cm
+ }
+ }
+
+ constraints
+}
+
+
+
+cm.nointercept.vgam <- function(constraints, x, nointercept, M)
+{
+
+ asgn <- attr(x, "assign")
+ nasgn <- names(asgn)
+ if(is.null(constraints)) {
+ constraints <- vector("list", length(nasgn)) # list()
+ names(constraints) <- nasgn
+ }
+ if(!is.list(constraints))
+ stop("\"constraints\" must be a list")
+ for(i in 1:length(asgn))
+ constraints[[nasgn[i]]] <-
+ if(is.null(constraints[[nasgn[i]]])) diag(M) else
+ eval(constraints[[nasgn[i]]])
+
+ if(is.null(nointercept))
+ return(constraints)
+ if(!is.numeric(nointercept))
+ stop("\"nointercept\" must be numeric")
+
+ nointercept <- unique(sort(nointercept))
+ if(length(nointercept) == 0 || length(nointercept) >= M)
+ stop("too few or too many values")
+
+ if(any(nointercept < 1 | nointercept > M))
+ stop("\"nointercept\" out of range")
+ if(nasgn[1] != "(Intercept)" || M == 1)
+ stop("Need an (Intercept) constraint matrix with M>1")
+ if(!isTRUE(all.equal(constraints[["(Intercept)"]], diag(M))))
+ warning("Constraint matrix of (Intercept) not diagonal")
+
+ temp <- constraints[["(Intercept)"]]
+ temp <- temp[,-nointercept,drop=FALSE] # Will have M rows & at least 1 coln
+ constraints[["(Intercept)"]] <- temp
+ constraints
+}
+
+
+
+cm.zero.vgam <- function(constraints, x, zero, M)
+{
+
+ asgn <- attr(x, "assign")
+ nasgn <- names(asgn)
+ if(is.null(constraints)) {
+ constraints <- vector("list", length(nasgn)) # list()
+ names(constraints) <- nasgn
+ }
+ if(!is.list(constraints)) stop("\"constraints\" must be a list")
+ for(i in 1:length(asgn))
+ constraints[[nasgn[i]]] <-
+ if(is.null(constraints[[nasgn[i]]])) diag(M) else
+ eval(constraints[[nasgn[i]]])
+
+ if(is.null(zero))
+ return(constraints)
+ if(!is.numeric(zero)) stop("\"zero\" must be numeric")
+ if(any(zero < 1 | zero > M))
+ stop("\"zero\" out of range")
+ if(nasgn[1] != "(Intercept)")
+ stop("can't fit an intercept to a no-intercept model")
+
+ if(2 <= length(constraints))
+ for(i in 2:length(constraints)) {
+ temp <- constraints[[nasgn[i]]]
+ temp[zero,] <- 0
+ index <- NULL
+ for(j in 1:ncol(temp))
+ if(all(temp[,j]==0)) index <- c(index,j)
+ if(length(index)==ncol(temp))
+ stop("constraint matrix has no columns!")
+ if(!is.null(index))
+ temp <- temp[,-index,drop=FALSE]
+ constraints[[nasgn[i]]] <- temp
+ }
+ constraints
+}
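+
+# Sketch of intent (added comment, not upstream): zero=2, say, makes the
+# second linear predictor intercept-only; row 2 of every non-intercept
+# constraint matrix is set to zero and any resulting all-zero columns are
+# dropped.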
+
+
+process.constraints <- function(constraints, x, M, by.col=TRUE)
+{
+
+
+
+ asgn <- attr(x, "assign")
+ nasgn <- names(asgn)
+
+ if(is.null(constraints)) {
+ constraints <- vector("list", length(nasgn))
+ for(i in 1:length(nasgn))
+ constraints[[i]] <- diag(M)
+ names(constraints) <- nasgn
+ }
+
+ if(is.matrix(constraints))
+ constraints <- list(constraints)
+
+ if(!is.list(constraints))
+ stop("\"constraints\" must be a list")
+
+ lenconstraints <- length(constraints)
+ if(lenconstraints > 0)
+ for(i in 1:lenconstraints) {
+ constraints[[i]] <- eval(constraints[[i]])
+ if(!is.null(constraints[[i]]) && !is.matrix(constraints[[i]]))
+ stop("\"constraints\"[[",i,"]] is not a matrix")
+ }
+
+ if(is.null(names(constraints)))
+ names(constraints) <- rep(nasgn, length=lenconstraints)
+
+ temp <- if(!is.R()) list() else {
+ junk <- vector("list", length(nasgn))
+ names(junk) <- nasgn
+ junk
+ }
+ for(i in 1:length(nasgn))
+ temp[[nasgn[i]]] <-
+ if(is.null(constraints[[nasgn[i]]])) diag(M) else
+ eval(constraints[[nasgn[i]]])
+
+ for(i in 1:length(asgn)) {
+ if(!is.matrix(temp[[i]])) {
+ stop("not a constraint matrix")
+ }
+ if(ncol(temp[[i]]) > M)
+ stop("constraint matrix has too many columns")
+ }
+
+ if(!by.col)
+ return(temp)
+
+
+ constraints <- temp
+ Blist <- vector("list", ncol(x))
+ for(i in 1:length(asgn)) {
+ cols <- asgn[[i]]
+ cm <- constraints[[i]]
+ for(j in cols)
+ Blist[[j]] <- cm
+ }
+ names(Blist) <- dimnames(x)[[2]]
+ Blist
+}
+
+
+
+
+trivial.constraints <- function(Blist, target=diag(M))
+{
+
+ if(is.null(Blist))
+ return(1)
+
+ if(is.matrix(Blist))
+ Blist <- list(Blist)
+ M <- dim(Blist[[1]])[1]
+
+ if(!is.matrix(target))
+ stop("target is not a matrix")
+ dimtar = dim(target)
+
+ trivc <- rep(1, length(Blist))
+ names(trivc) <- names(Blist)
+ for(i in 1:length(Blist)) {
+ d <- dim(Blist[[i]])
+ if(d[1] != dimtar[1]) trivc[i] <- 0
+ if(d[2] != dimtar[2]) trivc[i] <- 0
+ if(d[1] != M) trivc[i] <- 0
+ if(length(Blist[[i]]) != length(target)) trivc[i] <- 0
+ if(trivc[i] == 0) next
+ if(!all(c(Blist[[i]]) == c(target)))
+ trivc[i] <- 0
+ if(trivc[i] == 0) next
+ }
+ trivc
+}
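+
+# Illustrative sketch (added comment, not upstream): returns 1 for each
+# constraint matrix equal to the target (default diag(M)), else 0.
+# trivial.constraints(list(a=diag(2), b=matrix(c(1,0),2,1)))  # c(a=1, b=0)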
+
+
+add.constraints <- function(constraints, new.constraints,
+ overwrite=FALSE, check=FALSE)
+{
+
+ empty.list <- function(l)
+ (is.null(l) || (is.list(l) && length(l)==0))
+
+ if(empty.list(constraints))
+ if(is.list(new.constraints))
+ return(new.constraints) else
+ return(list()) # Both NULL probably
+
+ constraints <- as.list(constraints)
+ new.constraints <- as.list(new.constraints)
+ nc <- names(constraints) # May be NULL
+ nn <- names(new.constraints) # May be NULL
+
+ if(is.null(nc) || is.null(nn))
+ stop("lists must have names")
+ if(any(nc=="") || any(nn==""))
+ stop("lists must have names")
+
+ if(!empty.list(constraints) && !empty.list(new.constraints)) {
+ for(i in nn) {
+ if(any(i==nc)) {
+ if(check &&
+ (!(all(dim(constraints[[i]])==dim(new.constraints[[i]])) &&
+ all(constraints[[i]]==new.constraints[[i]]))))
+ stop("apparent contradiction in the specification of the constraints")
+ if(overwrite)
+ constraints[[i]] <- new.constraints[[i]]
+ } else
+ constraints[[i]] <- new.constraints[[i]]
+ }
+ } else {
+ if(!empty.list(constraints))
+ return(as.list(constraints)) else
+ return(as.list(new.constraints))
+ }
+
+ constraints
+}
+
+
+
+
+
+
+
+
+iam <- function(j, k, M, hbw=M, both=FALSE, diagonal=TRUE)
+{
+
+ if(M==1)
+ if(!diagonal) stop("can't handle this")
+
+ if(M==1)
+ if(both) return(list(row.index=1, col.index=1)) else return(1)
+
+ upper <- if(diagonal) M else M-1
+ i2 <- as.list(upper:1)
+ i2 <- lapply(i2, seq)
+ i2 <- unlist(i2)
+
+
+ i1 <- matrix(1:M, M, M)
+ i1 <- if(diagonal) c(i1[row(i1)>=col(i1)]) else c(i1[row(i1)>col(i1)])
+
+
+ if(both) list(row.index=i2, col.index=i1) else {
+ if(j > M || k > M || j < 1 || k < 1)
+ stop("range error in j or k")
+ both <- (i1==j & i2==k) | (i1==k & i2==j)
+ (1:length(i2))[both]
+ }
+}
+
+
+
+dimm <- function(M, hbw=M)
+{
+
+
+ if(!is.numeric(hbw))
+ hbw <- M
+
+ if(hbw > M || hbw < 1)
+ stop("range error in hbw")
+ hbw * (2*M - hbw +1) / 2
+}
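+
+# Sketch of the matrix-band convention (added comment, not upstream): the
+# M*(M+1)/2 distinct entries of a symmetric M x M working-weight matrix are
+# stored as one row of wz, diagonal elements first; iam() maps a (j,k) pair
+# to its column and dimm() gives the number of columns.
+# dimm(3)         # 6 columns for M=3
+# iam(1, 1, M=3)  # 1 (diagonals come first)
+# iam(1, 2, M=3)  # 4 (off-diagonals follow)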
+
+
+
+
+
+
+m2avglm <- function(object, upper=FALSE, allow.vector=FALSE) {
+ m2adefault(wweights(object), M=object at misc$M,
+ upper=upper, allow.vector=allow.vector)
+}
+
+
+m2adefault <- function(m, M, upper=FALSE, allow.vector=FALSE)
+{
+ if(!is.numeric(m))
+ stop("argument m is not numeric")
+
+ if(!is.matrix(m))
+ m <- cbind(m)
+ n <- nrow(m)
+ dimm <- ncol(m)
+ index <- iam(NA, NA, M=M, both=TRUE, diag=TRUE)
+ if(dimm > length(index$row.index))
+ stop("bad value for M; it is too small")
+ if(dimm < M) {
+ stop("bad value for M; it is too big")
+ }
+
+ fred <- dotC(name="m2a", as.double(t(m)), ans=double(M*M*n),
+ as.integer(dimm),
+ as.integer(index$row-1),
+ as.integer(index$col-1),
+ as.integer(n), as.integer(M),
+ as.integer(as.numeric(upper)), NAOK=TRUE)
+ dim(fred$ans) <- c(M,M,n)
+ alpn <- NULL
+ dimnames(fred$ans) <- list(alpn, alpn, dimnames(m)[[1]])
+ fred$ans
+}
+
+
+a2m <- function(a, hbw=M)
+{
+
+
+
+ if(is.matrix(a) && ncol(a)==nrow(a))
+ a <- array(a, c(nrow(a), ncol(a), 1))
+ if(!is.array(a))
+ dim(a) <- c(1,1,length(a))
+
+ M <- dim(a)[1]
+ n <- dim(a)[3]
+ dimm.value <- dimm(M, hbw)
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+
+
+ fred <- dotC(name="a2m", as.double(a), m=double(dimm.value*n),
+ as.integer(dimm.value),
+ as.integer(index$row-1),
+ as.integer(index$col-1),
+ as.integer(n), as.integer(M), NAOK=TRUE)
+ dim(fred$m) <- c(dimm.value,n)
+ fred$m <- t(fred$m)
+
+ if(hbw != M)
+ attr(fred$m, "hbw") <- hbw
+ if(length(lpn <- dimnames(a)[[1]]) != 0)
+ attr(fred$m, "predictors.names") <- lpn
+ fred$m
+}
+
+
+vindex <- function(M, row.arg=FALSE, col.arg=FALSE, length.arg=M*(M+1)/2)
+{
+
+
+
+ if((row.arg + col.arg) != 1)
+ stop("only one of row and col must be TRUE")
+ if(M==1) {
+ ans <- 1
+ } else {
+ if(row.arg) {
+ i1 <- matrix(1:M, M, M)
+ ans <- c(i1[row(i1)+col(i1)<=(M+1)])
+ } else {
+ i1 <- matrix(1:M, M, M)
+ ans <- c(i1[row(i1)>=col(i1)])
+ }
+ }
+ if(length.arg>length(ans))
+ stop("length argument too big")
+ rep(ans, len=length.arg)
+}
+
+
+
+if(!exists("is.R")) is.R <- function()
+ exists("version") && !is.null(version$language) && version$language=="R"
+
+
+wweights = function(object, matrix.arg=TRUE, deriv.arg=FALSE,
+ ignore.slot=FALSE, checkwz=TRUE) {
+
+
+
+
+ if(length(wz <- object@weights) && !ignore.slot && !deriv.arg) {
+ return(wz)
+ }
+
+ n <- object@misc$n # Done below
+
+ if(any(slotNames(object)=="extra")) {
+ extra <- object@extra
+ if(length(extra)==1 && !length(names(extra))) {
+ # Usage was something like vglm(..., extra = 5)
+ # so, internally, extra == 5 and not a list
+ extra <- extra[[1]]
+ }
+ }
+ mu <- object@fitted.values
+ if(any(slotNames(object)=="predictors"))
+ eta <- object@predictors
+ mt <- terms(object) # object@terms$terms; 11/8/03
+ Blist <- constraints <- object@constraints
+ new.coeffs <- object@coefficients
+ if(any(slotNames(object)=="iter"))
+ iter <- object@iter
+
+ w <- rep(1, n)
+ if(any(slotNames(object)=="prior.weights"))
+ w <- object@prior.weights
+ if(!length(w))
+ w <- rep(1, n)
+
+ x <- object@x
+ if(!length(x))
+ x <- model.matrixvlm(object, type="lm")
+ y <- object@y
+
+ if(any(slotNames(object)=="control"))
+ for(i in names(object@control)) {
+ assign(i, object@control[[i]])
+ }
+
+ if(length(object@misc))
+ for(i in names(object@misc)) {
+ assign(i, object@misc[[i]])
+ }
+
+ if(any(slotNames(object)=="family")) {
+ expr <- object@family@deriv
+ deriv.mu <- eval(expr)
+ # Need to compute wz only if it couldn't be extracted from the object
+ if(!length(wz)) {
+ expr <- object@family@weight
+ wz <- eval(expr)
+
+
+ if(M > 1)
+ dimnames(wz) = list(dimnames(wz)[[1]], NULL) # Remove colnames
+ wz = if(matrix.arg) as.matrix(wz) else c(wz)
+ }
+ if(deriv.arg) list(deriv=deriv.mu, weights=wz) else wz
+ } else NULL
+}
+
+
+pweights = function(object, ...) {
+ ans = object@prior.weights
+ if(length(ans)) {
+ ans
+ } else {
+ temp = object@y
+ ans = rep(1, nrow(temp)) # Assumed all equal and unity.
+ names(ans) = dimnames(temp)[[1]]
+ ans
+ }
+}
+
+
+procVec = function(vec, yn, Default) {
+
+
+
+
+ if(any(is.na(vec)))
+ stop("vec cannot contain any NAs")
+ L = length(vec)
+ nvec <- names(vec) # vec[""] undefined
+ named = length(nvec) # FALSE for c(1,3)
+ if(named) {
+ index = (1:L)[nvec==""]
+ default = if(length(index)) vec[index] else Default
+ } else {
+ default = vec
+ }
+
+ answer = rep(default, len=length(yn)) # Recycling may be premature if named
+ names(answer) = yn
+ if(named) {
+ nvec2 = nvec[nvec != ""]
+ if(length(nvec2)) {
+ if(any(!is.element(nvec2, yn)))
+ stop("some names given which are superfluous")
+ answer = rep(as.numeric(NA), len=length(yn))
+ names(answer) = yn
+ answer[nvec2] = vec[nvec2]
+ answer[is.na(answer)] = rep(default, len=sum(is.na(answer)))
+ }
+ }
+
+ answer
+}
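+
+# Illustrative sketch (added comment, not upstream): unnamed elements of
+# vec supply the default, named elements override the matching slots of yn.
+# procVec(c(a=10), yn=c("a","b","c"), Default=1)  # gives a=10, b=1, c=1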
+
+
+
+if(FALSE) {
+if(!isGeneric("m2a"))
+ setGeneric("m2a", function(object, ...) standardGeneric("m2a"))
+
+setMethod("m2a", "vglm",
+ function(object, ...)
+ m2avglm(object, ...))
+}
+
+
+weightsvglm = function(object, type = c("prior", "working"),
+ matrix.arg=TRUE, ignore.slot=FALSE,
+ deriv.arg=FALSE, ...) {
+ weightsvlm(object, type = type, matrix.arg=matrix.arg,
+ ignore.slot=ignore.slot,
+ deriv.arg=deriv.arg, ...)
+}
+
+weightsvlm = function(object, type = c("prior", "working"),
+ matrix.arg=TRUE, ignore.slot=FALSE,
+ deriv.arg=FALSE, ...) {
+ if(mode(type) != "character" && mode(type) != "name")
+ type = as.character(substitute(type))
+ type = match.arg(type, c("prior", "working"))[1]
+
+ if(type == "working") {
+ wweights(object=object,
+ matrix.arg=matrix.arg, deriv.arg=deriv.arg,
+ ignore.slot=ignore.slot, ...)
+ } else {
+ if(deriv.arg) stop("can't set deriv=TRUE when type=\"prior\"")
+ ans = pweights(object)
+ if(matrix.arg) as.matrix(ans) else c(ans)
+ }
+}
+
+setMethod("weights", "vlm",
+ function(object, ...)
+ weightsvlm(object, ...))
+
+setMethod("weights", "vglm",
+ function(object, ...)
+ weightsvglm(object, ...))
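+
+# Usage sketch (added comment, not upstream): for a fitted model object,
+# weights(fit, type="prior") returns the prior weights, while
+# weights(fit, type="working") returns the working weight matrices in the
+# matrix-band format described above, one row per observation.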
+
+
+
+
+
+
+dotFortran = function(name, ..., NAOK = FALSE, DUP = TRUE,
+ PACKAGE="VGAM") {
+ if(is.R()) {
+ .Fortran(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE=PACKAGE)
+ } else {
+ stop()
+ }
+}
+
+dotC = function(name, ..., NAOK = FALSE, DUP = TRUE, PACKAGE="VGAM") {
+ if(is.R()) {
+ .C(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE=PACKAGE)
+ } else {
+ stop()
+ }
+}
+
+
+
+qnupdate = function(w, wzold, dderiv, deta, M, keeppd=TRUE,
+ trace=FALSE, reset=FALSE, effpos=.Machine$double.eps^0.75) {
+
+
+ if(M ==1) {
+ dderiv = cbind(dderiv)
+ deta = cbind(deta)
+ }
+ Bs = mux22(t(wzold), deta, M=M, upper=FALSE, as.mat=TRUE) # n x M
+ sBs = c( (deta * Bs) %*% rep(1, M) ) # should have positive values
+ sy = c( (dderiv * deta) %*% rep(1, M) )
+ wznew = wzold
+ index = iam(NA, NA, M=M, both=TRUE)
+ index$row.index = rep(index$row.index, len=ncol(wzold))
+ index$col.index = rep(index$col.index, len=ncol(wzold))
+ updateThese = if(keeppd) (sy > effpos) else rep(TRUE, len=length(sy))
+ if(!keeppd || any(updateThese)) {
+ wznew[updateThese,] = wznew[updateThese,] - Bs[updateThese,index$row] *
+ Bs[updateThese,index$col] / sBs[updateThese] +
+ dderiv[updateThese,index$row] * dderiv[updateThese,index$col] /
+ sy[updateThese]
+ notupdated = sum(!updateThese)
+ if(notupdated && trace)
+ cat(notupdated,"weight matrices not updated out of",length(sy),"\n")
+ } else {
+ warning("no BFGS quasi-Newton update made at all")
+ cat("no BFGS quasi-Newton update made at all\n")
+ if(exists("flush.console")) flush.console()
+ }
+ wznew
+}
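+
+# Added explanatory comment (not upstream): this is the standard BFGS
+# rank-two update applied row-wise to the packed working weights,
+# W.new = W - (W d)(W d)'/(d'W d) + g g'/(g'd), with d = deta and
+# g = dderiv; rows with g'd <= effpos are skipped when keeppd=TRUE so that
+# the updated matrices stay positive-definite.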
+
+
+
+
+
+
+mbesselI0 = function(x, deriv.arg=0) {
+ if(!is.Numeric(deriv.arg, allow=1, integer=TRUE, positi=TRUE) && deriv.arg!=0)
+ stop("deriv.arg must be a single non-negative integer")
+ if(!(deriv.arg==0 || deriv.arg==1 || deriv.arg==2))
+ stop("deriv must be 0, 1, or 2")
+ if(!is.Numeric(x))
+ stop("bad input for x")
+ n = length(x)
+ if(FALSE) {
+ }
+
+ # Use finite differences
+ ans = matrix(as.numeric(NA), nrow=n, ncol=deriv.arg+1)
+ ans[,1] = besselI(x, nu=0)
+ if(deriv.arg>=1) ans[,2] = besselI(x, nu=1)
+ if(deriv.arg>=2) ans[,3] = ans[,1] - ans[,2] / x
+ ans
+}
+
+
+
+VGAM.matrix.norm = function(A, power=2, suppressWarning=FALSE) {
+ if((nrow(A) != ncol(A)) && !suppressWarning)
+ warning("norms should be calculated for square matrices; A is not square")
+ if(power=="F") {
+ sqrt(sum(A^2))
+ } else if(power==1) {
+ max(apply(abs(A), 2, sum))
+ } else if(power==2) {
+ sqrt(max(eigen(t(A) %*% A)$value))
+ } else if(!is.finite(power)) {
+ max(apply(abs(A), 1, sum))
+ } else stop("argument \"power\" not recognised")
+}
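+
+# Illustrative sketch (added comment, not upstream):
+# VGAM.matrix.norm(diag(3), power=2)    # spectral norm of I3: 1
+# VGAM.matrix.norm(diag(3), power="F")  # Frobenius norm: sqrt(3)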
+
+
+rmfromVGAMenv = function(varnames, prefix="") {
+ evarnames = paste(prefix, varnames, sep="")
+ if(is.R()) {
+ for(i in evarnames) {
+ mytext1 = "exists(x=i, envir = VGAMenv)"
+ myexp1 = parse(text=mytext1)
+ is.there = eval(myexp1)
+ if(is.there) {
+ rm(list=i, envir = VGAMenv)
+ }
+ }
+ } else {
+ warning("this code needs checking 9")
+ for(i in evarnames)
+ while(exists(i, inherits=TRUE))
+ rm(i, inherits=TRUE)
+
+ }
+}
+
+existsinVGAMenv = function(varnames, prefix="") {
+ evarnames = paste(prefix, varnames, sep="")
+ ans = NULL
+ if(is.R()) {
+ for(i in evarnames) {
+ mytext1 = "exists(x=i, envir = VGAMenv)"
+ myexp1 = parse(text=mytext1)
+ is.there = eval(myexp1)
+ ans = c(ans, is.there)
+ }
+ } else {
+ warning("this code needs checking 8")
+ for(i in evarnames) {
+ is.there = exists(i, inherits=TRUE)
+ ans = c(ans, is.there)
+ }
+ }
+ ans
+}
+
+assign2VGAMenv = function(varnames, mylist, prefix="") {
+ evarnames = paste(prefix, varnames, sep="")
+ if(is.R()) {
+ for(i in 1:length(varnames)) {
+ assign(evarnames[i], mylist[[(varnames[i])]], envir = VGAMenv)
+ }
+ } else {
+ warning("not sure about the assign function---esp. re. frame 0")
+ for(i in 1:length(varnames))
+ assign(evarnames[i], mylist[[(varnames[i])]], frame=0)
+ }
+}
+
+
+getfromVGAMenv = function(varname, prefix="") {
+ varname = paste(prefix, varname, sep="")
+ if(length(varname) > 1) stop("varname must be of length 1")
+ if(is.R()) {
+ get(varname, envir = VGAMenv)
+ } else {
+ get(varname)
+ }
+}
+
+
+lerch <- function(x, s, v, tolerance=1.0e-10, iter=100) {
+ if(!is.Numeric(x) || !is.Numeric(s) || !is.Numeric(v))
+ stop("bad input in x, s, and/or v")
+ if(is.complex(c(x,s,v)))
+ stop("complex arguments not allowed in x, s and v")
+ if(!is.Numeric(tolerance, allow=1, posi=TRUE) || tolerance > 0.01)
+ stop("bad input for argument \"tolerance\"")
+ if(!is.Numeric(iter, allow=1, integ=TRUE, posi=TRUE))
+ stop("bad input for argument \"iter\"")
+ L = max(length(x), length(s), length(v))
+ x = rep(x, length=L); s = rep(s, length=L); v = rep(v, length=L);
+ xok = abs(x) < 1 & !(v <= 0 & v==round(v))
+ x[!xok] = 0 # Fix this later
+ ans = dotC(name="lerchphi123", err=integer(L), as.integer(L),
+ as.double(x), as.double(s), as.double(v),
+ acc=as.double(tolerance), result=double(L), as.integer(iter))
+ ifelse(ans$err == 0 & xok , ans$result, NA)
+}
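+
+# Sketch (added comment, not upstream): lerch() computes the Lerch
+# transcendent Phi(x,s,v) = sum_{n>=0} x^n / (n+v)^s for |x| < 1.
+# Since Phi(x,1,1) = -log(1-x)/x, lerch(0.5, s=1, v=1) should be close
+# to 2 * log(2).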
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.binomial.q b/R/family.binomial.q
new file mode 100644
index 0000000..084288d
--- /dev/null
+++ b/R/family.binomial.q
@@ -0,0 +1,794 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+process.binomial2.data.vgam <- expression({
+
+
+ if(!is.matrix(y))
+ {
+ yf <- as.factor(y)
+ lev <- levels(yf)
+ llev <- length(lev)
+ if(llev != 4)
+ stop("response must have 4 levels")
+ nn <- length(yf)
+ y <- matrix(0, nn, llev)
+ y[cbind(1:nn,as.vector(unclass(yf)))] <- 1
+ colnamesy <- paste(lev, ":", c("00","01","10","11"), sep="")
+ dimnames(y) <- list(names(yf), colnamesy)
+ input.type <- 1
+ } else
+ if(ncol(y)==2)
+ {
+ if(!all(y==0 | y==1))
+ stop("response must contains 0's and 1's only")
+ col.index <- y[,2] + 2*y[,1] + 1 # 1:4
+ nn <- nrow(y)
+ y <- matrix(0, nn, 4)
+ y[cbind(1:nn,col.index)] <- 1
+ dimnames(y) <- list(dimnames(y)[[1]], c("00","01","10","11"))
+ input.type <- 2
+ } else
+ if(ncol(y)==4)
+ {
+ input.type <- 3
+ } else
+ stop("response unrecognized")
+
+
+ nvec <- drop(y %*% rep(1,ncol(y)))
+
+ w <- w * nvec
+ y <- y / nvec # Convert to proportions
+
+ mu <- y + (1/ncol(y) - y)/nvec
+ dimnames(mu) <- dimnames(y)
+
+})
+
+
+
+
+
+
+
+
+
+
+betabinomial <- function(lmu="logit", lrho="logit", irho=0.5, zero=2)
+{
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lrho) != "character" && mode(lrho) != "name")
+ lrho = as.character(substitute(lrho))
+ if(length(irho) && (!is.Numeric(irho, positive=TRUE) || max(irho) >= 1))
+ stop("bad input for argument \"irho\"")
+
+ new("vglmff",
+ blurb=c("Beta-binomial model\n",
+ "Links: ",
+ namesof("mu", lmu), ", ",
+ namesof("rho", lrho), "\n",
+ "Variance: mu*(1-mu)*(1+(w-1)*rho)/w"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
+ ycounts = y * w # Convert proportions to counts
+ if(max(abs(ycounts-round(ycounts))) > 1.0e-6)
+ stop("the response (as counts) does not appear to be integer-valued")
+ predictors.names = c(namesof("mu", .lmu, tag=FALSE),
+ namesof("rho", .lrho, tag=FALSE))
+ if(!length(etastart)) {
+ if(is.Numeric( .irho )) {
+ init.rho = rep( .irho, length=n)
+ } else {
+ init.rho = rep(0, length=n)
+ Loglikfun = function(ycounts, nvec, shape1, shape2)
+ if(is.R()) sum(lbeta(shape1+ycounts, shape2+nvec-ycounts) -
+ lbeta(shape1, shape2)) else
+ sum(lgamma(shape1+ycounts) + lgamma(shape2+nvec-ycounts) -
+ lgamma(shape1+shape2+nvec) -
+ (lgamma(shape1) + lgamma(shape2) - lgamma(shape1+shape2)))
+ rho.grid = rvar = seq(0.05, 0.95, len=11) #
+ for(ii in 1:length(rho.grid))
+ rvar[ii] = Loglikfun(ycounts=y*w,
+ shape1=mustart*(1-rho.grid[ii])/rho.grid[ii],
+ shape2=(1-mustart)*(1-rho.grid[ii])/rho.grid[ii],
+ nvec=w)
+ try.this = rho.grid[rvar == max(rvar)]
+ init.rho = rep(try.this, len=n)
+ }
+
+ etastart = cbind(theta2eta(mustart, .lmu),
+ theta2eta(init.rho, .lrho))
+ }
+ }), list( .lmu=lmu, .lrho=lrho, .irho=irho ))),
+ inverse=eval(substitute(function(eta, extra=NULL)
+ eta2theta(eta[,1], .lmu),
+ list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ misc$link <- c(mu = .lmu, rho = .lrho)
+ misc$zero <- .zero
+ misc$expected <- TRUE
+ }), list( .lmu=lmu, .lrho=lrho, .zero=zero ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
+ ycounts = y * w # Convert proportions to counts
+ mymu = eta2theta(eta[,1], .lmu)
+ rho = eta2theta(eta[,2], .lrho)
+ shape1 = mymu * (1 - rho) / rho
+ shape2 = (1-mymu) * (1 - rho) / rho
+ nvec = w
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R()) sum(lbeta(shape1+ycounts, shape2+nvec-ycounts) -
+ lbeta(shape1, shape2)) else
+ sum(lgamma(shape1+ycounts) + lgamma(shape2+nvec-ycounts) -
+ lgamma(shape1+shape2+nvec) -
+ (lgamma(shape1) + lgamma(shape2) - lgamma(shape1+shape2)))
+ }
+ }, list( .lmu=lmu, .lrho=lrho ))),
+ vfamily=c("betabinomial"),
+ deriv=eval(substitute(expression({
+ nvec = w # extra$nvec # for summary()
+ ycounts = y * w # Convert proportions to counts
+ mymu = eta2theta(eta[,1], .lmu)
+ rho = eta2theta(eta[,2], .lrho)
+ shape1 = mymu * (1 - rho) / rho
+ shape2 = (1-mymu) * (1 - rho) / rho
+ dshape1.dmu = (1 - rho) / rho
+ dshape2.dmu = -(1 - rho) / rho
+ dshape1.drho = -mymu / rho^2
+ dshape2.drho = -(1 - mymu) / rho^2
+ dmu.deta = dtheta.deta(mymu, .lmu)
+ drho.deta = dtheta.deta(rho, .lrho)
+ dl.dmu = dshape1.dmu * (digamma(shape1+ycounts) -
+ digamma(shape2+nvec-ycounts) -
+ digamma(shape1) + digamma(shape2))
+ dl.drho = (-1/rho^2) * (mymu * digamma(shape1+ycounts) +
+ (1-mymu) * digamma(shape2+nvec-ycounts) -
+ digamma(shape1+shape2+nvec) -
+ mymu * digamma(shape1) -
+ (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
+ temp5 = cbind(dl.dmu * dmu.deta, dl.drho * drho.deta)
+ temp5
+ }), list( .lmu=lmu, .lrho=lrho ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz11 = -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2))
+ wz22 = -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2))
+ wz21 = -(trigamma(shape1+shape2) - trigamma(shape1+shape2+nvec))
+ wz[,iam(1,1,M)] = dmu.deta^2 * (wz11 * dshape1.dmu^2 +
+ wz22 * dshape2.dmu^2 +
+ 2 * wz21 * dshape1.dmu * dshape2.dmu)
+ wz[,iam(2,2,M)] = drho.deta^2 * (wz11 * dshape1.drho^2 +
+ wz22 * dshape2.drho^2 +
+ 2 * wz21 * dshape1.drho * dshape2.drho)
+ wz[,iam(2,1,M)] = dmu.deta * drho.deta *
+ (dshape1.dmu*(wz11*dshape1.drho + wz21*dshape2.drho) +
+ dshape2.dmu*(wz21*dshape1.drho + wz22*dshape2.drho))
+ wz
+ }), list( .lmu=lmu, .lrho=lrho ))))
+}
+
+
+
+
+
+
+
+binom2.or <- function(lp="logit", lp1=lp, lp2=lp, lor="loge",
+ zero=3, exchangeable=FALSE, tol=0.001)
+{
+ if(mode(lp1) != "character" && mode(lp1) != "name")
+ lp1 <- as.character(substitute(lp1))
+ if(mode(lp2) != "character" && mode(lp2) != "name")
+ lp2 <- as.character(substitute(lp2))
+ if(mode(lor) != "character" && mode(lor) != "name")
+ lor <- as.character(substitute(lor))
+ if(is.logical(exchangeable) && exchangeable && (lp1 != lp2))
+ stop("exchangeable=TRUE but marginal links are not equal")
+ if(!is.Numeric(tol, positive=TRUE, allow=1))
+ stop("bad input for argument \"tol\"")
+
+ new("vglmff",
+ blurb=c("Palmgren model\n",
+ "Links: ",
+ namesof("mu1", lp1), ", ",
+ namesof("mu2", lp2), "; ",
+ namesof("OR", lor)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1,1,0,0,0,1),3,2), x,
+ .exchangeable, constraints,
+ intercept.apply=TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable=exchangeable, .zero=zero ))),
+ deviance=Deviance.categorical.data.vgam,
+ initialize=eval(substitute(expression({
+ eval(process.binomial2.data.vgam)
+ predictors.names <- c(namesof("mu1", .lp1, short=TRUE),
+ namesof("mu2", .lp2, short=TRUE),
+ namesof("OR", .lor, short=TRUE))
+ }), list( .lp1=lp1, .lp2=lp2, .lor=lor ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ pm <- cbind(eta2theta(eta[,1], .lp1), eta2theta(eta[,2], .lp2))
+ or <- eta2theta(eta[,3], .lor)
+ a <- 1 + (pm[,1]+pm[,2])*(or-1)
+ b <- -4 * or * (or-1) * pm[,1] * pm[,2]
+ temp <- sqrt(a^2+b)
+ pj4 <- ifelse(abs(or-1) < .tol, pm[,1]*pm[,2], (a-temp)/(2*(or-1)))
+ pj2 <- pm[,2] - pj4
+ pj3 <- pm[,1] - pj4
+ cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
+ }, list( .tol=tol, .lp1=lp1, .lp2=lp2, .lor=lor ))),
+ last=eval(substitute(expression({
+ misc$link <- c("mu1"= .lp1, "mu2"= .lp2, "OR"= .lor)
+ misc$tol <- .tol
+ }), list( .tol=tol, .lp1=lp1, .lp2=lp2, .lor=lor ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ pm <- cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
+ or <- mu[,4]*mu[,1]/(mu[,2]*mu[,3])
+ cbind(theta2eta(pm[,1], .lp1),
+ theta2eta(pm[,2], .lp2),
+ theta2eta(or, .lor))
+ }, list( .lp1=lp1, .lp2=lp2, .lor=lor ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("binom2.or", "binom2"),
+ deriv=eval(substitute(expression({
+ pm <- cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
+ or <- mu[,4]*mu[,1]/(mu[,2]*mu[,3])
+
+ a <- 1 + (pm[,1]+pm[,2])*(or-1)
+ b <- -4 * or * (or-1) * pm[,1] * pm[,2]
+ temp <- sqrt(a^2+b)
+
+ coeff <- -0.5 + (2*or*pm[,2]-a)/(2*temp)
+ d1 <- coeff*(y[,1]/mu[,1]-y[,3]/mu[,3])-
+ (1+coeff)*(y[,2]/mu[,2]-y[,4]/mu[,4])
+
+ coeff <- -0.5 + (2*or*pm[,1]-a)/(2*temp)
+ d2 <- coeff*(y[,1]/mu[,1]-y[,2]/mu[,2])-
+ (1+coeff)*(y[,3]/mu[,3]-y[,4]/mu[,4])
+
+ coeff <- (y[,1]/mu[,1]-y[,2]/mu[,2]-y[,3]/mu[,3]+y[,4]/mu[,4])
+ d3 <- ifelse(abs(or-1) < .tol,
+ coeff * pm[,1] * (1-pm[,1]) * pm[,2] * (1-pm[,2]),
+ (1/(or-1)) * coeff * ( (pm[,1]+pm[,2])*(1-a/temp)/2 +
+ (2*or-1)*pm[,1]*pm[,2]/temp - (a-temp)/(2*(or-1)) ))
+ w * cbind(d1 * dtheta.deta(pm[,1], .lp1),
+ d2 * dtheta.deta(pm[,2], .lp2),
+ d3 * dtheta.deta(or, .lor))
+ }), list( .tol=tol, .lp1=lp1, .lp2=lp2, .lor=lor ))),
+ weight=eval(substitute(expression({
+ Vab <- 1/(1/mu[,1] + 1/mu[,2] + 1/mu[,3] + 1/mu[,4])
+ deltapi <- mu[,3]*mu[,2] - mu[,4]*mu[,1]
+ delta <- mu[,1]*mu[,2]*mu[,3]*mu[,4]
+ pq <- pm[,1:2]*(1-pm[,1:2])
+
+ wz <- matrix(0, n, 4)
+ wz[,iam(1,1,M)] <- dtheta.deta(pm[,1], .lp1)^2 * pq[,2] * Vab / delta
+ wz[,iam(2,2,M)] <- dtheta.deta(pm[,2], .lp2)^2 * pq[,1] * Vab / delta
+ wz[,iam(3,3,M)] <- Vab * (dtheta.deta(or, .lor) /
+ dtheta.deta(or, "loge"))^2
+ wz[,iam(1,2,M)] <- Vab * deltapi * dtheta.deta(pm[,1], .lp1) *
+ dtheta.deta(pm[,2], .lp2) / delta
+ w * wz
+ }), list( .lp1=lp1, .lp2=lp2, .lor=lor ))))
+}
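+
+# Usage sketch (hypothetical data names, not upstream): given a 4-column
+# response matrix ymat of ("00","01","10","11") counts and a covariate x2,
+# a fit might look like
+# fit = vglm(ymat ~ x2, binom2.or(exchangeable=TRUE, zero=3))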
+
+
+
+
+
+
+binom2.rho <- function(lrho="rhobit", init.rho=0.4, zero=3, exchangeable=FALSE)
+{
+
+ if(mode(lrho) != "character" && mode(lrho) != "name")
+ lrho <- as.character(substitute(lrho))
+
+ new("vglmff",
+ blurb=c("Bivariate probit model\n",
+ "Links: ",
+ "probit(mu1), probit(mu2); ",
+ namesof("rho", lrho)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1,1,0,0,0,1),3,2), x,
+ .exchangeable, constraints, intercept.apply=TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable=exchangeable, .zero=zero ))),
+ deviance=Deviance.categorical.data.vgam,
+ initialize=eval(substitute(expression({
+ eval(process.binomial2.data.vgam)
+ predictors.names <- c("probit(mu1)", "probit(mu2)",
+ namesof("rho", .lrho, short=TRUE))
+ if(is.null(etastart))
+ etastart <- cbind(theta2eta(mu[,3]+mu[,4], "probit"),
+ theta2eta(mu[,2]+mu[,4], "probit"),
+ theta2eta(.init.rho, .lrho))
+ }), list( .lrho=lrho, .init.rho=init.rho ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ pm <- cbind(pnorm(eta[,1]),pnorm(eta[,2]))
+ rho <- eta2theta(eta[,3], .lrho)
+ p11 <- pnorm2(eta[,1], eta[,2], rho)
+ p01 <- pm[,2]-p11
+ p10 <- pm[,1]-p11
+ p00 <- 1-p01-p10-p11
+ cbind("00"=p00, "01"=p01, "10"=p10, "11"=p11)
+ }, list( .lrho=lrho ))),
+ last=eval(substitute(expression({
+ misc$link <- c(mu1 = "probit", mu2 = "probit", rho = .lrho)
+ }), list( .lrho=lrho ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ if(is.null(extra))
+ stop("rho must be passed into $link through \"extra\"")
+ pm <- cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
+ cbind("probit(mu1)"=qnorm(pm[,1]),
+ "probit(mu2)"=qnorm(pm[,2]),
+ "link(rho)"=theta2eta(extra, .lrho))
+ }, list( .lrho=lrho ))),
+ vfamily=c("binom2.rho", "binom2"),
+ deriv=eval(substitute(expression({
+ pm <- cbind(pnorm(eta[,1]),pnorm(eta[,2]))
+ rho <- eta2theta(eta[,3], .lrho)
+ p11 <- pnorm2(eta[,1], eta[,2], rho)
+ p01 <- pm[,2]-p11
+ p10 <- pm[,1]-p11
+ p00 <- 1-p01-p10-p11
+
+ B <- (eta[,2]-rho*eta[,1])/sqrt(1-rho^2)
+ A <- (eta[,1]-rho*eta[,2])/sqrt(1-rho^2)
+ phi1 <- dnorm(eta[,1])
+ phi2 <- dnorm(eta[,2])
+ PhiA <- pnorm(A)
+ PhiB <- pnorm(B)
+
+ ff <- dnorm2(eta[,1], eta[,2], rho)
+ d1 = phi1*(PhiB*(y[,4]/p11-y[,2]/p01) + (1-PhiB)*(y[,3]/p10-y[,1]/p00))
+ d2 = phi2*(PhiA*(y[,4]/p11-y[,3]/p10) + (1-PhiA)*(y[,2]/p01-y[,1]/p00))
+ dl.drho <- (y[,4]/p11-y[,3]/p10-y[,2]/p01+y[,1]/p00)* ff
+ drho.deta <- dtheta.deta(rho, .lrho)
+ w * cbind(d1, d2, dl.drho * drho.deta)
+ }), list( .lrho=lrho ))),
+ weight=eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # 6=dimm(M)
+ wz[,iam(1,1,M)] = phi1^2*(PhiB^2*(1/p11+1/p01)+(1-PhiB)^2*(1/p10+1/p00))
+ wz[,iam(2,2,M)] = phi2^2*(PhiA^2*(1/p11+1/p10)+(1-PhiA)^2*(1/p01+1/p00))
+ wz[,iam(1,2,M)] = phi1*phi2*(PhiA*PhiB/p11 + (1-PhiA)*(1-PhiB)/p00 -
+ PhiA*(1-PhiB)/p10 - (1-PhiA)*PhiB/p01)
+ d2l.drhoeta1 <- ff*phi1*(PhiB*(1/p11+1/p01) - (1-PhiB)*(1/p10+1/p00))
+ wz[,iam(1,3,M)] <- d2l.drhoeta1 * drho.deta
+ d2l.drhoeta2 <- ff*phi2*(PhiA*(1/p11+1/p10) - (1-PhiA)*(1/p01+1/p00))
+ wz[,iam(2,3,M)] <- d2l.drhoeta2 * drho.deta
+ d2l.drho2 <- ff^2 * (1/p11+1/p01+1/p10+1/p00)
+ wz[,iam(3,3,M)] <- d2l.drho2 * drho.deta^2
+ wz * w
+ }), list( .lrho=lrho ))))
+}
+
+
+
+dnorm2 <- function(x, y, r)
+ exp(-0.5*(x^2+y^2-2*x*y*r)/(1-r^2)) / (2*pi*sqrt(1-r^2))
+
+
+pnorm2 <- function(ah, ak, r)
+{
+
+ ans <- ah
+ size <- length(ah)
+ singler <- ifelse(length(r)==1,1,0)
+ dotC(name="pnorm2", ah=as.double(-ah), ak=as.double(-ak), r=as.double(r),
+ size=as.integer(size), singler=as.integer(singler),
+ ans=as.double(ans))$ans
+}
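+
+# Sketch (added comment, not upstream): pnorm2(ah, ak, r) is the standard
+# bivariate normal probability P(X <= ah, Y <= ak) with correlation r; with
+# r=0 it factorises, e.g. pnorm2(0, 0, 0) should equal pnorm(0)^2 = 0.25.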
+
+
+
+
+
+my.dbinom <- function(x,
+ size = stop("no size arg"),
+ prob = stop("no prob arg"))
+{
+
+ exp( lgamma(size+1) - lgamma(size-x+1) - lgamma(x+1) +
+ x * log(prob/(1-prob)) + size * log(1-prob) )
+}
+
+
+
+size.binomial <- function(prob=0.5, link="loge")
+{
+ if(any(prob <= 0 | prob >= 1))
+ stop("some values of prob out of range")
+ if(!missing(link)) link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Binomial with n unknown, prob known (prob=",prob,")\n",
+ "Links: ",
+ namesof("size", link, tag=TRUE),
+ " (treated as real-valued)\n",
+ "Variance: Var(Y) = size * prob * (1-prob);",
+ " Var(size) is intractable"),
+ initialize=eval(substitute(expression({
+ predictors.names <- "size"
+ extra$temp2 <- rep( .prob , length=n)
+ if(is.null(etastart)) {
+ nvec <- (y+0.1)/extra$temp2
+ etastart <- theta2eta(nvec, .link)
+ }
+ }), list( .prob =prob, .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ nvec <- eta2theta(eta, .link)
+ nvec*extra$temp2
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link <- c(size = .link)
+ misc$prob <- extra$temp2
+ }), list( .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ nvec <- mu/extra$temp2
+ theta2eta(nvec, .link)
+ }, list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, res=FALSE,eta, extra=NULL) {
+ nvec <- mu/extra$temp2
+ sum(w * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
+ y * log(.prob / (1- .prob)) + nvec * log(1- .prob)))
+ }, list( .prob=prob ))),
+ vfamily=c("size.binomial"),
+ deriv=eval(substitute(expression({
+ nvec <- mu/extra$temp2
+ dldnvec = digamma(nvec+1) - digamma(nvec-y+1) + log(1-extra$temp2)
+ dnvecdeta <- dtheta.deta(nvec, .link)
+ w * cbind(dldnvec * dnvecdeta)
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ d2ldnvec2 <- trigamma(nvec+1) - trigamma(nvec-y+1)
+ # Note: if y==0 then d2ldnvec2 is 0. Below is a quick fix.
+ d2ldnvec2[y==0] = -sqrt(.Machine$double.eps)
+ wz = -w * dnvecdeta^2 * d2ldnvec2
+ wz
+ }), list( .link=link ))))
+}
+
+
+
+dbetabin.ab = function(x, size, shape1, shape2, log = FALSE) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(size, posit=TRUE, integer=TRUE))
+ stop("bad input for argument \"size\"")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ L = max(length(x), length(size), length(shape1), length(shape2))
+ x = rep(x, len=L); size = rep(size, len=L);
+ shape1 = rep(shape1, len=L); shape2 = rep(shape2, len=L);
+ answer = 0 * x
+ if(length(ok <- round(x) == x & x >= 0 & x <= size))
+ answer[ok] = if(log) lchoose(size[ok], x[ok]) +
+ lbeta(shape1[ok]+x[ok], shape2[ok]+size[ok]-x[ok]) -
+ lbeta(shape1[ok], shape2[ok]) else
+ choose(size[ok], x[ok]) *
+ beta(shape1[ok]+x[ok], shape2[ok]+size[ok]-x[ok]) /
+ beta(shape1[ok], shape2[ok])
+ answer
+}
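+
+# Illustrative sketch (added comment, not upstream): the probabilities sum
+# to one over the full support, e.g.
+# sum(dbetabin.ab(0:10, size=10, shape1=2, shape2=3))  # should be 1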
+
+
+pbetabin.ab = function(q, size, shape1, shape2, log.p=FALSE) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(size, posit=TRUE, integer=TRUE))
+ stop("bad input for argument \"size\"")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ N = max(length(q), length(size), length(shape1), length(shape2))
+ q = rep(q, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
+ size = rep(size, len=N);
+ ans = q * 0 # Retains names(q)
+ if(max(abs(size-size[1])) < 1.0e-08 &&
+ max(abs(shape1-shape1[1])) < 1.0e-08 &&
+ max(abs(shape2-shape2[1])) < 1.0e-08) {
+ qstar = floor(q)
+ temp = if(max(qstar) >= 0) dbetabin.ab(0:max(qstar),
+ size=size[1], shape1=shape1[1], shape2=shape2[1]) else 0*qstar
+ unq = unique(qstar)
+ for(i in unq) {
+ index = qstar == i
+ ans[index] = if(i >= 0) sum(temp[1:(1+i)]) else 0
+ }
+ } else
+ for(i in 1:N) {
+ qstar = floor(q[i])
+ ans[i] = if(qstar >= 0) sum(dbetabin.ab(x=0:qstar, size=size[i],
+ shape1=shape1[i], shape2=shape2[i])) else 0
+ }
+ if(log.p) log(ans) else ans
+}
+
+rbetabin.ab = function(n, size, shape1, shape2) {
+ if(!is.Numeric(n,integ=TRUE, allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(size, posit=TRUE, integer=TRUE))
+ stop("bad input for argument \"size\"")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ size = rep(size, len=n);
+ shape1 = rep(shape1, len=n); shape2 = rep(shape2, len=n);
+ rbinom(n=n, size=size, prob=rbeta(n=n, shape1=shape1, shape2=shape2))
+}
+
+
+dbetabin = function(x, size, prob, rho, log = FALSE) {
+ dbetabin.ab(x=x, size=size, shape1=prob*(1-rho)/rho,
+ shape2=(1-prob)*(1-rho)/rho, log=log)
+}
+
+pbetabin = function(q, size, prob, rho, log.p=FALSE) {
+ pbetabin.ab(q=q, size=size, shape1=prob*(1-rho)/rho,
+ shape2=(1-prob)*(1-rho)/rho, log.p=log.p)
+}
+
+rbetabin = function(n, size, prob, rho) {
+ rbetabin.ab(n=n, size=size, shape1=prob*(1-rho)/rho,
+ shape2=(1-prob)*(1-rho)/rho)
+}
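+
+# Sketch (added comment, not upstream): the (prob, rho) parameterisation
+# corresponds to shape1 = prob*(1-rho)/rho and shape2 = (1-prob)*(1-rho)/rho,
+# so E(Y) = size * prob; e.g.
+# mean(rbetabin(1000, size=10, prob=0.3, rho=0.2))  # should be near 3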
+
+
+expected.betabin.ab = function(nvec, shape1, shape2, first) {
+
+ n = length(nvec)
+ ans = rep(0.0, len=n)
+ if(!is.R()) {
+ lchoose = function(a,b) log(choose(a,b))
+ lbeta = function(a,b) lgamma(a) + lgamma(b) - lgamma(a+b)
+ }
+ if(first) {
+ for(i in 1:n) {
+ temp639 = lbeta(shape1[i], shape2[i])
+ for(y in 0:nvec[i])
+ ans[i] = ans[i] + trigamma(shape1[i]+y) *
+ exp(lchoose(nvec[i], y) +
+ lbeta(shape1[i]+y, shape2[i]+nvec[i]-y) - temp639)
+ }
+ } else {
+ for(i in 1:n) {
+ temp639 = lbeta(shape1[i], shape2[i])
+ for(y in 0:nvec[i])
+ ans[i] = ans[i] + trigamma(nvec[i]+shape2[i]-y) *
+ exp(lchoose(nvec[i], y) +
+ lbeta(shape1[i]+y, shape2[i]+nvec[i]-y) - temp639)
+ }
+ }
+ ans
+}
+
+
+
+
+betabin.ab = function(link.shape12="loge", i1=1, i2=NULL, zero=NULL)
+{
+ if(mode(link.shape12) != "character" && mode(link.shape12) != "name")
+ link.shape12 = as.character(substitute(link.shape12))
+ if(!is.Numeric(i1, positive=TRUE)) stop("bad input for argument \"i1\"")
+ if(length(i2) && !is.Numeric(i2, pos=TRUE))
+ stop("bad input for argument \"i2\"")
+
+ new("vglmff",
+ blurb=c("Beta-binomial model\n",
+ "Links: ",
+ namesof("shape1", link.shape12), ", ",
+ namesof("shape2", link.shape12), "\n",
+ "Variance: mu*(1-mu)[1+(w-1)*rho]/w where mu=alpha/(alpha+beta)"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ # Compute initial values for mustart -------
+ eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
+ predictors.names = c(namesof("shape1", .link.shape12, tag=FALSE),
+ namesof("shape2", .link.shape12, short=FALSE))
+
+ if(!length(etastart)) {
+ shape1 = rep( .i1, len=n)
+ shape2 = if(length( .i2)) rep( .i2,len=n) else shape1*(1/mustart-1)
+ ycounts = y * w # Convert proportions to counts
+ if(max(abs(ycounts-round(ycounts))) > 1.0e-6)
+ stop("ycounts not integer")
+ ycounts = round(ycounts) # Make sure it is an integer
+ etastart = cbind(theta2eta(shape1, .link.shape12),
+ theta2eta(shape2, .link.shape12))
+ }
+ }), list( .link.shape12=link.shape12, .i1=i1 , .i2=i2 ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape1 = eta2theta(eta[,1], .link.shape12)
+ shape2 = eta2theta(eta[,2], .link.shape12)
+ shape1 / (shape1 + shape2)
+ }, list( .link.shape12=link.shape12 ))),
+ last=eval(substitute(expression({
+ misc$link = c("shape1" = .link.shape12, "shape2" = .link.shape12)
+ shape1 = eta2theta(eta[,1], .link.shape12)
+ shape2 = eta2theta(eta[,2], .link.shape12)
+ misc$rho = 1 / (shape1 + shape2 + 1)
+ misc$expected = TRUE
+ }), list( .link.shape12=link.shape12 ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE,eta, extra=NULL) {
+ ycounts = y * w # Convert proportions to counts
+ shape1 = eta2theta(eta[,1], .link.shape12)
+ shape2 = eta2theta(eta[,2], .link.shape12)
+ nvec = w
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R()) sum(lbeta(shape1+ycounts, shape2+nvec-ycounts) -
+ lbeta(shape1, shape2)) else
+ sum(lgamma(shape1+ycounts) + lgamma(shape2+nvec-ycounts) -
+ lgamma(shape1+shape2+nvec) -
+ (lgamma(shape1) + lgamma(shape2) - lgamma(shape1+shape2)))
+ }
+ }, list( .link.shape12=link.shape12 ))),
+ vfamily=c("betabin.ab"),
+ deriv=eval(substitute(expression({
+ nvec = w # extra$nvec # for summary()
+ ycounts = y * w # Convert proportions to counts
+ shape1 = eta2theta(eta[,1], .link.shape12)
+ shape2 = eta2theta(eta[,2], .link.shape12)
+ dshape1.deta = dtheta.deta(shape1, .link.shape12)
+ dshape2.deta = dtheta.deta(shape2, .link.shape12)
+ dl.dshape1 = digamma(shape1+ycounts) - digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1+shape2)
+ dl.dshape2 = digamma(nvec+shape2-ycounts) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape2) + digamma(shape1+shape2)
+ cbind(dl.dshape1 * dshape1.deta, dl.dshape2 * dshape2.deta)
+ }), list( .link.shape12=link.shape12 ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz[,iam(1,1,M)] = -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2)) *
+ dshape1.deta^2
+ wz[,iam(2,2,M)] = -(expected.betabin.ab(nvec, shape1, shape2, FALSE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2)) *
+ dshape2.deta^2
+ wz[,iam(2,1,M)] = -(trigamma(shape1+shape2) -
+ trigamma(shape1+shape2+nvec)) *
+ dshape1.deta * dshape2.deta
+ wz
+ }), list( .link.shape12=link.shape12 ))))
+}
+
+
+
+betageometric = function(lprob="logit", lshape="loge",
+ iprob = NULL, ishape = 0.1,
+ moreSummation=c(2,100), tolerance=1.0e-10, zero=NULL)
+{
+ if(mode(lprob) != "character" && mode(lprob) != "name")
+ lprob = as.character(substitute(lprob))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(!is.Numeric(ishape, positive=TRUE))
+ stop("bad input for argument \"ishape\"")
+ if(!is.Numeric(moreSummation, positive=TRUE, allow=2, integ=TRUE))
+ stop("bad input for argument \"moreSummation\"")
+ if(!is.Numeric(tolerance, positive=TRUE, allow=1) || 1.0-tolerance >= 1.0)
+ stop("bad input for argument \"tolerance\"")
+
+ new("vglmff",
+ blurb=c("Beta-geometric distribution\n",
+ "Links: ", namesof("prob", lprob), ", ",
+ namesof("shape", lshape)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ eval(geometric()@initialize)
+ predictors.names = c(namesof("prob", .lprob, tag=FALSE),
+ namesof("shape", .lshape, short=FALSE))
+ if(length( .iprob))
+ prob.init = rep( .iprob, len=n)
+ if(!length(etastart) || ncol(cbind(etastart)) != 2) {
+ shape.init = rep( .ishape, len=n)
+ etastart = cbind(theta2eta(prob.init, .lprob),
+ theta2eta(shape.init, .lshape))
+ }
+ }), list( .iprob=iprob, .ishape=ishape, .lprob=lprob, .lshape=lshape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ prob = eta2theta(eta[,1], .lprob)
+ shape = eta2theta(eta[,2], .lshape)
+ mymu = (1-prob) / (prob - shape)
+ ifelse(mymu >= 0, mymu, NA)
+ }, list( .lprob=lprob, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c("prob" = .lprob, "shape" = .lshape)
+ if(intercept.only) {
+ misc$shape1 = shape1[1] # These quantities computed in @deriv
+ misc$shape2 = shape2[1]
+ }
+ misc$expected = TRUE
+ misc$tolerance = .tolerance
+ misc$zero = .zero
+ misc$moreSummation = .moreSummation
+ }), list( .lprob=lprob, .lshape=lshape, .tolerance=tolerance,
+ .moreSummation=moreSummation, .zero=zero ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE,eta, extra=NULL) {
+ prob = eta2theta(eta[,1], .lprob)
+ shape = eta2theta(eta[,2], .lshape)
+ ans = log(prob)
+ maxy = max(y)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ for(ii in 1:maxy) {
+ index = ii <= y
+ ans[index]=ans[index] + log(1-prob[index]+(ii-1)*shape[index])-
+ log(1+(ii-1)*shape[index])
+ }
+ ans = ans - log(1+(y+1-1)*shape)
+ sum(w * ans)
+ }
+ }, list( .lprob=lprob, .lshape=lshape ))),
+ vfamily=c("betageometric"),
+ deriv=eval(substitute(expression({
+ prob = eta2theta(eta[,1], .lprob)
+ shape = eta2theta(eta[,2], .lshape)
+ shape1 = prob / shape; shape2 = (1-prob) / shape;
+ dprob.deta = dtheta.deta(prob, .lprob)
+ dshape.deta = dtheta.deta(shape, .lshape)
+ dl.dprob = 1 / prob
+ dl.dshape = 0 * y
+ maxy = max(y)
+ for(ii in 1:maxy) {
+ index = ii <= y
+ dl.dprob[index] = dl.dprob[index] -
+ 1/(1-prob[index]+(ii-1)*shape[index])
+ dl.dshape[index] = dl.dshape[index] +
+ (ii-1)/(1-prob[index]+(ii-1)*shape[index]) -
+ (ii-1)/(1+(ii-1)*shape[index])
+ }
+ dl.dshape = dl.dshape - (y+1 -1)/(1+(y+1 -1)*shape)
+ w * cbind(dl.dprob * dprob.deta, dl.dshape * dshape.deta)
+ }), list( .lprob=lprob, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ wz = matrix(0, n, dimm(M)) #3=dimm(2)
+ wz[,iam(1,1,M)] = 1 / prob^2
+ moresum = .moreSummation
+ maxsummation = round(maxy * moresum[1] + moresum[2])
+ for(ii in 3:maxsummation) {
+ temp7 = 1 - pbetageom(q=ii-1-1, shape1=shape1, shape2=shape2)
+ denom1 = (1-prob+(ii-2)*shape)^2
+ denom2 = (1+(ii-2)*shape)^2
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp7 / denom1
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] - (ii-2) * temp7 / denom1
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + (ii-2)^2 * temp7 / denom1 -
+ (ii-1)^2 * temp7 / denom2
+ if(max(temp7) < .tolerance ) break;
+ }
+ ii = 2
+ temp7 = 1 - pbetageom(q=ii-1-1, shape1=shape1, shape2=shape2)
+ denom1 = (1-prob+(ii-2)*shape)^2
+ denom2 = (1+(ii-2)*shape)^2
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp7 / denom1
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - (ii-1)^2 * temp7 / denom2
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dprob.deta^2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dshape.deta^2
+ wz[,iam(2,1,M)] = wz[,iam(2,1,M)] * dprob.deta * dshape.deta
+ w * wz
+ }), list( .lprob=lprob, .lshape=lshape, .moreSummation=moreSummation,
+ .tolerance=tolerance ))))
+}
+
+
+
+
+
+
+
diff --git a/R/family.bivariate.q b/R/family.bivariate.q
new file mode 100644
index 0000000..2d8af1d
--- /dev/null
+++ b/R/family.bivariate.q
@@ -0,0 +1,615 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+bilogistic4.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+bilogistic4 = function(llocation="identity",
+ lscale="loge",
+ iloc1=NULL, iscale1=NULL,
+ iloc2=NULL, iscale2=NULL,
+ method.init=1, zero=NULL) {
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2) stop("method.init must be 1 or 2")
+
+ new("vglmff",
+ blurb=c("Bivariate logistic distribution\n\n",
+ "Link: ",
+ namesof("location1", llocation), ", ",
+ namesof("scale1", lscale), ", ",
+ namesof("location2", llocation), ", ",
+ namesof("scale2", lscale),
+ "\n", "\n",
+ "Means: location1, location2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ if(!is.matrix(y) || ncol(y) != 2)
+ stop("the response must be a 2 column matrix")
+ predictors.names = c(namesof("location1", .llocation, tag= FALSE),
+ namesof("scale1", .lscale, tag= FALSE),
+ namesof("location2", .llocation, tag= FALSE),
+ namesof("scale2", .lscale, tag= FALSE))
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ location.init1 = y[,1]
+ scale.init1 = sqrt(3) * sd(y[,1]) / pi
+ location.init2 = y[,2]
+ scale.init2 = sqrt(3) * sd(y[,2]) / pi
+ } else {
+ location.init1 = median(rep(y[,1], w))
+ location.init2 = median(rep(y[,2], w))
+ scale.init1 = sqrt(3*sum(w*(y[,1]-location.init1)^2)/sum(w))/pi
+ scale.init2 = sqrt(3*sum(w*(y[,2]-location.init2)^2)/sum(w))/pi
+ }
+ loc1.init = if(length(.iloc1)) rep(.iloc1, len=n) else
+ rep(location.init1, len=n)
+ loc2.init = if(length(.iloc2)) rep(.iloc2, len=n) else
+ rep(location.init2, len=n)
+ scale1.init = if(length(.iscale1)) rep(.iscale1, len=n) else
+ rep(scale.init1, len=n)
+ scale2.init = if(length(.iscale2)) rep(.iscale2, len=n) else
+ rep(scale.init2, len=n)
+ if(.llocation=="loge") loc1.init = abs(loc1.init) + 0.001
+ if(.llocation=="loge") loc2.init = abs(loc2.init) + 0.001
+ etastart = cbind(theta2eta(loc1.init, .llocation),
+ theta2eta(scale1.init, .lscale),
+ theta2eta(loc2.init, .llocation),
+ theta2eta(scale2.init, .lscale))
+ }
+ }), list(.method.init=method.init, .iloc1=iloc1, .iloc2=iloc2,
+ .llocation=llocation,
+ .iscale1=iscale1, .iscale2=iscale2, .lscale=lscale))),
+ inverse=function(eta, extra=NULL) {
+ cbind(eta[,1], eta[,2])
+ },
+ last=eval(substitute(expression({
+ misc$link = c(location1= .llocation, scale1= .lscale,
+ location2= .llocation, scale2= .lscale)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list(.lscale=lscale, .llocation=llocation))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ loc1 = eta2theta(eta[,1], .llocation)
+ Scale1 = eta2theta(eta[,2], .lscale)
+ loc2 = eta2theta(eta[,3], .llocation)
+ Scale2 = eta2theta(eta[,4], .lscale)
+ zedd1 = (y[,1]-loc1) / Scale1
+ zedd2 = (y[,2]-loc2) / Scale2
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-zedd1 - zedd2 - 3 * log(1+exp(-zedd1)+exp(-zedd2)) -
+ log(Scale1) - log(Scale2)))
+ }, list(.lscale=lscale, .llocation=llocation))),
+ vfamily=c("bilogistic4"),
+ deriv=eval(substitute(expression({
+ loc1 = eta2theta(eta[,1], .llocation)
+ Scale1 = eta2theta(eta[,2], .lscale)
+ loc2 = eta2theta(eta[,3], .llocation)
+ Scale2 = eta2theta(eta[,4], .lscale)
+ zedd1 = (y[,1]-loc1) / Scale1
+ zedd2 = (y[,2]-loc2) / Scale2
+ ezedd1 = exp(-zedd1)
+ ezedd2 = exp(-zedd2)
+ denom = 1 + ezedd1 + ezedd2
+ dl.dloc1 = (1 - 3 * ezedd1 / denom) / Scale1
+ dl.dloc2 = (1 - 3 * ezedd2 / denom) / Scale2
+ dl.dscale1 = (zedd1 - 1 - 3 * ezedd1 * zedd1 / denom) / Scale1
+ dl.dscale2 = (zedd2 - 1 - 3 * ezedd2 * zedd2 / denom) / Scale2
+ dloc1.deta = dtheta.deta(loc1, .llocation)
+ dloc2.deta = dtheta.deta(loc2, .llocation)
+ dscale1.deta = dtheta.deta(Scale1, .lscale)
+ dscale2.deta = dtheta.deta(Scale2, .lscale)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dloc1 * dloc1.deta,
+ dl.dscale1 * dscale1.deta,
+ dl.dloc2 * dloc2.deta,
+ dl.dscale2 * dscale2.deta)
+ derivnew
+ }), list(.lscale=lscale, .llocation=llocation))),
+ weight=eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list(.lscale=lscale, .llocation=llocation))))
+}
+
+
+
+rbilogis4 = function(n, loc1=0, scale1=1, loc2=0, scale2=1) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
+ if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for \"scale1\"")
+ if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for \"scale2\"")
+ y1 = rlogis(n, loc=loc1, scale=scale1)
+ ezedd1 = exp(-(y1-loc1)/scale1)
+ y2 = loc2 - scale2 * log(1/sqrt(runif(n) / (1 + ezedd1)^2) - 1 - ezedd1)
+ cbind(y1, y2)
+}
+
+pbilogis4 = function(q1, q2, loc1=0, scale1=1, loc2=0, scale2=1) {
+ if(!is.Numeric(q1)) stop("bad input for \"q1\"")
+ if(!is.Numeric(q2)) stop("bad input for \"q2\"")
+ if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for \"scale1\"")
+ if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for \"scale2\"")
+
+
+ 1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
+}
+
+dbilogis4 = function(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1) {
+ if(!is.Numeric(x1)) stop("bad input for \"x1\"")
+ if(!is.Numeric(x2)) stop("bad input for \"x2\"")
+ if(!is.Numeric(scale1, posit=TRUE)) stop("bad input for \"scale1\"")
+ if(!is.Numeric(scale2, posit=TRUE)) stop("bad input for \"scale2\"")
+ ezedd1 = exp(-(x1-loc1)/scale1)
+ ezedd2 = exp(-(x2-loc2)/scale2)
+ 2 * ezedd1 * ezedd2 / (scale1 * scale2 * (1 + ezedd1 + ezedd2)^3)
+}
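+
+# Illustrative sketch (added comment, not upstream): the marginals of this
+# bivariate logistic are univariate logistic, so the column means of
+# rbilogis4(1000, loc1=1, loc2=-1) should be near (1, -1).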
+
+
+
+
+freund61 = function(la="loge",
+ lap="loge",
+ lb="loge",
+ lbp="loge",
+ ia=NULL, iap=NULL, ib=NULL, ibp=NULL,
+ independent=FALSE,
+ zero=NULL) {
+ if(mode(la) != "character" && mode(la) != "name")
+ la = as.character(substitute(la))
+ if(mode(lap) != "character" && mode(lap) != "name")
+ lap = as.character(substitute(lap))
+ if(mode(lb) != "character" && mode(lb) != "name")
+ lb = as.character(substitute(lb))
+ if(mode(lbp) != "character" && mode(lbp) != "name")
+ lbp = as.character(substitute(lbp))
+
+ new("vglmff",
+ blurb=c("Freund (1961) Bivariate Exponential Distribution\n",
+ "Links: ",
+ namesof("a", la), ", ",
+ namesof("ap", lap), ", ",
+ namesof("b", lb), ", ",
+ namesof("bp", lbp)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1,1,0,0, 0,0,1,1),M,2), x,
+ .independent, constraints, intercept.apply=TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.independent=independent, .zero=zero))),
+ initialize=eval(substitute(expression({
+ if(!is.matrix(y) || ncol(y) != 2)
+ stop("the response must be a 2 column matrix")
+ predictors.names = c(namesof("a", .la, short=TRUE),
+ namesof("ap", .lap, short=TRUE),
+ namesof("b", .lb, short=TRUE),
+ namesof("bp", .lbp, short=TRUE))
+ extra$y1.lt.y2 = y[,1] < y[,2]
+ if(!(arr <- sum(extra$y1.lt.y2)) || arr==n)
+ stop("identifiability problem: either all y1<y2 or y2<y1")
+ if(!length(etastart)) {
+ sumx = sum(y[extra$y1.lt.y2,1]); sumxp = sum(y[!extra$y1.lt.y2,1])
+ sumy = sum(y[extra$y1.lt.y2,2]); sumyp = sum(y[!extra$y1.lt.y2,2])
+ if(FALSE) { # Noise:
+ arr = min(arr + n/10, n*0.95)
+ sumx = sumx * 1.1; sumxp = sumxp * 1.2;
+ sumy = sumy * 1.2; sumyp = sumyp * 1.3;
+ }
+ ainit = if(length(.ia)) rep(.ia, len=n) else arr / (sumx + sumyp)
+ apinit = if(length(.iap)) rep(.iap,len=n) else (n-arr)/(sumxp-sumyp)
+ binit = if(length(.ib)) rep(.ib, len=n) else (n-arr)/(sumx +sumyp)
+ bpinit = if(length(.ibp)) rep(.ibp,len=n) else arr / (sumy - sumx)
+ etastart = cbind(theta2eta(rep(ainit, len=n), .la),
+ theta2eta(rep(apinit, len=n), .lap),
+ theta2eta(rep(binit, len=n), .lb),
+ theta2eta(rep(bpinit, len=n), .lbp))
+ }
+ }), list(.la=la, .lap=lap, .lb=lb, .lbp=lbp, .ia=ia, .iap=iap,
+ .ib=ib, .ibp=ibp))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ alpha = eta2theta(eta[,1], .la)
+ alphap = eta2theta(eta[,2], .lap)
+ beta = eta2theta(eta[,3], .lb)
+ betap = eta2theta(eta[,4], .lbp)
+ cbind((alphap+beta) / (alphap*(alpha+beta)),
+ (alpha+betap) / (betap*(alpha+beta)))
+ }, list(.la=la, .lap=lap, .lb=lb, .lbp=lbp))),
+ last=eval(substitute(expression({
+ misc$link = c("a"= .la, "ap"= .lap, "b"= .lb, "bp"= .lbp)
+ }), list(.la=la, .lap=lap, .lb=lb, .lbp=lbp))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ alpha = eta2theta(eta[,1], .la)
+ alphap = eta2theta(eta[,2], .lap)
+ beta = eta2theta(eta[,3], .lb)
+ betap = eta2theta(eta[,4], .lbp)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ tmp88 = extra$y1.lt.y2
+ ell1 = log(alpha[tmp88]) + log(betap[tmp88]) -
+ betap[tmp88] * y[tmp88,2] -
+ (alpha+beta-betap)[tmp88] * y[tmp88,1]
+ ell2 = log(beta[!tmp88]) + log(alphap[!tmp88]) -
+ alphap[!tmp88] * y[!tmp88,1] -
+ (alpha+beta-alphap)[!tmp88] * y[!tmp88,2]
+ sum(w[tmp88] * ell1) + sum(w[!tmp88] * ell2) }
+ }, list(.la=la, .lap=lap, .lb=lb, .lbp=lbp))),
+ vfamily=c("freund61"),
+ deriv=eval(substitute(expression({
+ tmp88 = extra$y1.lt.y2
+ alpha = eta2theta(eta[,1], .la)
+ alphap = eta2theta(eta[,2], .lap)
+ beta = eta2theta(eta[,3], .lb)
+ betap = eta2theta(eta[,4], .lbp)
+ d1 = 1/alpha - y[,1]
+ d1[!tmp88] = -y[!tmp88,2]
+ d2 = 0 * alphap
+ d2[!tmp88] = 1/alphap[!tmp88] - y[!tmp88,1] + y[!tmp88,2]
+ d3 = -y[,1]
+ d3[!tmp88] = 1/beta[!tmp88] - y[!tmp88,2]
+ d4 = 1/betap - y[,2] + y[,1]
+ d4[!tmp88] = 0
+ w * cbind(d1 * dtheta.deta(alpha, .la),
+ d2 * dtheta.deta(alphap, .lap),
+ d3 * dtheta.deta(beta, .lb),
+ d4 * dtheta.deta(betap, .lbp))
+ }), list(.la=la, .lap=lap, .lb=lb, .lbp=lbp))),
+ weight=eval(substitute(expression({
+ py1.lt.y2 = alpha / (alpha+beta)
+ d11 = py1.lt.y2 / alpha^2
+ d22 = (1-py1.lt.y2) / alphap^2
+ d33 = (1-py1.lt.y2) / beta^2
+ d44 = py1.lt.y2 / betap^2
+ wz = matrix(0, n, M) # diagonal
+ wz[,iam(1,1,M)] = dtheta.deta(alpha, .la)^2 * d11
+ wz[,iam(2,2,M)] = dtheta.deta(alphap, .lap)^2 * d22
+ wz[,iam(3,3,M)] = dtheta.deta(beta, .lb)^2 * d33
+ wz[,iam(4,4,M)] = dtheta.deta(betap, .lbp)^2 * d44
+ w * wz
+ }), list(.la=la, .lap=lap, .lb=lb, .lbp=lbp))))
+}
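+# Editor's sketch: an intercept-only freund61() fit. Independent
+# exponentials yield a mix of y1<y2 and y1>y2 orderings, which the
+# initializer requires.
+# fdat = data.frame(y1 = rexp(200), y2 = rexp(200))
+# fit = vglm(cbind(y1, y2) ~ 1, freund61, data=fdat)
+# coef(fit, matrix=TRUE)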
+
+
+
+
+
+mckaygamma2 = function(la="loge",
+ lp="loge",
+ lq="loge",
+ ia=NULL,
+ ip=1,
+ iq=1,
+ zero=NULL) {
+ if(mode(la) != "character" && mode(la) != "name")
+ la = as.character(substitute(la))
+ if(mode(lp) != "character" && mode(lp) != "name")
+ lp = as.character(substitute(lp))
+ if(mode(lq) != "character" && mode(lq) != "name")
+ lq = as.character(substitute(lq))
+ if(!is.Numeric(ip, positive = TRUE) || !is.Numeric(iq, positive = TRUE))
+ stop("initial values for ip and iq must be positive")
+ if(is.Numeric(ia) && any(ia <= 0))
+ stop("ia must be positive or NULL")
+
+ new("vglmff",
+ blurb=c("McKay's Bivariate Gamma Distribution\n",
+ "Links: ",
+ namesof("a", la), ", ",
+ namesof("p", lp), ", ",
+ namesof("q", lq)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ if(!is.matrix(y) || ncol(y) != 2)
+ stop("the response must be a 2 column matrix")
+ sorty1 = pmin(y[,1], y[,2])
+ sorty2 = pmax(y[,1], y[,2])
+ if(any(sorty2-sorty1 <= 0))
+ stop("delete observations whose y1 and y2 values are identical")
+ predictors.names = c(namesof("a", .la, short=TRUE),
+ namesof("p", .lp, short=TRUE),
+ namesof("q", .lq, short=TRUE))
+ if(!length(etastart)) {
+ pinit = rep(.ip, len=n)
+ qinit = rep(.iq, len=n)
+ # Crude but adequate start for "a", given pinit and qinit
+ ainit = if(length(.ia)) rep(.ia, len=n) else (pinit+qinit)/(sorty1+0.1)
+ etastart = cbind(theta2eta(ainit, .la),
+ theta2eta(pinit, .lp),
+ theta2eta(qinit, .lq))
+ }
+ }), list(.la=la, .lp=lp, .lq=lq, .ia=ia, .ip=ip, .iq=iq))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ a = eta2theta(eta[,1], .la)
+ p = eta2theta(eta[,2], .lp)
+ q = eta2theta(eta[,3], .lq)
+ cbind("pmin(y1,y2)"=(p+q)/a, "pmax(y1,y2)"=NA)
+ }, list(.la=la, .lp=lp, .lq=lq))),
+ last=eval(substitute(expression({
+ misc$link = c("a"= .la, "p"= .lp, "q"= .lq)
+ }), list(.la=la, .lp=lp, .lq=lq))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ a = eta2theta(eta[,1], .la)
+ p = eta2theta(eta[,2], .lp)
+ q = eta2theta(eta[,3], .lq)
+ y = cbind(pmin(y[,1], y[,2]), pmax(y[,1], y[,2])) # Sort so y[,1]<y[,2]
+ # After sorting, y[,1] < y[,2] holds, which the log-likelihood requires.
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((p+q)*log(a) - lgamma(p) - lgamma(q) +
+ (p-1)*log(y[,1]) + (q-1)*log(y[,2]-y[,1]) - a*y[,2] ))
+ }, list(.la=la, .lp=lp, .lq=lq))),
+ vfamily=c("mckaygamma2"),
+ deriv=eval(substitute(expression({
+ a = eta2theta(eta[,1], .la)
+ p = eta2theta(eta[,2], .lp)
+ q = eta2theta(eta[,3], .lq)
+ sorty = y
+ sorty[,1] = pmin(y[,1], y[,2])
+ sorty[,2] = pmax(y[,1], y[,2])
+ d1 = (p+q)/a - sorty[,2]
+ d2 = log(a) - digamma(p) + log(sorty[,1])
+ d3 = log(a) - digamma(q) + log(sorty[,2]-sorty[,1])
+ w * cbind(d1 * dtheta.deta(a, .la),
+ d2 * dtheta.deta(p, .lp),
+ d3 * dtheta.deta(q, .lq))
+ }), list(.la=la, .lp=lp, .lq=lq))),
+ weight=eval(substitute(expression({
+ d11 = (p+q)/a^2
+ d22 = trigamma(p)
+ d33 = trigamma(q)
+ d12 = -1/a
+ d13 = -1/a
+ d23 = 0
+ wz = matrix(0, n, dimm(M))
+ wz[,iam(1,1,M)] = dtheta.deta(a, .la)^2 * d11
+ wz[,iam(2,2,M)] = dtheta.deta(p, .lp)^2 * d22
+ wz[,iam(3,3,M)] = dtheta.deta(q, .lq)^2 * d33
+ wz[,iam(1,2,M)] = dtheta.deta(a, .la) * dtheta.deta(p, .lp) * d12
+ wz[,iam(1,3,M)] = dtheta.deta(a, .la) * dtheta.deta(q, .lq) * d13
+ wz[,iam(2,3,M)] = dtheta.deta(p, .lp) * dtheta.deta(q, .lq) * d23
+ w * wz
+ }), list(.la=la, .lp=lp, .lq=lq))))
+}
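+# Editor's sketch: y2 = y1 + an independent gamma is exactly McKay's
+# construction (y1 ~ Gamma(p,a), y2-y1 ~ Gamma(q,a)), so the pairs are
+# already ordered and distinct.
+# g1 = rgamma(100, shape=2); g2 = g1 + rgamma(100, shape=3)
+# fit = vglm(cbind(g1, g2) ~ 1, mckaygamma2, trace=TRUE)
+# Coef(fit)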
+
+
+
+rfrank = function(n, alpha) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE)) stop("bad input for n")
+ if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for \"alpha\"")
+ alpha = rep(alpha, len=n)
+ U = runif(n)
+ V = runif(n)
+ Tee = alpha^U + (alpha - alpha^U) * V # avoid masking base R's T (TRUE)
+ X = U
+ index = abs(alpha-1) < .Machine$double.eps
+ Y = U
+ if(any(!index))
+ Y[!index] = logb(Tee[!index]/(Tee[!index]+(1-alpha[!index])*V[!index]),
+ base=alpha[!index])
+ ans = matrix(c(X,Y), nrow=n, ncol=2) # Want to suppress column names
+ if(any(index)) {
+ ans[index,1] = runif(sum(index)) # Uniform density for alpha==1
+ ans[index,2] = runif(sum(index))
+ }
+ ans
+}
+
+pfrank = function(q1, q2, alpha) {
+ if(!is.Numeric(q1)) stop("bad input for \"q1\"")
+ if(!is.Numeric(q2)) stop("bad input for \"q2\"")
+ if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for \"alpha\"")
+
+ L = max(length(q1), length(q2), length(alpha))
+ alpha = rep(alpha, len=L)
+ q1 = rep(q1, len=L)
+ q2 = rep(q2, len=L)
+
+ x=q1; y=q2
+ index = (x>=1 & y<1) | (y>=1 & x<1) | (x<=0 | y<=0) | (x>=1 & y>=1) |
+ (abs(alpha-1) < .Machine$double.eps)
+ ans = as.numeric(index)
+ if(any(!index))
+ ans[!index] = logb(1 + ((alpha[!index])^(x[!index])-1)*
+ ((alpha[!index])^(y[!index])-1)/(alpha[!index]-1),
+ base=alpha[!index])
+ ind2 = (abs(alpha-1) < .Machine$double.eps)
+ ans[ind2] = x[ind2] * y[ind2]
+ ans[x>=1 & y<1] = y[x>=1 & y<1] # P(Y2 < q2) = q2
+ ans[y>=1 & x<1] = x[y>=1 & x<1] # P(Y1 < q1) = q1
+ ans[x<=0 | y<=0] = 0
+ ans[x>=1 & y>=1] = 1
+ ans
+}
+
+dfrank = function(x1, x2, alpha) {
+ if(!is.Numeric(x1)) stop("bad input for \"x1\"")
+ if(!is.Numeric(x2)) stop("bad input for \"x2\"")
+ if(!is.Numeric(alpha, posit=TRUE)) stop("bad input for \"alpha\"")
+
+ L = max(length(x1), length(x2), length(alpha))
+ alpha = rep(alpha, len=L)
+ x1 = rep(x1, len=L)
+ x2 = rep(x2, len=L)
+
+ temp = (alpha-1) + (alpha^x1 - 1) * (alpha^x2 - 1)
+ index = (abs(alpha-1) < .Machine$double.eps)
+ ans = x1
+ if(any(!index))
+ ans[!index] = (alpha[!index]-1) * log(alpha[!index]) *
+ (alpha[!index])^(x1[!index]+x2[!index]) / (temp[!index])^2
+ ans[x1<=0 | x2<=0 | x1>=1 | x2>=1] = 0
+ ans[index] = 1
+ ans
+}
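+# Editor's sketch: the alpha==1 branch is the independence copula.
+# pfrank(0.5, 0.5, alpha=1)   # 0.25 = 0.5 * 0.5
+# dfrank(0.5, 0.5, alpha=1)   # 1 (uniform density on the unit square)
+# rr = rfrank(5, alpha=exp(2)); pfrank(rr[,1], rr[,2], alpha=exp(2))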
+
+
+
+
+frank = function(lcorp="loge", icorp=2) {
+ if(mode(lcorp) != "character" && mode(lcorp) != "name")
+ lcorp = as.character(substitute(lcorp))
+ if(!is.Numeric(icorp, positive = TRUE))
+ stop("\"icorp\" must be positive")
+
+ new("vglmff",
+ blurb=c("Frank's Bivariate Distribution\n",
+ "Links: ",
+ namesof("corp", lcorp)),
+ initialize=eval(substitute(expression({
+ if(!is.matrix(y) || ncol(y) != 2)
+ stop("the response must be a 2 column matrix")
+ if(any(y <= 0) || any(y >= 1))
+ stop("the response must have values between 0 and 1")
+ predictors.names = c(namesof("corp", .lcorp, short=TRUE))
+ if(!length(etastart)) {
+ corp.init = rep(.icorp, len=n)
+ etastart = cbind(theta2eta(corp.init, .lcorp))
+ }
+ }), list(.lcorp=lcorp, .icorp=icorp))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ corp = eta2theta(eta, .lcorp)
+ cbind(rep(0.5, len=length(eta)), rep(0.5, len=length(eta)))
+ }, list(.lcorp=lcorp))),
+ last=eval(substitute(expression({
+ misc$link = c("corp"= .lcorp)
+ misc$pooled.weight = pooled.weight
+ }), list(.lcorp=lcorp))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ corp = eta2theta(eta, .lcorp)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ denom = corp-1 + (corp^y[,1] -1) * (corp^y[,2] -1)
+ denom = abs(denom) # Needed; Genest (1987) uses this too, eqn (4.1)
+ sum(w * (log((corp-1) * log(corp)) + (y[,1]+y[,2])*log(corp) -
+ 2 * log(denom)))
+ }
+ }, list(.lcorp=lcorp))),
+ vfamily=c("frank"),
+ deriv=eval(substitute(expression({
+ corp = eta2theta(eta, .lcorp)
+ denom = corp-1 + (corp^y[,1] -1) * (corp^y[,2] -1)
+ tmp700 = 2*corp^(y[,1]+y[,2]) - corp^y[,1] - corp^y[,2]
+ numerator = 1 + y[,1] * corp^(y[,1]-1) * (corp^y[,2] -1) +
+ y[,2] * corp^(y[,2]-1) * (corp^y[,1] -1)
+ Dl.dcorp = 1/(corp-1) + 1/(corp*log(corp)) + (y[,1]+y[,2])/corp -
+ 2 * numerator / denom
+ dcorp.deta = dtheta.deta(corp, .lcorp)
+
+ w * Dl.dcorp * dcorp.deta
+ }), list(.lcorp=lcorp))),
+ weight=eval(substitute(expression({
+ nump = corp^(y[,1]+y[,2]-2) * (2 * y[,1] * y[,2] +
+ y[,1]*(y[,1]-1) + y[,2]*(y[,2]-1)) -
+ y[,1]*(y[,1]-1) * corp^(y[,1]-2) -
+ y[,2]*(y[,2]-1) * corp^(y[,2]-2)
+ D2l.dcorp2 = 1/(corp-1)^2 + (1+log(corp))/(corp*log(corp))^2 +
+ (y[,1]+y[,2])/corp^2 + 2 *
+ (nump / denom - (numerator/denom)^2)
+ d2corp.deta2 = d2theta.deta2(corp, .lcorp)
+ wz = w * (dcorp.deta^2 * D2l.dcorp2 - Dl.dcorp * d2corp.deta2)
+
+ if(intercept.only) {
+ wz = cbind(wz)
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list(.lcorp=lcorp))))
+}
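+# Editor's sketch: recovering a known association parameter.
+# ydat = rfrank(500, alpha=exp(2))
+# fit = vglm(ydat ~ 1, frank, trace=TRUE)
+# Coef(fit)   # should be near exp(2)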
+
+
+
+gammahyp = function(ltheta="loge", itheta=NULL, expected=FALSE) {
+ if(mode(ltheta) != "character" && mode(ltheta) != "name")
+ ltheta = as.character(substitute(ltheta))
+ if(!is.logical(expected) || length(expected)!=1)
+ stop("\"expected\" must be a single logical")
+
+ new("vglmff",
+ blurb=c("Gamma Hyperbola Bivariate Distribution\n",
+ "Links: ",
+ namesof("theta", ltheta)),
+ initialize=eval(substitute(expression({
+ if(!is.matrix(y) || ncol(y) != 2)
+ stop("the response must be a 2 column matrix")
+ if(any(y[,1] <= 0) || any(y[,2] <= 1))
+ stop("the response has values that are out of range")
+ predictors.names = c(namesof("theta", .ltheta, short=TRUE))
+ if(!length(etastart)) {
+ theta.init = if(length( .itheta)) rep(.itheta, len=n) else {
+ 1 / (y[,2] - 1 + 0.01)
+ }
+ etastart = cbind(theta2eta(theta.init, .ltheta))
+ }
+ }), list(.ltheta=ltheta, .itheta=itheta))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ theta = eta2theta(eta, .ltheta)
+ cbind(theta*exp(theta), 1+1/theta)
+ }, list(.ltheta=ltheta))),
+ last=eval(substitute(expression({
+ misc$link = c("theta"= .ltheta)
+ misc$expected = .expected
+ }), list(.ltheta=ltheta, .expected=expected))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ theta = eta2theta(eta, .ltheta)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ sum(w * (-exp(-theta)*y[,1]/theta - theta*y[,2]))
+ }
+ }, list(.ltheta=ltheta))),
+ vfamily=c("gammahyp"),
+ deriv=eval(substitute(expression({
+ theta = eta2theta(eta, .ltheta)
+ Dl.dtheta = exp(-theta) * y[,1] * (1+theta) / theta^2 - y[,2]
+ Dtheta.deta = dtheta.deta(theta, .ltheta)
+ w * Dl.dtheta * Dtheta.deta
+ }), list(.ltheta=ltheta))),
+ weight=eval(substitute(expression({
+ temp300 = 2 + theta * (2 + theta)
+ if( .expected) {
+ D2l.dtheta2 = temp300 / theta^2
+ wz = w * Dtheta.deta^2 * D2l.dtheta2
+ } else {
+ D2l.dtheta2 = temp300 * y[,1] * exp(-theta) / theta^3
+ D2theta.deta2 = d2theta.deta2(theta, .ltheta)
+ wz = w * (Dtheta.deta^2 * D2l.dtheta2 - Dl.dtheta * D2theta.deta2)
+ }
+ wz
+ }), list( .expected=expected, .ltheta=ltheta))))
+}
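+# Editor's sketch: data generated to match the likelihood above, i.e.
+# y1 ~ Exp(rate = exp(-theta)/theta) and y2-1 ~ Exp(rate = theta), so the
+# fitted means should approach (theta*exp(theta), 1 + 1/theta).
+# theta = 0.5
+# gdat = data.frame(y1 = rexp(100, rate = exp(-theta)/theta),
+#                   y2 = 1 + rexp(100, rate = theta))
+# fit = vglm(cbind(y1, y2) ~ 1, gammahyp(expected=TRUE), data=gdat)
+# fitted(fit)[1,]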
+
+
+
+
+
diff --git a/R/family.categorical.q b/R/family.categorical.q
new file mode 100644
index 0000000..5e2c4bd
--- /dev/null
+++ b/R/family.categorical.q
@@ -0,0 +1,1107 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+process.categorical.data.vgam = expression({
+
+
+ if(!is.matrix(y)) {
+ yf = as.factor(y)
+ lev = levels(yf)
+ llev = length(lev)
+ nn = length(yf)
+ y = matrix(0, nn, llev)
+ y[cbind(1:nn,as.vector(unclass(yf)))] = 1
+ dimnames(y) = list(names(yf), lev)
+
+ if(llev <= 1)
+ stop("the response matrix does not have 2 or more columns")
+ } else
+ nn = nrow(y)
+
+ nvec = drop(y %*% rep(1,ncol(y)))
+
+ if(!exists("delete.zero.colns") ||
+ (exists("delete.zero.colns") && delete.zero.colns)) {
+ sumy2 = as.vector(t(y) %*% rep(1,nrow(y)))
+ if(any(index <- sumy2==0))
+ {
+ y = y[,!index,drop = FALSE]
+ sumy2 = sumy2[!index]
+ if(all(index) || ncol(y)<=1) stop("y matrix has 0 or 1 columns")
+ warning(paste("Deleted", sum(!index),
+ "columns of the response matrix (zero counts)"))
+ }
+ }
+
+ if(any(miss <- nvec==0))
+ {
+ warning(paste(sm <- sum(miss),
+ "observation(s) deleted (zero counts)"))
+ x = x[!miss,,drop = FALSE]
+ y = y[!miss,,drop = FALSE]
+ nvec = nvec[!miss]
+ nn = nn - sm
+ }
+
+ w = w * nvec
+ nvec[nvec == 0] = 1
+ y = y / nvec # Convert to proportions
+
+ if(!length(mustart)) {
+ # mustart = (nvec * y + 1/ncol(y)) / (nvec+1) # alternative; immediately overwritten
+ mustart = y + (1/ncol(y) - y)/nvec # the initial value actually used
+ }
+})
+
+
+
+
+Deviance.categorical.data.vgam <-
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+{
+
+
+
+ if(ncol(y) == 1 || ncol(mu) == 1)
+ stop("y and mu must have at least 2 columns")
+
+ double.eps = .Machine$double.eps # ^0.75
+ devy = y
+ nz = y != 0
+ devy[nz] = y[nz] * log(y[nz])
+ devmu = y * log(mu)
+ if(any(small <- mu * (1 - mu) < double.eps)) {
+ warning("fitted values close to 0 or 1")
+ smu = mu[small]
+ sy = y[small]
+ smu = ifelse(smu < double.eps, double.eps, smu)
+ devmu[small] = sy * log(smu)
+ }
+ devi = 2 * (devy - devmu)
+
+ if(residuals) {
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ if(M > 1)
+ return(NULL)
+ devi = devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
+ return(c(sign(y[,1] - mu[,1]) * sqrt(abs(devi) * w)))
+ } else
+ sum(w * devi)
+}
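+# Editor's sketch: the helper can be called directly; eta is only
+# evaluated when residuals=TRUE.
+# yy = rbind(c(1,0,0), c(0,1,0))
+# mm = rbind(c(.5,.3,.2), c(.2,.6,.2))
+# Deviance.categorical.data.vgam(mu=mm, y=yy, w=c(1,1), eta=NULL)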
+
+
+
+
+
+
+
+
+sratio = function(link="logit", parallel=FALSE, reverse=FALSE, zero=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Stopping Ratio model\n\n",
+ "Links: ",
+ namesof(if(reverse) "P[Y=j+1|Y<=j+1]" else "P[Y=j|Y>=j]", link),
+ "\n",
+ "Variance: mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]"),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ deviance=Deviance.categorical.data.vgam,
+ initialize=eval(substitute(expression({
+ delete.zero.colns = TRUE
+ eval(process.categorical.data.vgam)
+ M = ncol(y) - 1
+ mynames = if( .reverse)
+ paste("P[Y=",2:(M+1),"|Y<=",2:(M+1),"]", sep="") else
+ paste("P[Y=",1:M,"|Y>=",1:M,"]", sep="")
+ predictors.names = namesof(mynames, .link, short=TRUE)
+ y.names = paste("mu", 1:(M+1), sep="")
+ extra = if( .reverse ) tapplymat1(y, "cumsum") else
+ tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ }), list( .link=link, .reverse=reverse ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ if(!is.matrix(eta))
+ eta = as.matrix(eta)
+ if( .reverse ) {
+ M = ncol(eta)
+ djr = eta2theta(eta, .link)
+ temp = tapplymat1(1-djr[,M:1], "cumprod")[,M:1]
+ cbind(1,djr) * cbind(temp,1)
+ } else {
+ dj = eta2theta(eta, .link)
+ temp = tapplymat1(1-dj, "cumprod")
+ cbind(dj,1) * cbind(1, temp)
+ }
+ }, list( .link=link, .reverse=reverse) )),
+ last=eval(substitute(expression({
+ misc$link = rep( .link, length=M)
+ names(misc$link) = mynames
+ misc$parameters = mynames
+ misc$reverse = .reverse
+ extra = list() # kill what was used
+ }), list( .link=link, .reverse=reverse ))),
+ link=eval(substitute( function(mu, extra=NULL) {
+ cump = tapplymat1(mu, "cumsum")
+ if( .reverse ) {
+ djr = mu[,-1] / cump[,-1]
+ theta2eta(djr, .link)
+ } else {
+ M = ncol(mu) - 1
+ dj = if(M==1) mu[,1] else mu[,1:M]/(1-cbind(0,cump[,1:(M-1)]))
+ theta2eta(dj, .link)
+ }
+ }, list( .link=link, .reverse=reverse) )),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("sratio", "vcategorical"),
+ deriv=eval(substitute(expression({
+ if(!length(extra)) {
+ extra = if( .reverse ) tapplymat1(y, "cumsum") else
+ tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ }
+ if( .reverse ) {
+ djr = eta2theta(eta, .link)
+ Mp1 = ncol(extra)
+ w * (y[,-1]/djr - extra[,-Mp1]/(1-djr)) *
+ dtheta.deta(djr, .link)
+ } else {
+ dj = eta2theta(eta, .link)
+ w * (y[,-ncol(y)]/dj - extra[,-1]/(1-dj)) *
+ dtheta.deta(dj, .link)
+ }
+ }), list( .link=link, .reverse=reverse) )),
+ weight= eval(substitute(expression({
+ if( .reverse ) {
+ cump = tapplymat1(mu, "cumsum")
+ ddjr.deta = dtheta.deta(djr, .link)
+ wz = w * ddjr.deta^2 * (mu[,-1]/djr^2 + cump[,1:M]/(1-djr)^2)
+ } else {
+ ccump = tapplymat1(mu[,ncol(mu):1], "cumsum")[,ncol(mu):1]
+ ddj.deta = dtheta.deta(dj, .link)
+ wz = w * ddj.deta^2 * (mu[,1:M]/dj^2 + ccump[,-1]/(1-dj)^2)
+ }
+
+ wz
+ }), list( .link=link, .reverse=reverse ))))
+}
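+# Editor's sketch: a stopping-ratio fit to the pneumo data shipped with
+# this package.
+# data(pneumo); pneumo = transform(pneumo, let = log(exposure.time))
+# fit = vglm(cbind(normal, mild, severe) ~ let, sratio, data=pneumo)
+# coef(fit, matrix=TRUE)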
+
+
+
+
+cratio = function(link="logit", parallel=FALSE, reverse=FALSE, zero=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Continuation Ratio model\n\n",
+ "Links: ",
+ namesof(if(reverse) "P[Y<j+1|Y<=j+1]" else "P[Y>j|Y>=j]", link),
+ "\n",
+ "Variance: mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]"),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ deviance=Deviance.categorical.data.vgam,
+ initialize=eval(substitute(expression({
+ delete.zero.colns = TRUE
+ eval(process.categorical.data.vgam)
+ M = ncol(y) - 1
+ mynames = if( .reverse )
+ paste("P[Y<",2:(M+1),"|Y<=",2:(M+1),"]", sep="") else
+ paste("P[Y>",1:M,"|Y>=",1:M,"]", sep="")
+ predictors.names = namesof(mynames, .link, short=TRUE)
+ y.names = paste("mu", 1:(M+1), sep="")
+ extra = if( .reverse ) tapplymat1(y, "cumsum") else
+ tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ }), list( .link=link, .reverse=reverse ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ if(!is.matrix(eta))
+ eta = as.matrix(eta)
+ if( .reverse ) {
+ M = ncol(eta)
+ djrs = eta2theta(eta, .link)
+ temp = tapplymat1(djrs[,M:1], "cumprod")[,M:1]
+ cbind(1,1-djrs) * cbind(temp,1)
+ } else {
+ djs = eta2theta(eta, .link)
+ temp = tapplymat1(djs, "cumprod")
+ cbind(1-djs,1) * cbind(1, temp)
+ }
+ }, list( .link=link, .reverse=reverse) )),
+ last=eval(substitute(expression({
+ misc$link = rep( .link, length=M)
+ names(misc$link) = mynames
+ misc$parameters = mynames
+ misc$reverse = .reverse
+ extra = list() # kill what was used
+ }), list( .link=link, .reverse=reverse ))),
+ link=eval(substitute( function(mu, extra=NULL) {
+ cump = tapplymat1(mu, "cumsum")
+ if( .reverse ) {
+ djrs = 1 - mu[,-1] / cump[,-1]
+ theta2eta(djrs, .link)
+ } else {
+ M = ncol(mu) - 1
+ djs = if(M==1) 1-mu[,1] else 1-mu[,1:M]/(1-cbind(0,cump[,1:(M-1)]))
+ theta2eta(djs, .link)
+ }
+ }, list( .link=link, .reverse=reverse) )),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("cratio", "vcategorical"),
+ deriv=eval(substitute(expression({
+ if(!length(extra)) {
+ extra = if( .reverse ) tapplymat1(y, "cumsum") else
+ tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ }
+ if( .reverse ) {
+ djrs = eta2theta(eta, .link)
+ Mp1 = ncol(extra)
+ -w * (y[,-1]/(1-djrs) - extra[,-Mp1]/djrs) *
+ dtheta.deta(djrs, .link)
+ } else {
+ djs = eta2theta(eta, .link)
+ -w * (y[,-ncol(y)]/(1-djs) - extra[,-1]/djs) *
+ dtheta.deta(djs, .link)
+ }
+ }), list( .link=link, .reverse=reverse) )),
+ weight= eval(substitute(expression({
+ if( .reverse ) {
+ cump = tapplymat1(mu, "cumsum")
+ ddjrs.deta = dtheta.deta(djrs, .link)
+ wz = w * ddjrs.deta^2 * (mu[,-1]/(1-djrs)^2 + cump[,1:M]/djrs^2)
+ } else {
+ ccump = tapplymat1(mu[,ncol(mu):1], "cumsum")[,ncol(mu):1]
+ ddjs.deta = dtheta.deta(djs, .link)
+ wz = w * ddjs.deta^2 * (mu[,1:M]/(1-djs)^2 + ccump[,-1]/djs^2)
+ }
+
+ wz
+ }), list( .link=link, .reverse=reverse ))))
+}
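+# Editor's sketch: the continuation-ratio analogue of the sratio() sketch
+# above; reverse=TRUE flips which conditional probability is modelled.
+# fit2 = vglm(cbind(normal, mild, severe) ~ let, cratio(reverse=TRUE),
+#             data=pneumo)
+# coef(fit2, matrix=TRUE)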
+
+
+
+
+vglm.multinomial.deviance.control = function(maxit=21, panic=FALSE, ...)
+{
+ if(maxit < 1) {
+ warning("bad value of maxit; using 21 instead")
+ maxit = 21
+ }
+ list(maxit=maxit, panic=as.logical(panic)[1])
+}
+
+vglm.multinomial.control = function(maxit=21, panic=FALSE,
+ criterion=c("aic1", "aic2", names( .min.criterion.VGAM )), ...)
+{
+
+ if(mode(criterion) != "character" && mode(criterion) != "name")
+ criterion = as.character(substitute(criterion))
+ criterion = match.arg(criterion,
+ c("aic1", "aic2", names( .min.criterion.VGAM )))[1]
+
+ if(maxit < 1) {
+ warning("bad value of maxit; using 21 instead")
+ maxit = 21
+ }
+ list(maxit=maxit, panic=as.logical(panic)[1],
+ criterion=criterion,
+ min.criterion=c("aic1"=FALSE, "aic2"=TRUE, .min.criterion.VGAM))
+}
+
+
+vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic=TRUE, ...)
+{
+ if(maxit < 1) {
+ warning("bad value of maxit; using 200 instead")
+ maxit = 200
+ }
+ list(maxit=maxit, trace=as.logical(trace)[1], panic=as.logical(panic)[1])
+}
+
+
+
+multinomial = function(zero=NULL, parallel=FALSE, nointercept=NULL)
+{
+
+ new("vglmff",
+ blurb=c("Multinomial logit model\n\n",
+ "Links: log(mu[,j]/mu[,M+1])\n",
+ "Variance: mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]"),
+ constraints=eval(substitute(expression({
+
+
+
+
+
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints,
+ intercept.apply=FALSE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints = cm.nointercept.vgam(constraints, x, .nointercept, M)
+ }), list( .parallel=parallel, .zero=zero, .nointercept=nointercept ))),
+ deviance=Deviance.categorical.data.vgam,
+ initialize=expression({
+ delete.zero.colns = TRUE
+ eval(process.categorical.data.vgam)
+ M = ncol(y)-1
+ predictors.names = paste("log(mu[,",1:M,"]/mu[,",M+1,"])",sep="")
+ y.names = paste("mu", 1:(M+1), sep="")
+ }),
+ inverse=function(eta, extra=NULL) {
+ if(any(is.na(eta)))
+ warning("there are NAs in eta in slot inverse")
+ phat = cbind(exp(eta), 1)
+ ans = phat / as.vector(phat %*% rep(1, ncol(phat)))
+ if(any(is.na(ans)))
+ warning("there are NAs here in slot inverse")
+ ans
+ },
+ last=expression({
+ dy = dimnames(y)
+ if(!is.null(dy[[2]]))
+ dimnames(fit$fitted.values) = dy
+ }),
+ link=function(mu, extra=NULL)
+ log(mu[,-ncol(mu)]/mu[,ncol(mu)]),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("multinomial", "vcategorical"),
+ deriv=expression({
+ w * (y[,-ncol(y)] - mu[,-ncol(y)])
+ }),
+ weight= expression({
+ tiny = (mu < .Machine$double.eps^.5) |
+ (mu > 1 - .Machine$double.eps^.5)
+
+ if(M==1) wz = mu[,1] * (1-mu[,1]) else {
+ index = iam(NA, NA, M, both=TRUE, diag=TRUE)
+ wz = -mu[,index$row] * mu[,index$col]
+ wz[,1:M] = wz[,1:M] + mu[,1:M]
+ }
+
+ atiny = (tiny %*% rep(1, ncol(mu))) > 0 # apply(tiny, 1, any)
+ if(any(atiny)) {
+ if(M==1) wz[atiny] = wz[atiny] * (1 + .Machine$double.eps^0.5) +
+ .Machine$double.eps else
+ wz[atiny,1:M] = wz[atiny,1:M] * (1 + .Machine$double.eps^0.5) +
+ .Machine$double.eps
+ }
+ w * wz
+ }))
+}
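+# Editor's sketch: the response may be a factor (converted internally by
+# process.categorical.data.vgam) or a matrix of counts; the last level is
+# the reference.
+# mdat = data.frame(x = rnorm(300))
+# mdat$y = factor(1 + rbinom(300, size=2, prob = 1/(1+exp(-mdat$x))))
+# fit = vglm(y ~ x, multinomial, data=mdat)
+# head(fitted(fit))   # rows are probabilities summing to 1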
+
+
+
+cumulative = function(link="logit", parallel=FALSE, reverse=FALSE,
+ earg = list(), mv=FALSE,
+ intercept.apply = FALSE)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if(!is.logical(mv) || length(mv)!=1) stop("\"mv\" must be a single logical")
+ if(!is.list(earg)) earg = list()
+
+ new("vglmff",
+ blurb=if( mv ) c(paste("Multivariate cumulative", link, "model\n\n"),
+ "Links: ",
+ namesof(if(reverse) "P[Y1>=j+1]" else "P[Y1<=j]", link, earg=earg),
+ ", ...") else
+ c(paste("Cumulative", link, "model\n\n"),
+ "Links: ",
+ namesof(if(reverse) "P[Y>=j+1]" else "P[Y<=j]", link, earg=earg)),
+ constraints=eval(substitute(expression({
+ if( .mv ) {
+ Llevels = extra$Llevels
+ NOS = extra$NOS
+ Hk.matrix = kronecker(diag(NOS), matrix(1,Llevels-1,1))
+ constraints = cm.vgam(Hk.matrix, x, .parallel, constraints,
+ intercept.apply = .intercept.apply)
+ } else {
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints,
+ intercept.apply = .intercept.apply)
+ }
+ }), list( .parallel=parallel, .mv=mv, .intercept.apply=intercept.apply ))),
+ deviance=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ answer =
+ if( .mv ) {
+ totdev = 0
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ for(iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels)
+ totdev = totdev + Deviance.categorical.data.vgam(
+ mu=mu[,aindex,drop=FALSE],
+ y=y[,aindex,drop=FALSE], w=w, residuals=residuals,
+ eta=eta[,cindex,drop=FALSE], extra=extra)
+ }
+ totdev
+ } else {
+ Deviance.categorical.data.vgam(mu=mu, y=y, w=w, residuals=residuals,
+ eta=eta, extra=extra)
+ }
+ answer
+ }, list( .link=link, .mv = mv ) )),
+ initialize=eval(substitute(expression({
+ extra$mv = .mv
+ if( .mv ) {
+ checkCut(y) # Check the input; stops if there is an error.
+ if(any(w != 1) || ncol(cbind(w)) != 1)
+ stop("the 'weights' argument must be a vector of all ones")
+ Llevels = max(y)
+ delete.zero.colns = FALSE
+ orig.y = cbind(y) # Convert y into a matrix if necessary
+ NOS = ncol(cbind(orig.y))
+ use.y = use.mustart = NULL
+ for(iii in 1:NOS) {
+ y = as.factor(orig.y[,iii])
+ eval(process.categorical.data.vgam)
+ use.y = cbind(use.y, y)
+ use.mustart = cbind(use.mustart, mustart)
+ }
+ mustart = use.mustart
+ y = use.y # n x (Llevels*NOS)
+ M = NOS * (Llevels-1)
+ mynames = y.names = NULL
+ for(iii in 1:NOS) {
+ Y.names = paste("Y", iii, sep="")
+ mu.names = paste("mu", iii, ".", sep="")
+ mynames = c(mynames, if( .reverse )
+ paste("P[",Y.names,">=",2:Llevels,"]", sep="") else
+ paste("P[",Y.names,"<=",1:(Llevels-1),"]", sep=""))
+ y.names = c(y.names, paste(mu.names, 1:Llevels, sep=""))
+ }
+ predictors.names = namesof(mynames, .link, short=TRUE, earg= .earg)
+ extra$NOS = NOS
+ extra$Llevels = Llevels
+ } else {
+ delete.zero.colns = TRUE
+ eval(process.categorical.data.vgam)
+ M = ncol(y)-1
+ mynames = if( .reverse ) paste("P[Y>=",2:(1+M),"]", sep="") else
+ paste("P[Y<=",1:M,"]", sep="")
+ predictors.names = namesof(mynames, .link, short=TRUE, earg= .earg)
+ y.names = paste("mu", 1:(M+1), sep="")
+ }
+ }), list( .link=link, .reverse=reverse, .mv = mv, .earg = earg ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ answer =
+ if( .mv ) {
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ fv.matrix = matrix(0, nrow(eta), NOS*Llevels)
+ for(iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels)
+ if( .reverse ) {
+ ccump = cbind(1,eta2theta(eta[,cindex,drop=FALSE], .link,
+ earg= .earg))
+ fv.matrix[,aindex] =
+ cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
+ } else {
+ cump = cbind(eta2theta(eta[,cindex,drop=FALSE], .link,
+ earg= .earg), 1)
+ fv.matrix[,aindex] =
+ cbind(cump[,1], tapplymat1(cump, "diff"))
+ }
+ }
+ fv.matrix
+ } else {
+ if( .reverse ) {
+ ccump = cbind(1, eta2theta(eta, .link, earg= .earg))
+ cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
+ } else {
+ cump = cbind(eta2theta(eta, .link, earg= .earg), 1)
+ cbind(cump[,1], tapplymat1(cump, "diff"))
+ }
+ }
+ answer
+ }, list( .link=link, .reverse=reverse, .earg= earg, .mv = mv ))),
+ last=eval(substitute(expression({
+ misc$link = rep( .link, length=M)
+ names(misc$link) = mynames
+ misc$parameters = mynames
+ misc$reverse = .reverse
+ misc$parallel = .parallel
+ misc$mv = .mv
+ misc$earg = vector("list", M)
+ for(iii in 1:M) misc$earg[[iii]] = .earg
+ }), list( .link=link, .reverse=reverse, .parallel=parallel,
+ .mv = mv, .earg= earg ))),
+ link=eval(substitute( function(mu, extra=NULL) {
+ answer =
+ if( .mv ) {
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ eta.matrix = matrix(0, nrow(mu), NOS*(Llevels-1))
+ for(iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels)
+ cump = tapplymat1(as.matrix(mu[,aindex]), "cumsum")
+ eta.matrix[,cindex] =
+ theta2eta(if( .reverse) 1-cump[,1:(Llevels-1)] else
+ cump[,1:(Llevels-1)], .link, earg= .earg)
+ }
+ eta.matrix
+ } else {
+ cump = tapplymat1(as.matrix(mu), "cumsum")
+ M = ncol(as.matrix(mu)) - 1
+ theta2eta(if( .reverse ) 1-cump[,1:M] else cump[,1:M], .link,
+ earg= .earg)
+ }
+ answer
+ }, list( .link=link, .reverse=reverse, .earg=earg, .mv=mv ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("cumulative", "vcategorical"),
+ deriv=eval(substitute(expression({
+ deriv.answer =
+ if( .mv ) {
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ dcump.deta = answer.matrix = matrix(0, n, NOS * (Llevels-1))
+ for(iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
+ cump = eta2theta(eta[,cindex,drop=FALSE], .link, earg= .earg)
+ dcump.deta[,cindex] = dtheta.deta(cump, .link, earg= .earg)
+ answer.matrix[,cindex] =
+ (y[,aindex,drop=FALSE]/mu[,aindex,drop=FALSE] -
+ y[,1+aindex,drop=FALSE]/mu[,1+aindex,drop=FALSE])
+ }
+ (if( .reverse) -w else w) * dcump.deta * answer.matrix
+ } else {
+ cump = eta2theta(eta, .link, earg= .earg)
+ dcump.deta = dtheta.deta(cump, .link, earg= .earg)
+ (if( .reverse) -w else w) * dcump.deta *
+ (y[,1:M]/mu[,1:M] - y[,-1]/mu[,-1])
+ }
+ deriv.answer
+ }), list( .link=link, .reverse=reverse, .earg= earg, .mv=mv ))),
+ weight= eval(substitute(expression({
+ if( .mv ) {
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ wz = matrix(0, n, NOS*(Llevels-1)) # Diagonal elts only for a start
+ for(iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
+ wz[,cindex] = w * dcump.deta[,cindex,drop=FALSE]^2 *
+ (1/mu[,aindex,drop=FALSE] + 1/mu[,1+aindex,drop=FALSE])
+ }
+ if(Llevels-1 > 1) {
+ iii = 1
+ oindex = (iii-1)*(Llevels-1) + 1:(Llevels-2)
+ wz = cbind(wz, -w * dcump.deta[,oindex] * dcump.deta[,1+oindex])
+
+
+ if(NOS > 1) {
+ cptrwz = ncol(wz) # Like a pointer
+ wz = cbind(wz, matrix(0, nrow(wz), (NOS-1) * (Llevels-1)))
+ for(iii in 2:NOS) {
+ oindex = (iii-1)*(Llevels-1) + 1:(Llevels-2)
+ wz[,cptrwz + 1 + (1:(Llevels-2))] =
+ -w * dcump.deta[,oindex] *
+ dcump.deta[,1+oindex]
+ cptrwz = cptrwz + Llevels - 1 # Move it along a bit
+ }
+ }
+
+
+
+ }
+ } else {
+ wz = w * dcump.deta[,1:M]^2 * (1/mu[,1:M] + 1/mu[,-1])
+ if(M > 1)
+ wz = cbind(wz, -w * dcump.deta[,1:(M-1)] *
+ dcump.deta[,2:M] / mu[,2:M])
+ }
+ wz
+ }), list( .link=link, .mv=mv ))))
+}
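+# Editor's sketch: a proportional-odds model is cumulative() with the
+# parallelism constraint on the non-intercept terms (pneumo/let as above).
+# fit = vglm(cbind(normal, mild, severe) ~ let,
+#            cumulative(parallel=TRUE, reverse=TRUE), data=pneumo)
+# coef(fit, matrix=TRUE)   # one common slope, M different intercepts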
+
+
+
+acat = function(link="loge", parallel=FALSE, reverse=FALSE, zero=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Adjacent-categories model\n\n",
+ "Links: ",
+ namesof(if(reverse) "P[Y=j]/P[Y=j+1]" else "P[Y=j+1]/P[Y=j]", link),
+ "\n",
+ "Variance: mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]"),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+
+ deviance=Deviance.categorical.data.vgam,
+ initialize=eval(substitute(expression({
+ delete.zero.colns = TRUE
+ eval(process.categorical.data.vgam)
+ M = ncol(y) - 1
+ mynames = if( .reverse )
+ paste("P[Y=",1:M,"]/P[Y=",2:(M+1),"]", sep="") else
+ paste("P[Y=",2:(M+1),"]/P[Y=",1:M,"]", sep="")
+ predictors.names = namesof(mynames, .link, short=TRUE)
+ y.names = paste("mu", 1:(M+1), sep="")
+ }), list( .link=link, .reverse=reverse ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ if(!is.matrix(eta))
+ eta = as.matrix(eta)
+ M = ncol(eta)
+ if( .reverse ) {
+ zetar = eta2theta(eta, .link)
+ temp = tapplymat1(zetar[,M:1], "cumprod")[,M:1,drop=FALSE]
+ cbind(temp,1) / drop(1 + temp %*% rep(1,ncol(temp)))
+ } else {
+ zeta = eta2theta(eta, .link)
+ temp = tapplymat1(zeta, "cumprod")
+ cbind(1,temp) / drop(1 + temp %*% rep(1,ncol(temp)))
+ }
+ }, list( .link=link, .reverse=reverse) )),
+ last=eval(substitute(expression({
+ misc$link = rep( .link, length=M)
+ names(misc$link) = mynames
+ misc$parameters = mynames
+ misc$reverse = .reverse
+ }), list( .link=link, .reverse=reverse ))),
+ link=eval(substitute( function(mu, extra=NULL) {
+ M = ncol(mu) - 1
+ theta2eta(if( .reverse ) mu[,1:M]/mu[,-1] else mu[,-1]/mu[,1:M], .link)
+ }, list( .link=link, .reverse=reverse) )),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("acat", "vcategorical"),
+ deriv=eval(substitute(expression({
+ zeta = eta2theta(eta, .link) # May be zetar
+ d1 = acat.deriv(zeta, M=M, n=n, reverse=.reverse)
+ score = attr(d1, "gradient") / d1
+ dzeta.deta = dtheta.deta(zeta, .link)
+ if( .reverse ) {
+ cumy = tapplymat1(y, "cumsum")
+ w * dzeta.deta * (cumy[,1:M]/zeta - score)
+ } else {
+ ccumy = tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ w * dzeta.deta * (ccumy[,-1]/zeta - score)
+ }
+ }), list( .link=link, .reverse=reverse) )),
+ weight= eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M))
+
+ hess = attr(d1, "hessian") / d1
+
+ if(M>1)
+ for(j in 1:(M-1))
+ for(k in (j+1):M)
+ wz[,iam(j,k,M)] = (hess[,j,k] - score[,j] * score[,k]) *
+ dzeta.deta[,j] * dzeta.deta[,k]
+ if( .reverse ) {
+ cump = tapplymat1(mu, "cumsum")
+ wz[,1:M] = (cump[,1:M]/zeta^2 - score^2) * dzeta.deta^2
+ } else {
+ ccump = tapplymat1(mu[,ncol(mu):1], "cumsum")[,ncol(mu):1]
+ wz[,1:M] = (ccump[,-1]/zeta^2 - score^2) * dzeta.deta^2
+ }
+ w * wz
+ }), list( .link=link, .reverse=reverse ))))
+}
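+# Editor's sketch: adjacent-categories model; each linear predictor is,
+# by default, the log of a ratio of adjacent probabilities.
+# fit = vglm(cbind(normal, mild, severe) ~ let, acat, data=pneumo)
+# coef(fit, matrix=TRUE)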
+
+acat.deriv = function(zeta, reverse, M, n)
+{
+
+ alltxt = NULL
+ for(i in 1:M) {
+ index = if(reverse) i:M else 1:i
+ vars = paste("zeta", index, sep="")
+ txt = paste(vars, collapse="*")
+ alltxt = c(alltxt, txt)
+ }
+ alltxt = paste(alltxt, collapse=" + ")
+ alltxt = paste(" ~ 1 +", alltxt)
+ txt = as.formula(alltxt)
+
+ allvars = paste("zeta", 1:M, sep="")
+ d1 = deriv3(txt, allvars, hessian=TRUE) # deriv3() computes the Hessian
+
+ zeta = as.matrix(zeta)
+ for(i in 1:M)
+ assign(paste("zeta", i, sep=""), zeta[,i])
+
+ ans = eval(d1)
+ ans
+}
+
+
+
+
+brat = function(refgp="last",
+ refvalue = 1,
+ init.alpha = 1)
+{
+ if(!is.Numeric(init.alpha, posit=TRUE))
+ stop("\"init.alpha\" must contain positive values only")
+ if(!is.Numeric(refvalue, allow=1, posit=TRUE))
+ stop("\"refvalue\" must be a single positive value")
+ if(!is.character(refgp) &&
+ !is.Numeric(refgp, allow=1, integer=TRUE, posit=TRUE))
+ stop("\"refgp\" must be a single positive integer")
+ new("vglmff",
+ blurb=c(paste("Bradley-Terry model (without ties)\n\n"),
+ "Links: ",
+ namesof("alpha's", "loge")),
+ initialize=eval(substitute(expression({
+ are.ties = attr(y, "are.ties") # If Brat() was used
+ if(is.logical(are.ties) && are.ties)
+ stop("use bratt(), not brat(), when there are ties")
+
+ try.index = 1:400
+ M = (1:length(try.index))[(try.index+1)*(try.index) == ncol(y)]
+ if(!is.Numeric(M, allow=1, integ=TRUE)) stop("can't determine M") # integer(0) if no match
+ init.alpha = matrix( rep( .init.alpha, len=M), n, M, byrow=TRUE)
+ etastart = matrix(theta2eta(init.alpha, "loge"), n, M, byrow=TRUE)
+ refgp = .refgp
+ if(!intercept.only)
+ warning("this function only works with intercept only models")
+ extra$ybrat.indices = .brat.indices(NCo=M+1, are.ties=FALSE)
+ uindex = if( .refgp =="last") 1:M else (1:(M+1))[-( .refgp ) ]
+
+ predictors.names=namesof(paste("alpha",uindex,sep=""),"loge",short=TRUE)
+ }), list( .refgp=refgp, .init.alpha=init.alpha ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ probs = NULL
+ eta = as.matrix(eta) # in case M=1
+ for(ii in 1:nrow(eta)) {
+ alpha = .brat.alpha(eta2theta(eta[ii,], "loge"), .refvalue, .refgp)
+ alpha1 = alpha[extra$ybrat.indices[,"rindex"]]
+ alpha2 = alpha[extra$ybrat.indices[,"cindex"]]
+ probs = rbind(probs, alpha1/(alpha1+alpha2))
+ }
+ dimnames(probs) = dimnames(eta)
+ probs
+ }, list( .refgp=refgp, .refvalue=refvalue) )),
+ last=eval(substitute(expression({
+ misc$link = rep( "loge", length=M)
+ names(misc$link) = paste("alpha",uindex,sep="")
+ misc$refgp = .refgp
+ misc$refvalue = .refvalue
+ }), list( .refgp=refgp, .refvalue=refvalue ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * y * log(mu)),
+ vfamily=c("brat"),
+ deriv=eval(substitute(expression({
+ ans = NULL
+ uindex = if( .refgp =="last") 1:M else (1:(M+1))[-( .refgp ) ]
+ eta = as.matrix(eta) # in case M=1
+ for(ii in 1:nrow(eta)) {
+ alpha = .brat.alpha(eta2theta(eta[ii,], "loge"), .refvalue, .refgp)
+ ymat = InverseBrat(y[ii,], NCo=M+1, diag=0)
+ answer = rep(0, len=M)
+ for(aa in 1:(M+1)) {
+ answer = answer + (1-(aa==uindex)) *
+ (ymat[uindex,aa] * alpha[aa] - ymat[aa,uindex] *
+ alpha[uindex]) / (alpha[aa] + alpha[uindex])
+ }
+ ans = rbind(ans, w[ii] * answer)
+ }
+ dimnames(ans) = dimnames(eta)
+ ans
+ }), list( .refvalue=refvalue, .refgp=refgp) )),
+ weight= eval(substitute(expression({
+ wz = matrix(0, n, dimm(M))
+ for(ii in 1:nrow(eta)) {
+ alpha = .brat.alpha(eta2theta(eta[ii,], "loge"), .refvalue, .refgp)
+ ymat = InverseBrat(y[ii,], NCo=M+1, diag=0)
+ for(aa in 1:(M+1)) {
+ wz[ii,1:M] = wz[ii,1:M] + (1-(aa==uindex)) *
+ (ymat[aa,uindex] + ymat[uindex,aa]) * alpha[aa] *
+ alpha[uindex] / (alpha[aa] + alpha[uindex])^2
+ }
+ if(M > 1) {
+ ind5 = iam(1,1,M, both=TRUE, diag=FALSE)
+ wz[ii,(M+1):ncol(wz)] =
+ -(ymat[cbind(uindex[ind5$row],uindex[ind5$col])] +
+ ymat[cbind(uindex[ind5$col],uindex[ind5$row])]) *
+ alpha[uindex[ind5$col]] * alpha[uindex[ind5$row]] /
+ (alpha[uindex[ind5$row]] + alpha[uindex[ind5$col]])^2
+ }
+ }
+ wz = wz * w
+ wz
+ }), list( .refvalue=refvalue, .refgp=refgp ))))
+}
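+# Editor's sketch: brat() is intercept-only; the response is a 1-row
+# matrix of pairwise win counts as produced by Brat() (defined below).
+# wins = matrix(c(NA,10, 4,
+#                  6,NA, 7,
+#                  9,12,NA), 3, 3, byrow=TRUE)
+# fit = vglm(Brat(wins) ~ 1, brat, trace=TRUE)
+# exp(coef(fit))   # the alpha's (log link), last group as reference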
+
+
+
+
+bratt = function(refgp="last",
+ refvalue = 1,
+ init.alpha = 1,
+ i0 = 0.01)
+{
+ if(!is.Numeric(i0, allow=1, positi=TRUE))
+ stop("\"i0\" must be a single positive value")
+ if(!is.Numeric(init.alpha, positi=TRUE))
+ stop("\"init.alpha\" must contain positive values only")
+ if(!is.Numeric(refvalue, allow=1, positi=TRUE))
+ stop("\"refvalue\" must be a single positive value")
+ if(!is.character(refgp) &&
+ !is.Numeric(refgp, allow=1, integer=TRUE, positi=TRUE))
+ stop("\"refgp\" must be a single positive integer")
+ new("vglmff",
+ blurb=c(paste("Bradley-Terry model (with ties)\n\n"),
+ "Links: ",
+ namesof("alpha's", "loge"), ", log(alpha0)"),
+ initialize=eval(substitute(expression({
+ try.index = 1:400
+ M = (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
+ if(!is.Numeric(M, allow=1, integ=TRUE)) stop("can't determine M")
+ NCo = M # number of contestants
+
+ are.ties = attr(y, "are.ties") # If Brat() was used
+ if(is.logical(are.ties)) {
+ if(!are.ties)
+ stop("use brat(), not bratt(), when there are no ties")
+ ties = attr(y, "ties")
+ } else {
+ are.ties = FALSE
+ ties = 0 * y
+ }
+
+ init.alpha = rep( .init.alpha, len=NCo-1)
+ ialpha0 = .i0
+ etastart = cbind(matrix(theta2eta(init.alpha, "loge"),
+ n, NCo-1, byrow=TRUE),
+ theta2eta( rep(ialpha0, len=n), "loge"))
+ refgp = .refgp
+ if(!intercept.only)
+ warning("this function only works with intercept only models")
+ extra$ties = ties # Flat (1-row) matrix
+ extra$ybrat.indices = .brat.indices(NCo=NCo, are.ties=FALSE)
+ extra$tbrat.indices = .brat.indices(NCo=NCo, are.ties=TRUE) # unused
+ extra$dnties = dimnames(ties)
+ uindex = if(refgp=="last") 1:(NCo-1) else (1:(NCo))[-refgp ]
+
+ predictors.names=c(
+ namesof(paste("alpha",uindex,sep=""),"loge",short=TRUE),
+ namesof("alpha0", "loge", short=TRUE))
+ }), list( .refgp=refgp,
+ .i0 = i0,
+ .init.alpha=init.alpha ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ probs = qprobs = NULL
+ M = ncol(eta)
+ for(ii in 1:nrow(eta)) {
+ alpha = .brat.alpha(eta2theta(eta[ii,-M], "loge"), .refvalue, .refgp)
+ alpha0 = eta2theta(eta[ii,M], "loge")
+ alpha1 = alpha[extra$ybrat.indices[,"rindex"]]
+ alpha2 = alpha[extra$ybrat.indices[,"cindex"]]
+ probs = rbind(probs, alpha1/(alpha1+alpha2+alpha0)) #
+ qprobs = rbind(qprobs, alpha0/(alpha1+alpha2+alpha0)) #
+ }
+ if(length(extra$dnties))
+ dimnames(qprobs) = extra$dnties
+ attr(probs, "probtie") = qprobs
+ probs
+ }, list( .refgp=refgp, .refvalue=refvalue) )),
+ last=eval(substitute(expression({
+ misc$link = rep( "loge", length=M)
+ names(misc$link) = c(paste("alpha",uindex,sep=""), "alpha0")
+ misc$refgp = .refgp
+ misc$refvalue = .refvalue
+ misc$alpha = alpha # last value saved (fine since intercept-only, n=1)
+ misc$alpha0 = alpha0 # last value saved (fine since intercept-only, n=1)
+ }), list( .refgp=refgp, .refvalue=refvalue ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (y * log(mu) + 0.5 * extra$ties * log(attr(mu, "probtie")))),
+ vfamily=c("bratt"),
+ deriv=eval(substitute(expression({
+ ans = NULL
+ ties = extra$ties
+ NCo = M
+ uindex = if( .refgp =="last") 1:(M-1) else (1:(M))[-( .refgp )]
+ eta = as.matrix(eta)
+ for(ii in 1:nrow(eta)) {
+ alpha = .brat.alpha(eta2theta(eta[ii,-M], "loge"), .refvalue, .refgp)
+ alpha0 = eta2theta(eta[ii,M], "loge") # M == ncol(eta)
+ ymat = InverseBrat(y[ii,], NCo=M, diag=0)
+ tmat = InverseBrat(ties[ii,], NCo=M, diag=0)
+ answer = rep(0, len=NCo-1) # deriv wrt eta[-M]
+ for(aa in 1:NCo) {
+ Daj = alpha[aa] + alpha[uindex] + alpha0
+ pja = alpha[uindex] / Daj
+ answer = answer + alpha[uindex] *
+ (-ymat[aa,uindex] + ymat[uindex,aa]*(1-pja)/pja -
+ tmat[uindex,aa]) / Daj
+ }
+ deriv0 = 0 # deriv wrt eta[M]
+ for(aa in 1:(NCo-1))
+ for(bb in (aa+1):NCo) {
+ Dab = alpha[aa] + alpha[bb] + alpha0
+ qab = alpha0 / Dab
+ deriv0 = deriv0 + alpha0 *
+ (-ymat[aa,bb] - ymat[bb,aa] +
+ tmat[aa,bb]*(1-qab)/qab) / Dab
+ }
+ ans = rbind(ans, w[ii] * c(answer, deriv0))
+ }
+ dimnames(ans) = dimnames(eta)
+ ans
+ }), list( .refvalue=refvalue, .refgp=refgp) )),
+ weight= eval(substitute(expression({
+ wz = matrix(0, n, dimm(M)) # includes diagonal
+ for(ii in 1:nrow(eta)) {
+ alpha = .brat.alpha(eta2theta(eta[ii,-M], "loge"), .refvalue, .refgp)
+ alpha0 = eta2theta(eta[ii,M], "loge") # M == ncol(eta)
+ ymat = InverseBrat(y[ii,], NCo=M, diag=0)
+ tmat = InverseBrat(ties[ii,], NCo=M, diag=0)
+ for(aa in 1:(NCo)) {
+ Daj = alpha[aa] + alpha[uindex] + alpha0
+ pja = alpha[uindex] / Daj
+ nja = ymat[aa,uindex] + ymat[uindex,aa] + tmat[uindex,aa]
+ wz[ii,1:(NCo-1)] = wz[ii,1:(NCo-1)] +
+ alpha[uindex]^2 * nja * (1-pja)/(pja * Daj^2)
+ if(aa < NCo)
+ for(bb in (aa+1):(NCo)) {
+ nab = ymat[aa,bb] + ymat[bb,aa] + tmat[bb,aa]
+ Dab = alpha[aa] + alpha[bb] + alpha0
+ qab = alpha0 / Dab
+ wz[ii,NCo] = wz[ii,NCo] + alpha0^2 * nab *
+ (1-qab) / (qab * Dab^2)
+ }
+ }
+ if(NCo > 2) {
+ ind5 = iam(1,1, M=NCo, both=TRUE, diag=FALSE)
+ alphajunk = c(alpha, junk=NA)
+ mat4 = cbind(uindex[ind5$row],uindex[ind5$col])
+ wz[ii,(M+1):ncol(wz)] =
+ -(ymat[mat4] + ymat[mat4[,2:1]] + tmat[mat4]) *
+ alphajunk[uindex[ind5$col]] * alphajunk[uindex[ind5$row]] /
+ (alpha0 + alphajunk[uindex[ind5$row]] +
+ alphajunk[uindex[ind5$col]])^2
+ }
+ for(j in 1:length(uindex)) {
+ jay = uindex[j]
+ naj = ymat[,jay] + ymat[jay,] + tmat[,jay]
+ Daj = alpha[jay] + alpha + alpha0
+ wz[ii,iam(j, NCo, M=NCo, diag=TRUE)] =
+ -alpha[jay] * alpha0 * sum(naj / Daj^2)
+ }
+ }
+ wz = wz * w
+ wz
+ }), list( .refvalue=refvalue, .refgp=refgp ))))
+}
+
+
+.brat.alpha = function(vec, value, posn) {
+ if(is.character(posn))
+ if(posn!="last")
+ stop("can only handle \"last\"") else return(c(vec, value))
+ c(if(posn==1) NULL else vec[1:(posn-1)], value,
+ if(posn==length(vec)+1) NULL else vec[posn:length(vec)])
+}
+
+.brat.indices = function(NCo, are.ties=FALSE) {
+ if(!is.Numeric(NCo, allow=1, integ=TRUE) || NCo < 2)
+ stop("bad input for NCo")
+ m = diag(NCo)
+ if(are.ties) {
+ cbind(rindex=row(m)[col(m) < row(m)], cindex=col(m)[col(m) < row(m)])
+ } else
+ cbind(rindex=row(m)[col(m) != row(m)], cindex=col(m)[col(m) != row(m)])
+}
+
+
+Brat = function(mat, ties=0*mat, string=c(" > "," == ")) {
+ allargs = list(mat) # ,...
+ callit = if(length(names(allargs))) names(allargs) else
+ as.character(1:length(allargs))
+ ans = ans.ties = NULL
+ for(i in 1:length(allargs)) {
+ m = allargs[[i]]
+ if(!is.matrix(m) || dim(m)[1] != dim(m)[2])
+ stop("\"mat\" must be a square matrix")
+
+ diag(ties) = 0
+ if(!all(ties == t(ties)))
+ stop("ties must be a symmetric matrix")
+ are.ties = any(ties > 0)
+ diag(ties) = NA
+
+ diag(m) = 0 # Could have been NAs
+ if(any(is.na(m)))
+ stop("missing values not allowed (except on the diagonal)")
+ diag(m) = NA
+
+ dm = as.data.frame.table(m)
+ dt = as.data.frame.table(ties)
+ dm = dm[!is.na(dm$Freq),]
+ dt = dt[!is.na(dt$Freq),]
+ usethis1 = paste(dm[,1], string[1], dm[,2], sep="")
+ usethis2 = paste(dm[,1], string[2], dm[,2], sep="")
+ ans = rbind(ans, matrix(dm$Freq, nrow=1))
+ ans.ties = rbind(ans.ties, matrix(dt$Freq, nrow=1))
+ }
+ dimnames(ans) = list(callit, usethis1)
+ dimnames(ans.ties) = list(callit, usethis2)
+ attr(ans, "ties") = ans.ties
+ attr(ans, "are.ties") = are.ties
+ ans
+}
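+# Editor's sketch: Brat() flattens a square contest matrix into the 1-row
+# response that brat()/bratt() expect; InverseBrat() (below) reverses it.
+# wins = matrix(c(NA,2,1, 3,NA,0, 1,2,NA), 3, 3, byrow=TRUE)
+# yrow = Brat(wins)
+# InverseBrat(yrow)   # back to a 3 x 3 matrix with an NA diagonal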
+
+
+InverseBrat = function(yvec, NCo=
+ (1:900)[(1:900)*((1:900)-1)==ncol(rbind(yvec))],
+ multiplicity=if(is.matrix(yvec)) nrow(yvec) else 1,
+ diag=NA, string=c(" > "," == ")) {
+ ans = array(diag, c(NCo, NCo, multiplicity))
+ yvec.orig = yvec
+ yvec = c(yvec)
+ ptr = 1
+ for(mul in 1:multiplicity)
+ for(i1 in 1:(NCo))
+ for(i2 in 1:(NCo))
+ if(i1 != i2) {
+ ans[i2,i1,mul] = yvec[ptr]
+ ptr = ptr + 1
+ }
+ ans = if(multiplicity>1) ans else matrix(ans, NCo, NCo)
+
+ if(is.array(yvec.orig) || is.matrix(yvec.orig)) {
+ names.yvec = dimnames(yvec.orig)[[2]]
+ i = strsplit(names.yvec, string[1])
+ cal = NULL
+ for(k in c(NCo, 1:(NCo-1))) cal = c(cal, (i[[k]])[1])
+ if(multiplicity>1) {
+ dimnames(ans) = list(cal, cal, dimnames(yvec.orig)[[1]])
+ } else
+ dimnames(ans) = list(cal, cal)
+ }
+ ans
+}
+
+
+
diff --git a/R/family.censored.q b/R/family.censored.q
new file mode 100644
index 0000000..87bc389
--- /dev/null
+++ b/R/family.censored.q
@@ -0,0 +1,447 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+cexpon = function(link="loge", location=0)
+{
+ if(!is.Numeric(location, allow=1))
+ stop("bad input for \"location\"")
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Censored exponential distribution\n\n",
+ "Link: ", namesof("rate", link, tag= TRUE), "\n",
+ "Mean: ", "mu =", location, "+ 1 / ",
+ namesof("rate", link, tag= TRUE), "\n",
+ "Variance: ",
+ if(location==0) "Exponential: mu^2" else
+ paste("(mu-", location, ")^2", sep="")),
+ initialize=eval(substitute(expression({
+ extra$location = .location # This is passed into, e.g., link, deriv etc.
+ if(any(y <= extra$location))
+ stop(paste("all responses must be greater than", extra$location))
+ predictors.names = namesof("rate", .link, tag= FALSE)
+ mu = y + (abs(y - extra$location) < 0.001) / 8
+ if(!length(etastart))
+ etastart = theta2eta(1/(mu-extra$location), .link)
+ if(!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len=n)
+ if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ if(any(extra$rightcensored & extra$leftcensored))
+ stop("some observations are both right and left censored!")
+ }), list( .location=location, .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL)
+ extra$location + 1 / eta2theta(eta, .link),
+ list( .link=link ) )),
+ last=eval(substitute(expression({
+ misc$location = extra$location
+ misc$link = c("rate" = .link)
+ }), list( .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL)
+ theta2eta(1/(mu-extra$location), .link),
+ list( .link=link ) )),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ rate = 1 / (mu - extra$location)
+ cen0 = !extra$leftcensored & !extra$rightcensored # uncensored obsns
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[cenL] * log(1 - exp(-rate[cenL]*(y[cenL]-extra$location)))) +
+ sum(w[cenU] * (-rate[cenU]*(y[cenU]-extra$location))) +
+ sum(w[cen0] * (log(rate[cen0]) - rate[cen0]*(y[cen0]-extra$location)))
+ }, list( .link=link ))),
+ vfamily=c("cexpon"),
+ deriv=eval(substitute(expression({
+ rate = 1 / (mu - extra$location)
+ cen0 = !extra$leftcensored & !extra$rightcensored # uncensored obsns
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ dl.drate = 1/rate - (y-extra$location) # uncensored
+ tmp200 = exp(-rate*(y-extra$location))
+ if(any(cenL))
+ dl.drate[cenL] = (y[cenL]-extra$location) * tmp200[cenL] /
+ (1 - tmp200[cenL])
+ if(any(cenU))
+ dl.drate[cenU] = -(y[cenU]-extra$location)
+ drate.deta = dtheta.deta(rate, .link)
+ w * dl.drate * drate.deta
+ }), list( .link=link ) )),
+ weight=eval(substitute(expression({
+ A123 = ((mu-extra$location)^2) # uncensored d2l.drate2
+ Lowpt = ifelse(cenL, y, extra$location)
+ Upppt = ifelse(cenU, y, Inf)
+ tmp300 = exp(-rate*(Lowpt - extra$location))
+ d2l.drate2 = 0 * y
+ ind50 = Lowpt > extra$location
+ d2l.drate2[ind50] = (Lowpt[ind50]-extra$location)^2 * tmp300[ind50] /
+ (1-tmp300[ind50])
+ d2l.drate2 = d2l.drate2 + (exp(-rate*(Lowpt-extra$location)) -
+ exp(-rate*(Upppt-extra$location))) * A123
+ wz = w * (drate.deta^2) * d2l.drate2
+ wz
+ }), list( .link=link ))))
+}
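+# Editor's sketch: right censoring is signalled through the 'extra'
+# argument of vglm() (assuming here it is passed through unchanged).
+# n = 100; ystar = rexp(n, rate=0.5); ct = rep(3, n)
+# cdat = data.frame(y = pmin(ystar, ct))
+# fit = vglm(y ~ 1, cexpon, data=cdat,
+#            extra = list(rightcensored = ystar > ct))
+# coef(fit)   # loge(rate); compare with log(0.5)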
+
+
+
+cnormal1 = function(lmu="identity", lsd="loge", imethod=1, zero=2)
+{
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lsd) != "character" && mode(lsd) != "name")
+ lsd = as.character(substitute(lsd))
+ if(!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
+ stop("imethod must be 1 or 2")
+
+ new("vglmff",
+ blurb=c("Censored univariate normal\n\n",
+ "Links: ", namesof("mu", lmu, tag= TRUE), "; ",
+ namesof("sd", lsd, tag= TRUE), "\n",
+ "Conditional variance: sd^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = cbind(y)
+ if(ncol(y)>1) stop("the response must be a vector or a 1-column matrix")
+
+ if(!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len=n)
+ if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ if(any(extra$rightcensored & extra$leftcensored))
+ stop("some observations are both right and left censored!")
+
+ predictors.names = c(namesof("mu", .lmu, tag= FALSE),
+ namesof("sd", .lsd, tag= FALSE))
+ if(!length(etastart)) {
+ anyc = extra$leftcensored | extra$rightcensored
+ i11 = if( .imethod == 1) anyc else FALSE # can be all data
+ junk=if(is.R()) lm.wfit(x=cbind(x[!i11,]),y=y[!i11],w=w[!i11]) else
+ lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11],method="qr")
+ sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
+ etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd), length=n))
+ if(any(anyc)) etastart[anyc,1] = x[anyc,,drop=FALSE] %*% junk$coeff
+ }
+ }), list( .lmu=lmu, .lsd=lsd, .imethod=imethod ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ misc$link = c("mu"= .lmu, "sd"= .lsd)
+ misc$expected = TRUE
+ }), list( .lmu=lmu, .lsd=lsd ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cen0 = !cenL & !cenU # uncensored obsns
+ mum = eta2theta(eta[,1], .lmu)
+ sd = eta2theta(eta[,2], .lsd)
+ Lower = ifelse(cenL, y, -Inf)
+ Upper = ifelse(cenU, y, Inf)
+ ell1 = -log(sd[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sd[cen0])^2
+ ell2 = log(1 - pnorm((mum[cenL] - Lower[cenL])/sd[cenL]))
+ ell3 = log(1 - pnorm(( Upper[cenU] - mum[cenU])/sd[cenU]))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+ }, list( .lmu=lmu, .lsd=lsd ))),
+ vfamily=c("tobit"),
+ deriv=eval(substitute(expression({
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cen0 = !cenL & !cenU # uncensored obsns
+ Lower = ifelse(cenL, y, -Inf)
+ Upper = ifelse(cenU, y, Inf)
+ mum = eta2theta(eta[,1], .lmu)
+ sd = eta2theta(eta[,2], .lsd)
+ dl.dmu = (y-mum) / sd^2
+ dl.dsd = (((y-mum)/sd)^2 - 1) / sd
+ dmu.deta = dtheta.deta(mum, .lmu)
+ dsd.deta = dtheta.deta(sd, .lsd)
+ if(any(cenL)) {
+ mumL = mum - Lower
+ temp21L = mumL[cenL] / sd[cenL]
+ PhiL = pnorm(temp21L)
+ phiL = dnorm(temp21L)
+ fred21 = phiL / (1 - PhiL)
+ dl.dmu[cenL] = -fred21 / sd[cenL]
+ dl.dsd[cenL] = mumL[cenL] * fred21 / sd[cenL]^2
+ rm(fred21)
+ }
+ if(any(cenU)) {
+ mumU = Upper - mum
+ temp21U = mumU[cenU] / sd[cenU]
+ PhiU = pnorm(temp21U)
+ phiU = dnorm(temp21U)
+ fred21 = phiU / (1 - PhiU)
+ dl.dmu[cenU] = fred21 / sd[cenU] # Negated
+ dl.dsd[cenU] = mumU[cenU] * fred21 / sd[cenU]^2
+ rm(fred21)
+ }
+ w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
+ }), list( .lmu=lmu, .lsd=lsd ))),
+ weight=eval(substitute(expression({
+ A1 = 1 - pnorm((mum - Lower) / sd) # Lower
+ A3 = 1 - pnorm(( Upper - mum) / sd) # Upper
+ A2 = 1 - A1 - A3 # Middle; uncensored
+ wz = matrix(0, n, 3)
+ wz[,iam(1,1,M)] = A2 * 1 / sd^2 # ed2l.dmu2
+ wz[,iam(2,2,M)] = A2 * 2 / sd^2 # ed2l.dsd2
+ mumL = mum - Lower
+ temp21L = mumL / sd
+ PhiL = pnorm(temp21L)
+ phiL = dnorm(temp21L)
+ temp31L = ((1-PhiL) * sd)^2
+ wz.cenL11 = phiL * (phiL - (1-PhiL)*temp21L) / temp31L
+ wz.cenL22 = mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
+ mumL * phiL / sd) / (sd * temp31L)
+ wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) - temp21L*phiL) / temp31L
+ wz.cenL11[!is.finite(wz.cenL11)] = 0
+ wz.cenL22[!is.finite(wz.cenL22)] = 0
+ wz.cenL12[!is.finite(wz.cenL12)] = 0
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A1 * wz.cenL11
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A1 * wz.cenL22
+ wz[,iam(1,2,M)] = A1 * wz.cenL12
+ mumU = Upper - mum # often Inf
+ temp21U = mumU / sd # often Inf
+ PhiU = pnorm(temp21U) # often 1
+ phiU = dnorm(temp21U) # often 0
+ temp31U = ((1-PhiU) * sd)^2 # often 0
+ tmp8 = (1-PhiU)*temp21U
+ wzcenU11 = phiU * (phiU - tmp8) / temp31U
+ tmp9 = (1-PhiU) * (2 - temp21U^2)
+ wzcenU22 = mumU * phiU * (tmp9 + mumU * phiU / sd) / (sd * temp31U)
+ wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) - temp21U*phiU) / temp31U
+ wzcenU11[!is.finite(wzcenU11)] = 0 # Needed when Upper==Inf
+ wzcenU22[!is.finite(wzcenU22)] = 0 # Needed when Upper==Inf
+ wzcenU12[!is.finite(wzcenU12)] = 0 # Needed when Upper==Inf
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A3 * wzcenU11
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A3 * wzcenU22
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + A3 * wzcenU12
+ wz[,iam(1,1,M)] = w * wz[,iam(1,1,M)] * dmu.deta^2
+ wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dsd.deta^2
+ wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dmu.deta * dsd.deta
+ wz
+ }), list( .lmu=lmu, .lsd=lsd ))))
+}
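+
+# Editor's note (illustrative sketch, not upstream code): the censored
+# contributions in @loglikelihood above can be verified directly.  For a
+# left-censored point, log P(Y* <= Lower) = log(1 - pnorm((mu-Lower)/sd)),
+# which is the ell2 term.  mu0, sd0, L0 below are hypothetical values.
+if(FALSE) {
+    mu0 = 1; sd0 = 2; L0 = 0
+    direct  = pnorm(L0, mean=mu0, sd=sd0, log.p=TRUE)
+    ascoded = log(1 - pnorm((mu0 - L0)/sd0))  # as in ell2
+    all.equal(direct, ascoded)  # TRUE
+}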
+
+
+
+crayleigh = function(link="loge", expected=FALSE) {
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if(!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument \"expected\"")
+
+ new("vglmff",
+ blurb=c("Censored Rayleigh distribution",
+ "f(y) = y*exp(-0.5*(y/a)^2)/a^2, y>0, a>0\n",
+ "Link: ",
+ namesof("a", link), "\n", "\n",
+ "Mean: a * sqrt(pi / 2)"),
+ initialize=eval(substitute(expression({
+ if(length(extra$leftcensored)) stop("cannot handle left-censored data")
+ if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ predictors.names = namesof("a", .link, tag= FALSE)
+ if(!length(etastart)) {
+ a.init = (y+1/8) / sqrt(pi/2)
+ etastart = theta2eta(a.init, .link)
+ }
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ a * sqrt(pi/2)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c("a"= .link)
+ misc$expected = .expected
+ }), list( .link=link, .expected=expected ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ cen0 = !extra$rightcensored # uncensored obsns
+ cenU = extra$rightcensored
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[cen0]*(log(y[cen0]) - 2*log(a[cen0]) - 0.5*(y[cen0]/a[cen0])^2)) -
+ 0.5 * sum(w[cenU] * (y[cenU]/a[cenU])^2)
+ }, list( .link=link ))),
+ vfamily=c("crayleigh"),
+ deriv=eval(substitute(expression({
+ cen0 = !extra$rightcensored # uncensored obsns
+ cenU = extra$rightcensored
+ a = eta2theta(eta, .link)
+ dl.da = ((y/a)^2 - 2) / a
+ da.deta = dtheta.deta(a, .link)
+ dl.da[cenU] = y[cenU]^2 / a[cenU]^3
+ w * dl.da * da.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ ed2l.da2 = 4 / a^2
+ wz = da.deta^2 * ed2l.da2
+ if( .expected) {
+ ed2l.da2[cenU] = 6 / (a[cenU])^2
+ wz[cenU] = (da.deta[cenU])^2 * ed2l.da2[cenU]
+ } else {
+ d2l.da2 = 3 * (y[cenU])^2 / (a[cenU])^4
+ d2a.deta2 = d2theta.deta2(a[cenU], .link)
+ wz[cenU] = (da.deta[cenU])^2 * d2l.da2 - dl.da[cenU] * d2a.deta2
+ }
+ w * wz
+ }), list( .link=link, .expected=expected ))))
+}
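+
+# Editor's note (illustrative sketch, not upstream code): the right-censored
+# term in crayleigh()'s @loglikelihood is the Rayleigh log-survivor function
+# log S(y) = -0.5*(y/a)^2, checked here by integrating the density given in
+# the blurb.  a0 and y0 are hypothetical values.
+if(FALSE) {
+    a0 = 2; y0 = 3
+    S.num = integrate(function(y) y * exp(-0.5*(y/a0)^2) / a0^2,
+                      lower=y0, upper=Inf)$value
+    c(numeric=log(S.num), closedform=-0.5*(y0/a0)^2)  # agree
+}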
+
+
+weibull = function(lshape="logoff", lscale="loge",
+ eshape=if(lshape == "logoff") list(offset=-2) else list(),
+ escale=list(),
+ ishape=NULL, iscale=NULL,
+ imethod=1, zero=2)
+{
+
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
+ stop("argument \"imethod\" must be 1 or 2")
+ if(!is.list(eshape)) eshape = list()
+ if(!is.list(escale)) escale = list()
+
+ new("vglmff",
+ blurb=c("Censored Weibull distribution\n\n",
+ "Links: ",
+ namesof("shape", lshape, earg= eshape), ", ",
+ namesof("scale", lscale, earg= escale), "\n",
+ "Mean: scale * gamma(1 + 1/shape)\n",
+ "Variance: scale^2 * (gamma(1 + 2/shape) - gamma(1 + 1/shape)^2)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = cbind(y)
+ if(ncol(y)>1) stop("the response must be a vector or a 1-column matrix")
+
+ if(length(extra$leftcensored)) stop("left-censoring not allowed") else
+ extra$leftcensored = rep(FALSE, len=n)
+ if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ if(any(extra$rightcensored & extra$leftcensored))
+ stop("some observations are both right and left censored!")
+
+ predictors.names = c(namesof("shape", .lshape, earg= .eshape, tag=FALSE),
+ namesof("scale", .lscale, earg= .escale, tag=FALSE))
+ if(!length(.ishape) || !length(.iscale)) {
+ anyc = extra$leftcensored | extra$rightcensored
+ i11 = if( .imethod == 1) anyc else FALSE # can be all data
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.shape = if(length( .ishape)) .ishape else 1
+ xvec = log(-log(1-qvec))
+ fit0 = lsfit(x=xvec, y=log(quantile(y[!i11], qvec)))
+ }
+
+ if(!length(etastart)) {
+ shape = rep(if(length(.ishape)) .ishape else 1/fit0$coef["X"],len=n)
+ scale = rep(if(length(.iscale)) .iscale else
+ exp(fit0$coef["Intercept"]), len=n)
+ etastart =
+ cbind(theta2eta(shape, .lshape, earg= .eshape ),
+ theta2eta(scale, .lscale, earg= .escale ))
+ }
+ }), list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape,
+ .iscale=iscale, .ishape=ishape, .imethod=imethod ) )),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta[,1], .lshape, earg= .eshape )
+ scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ scale * gamma(1+1/shape)
+ }, list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ) )),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .lshape, scale= .lscale)
+ misc$earg= list(shape= .eshape, scale= .escale)
+ misc$expected = TRUE # all(cen0)
+ }), list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ) )),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cen0 = !cenL & !cenU # uncensored obsns
+ shape = eta2theta(eta[,1], .lshape, earg= .eshape )
+ scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ ell1 = (log(shape[cen0]) - log(scale[cen0]) + (shape[cen0]-1) *
+ log(y[cen0]/scale[cen0]) - (y[cen0] / scale[cen0])^shape[cen0])
+ ell3 = -((y[cenU] / scale[cenU])^shape[cenU])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[cen0] * ell1) + sum(w[cenU] * ell3)
+ }, list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ) )),
+ vfamily=c("cweibull"),
+ deriv=eval(substitute(expression({
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cen0 = !cenL & !cenU # uncensored obsns
+ shape = eta2theta(eta[,1], .lshape, earg= .eshape )
+ scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ dl.dshape = 1/shape + log(y/scale) - (y/scale)^shape * log(y/scale)
+ dl.dscale = (shape/scale) * (-1 + (y/scale)^shape)
+ dshape.deta = dtheta.deta(shape, .lshape, earg= .eshape )
+ dscale.deta = dtheta.deta(scale, .lscale, earg= .escale )
+ if(any(cenU)) {
+ fred21 = (y[cenU] / scale[cenU])
+ temp21 = fred21^shape[cenU]
+ dl.dshape[cenU] = -temp21 * log(fred21)
+ dl.dscale[cenU] = temp21 * shape[cenU] / scale[cenU]
+ }
+ w * cbind( dl.dshape * dshape.deta, dl.dscale * dscale.deta )
+ }), list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ) )),
+ weight=eval(substitute(expression({
+ Euler = 0.57721566490153286
+ if(any(cen0)) {
+ if(any(shape[cen0] <= 2))
+ cat("warning: Fisher info matrices invalid\n")
+ }
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ ed2l.dshape = (6*(Euler-1)^2 + pi^2) / (6*shape^2)
+ ed2l.dscale = (shape/scale)^2
+ ed2l.dshapescale = (Euler-1)/scale
+ wz[,iam(1,1,M)] = ed2l.dshape * dshape.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dshapescale * dscale.deta * dshape.deta
+ if(any(cenU)) {
+ Integrand11 = function(x) exp(-x) * (1 + log(x))^2
+ Integrand12 = function(x) exp(-x) * (1 + log(x))
+ ptilde = 1 - exp(-(y/scale)^shape)
+ index2 = (1:n)[cenU]
+ ed2l.dshape2 = ed2l.dscale2 = ed2l.dshapescale =
+ rep(as.numeric(NA), len=sum(cenU))
+ icount = 1
+ for(iii in index2) {
+ integral11 = integrate(Integrand11, low=0, upp=-log(1-ptilde[iii]))
+ if(integral11$message != "OK")
+ warning("problem numerically integrating elt (1,1)")
+ integral12 = integrate(Integrand12, low=0, upp=-log(1-ptilde[iii]))
+ if(integral12$message != "OK")
+ warning("problem numerically integrating elt (1,2)")
+ ed2l.dshape2[icount] = integral11$value / shape[iii]^2
+ ed2l.dshapescale[icount] = integral12$value * scale[iii]
+ icount = icount + 1
+ }
+ ed2l.dscale2 = (shape[cenU]/scale[cenU])^2 * ptilde[cenU]
+ wz[cenU,iam(1,1,M)] = ed2l.dshape2 * dshape.deta[cenU]^2
+ wz[cenU,iam(2,2,M)] = ed2l.dscale2 * dscale.deta[cenU]^2
+ wz[cenU,iam(1,2,M)] = ed2l.dshapescale *
+ dshape.deta[cenU] * dscale.deta[cenU]
+ }
+ wz = w * wz
+ wz
+ }), list( .eshape=eshape ))))
+}
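+
+# Editor's note (illustrative sketch, not upstream code): the lsfit()
+# initialisation above uses the fact that for a Weibull response,
+# log(y_q) = log(scale) + (1/shape) * log(-log(1-q)), so the fitted slope
+# and intercept recover 1/shape and log(scale).  The seed is hypothetical.
+if(FALSE) {
+    set.seed(1)
+    yy = rweibull(1000, shape=2, scale=3)
+    qvec = c(.25, .5, .75)
+    fit0 = lsfit(x=log(-log(1-qvec)), y=log(quantile(yy, qvec)))
+    c(shape=1/fit0$coef["X"], scale=exp(fit0$coef["Intercept"]))  # near (2, 3)
+}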
+
+
diff --git a/R/family.extremes.q b/R/family.extremes.q
new file mode 100644
index 0000000..39a0e4c
--- /dev/null
+++ b/R/family.extremes.q
@@ -0,0 +1,2256 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+gev <- function(llocation="identity",
+ lscale="loge",
+ lshape="logoff",
+ elocation = list(),
+ escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
+ percentiles=c(95,99),
+ iscale=NULL, ishape=NULL,
+ method.init=1, gshape=c(-0.45, 0.45), tshape0=0.001,
+ zero=3)
+{
+
+
+ mean = FALSE
+ if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ stop("bad input for argument \"iscale\"")
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(!mean && length(percentiles) &&
+ (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ stop("bad input for argument \"percentiles\"")
+ if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ method.init > 2.5)
+ stop("argument \"method.init\" must be 1 or 2")
+ if(length(ishape) && !is.Numeric(ishape))
+ stop("bad input for argument \"ishape\"")
+ if(!is.Numeric(tshape0, allow=1, posit=TRUE) || tshape0 > 0.1)
+ stop("bad input for argument \"tshape0\"")
+ if(!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
+ stop("bad input for argument \"gshape\"")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.list(escale)) escale = list()
+ if(!is.list(eshape)) eshape = list()
+
+ new("vglmff",
+ blurb=c("Generalized Extreme Value Distribution\n",
+ "Links: ",
+ namesof("location", link=llocation, earg= elocation), ", ",
+ namesof("scale", link=lscale, earg= escale), ", ",
+ namesof("shape", link=lshape, earg= eshape)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", .llocation, earg= .elocation, short=TRUE),
+ namesof("scale", .lscale, earg= .escale, short=TRUE),
+ namesof("shape", .lshape, earg= .eshape, short=TRUE))
+ y = as.matrix(y)
+ if(ncol(y) > 1)
+ y = -t(apply(-y, 1, sort, na.last=TRUE))
+
+ sum1 = function(mat) {
+ if(!is.matrix(mat)) mat = as.matrix(mat)
+ apply(mat, 1, sum, na.rm=TRUE)
+ }
+ r.vec = sum1(!is.na(y))
+ if(any(r.vec == 0))
+ stop("A row contains all missing values")
+
+ extra$percentiles = .percentiles
+ if(!length(etastart)) {
+ siginit= if(length( .iscale)) rep( .iscale, len=nrow(y)) else NULL
+ xiinit = if(length( .ishape)) rep( .ishape, len=nrow(y)) else NULL
+ eshape = .eshape
+ if( .lshape=="elogit" && length(xiinit) &&
+ (any(xiinit <= eshape$min | xiinit >= eshape$max)))
+ stop("bad input for argument \"eshape\"")
+ if( .method.init == 1) {
+ nvector = 4:10 # Arbitrary; could be made an argument
+ ynvector = quantile(y[,1], probs = 1-1/nvector)
+ objectiveFunction = -Inf # Actually the log-likelihood
+ est.sigma = !length(siginit)
+ gshape = .gshape
+ temp234 = if(length(xiinit)) xiinit[1] else
+ seq(gshape[1], gshape[2], len=12)
+ for(xi.try in temp234) {
+ xvec = if(abs(xi.try) < .tshape0) log(nvector) else
+ (nvector^xi.try - 1) / xi.try
+ fit0 = lsfit(x=xvec, y=ynvector, intercept=TRUE)
+ if(est.sigma) {
+ sigmaTry = rep(fit0$coef["X"], len=nrow(y))
+ } else {
+ sigmaTry = siginit
+ }
+ muTry = rep(fit0$coef["Intercept"], len=nrow(y))
+ llTry = egev()@loglikelihood(mu=NULL,y=y[,1],w=w,
+ residuals=FALSE, extra=list(giveWarning=FALSE),
+ eta=cbind(theta2eta(muTry, .llocation, earg= .elocation),
+ theta2eta(sigmaTry, .lscale, earg= .escale),
+ theta2eta(xi.try, link= .lshape,
+ earg= .eshape)))
+ if(llTry >= objectiveFunction) {
+ if(est.sigma)
+ siginit = sigmaTry
+ muinit = rep(muTry, len=nrow(y))
+ objectiveFunction = llTry
+ bestxi = xi.try
+ }
+ }
+ if(!length(xiinit))
+ xiinit = rep(bestxi, len=nrow(y))
+ } else {
+ xiinit = rep(0.05, len=nrow(y))
+ if(!length(siginit))
+ siginit = rep(sqrt(6 * var(y[,1]))/pi, len=nrow(y))
+ Euler = 0.57721566490153286
+ muinit = rep(median(y[,1]) - Euler*siginit, len=nrow(y))
+ }
+
+ bad = ((1 + xiinit*(y-muinit)/siginit) <= 0)
+ if(fred <- sum(sum1(bad))) {
+ warning(paste(fred, "observations violating boundary",
+ "constraints while initializing. Taking corrective action."))
+ xiinit[bad] = ifelse(y[bad] > muinit[bad], 0.1, -0.1)
+ }
+
+ etastart = cbind(theta2eta(muinit, .llocation, earg= .elocation),
+ theta2eta(siginit, .lscale, earg= .escale),
+ theta2eta(xiinit, .lshape, earg= .eshape))
+ }
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .percentiles=percentiles,
+ .elocation = elocation, .escale = escale,
+ .eshape= eshape, .tshape0=tshape0,
+ .method.init=method.init,
+ .iscale=iscale, .ishape=ishape, .gshape=gshape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale)
+ xi = eta2theta(eta[,3], .lshape, earg= .eshape)
+ iszero = (abs(xi) < .tshape0)
+ cent = extra$percentiles
+ lp = length(cent)
+ fv = matrix(as.numeric(NA), nrow(eta), lp)
+ if(lp) {
+ for(i in 1:lp) {
+ yp = -log(cent[i]/100)
+ fv[!iszero,i] = loc[!iszero] - sigma[!iszero] *
+ (1 - yp^(-xi[!iszero])) / xi[!iszero]
+ fv[iszero,i] = loc[iszero] - sigma[iszero] * log(yp)
+ }
+ dimnames(fv) = list(dimnames(eta)[[1]],
+ paste(as.character(cent), "%", sep=""))
+ } else {
+ Euler = 0.57721566490153286
+ fv = loc + sigma * Euler # When xi=0, is Gumbel
+ fv[!iszero] = loc[!iszero] + sigma[!iszero] *
+ (gamma(1-xi[!iszero])-1) / xi[!iszero]
+ fv[xi >= 1] = NA # Mean exists only if xi < 1.
+ }
+ fv
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .eshape= eshape, .tshape0=tshape0 ))),
+ last=eval(substitute(expression({
+ misc$links = c(location = .llocation, scale = .lscale, shape =.lshape)
+ misc$true.mu = !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles = .percentiles
+ misc$earg= list(location= .elocation, scale= .escale, shape= .eshape)
+ misc$expected = TRUE
+ misc$tshape0 = .tshape0
+ if(ncol(y)==1)
+ y = as.vector(y)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0, .percentiles=percentiles ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale)
+ xi = eta2theta(eta[,3], .lshape, earg= .eshape)
+ iszero = (abs(xi) < .tshape0)
+ sum1 = function(mat) {
+ if(!is.matrix(mat)) mat = as.matrix(mat)
+ apply(mat, 1, sum, na.rm=TRUE)
+ }
+ zedd = (y-mmu) / sigma
+ r.vec = sum1(!is.na(y))
+ A = 1 + xi * (y-mmu)/sigma
+ ii = 1:nrow(eta)
+ A1 = A[cbind(ii, r.vec)]
+ mytolerance = 0 # .Machine$double.eps
+ if(any(bad <- (A1 <= mytolerance), na.rm=TRUE)) {
+ cat("There are",sum(bad),"range violations in @loglikelihood\n")
+ if(exists("flush.console")) flush.console()
+ }
+ igev = !iszero & !bad
+ igum = iszero & !bad
+ pow = 1 + 1/xi[igev]
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(bad) * (-1.0e10) +
+ sum(w[igum] * (-r.vec[igum]*log(sigma[igum]) -
+ exp(-zedd[igum,r.vec]) - sum1(zedd))) +
+ sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
+ pow*sum1(log(A[igev])) -
+ A1[igev]^(-1/xi[igev])))
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0 ))),
+ vfamily=c("gev", "vextremes"),
+ deriv=eval(substitute(expression({
+ sum1 = function(mat) {
+ if(!is.matrix(mat)) mat = as.matrix(mat)
+ apply(mat, 1, sum, na.rm=TRUE)
+ }
+ r.vec = sum1(!is.na(y))
+ mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale)
+ xi = eta2theta(eta[,3], .lshape, earg= .eshape)
+ iszero = (abs(xi) < .tshape0)
+ ii = 1:nrow(eta)
+ zedd = (y-mmu) / sigma
+ A = 1 + xi * zedd
+ dA.dxi = zedd # matrix
+ dA.dmu = -xi/sigma # vector
+ dA.dsigma = -xi*zedd/sigma # matrix
+ pow = 1 + 1/xi
+ A1 = A[cbind(ii, r.vec)]
+
+ AAr1 = dA.dmu/(xi * A1^pow) - pow * sum1(dA.dmu/A)
+ AAr2 = dA.dsigma[cbind(ii,r.vec)] / (xi * A1^pow) -
+ pow * sum1(dA.dsigma/A)
+ AAr3 = 1/(xi * A1^pow) - pow * sum1(dA.dsigma/A)
+ dl.dmu = AAr1
+ dl.dsi = AAr2 - r.vec/sigma
+ dl.dxi = sum1(log(A))/xi^2 - pow * sum1(dA.dxi/A) - (log(A1) /
+ xi^2 - dA.dxi[cbind(ii,r.vec)] /(xi*A1)) * A1^(-1/xi)
+
+ if(any(iszero)) {
+ zorro = c(zedd[cbind(1:n,r.vec)])
+ zorro = zorro[iszero]
+ ezedd = exp(-zorro)
+ dl.dmu[iszero] = (1-ezedd) / sigma[iszero]
+ dl.dsi[iszero] = (zorro * (1-ezedd) - 1) / sigma[iszero]
+ dl.dxi[iszero] = zorro * ((1 - ezedd) * zorro / 2 - 1)
+ }
+ dmu.deta = dtheta.deta(mmu, .llocation, earg= .elocation)
+ dsi.deta = dtheta.deta(sigma, .lscale, earg= .escale)
+ dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape)
+ w * cbind(dl.dmu * dmu.deta, dl.dsi * dsi.deta, dl.dxi * dxi.deta)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0 ))),
+ weight=eval(substitute(expression({
+ kay = -xi
+ dd = digamma(r.vec-kay+1)
+ ddd = digamma(r.vec+1) # Unnecessarily evaluated at each iteration
+ temp13 = -kay * dd + (kay^2 - kay + 1) / (1-kay)
+ temp33 = 1 - 2 * kay * ddd + kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
+ temp23 = -kay * dd + (1+(1-kay)^2) / (1-kay)
+ GR.gev = function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
+ tmp2 = (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
+ tmp1 = (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
+ k0 = (1-2*kay)
+ k1 = k0 * kay
+ k2 = k1 * kay
+ k3 = k2 * kay # kay^3 * (1-2*kay)
+ wz = matrix(as.numeric(NA), n, 6)
+ wz[,iam(1,1,M)] = tmp2 / (sigma^2 * k0)
+ wz[,iam(1,2,M)] = (tmp2 - tmp1) / (sigma^2 * k1)
+ wz[,iam(1,3,M)] = (tmp1 * temp13 - tmp2) / (sigma * k2)
+ wz[,iam(2,2,M)] = (r.vec*k0 - 2*tmp1 + tmp2) / (sigma^2 * k2)
+ wz[,iam(2,3,M)] = (r.vec*k1*ddd + tmp1 *
+ temp23 - tmp2 - r.vec*k0) / (sigma * k3)
+ wz[,iam(3,3,M)] = (2*tmp1*(-temp13) + tmp2 + r.vec*k0*temp33)/(k3*kay)
+
+ if(any(iszero)) {
+ if(ncol(y) > 1)
+ stop("cannot handle xi==0 with a multivariate response")
+
+ Euler = 0.57721566490153286
+ wz[iszero,iam(2,2,M)] = (pi^2/6 + (1-Euler)^2) / sigma^2
+ wz[iszero,iam(3,3,M)] = 2.4236 # Solved numerically zz
+ wz[iszero,iam(1,2,M)] = (digamma(2) + 2*(Euler-1)) / sigma^2
+ wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
+ (digamma(1)/2+1))/sigma
+ wz[iszero,iam(2,3,M)] = (-dgammadx(2,3)/6 + dgammadx(1,1) +
+ 2*dgammadx(1,2) + 2*dgammadx(1,3)/3) / sigma
+
+ if(FALSE) {
+ wz[,iam(1,2,M)] = 2 * r.vec / sigma^2
+ wz[,iam(2,2,M)] = -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
+ (4 * dgammadx(r.vec+1, der=1) -
+ 3 * dgammadx(r.vec+1, der=2)) / gamma(r.vec) # Not checked
+ }
+ }
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dmu.deta^2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsi.deta^2
+ wz[,iam(3,3,M)] = wz[,iam(3,3,M)] * dxi.deta^2
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] * dmu.deta * dsi.deta
+ wz[,iam(1,3,M)] = wz[,iam(1,3,M)] * dmu.deta * (-dxi.deta)
+ wz[,iam(2,3,M)] = wz[,iam(2,3,M)] * dsi.deta * (-dxi.deta)
+ w * wz
+ }), list( .eshape = eshape ))))
+}
+
+
+
+
+
+dgammadx = function(x, deriv.arg=1) {
+ if(deriv.arg==0) {
+ gamma(x)
+ } else if(deriv.arg == 1) {
+ digamma(x) * gamma(x)
+ } else if(deriv.arg == 2) {
+ gamma(x) * (trigamma(x) + digamma(x)^2)
+ } else if(deriv.arg == 3) {
+ gamma(x) * (psigamma(x, der=2) + 2 * digamma(x) * trigamma(x)) +
+ dgammadx(x, der=1) * (trigamma(x) + digamma(x)^2)
+ } else if(deriv.arg == 4) {
+ dgammadx(x, der=2) * (trigamma(x) + digamma(x)^2) +
+ 2 * dgammadx(x, der=1) * (psigamma(x, der=2) + 2*digamma(x) * trigamma(x)) +
+ gamma(x) * (psigamma(x, der=3) + 2*trigamma(x)^2 +
+ 2 * digamma(x) * psigamma(x, der=2))
+ } else stop("cannot handle deriv>4")
+}
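+
+# Editor's note (illustrative sketch, not upstream code): dgammadx() can be
+# checked against a central finite difference; x0 and h are hypothetical.
+if(FALSE) {
+    x0 = 2.5; h = 1e-5
+    c(numeric  = (gamma(x0+h) - gamma(x0-h)) / (2*h),
+      analytic = dgammadx(x0, deriv.arg=1))  # nearly identical
+}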
+
+
+
+egev <- function(llocation="identity",
+ lscale="loge",
+ lshape="logoff",
+ elocation = list(),
+ escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
+ percentiles=c(95,99),
+ iscale=NULL, ishape=NULL,
+ method.init=1, gshape=c(-0.45, 0.45),
+ tshape0=0.001,
+ zero=3)
+{
+ if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ stop("bad input for argument \"iscale\"")
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation <- as.character(substitute(llocation))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape <- as.character(substitute(lshape))
+ if(!is.Numeric(gshape, allow=2) || gshape[1] >= gshape[2])
+ stop("bad input for argument \"gshape\"")
+ if(length(percentiles) &&
+ (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ stop("bad input for argument \"percentiles\"")
+ if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ method.init > 2.5)
+ stop("argument \"method.init\" must be 1 or 2")
+ if(length(ishape) && !is.Numeric(ishape))
+ stop("bad input for argument \"ishape\"")
+ if(!is.Numeric(tshape0, allow=1, posit=TRUE) || tshape0 > 0.1)
+ stop("bad input for argument \"tshape0\"")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.list(escale)) escale = list()
+ if(!is.list(eshape)) eshape = list()
+
+ new("vglmff",
+ blurb=c("Generalized Extreme Value Distribution\n",
+ "Links: ",
+ namesof("location", link=llocation, earg= elocation), ", ",
+ namesof("scale", link=lscale, earg= escale), ", ",
+ namesof("shape", link=lshape, earg= eshape)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names =
+ c(namesof("location", .llocation, earg= .elocation, short=TRUE),
+ namesof("scale", .lscale, earg= .escale, short=TRUE),
+ namesof("shape", .lshape, earg= .eshape, short=TRUE))
+ if(ncol(as.matrix(y)) != 1)
+ stop("response must be a vector or one-column matrix")
+ if(!length(etastart)) {
+ siginit= if(length( .iscale)) rep( .iscale, len=length(y)) else NULL
+ xiinit = if(length( .ishape)) rep( .ishape, len=length(y)) else NULL
+ eshape = .eshape
+ if( .lshape == "elogit" && length(xiinit) &&
+ (any(xiinit <= eshape$min | xiinit >= eshape$max)))
+ stop("bad input for argument \"eshape\"")
+ if( .method.init == 1) {
+ nvector = 4:10 # Arbitrary; could be made an argument
+ ynvector = quantile(y, probs = 1-1/nvector)
+ objectiveFunction = -Inf # Actually the log-likelihood
+ est.sigma = !length(siginit)
+ gshape = .gshape
+ temp234 = if(length(xiinit)) xiinit[1] else
+ seq(gshape[1], gshape[2], len=12)
+ for(xi.try in temp234) {
+ xvec = if(abs(xi.try) < .tshape0) log(nvector) else
+ (nvector^xi.try - 1) / xi.try
+ fit0 = lsfit(x=xvec, y=ynvector, intercept=TRUE)
+ if(est.sigma) {
+ sigmaTry = rep(fit0$coef["X"], len=length(y))
+ } else {
+ sigmaTry = siginit
+ }
+ muTry = rep(fit0$coef["Intercept"], len=length(y))
+ llTry = egev()@loglikelihood(mu=NULL,y=y,w=w,
+ residuals=FALSE, extra=list(giveWarning=FALSE),
+ eta=cbind(theta2eta(muTry, .llocation, earg= .elocation),
+ theta2eta(sigmaTry, .lscale, earg= .escale),
+ theta2eta(xi.try, .lshape, earg= .eshape)))
+ if(llTry >= objectiveFunction) {
+ if(est.sigma)
+ siginit = sigmaTry
+ muinit = rep(muTry, len=length(y))
+ objectiveFunction = llTry
+ bestxi = xi.try
+ }
+ }
+ if(!length(xiinit))
+ xiinit = rep(bestxi, len=length(y))
+
+ } else {
+ xiinit = rep(if(length(xiinit)) xiinit else 0.05, len=length(y))
+ if(!length(siginit))
+ siginit = rep(sqrt(6*var(y))/pi, len=length(y))
+ Euler = 0.57721566490153286
+ muinit <- rep(median(y) - Euler * siginit, len=length(y))
+ }
+ bad <- (1 + xiinit*(y-muinit)/siginit <= 0)
+ if(fred <- sum(bad, na.rm=TRUE)) {
+ warning(paste(fred, "observations violating boundary",
+ "constraints while initializing. Taking corrective action."))
+ xiinit[bad] = ifelse(y[bad] > muinit[bad], 0.01, -0.01)
+ }
+
+ extra$percentiles = .percentiles
+
+ etastart = cbind(theta2eta(muinit, .llocation, earg= .elocation),
+ theta2eta(siginit, .lscale, earg= .escale),
+ theta2eta(xiinit, .lshape, earg= .eshape))
+ }
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .percentiles=percentiles, .tshape0=tshape0,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .method.init=method.init,
+ .iscale=iscale, .ishape=ishape, .gshape=gshape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ loc <- eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma <- eta2theta(eta[,2], .lscale, earg= .escale)
+ xi <- eta2theta(eta[,3], .lshape, earg= .eshape)
+ iszero <- (abs(xi) < .tshape0)
+ cent = extra$percentiles
+ lp <- length(cent)
+ fv <- matrix(as.numeric(NA), nrow(eta), lp)
+ if(lp) {
+ for(i in 1:lp) {
+ yp = -log(cent[i]/100)
+ fv[!iszero,i] = loc[!iszero] - sigma[!iszero] *
+ (1 - yp^(-xi[!iszero])) / xi[!iszero]
+ fv[iszero,i] = loc[iszero] - sigma[iszero] * log(yp)
+ }
+ dimnames(fv) = list(dimnames(eta)[[1]],
+ paste(as.character(cent), "%", sep=""))
+ } else {
+ Euler = 0.57721566490153286
+ fv = loc + sigma * Euler # When xi=0, is Gumbel
+ fv[!iszero] = loc[!iszero] + sigma[!iszero] *
+ (gamma(1-xi[!iszero])-1) / xi[!iszero]
+ fv[xi >= 1] = NA # Mean exists only if xi < 1.
+ }
+ fv
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0 ))),
+ last=eval(substitute(expression({
+ misc$links <- c(location = .llocation, scale = .lscale, shape = .lshape)
+ misc$true.mu = !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles <- .percentiles
+ misc$earg= list(location= .elocation, scale= .escale, shape= .eshape)
+ misc$tshape0 = .tshape0
+ misc$expected = TRUE
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0, .percentiles=percentiles ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ giveWarning = if(length(extra$giveWarning)) extra$giveWarning else
+ FALSE
+ mmu <- eta2theta(eta[,1], .llocation, earg= .elocation )
+ sigma <- eta2theta(eta[,2], .lscale, earg= .escale )
+ xi <- eta2theta(eta[,3], .lshape, earg= .eshape )
+ zedd <- (y-mmu) / sigma
+ A <- 1 + xi * zedd
+ iszero <- (abs(xi) < .tshape0)
+ mytolerance = 0 # .Machine$double.eps
+ bad <- (A<=mytolerance) # Range violation
+ if(any(bad) && giveWarning) {
+ cat("There are",sum(bad),"range violations in @loglikelihood\n")
+ if(exists("flush.console")) flush.console()
+ }
+ igev = !iszero & !bad
+ igum = iszero & !bad
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(bad) * (-1.0e10) +
+ sum(w[igum] * (-log(sigma[igum]) - zedd[igum] - exp(-zedd[igum]))) +
+ sum(w[igev] * (-log(sigma[igev]) - (1 + 1/xi[igev])*log(A[igev]) -
+ A[igev]^(-1/xi[igev])))
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0 ))),
+ vfamily=c("egev", "vextremes"),
+ deriv=eval(substitute(expression({
+ mmu = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ xi = eta2theta(eta[,3], .lshape, earg= .eshape)
+ iszero <- (abs(xi) < .tshape0)
+ zedd = (y-mmu) / sigma
+ A = 1 + xi * zedd
+ dA.dxi = zedd
+ dA.dmu = -xi / sigma
+ dA.dsigma = -xi * zedd / sigma
+ pow = 1 + 1/xi
+ if(any(bad <- A<=0, na.rm=TRUE)) stop(paste(sum(bad, na.rm=TRUE),
+ "observations violating boundary constraints in @deriv"))
+ AA = 1/(xi*A^pow)- pow/A
+ dl.dmu = dA.dmu * AA
+ dl.dsi = dA.dsigma * AA - 1/sigma
+ dl.dxi = log(A)/xi^2 - pow * dA.dxi / A -
+ (log(A)/xi^2 - dA.dxi /(xi*A)) * A^(-1/xi)
+ if(any(iszero)) {
+ ezedd = exp(-zedd[iszero])
+ dl.dmu[iszero] = (1-ezedd) / sigma[iszero]
+ dl.dsi[iszero] = (zedd[iszero] * (1-ezedd) - 1) / sigma[iszero]
+ dl.dxi[iszero] = zedd[iszero] * ((1 - ezedd) * zedd[iszero] / 2 - 1)
+ }
+ dmu.deta = dtheta.deta(mmu, .llocation, earg= .elocation)
+ dsi.deta = dtheta.deta(sigma, .lscale, earg= .escale )
+ dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape)
+ w * cbind(dl.dmu * dmu.deta, dl.dsi * dsi.deta, dl.dxi*dxi.deta)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .elocation = elocation, .escale = escale, .eshape= eshape,
+ .tshape0=tshape0 ))),
+ weight=eval(substitute(expression({
+ bad <- A <= 0
+ if(any(bad, na.rm = TRUE)) stop(paste(sum(bad, na.rm = TRUE),
+ "observations violating boundary constraints in @weight"))
+ kay = -xi # for the formulae
+ temp100 = gamma(2-kay)
+ pp = (1-kay)^2 * gamma(1-2*kay)
+ qq = temp100 * (digamma(1-kay) - (1-kay)/kay)
+ wz = matrix(as.numeric(NA), n, 6)
+ wz[,iam(1,1,M)] = pp / sigma^2
+ wz[,iam(2,2,M)] = (1-2*temp100 + pp) / (sigma * kay)^2
+ Euler = 0.57721566490153286
+ wz[,iam(3,3,M)] = (pi^2 / 6 + (1-Euler-1/kay)^2 +
+ (2*qq + pp/kay)/kay) / kay^2
+ wz[,iam(1,2,M)] = (pp - temp100) / (sigma^2 * kay)
+ wz[,iam(1,3,M)] = -(qq + pp/kay) / (sigma * kay)
+ wz[,iam(2,3,M)] = (1-Euler - (1-temp100)/kay - qq -
+ pp/kay) / (sigma * kay^2)
+
+ if(any(iszero)) {
+ wz[iszero,iam(2,2,M)] = (pi^2/6 + (1-Euler)^2) / sigma^2
+ wz[iszero,iam(3,3,M)] = 2.4236 # Solved numerically zz
+ wz[iszero,iam(1,2,M)] = (digamma(2) + 2*(Euler-1)) / sigma^2
+ wz[iszero,iam(1,3,M)]= -(trigamma(1)/2 + digamma(1)*
+ (digamma(1)/2+1))/sigma
+ wz[iszero,iam(2,3,M)] = (-dgammadx(2,3)/6 + dgammadx(1,1) +
+ 2*dgammadx(1,2) + 2*dgammadx(1,3)/3) / sigma
+ }
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dmu.deta^2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsi.deta^2
+ wz[,iam(3,3,M)] = wz[,iam(3,3,M)] * dxi.deta^2
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] * dmu.deta * dsi.deta
+ wz[,iam(1,3,M)] = wz[,iam(1,3,M)] * dmu.deta * (-dxi.deta)
+ wz[,iam(2,3,M)] = wz[,iam(2,3,M)] * dsi.deta * (-dxi.deta)
+ w * wz
+ }), list( .eshape= eshape, .tshape0=tshape0 ))))
+}
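+
+# Editor's sketch (hedged, not upstream code): an intercept-only egev() fit
+# to simulated maxima via vglm(); exact arguments may differ across versions,
+# and the seed is hypothetical.  rgev() is defined later in this file.
+if(FALSE) {
+    set.seed(123)
+    ymax = rgev(300, location=5, scale=2, shape=0.1)
+    fit = vglm(ymax ~ 1, egev())
+    coef(fit, matrix=TRUE)  # location, scale, shape on their link scales
+}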
+
+
+
+
+
+
+rgumbel = function(n, location=0, scale=1) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ location - scale * log(-log(runif(n)))
+}
+
+dgumbel = function(x, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ temp = exp(-(x-location) / scale)
+ temp * exp(-temp) / scale
+}
+
+qgumbel = function(p, location=0, scale=1) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ location - scale * log(-log(p))
+}
+
+pgumbel = function(q, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ exp(-exp(-(q-location) / scale))
+}
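+
+# Editor's note (illustrative sketch, not upstream code): the [dpqr]gumbel
+# functions above are mutually consistent; qgumbel() inverts pgumbel().
+if(FALSE) {
+    set.seed(1)  # hypothetical seed
+    x = rgumbel(5, location=1, scale=2)
+    all.equal(qgumbel(pgumbel(x, 1, 2), 1, 2), x)  # TRUE
+}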
+
+
+gumbel <- function(llocation="identity",
+ lscale="loge",
+ elocation = list(),
+ escale = list(),
+ iscale=NULL,
+ R=NA, percentiles=c(95,99),
+ mpv=FALSE, zero=NULL)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.logical(mpv) || length(mpv) != 1)
+ stop("bad input for argument \"mpv\"")
+ if(length(percentiles) &&
+ (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ stop("bad input for argument \"percentiles\"")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ stop("bad input for argument \"iscale\"")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.list(escale)) escale = list()
+
+ new("vglmff",
+ blurb=c("Gumbel Distribution for Extreme Value Regression\n",
+ "Links: ",
+ namesof("location", link=llocation, earg= elocation), ", ",
+ namesof("scale", link=lscale, earg= escale )),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", .llocation, earg= .elocation, short=TRUE),
+ namesof("scale", .lscale, earg= .escale , short=TRUE))
+ y = as.matrix(y)
+ if(ncol(y) > 1)
+ y = if(is.R()) -t(apply(-y,1,sort, na.last = TRUE)) else {
+ y[is.na(y)] = -Inf
+ y = -t(apply(-y,1,sort, na.last = TRUE))
+ y[y == -Inf] = NA
+ as.matrix(y)
+ }
+ r.vec = as.vector((!is.na(y)) %*% rep(1,ncol(y))) # r_i vector
+ if(any(r.vec==0))
+ stop("There is at least one row of the response containing all NAs")
+ if(ncol(y) > 1) {
+ yiri = y[cbind(1:nrow(y), r.vec)]
+ sc.init = if(is.Numeric( .iscale, posit=TRUE))
+ .iscale else {3*(apply(y, 1, mean, na.rm=TRUE) - yiri)}
+ sc.init = rep(sc.init, length=nrow(y))
+ sc.init[sc.init <= 0.0001] = 1 # Used to be .iscale
+ loc.init = yiri + sc.init * log(r.vec)
+ } else {
+ sc.init = if(is.Numeric( .iscale, posit=TRUE))
+ .iscale else 1.1 * (0.01+sqrt(var(y)*6)) / pi
+ sc.init = rep(sc.init, len=n)
+ Euler = 0.57721566490153286
+ loc.init = (y - sc.init * Euler)
+ loc.init[loc.init <= 0] = min(y)
+ }
+
+ extra$R = .R
+ extra$mpv = .mpv
+ extra$percentiles = .percentiles
+
+ if(!length(etastart))
+ etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation),
+ theta2eta(sc.init, .lscale, earg= .escale ))
+ }), list( .llocation=llocation, .lscale=lscale, .iscale=iscale,
+ .elocation = elocation, .escale = escale,
+ .R=R, .mpv=mpv, .percentiles=percentiles ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale ) # sigma
+ Percentiles = extra$percentiles
+ lp = length(Percentiles) # may be 0
+ if(lp) {
+ mpv = extra$mpv
+ mu = matrix(as.numeric(NA), nrow(eta), lp + mpv) # lp could be 0
+ Rvec = extra$R
+ if(1 <= lp)
+ for(i in 1:lp) {
+ ci = if(is.Numeric(Rvec)) Rvec * (1 - Percentiles[i] / 100) else
+ -log( Percentiles[i] / 100)
+ mu[,i] = loc - sigma * log(ci)
+ }
+ if(mpv)
+ mu[,ncol(mu)] = loc - sigma * log(log(2))
+ dmn2 = if(lp>=1) paste(as.character(Percentiles), "%", sep="") else NULL
+ if(mpv)
+ dmn2 = c(dmn2, "MPV")
+ dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
+ } else {
+ Euler = 0.57721566490153286
+ mu = loc + sigma * Euler
+ }
+ mu
+ }, list( .llocation=llocation, .lscale=lscale,
+ .elocation = elocation, .escale = escale ))),
+ last=eval(substitute(expression({
+ misc$R = .R
+ misc$links = c(location = .llocation, scale = .lscale)
+ misc$earg= list(location= .elocation, scale= .escale )
+ misc$mpv = .mpv
+ misc$true.mu = !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles = .percentiles
+ }), list( .llocation=llocation, .lscale=lscale, .percentiles=percentiles,
+ .elocation = elocation, .escale = escale,
+ .mpv=mpv, .R=R ))),
+ vfamily=c("gumbel", "vextremes"),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ r.vec = as.vector((!is.na(y)) %*% rep(1,ncol(y))) # r_i vector
+ yiri = y[cbind(1:nrow(y),r.vec)]
+ ans = -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
+ max.r.vec = max(r.vec)
+ for(jay in 1:max.r.vec) {
+ index = (jay <= r.vec)
+ ans[index] = ans[index] - (y[index,jay]-loc[index]) / sigma[index]
+ }
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ans)
+ }, list( .llocation=llocation, .lscale=lscale,
+ .elocation = elocation, .escale = escale ))),
+ deriv=eval(substitute(expression({
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ r.vec = as.vector((!is.na(y)) %*% rep(1,ncol(y))) # r_i vector
+ yiri = y[cbind(1:nrow(y),r.vec)]
+ yi.bar = apply(y, 1, mean, na.rm=TRUE)
+ temp2 = (yiri - loc) / sigma
+ term2 = exp(-temp2)
+ dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation)
+ dsigma.deta = dtheta.deta(sigma, .lscale, earg= .escale )
+ dl.dloc = (r.vec - term2) / sigma
+ dl.dsigma = (apply((y - loc) / sigma, 1, sum, na.rm=TRUE) - r.vec -
+ temp2 * term2) / sigma
+ w * cbind(dl.dloc * dloc.deta, dl.dsigma * dsigma.deta)
+ }), list( .llocation=llocation, .lscale=lscale,
+ .elocation = elocation, .escale = escale ))),
+ weight=eval(substitute(expression({
+ temp6 = digamma(r.vec) # , integer=T
+ temp5 = digamma(1:max(r.vec)) # , integer=T
+ temp5 = matrix(temp5, n, max(r.vec), byrow=TRUE)
+ temp5[col(temp5) > r.vec] = 0
+ temp5 = temp5 %*% rep(1, ncol(temp5))
+ wz = matrix(as.numeric(NA), n, dimm(M=2)) # 3=dimm(M=2)
+ wz[,iam(1,1,M)] = r.vec / sigma^2
+ wz[,iam(2,1,M)] = -(1 + r.vec * temp6) / sigma^2
+ wz[,iam(2,2,M)] = (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
+ temp6^2) + 2 - r.vec - 2*temp5) / sigma^2
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dloc.deta^2
+ wz[,iam(2,1,M)] = wz[,iam(2,1,M)] * dsigma.deta * dloc.deta
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsigma.deta^2
+ w * wz
+ }), list( .lscale=lscale ))))
+}
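+
+# Editor's sketch (hedged, not upstream code): gumbel() accepts a matrix of
+# r-largest order statistics per row; a minimal one-column fit might look
+# like this.  Exact arguments may differ across versions; seed hypothetical.
+if(FALSE) {
+    set.seed(2)
+    ymax = rgumbel(200, location=10, scale=3)
+    fit = vglm(ymax ~ 1, gumbel(percentiles=c(95,99)))
+    head(fitted(fit))  # the 95% and 99% quantiles, since percentiles was set
+}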
+
+
+
+
+
+
+rgpd = function(n, location=0, scale=1, shape=0) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ ans = numeric(n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase)
+ ans[!scase] = location[!scase] +
+ scale[!scase]*((runif(n-nscase))^(-shape[!scase])-1) / shape[!scase]
+ if(nscase)
+ ans[scase] = location[scase] - scale[scase] * log(runif(nscase))
+ ans
+}
+
+dgpd = function(x, location=0, scale=1, shape=0) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ n = max(length(x), length(location), length(scale), length(shape))
+ ans = rep(as.numeric(NA), len=n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n); x = rep(x-location, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase) {
+ pos = (1+shape*x/scale > 0 & x > 0)
+ ind8 = pos & !scase
+ ans[ind8] = (1+shape[ind8]*x[ind8]/scale[ind8])^(-1/shape[ind8]-1) /
+ scale[ind8]
+ ind8 = !pos & !scase
+ ans[ind8] = 0
+ }
+ if(nscase) {
+ pos = x>0
+ ind5 = pos & scase
+ ans[ind5] = exp(-x[ind5]/scale[ind5]) / scale[ind5]
+ ans[!pos & scase] = 0
+ }
+ ans
+}
+
+pgpd = function(q, location=0, scale=1, shape=0) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ n = max(length(q), length(location), length(scale), length(shape))
+ ans = numeric(n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n); q = rep(q-location, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase) {
+ q[q<0] = 0
+ ans = 1 - pmax(0,(1+shape*q/scale))^(-1/shape)
+ }
+ if(nscase) {
+ pos = q>=0
+ ind9 = pos & scase
+ ans[ind9] = 1 - exp(-q[ind9]/scale[ind9])
+ ind9 = !pos & scase
+ ans[ind9] = 0
+ }
+ ans
+}
+
+qgpd = function(p, location=0, scale=1, shape=0) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ n = max(length(p), length(location), length(scale), length(shape))
+ ans = numeric(n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n); p = rep(p, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase) {
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((1-p[!scase])^(-shape[!scase]) - 1) / shape[!scase]
+ }
+ if(nscase) {
+ ans[scase] = location[scase] - scale[scase] * log(1-p[scase])
+ }
+ ans
+}
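+
+# Editor's note (illustrative sketch, not upstream code): qgpd() inverts
+# pgpd() away from the shape==0 boundary.
+if(FALSE) {
+    set.seed(3)  # hypothetical seed
+    x = rgpd(5, location=0, scale=2, shape=0.3)
+    all.equal(qgpd(pgpd(x, scale=2, shape=0.3), scale=2, shape=0.3), x) # TRUE
+}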
+
+
+
+
+gpd = function(threshold=0,
+ lscale="loge",
+ lshape="logoff",
+ escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else NULL,
+ percentiles=c(90,95),
+ iscale=NULL,
+ ishape=NULL,
+ tshape0=0.001,
+ method.init=1,
+ zero=2) {
+ if(!is.Numeric(threshold, allow=1))
+ stop("bad input for argument \"threshold\"")
+ if(!is.Numeric(method.init, allow=1, posit=TRUE, integer=TRUE) ||
+ method.init > 2.5)
+ stop("argument \"method.init\" must be 1 or 2")
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(length(percentiles) &&
+ (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ stop("bad input for argument \"percentiles\"")
+ if(!is.Numeric(tshape0, allow=1, posit=TRUE) || tshape0 > 0.1)
+ stop("bad input for argument \"tshape0\"")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.list(escale)) escale = list()
+ if(!is.list(eshape)) eshape = list()
+
+ new("vglmff",
+ blurb=c("Generalized Pareto Distribution\n",
+ "Links: ",
+ namesof("scale", link=lscale, earg= escale ), ", ",
+ namesof("shape", link=lshape, earg= eshape)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(as.matrix(y)) != 1)
+ stop("response must be a vector or one-column matrix")
+ extra$orig.n = length(y)
+ keep = (y > .threshold)
+ orig.y = y[keep]
+ y = orig.y - .threshold
+ a = attr(x,"assign")
+ x = x[keep,,drop=FALSE]
+ attr(x,"assign") = a
+ extra$sumkeep = sum(keep)
+ extra$threshold = .threshold
+ w = w[keep] # -> origw
+ n = length(w)
+ predictors.names= c(namesof("scale", .lscale, earg= .escale, short=TRUE),
+ namesof("shape", .lshape, earg= .eshape, short=TRUE ))
+ if(!length(etastart)) {
+ meany = mean(y)
+ vary = var(y)
+ xiinit = if(length(.ishape)) .ishape else {
+ if( .method.init == 1) -0.5*(meany^2/vary - 1) else
+ 0.5 * (1 - median(y)^2 / vary)
+ }
+ siginit = if(length(.iscale)) .iscale else {
+ if(.method.init==1) 0.5*meany*(meany^2/vary + 1) else
+ abs(1-xiinit) * median(y)
+ }
+ siginit[siginit <= 0] = 0.01 # sigma > 0
+ xiinit[xiinit <= -0.5] = -0.40 # Fisher scoring works if xi > -0.5
+ xiinit[xiinit >= 1.0] = 0.90 # Mean exists if xi < 1; variance if xi < 0.5
+ if( .lshape == "loge") xiinit[xiinit <= 0.0] = 0.05
+ siginit = rep(siginit, leng=length(y))
+ xiinit = rep(xiinit, leng=length(y))
+ etastart = cbind(theta2eta(siginit, .lscale, earg= .escale ),
+ theta2eta(xiinit, .lshape, earg= .eshape ))
+ }
+ }), list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
+ .iscale=iscale, .ishape=ishape,
+ .escale=escale, .eshape=eshape,
+ .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ sigma = eta2theta(eta[,1], .lscale, earg= .escale )
+ xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ cent = .percentiles
+ lp = length(cent) # NULL means lp==0 and the mean is returned
+ if(lp) {
+ fv = matrix(as.numeric(NA), nrow(eta), lp)
+ iszero = (abs(xi) < .tshape0)
+ for(i in 1:lp) {
+ temp = 1-cent[i]/100
+ fv[!iszero,i] = .threshold + (temp^(-xi[!iszero]) -1) *
+ sigma[!iszero] / xi[!iszero]
+ fv[iszero,i] = .threshold - sigma[iszero] * log(temp)
+ }
+ dimnames(fv) = list(dimnames(eta)[[1]],
+ paste(as.character(.percentiles), "%", sep=""))
+ } else {
+ fv = .threshold + sigma / (1 - xi) # This is the mean, E(Y)
+ fv[xi >= 1] = NA # Mean exists only if xi < 1.
+ }
+ fv
+ }, list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
+ .escale=escale, .eshape=eshape,
+ .tshape0=tshape0, .percentiles=percentiles ))),
+ last=eval(substitute(expression({
+ y = orig.y # Put in @y, i.e., y slot of the fitted object
+ misc$links = c(scale = .lscale, shape = .lshape)
+ misc$true.mu = FALSE # @fitted is not a true mu
+ misc$earg= list(scale= .escale , shape= .eshape )
+ misc$percentiles = .percentiles
+ misc$threshold = .threshold
+ misc$expected = TRUE
+ misc$tshape0 = .tshape0
+ }), list( .lscale=lscale, .lshape=lshape, .threshold=threshold,
+ .escale=escale, .eshape=eshape,
+ .tshape0=tshape0, .percentiles=percentiles ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ sigma = eta2theta(eta[,1], .lscale, earg= .escale )
+ xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ iszero = (abs(xi) < .tshape0)
+ A = 1 + xi*y/sigma
+ mytolerance = .Machine$double.eps
+ bad <- (A<=mytolerance) # Range violation
+ if(any(bad)) {
+ cat("There are some range violations\n")
+ if(exists("flush.console")) flush.console()
+ }
+ igpd = !iszero & !bad
+ iexp = iszero & !bad
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(bad) * (-1.0e10) +
+ sum(w[igpd] * (-log(sigma[igpd]) - (1+1/xi[igpd])*log(A[igpd]))) +
+ sum(w[iexp] * (-log(sigma[iexp]) - y[iexp]/sigma[iexp]))
+ }, list( .tshape0=tshape0, .lscale=lscale,
+ .escale=escale, .eshape=eshape,
+ .lshape=lshape ))),
+ vfamily=c("gpd", "vextremes"),
+ deriv=eval(substitute(expression({
+ sigma = eta2theta(eta[,1], .lscale, earg= .escale )
+ xi = eta2theta(eta[,2], .lshape, earg= .eshape )
+ A = 1 + xi*y/sigma
+ mytolerance = .Machine$double.eps
+ bad <- (A <= mytolerance)
+ if(any(bad)) {
+ cat(sum(bad,na.rm=TRUE), # "; ignoring them"
+ "observations violating boundary constraints\n")
+ if(exists("flush.console")) flush.console()
+ }
+ iszero = (abs(xi) < .tshape0)
+ igpd = !iszero & !bad
+ iexp = iszero & !bad
+ dl.dxi = dl.dsigma = rep(0, len=length(y))
+ dl.dsigma[igpd] = ((1 + xi[igpd]) * y[igpd] / (sigma[igpd] +
+ xi[igpd]*y[igpd]) - 1) / sigma[igpd]
+ dl.dxi[igpd] = log(A[igpd])/xi[igpd]^2 - (1 + 1/xi[igpd]) * y[igpd] /
+ (A[igpd] * sigma[igpd])
+ dl.dxi[iexp] = y[iexp] * (0.5*y[iexp]/sigma[iexp] - 1) / sigma[iexp]
+ dsigma.deta = dtheta.deta(sigma, .lscale, earg= .escale )
+ dxi.deta = dtheta.deta(xi, .lshape, earg= .eshape )
+ w * cbind(dl.dsigma * dsigma.deta, dl.dxi * dxi.deta)
+ }), list( .tshape0=tshape0, .lscale=lscale,
+ .escale=escale, .eshape=eshape,
+ .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ n <- length(w) # needed!
+ wz = matrix(as.numeric(NA), n, 3)
+ wz[,iam(1,1,M)] = 1 / ((1+2*xi) * sigma^2)
+ wz[,iam(2,2,M)] = 2 / ((1+2*xi) * (1+xi))
+ wz[,iam(1,2,M)] = 1 / ((1+2*xi) * (1+xi) * sigma) # Positive!!!
+ wz[,iam(1,1,M)] = w * wz[,iam(1,1,M)] * dsigma.deta^2
+ wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dxi.deta^2
+ wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dsigma.deta * dxi.deta
+ wz
+ }), list( .lscale=lscale ))))
+}
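+
+# Editor's sketch (hedged, not upstream code): a peaks-over-threshold fit;
+# gpd() discards observations at or below the threshold internally.  Exact
+# arguments may differ across versions; seed hypothetical.
+if(FALSE) {
+    set.seed(4)
+    yy = rgpd(500, location=0.5, scale=1, shape=0.2)
+    fit = vglm(yy ~ 1, gpd(threshold=0.5))
+    coef(fit, matrix=TRUE)  # scale and shape on their link scales
+}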
+
+
+
+
+
+meplot.default = function(y, main="Mean Excess Plot",
+ xlab="Threshold", ylab="Mean Excess", lty=c(2,1:2),
+ conf=0.95, col=c("blue","black","blue"), type="l", ...) {
+ if(!is.Numeric(y)) stop("bad input for argument \"y\"")
+ n = length(y)
+ sy = sort(y)
+ dsy = rev(sy) # decreasing sequence
+ me = rev(cumsum(dsy))/(n:1) - sy
+ me2 = rev(cumsum(dsy^2))
+ var = (me2 - (n:1)*(me+sy)^2) / (n:1)
+ ci = qnorm((1+conf)/2) * sqrt(abs(var)) / sqrt(n:1)
+ mymat = cbind(me-ci, me, me+ci)
+ sy = sy - sqrt(.Machine$double.eps)
+ matplot(sy, mymat, main=main, xlab=xlab, ylab=ylab,
+ lty=lty, col=col, type=type, ...)
+ invisible(list(threshold=sy, meanExcess=me))
+}
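+
+# Editor's note (illustrative, not upstream code): for GPD-like data the mean
+# excess is roughly linear in the threshold, which is what this plot checks.
+if(FALSE) meplot.default(rgpd(1000, scale=1, shape=0.25))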
+
+meplot.vlm = function(object, ...) {
+ if(!length(y <- object@y)) stop("y slot is empty")
+ ans = meplot(as.numeric(y), ...)
+ invisible(ans)
+}
+
+if(!isGeneric("meplot"))
+ setGeneric("meplot", function(object, ...) standardGeneric("meplot"))
+
+setMethod("meplot", "numeric",
+ function(object, ...)
+ meplot.default(y=object, ...))
+
+setMethod("meplot", "vlm",
+ function(object, ...)
+ meplot.vlm(object, ...))
+
+
+
+guplot.default = function(y, main="Gumbel Plot",
+ xlab="Reduced data", ylab="Observed data", type="p", ...) {
+ if(!is.Numeric(y)) stop("bad input for argument \"y\"")
+ n = length(y)
+ sy = sort(y)
+ x = -log(-log(((1:n) - 0.5) / n))
+ plot(x, sy, main=main, xlab=xlab, ylab=ylab, type=type, ...)
+ invisible(list(x=x, y=sy))
+}
+
+guplot.vlm = function(object, ...) {
+ if(!length(y <- object@y)) stop("y slot is empty")
+ ans = guplot(as.numeric(y), ...)
+ invisible(ans)
+}
+
+if(!isGeneric("guplot"))
+ setGeneric("guplot", function(object, ...) standardGeneric("guplot"))
+
+setMethod("guplot", "numeric",
+ function(object, ...)
+ guplot.default(y=object, ...))
+
+setMethod("guplot", "vlm",
+ function(object, ...)
+ guplot.vlm(object, ...))
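+
+# Editor's note (illustrative, not upstream code): Gumbel-distributed data
+# should give an approximately straight line on this plot.
+if(FALSE) guplot(rgumbel(500, location=1, scale=2))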
+
+
+
+if(FALSE)
+bvevd.log.qn <- function(lscale="loge",
+ ldep="logit",
+ exchangeable=FALSE, zero=NULL)
+{
+ if(mode(ldep) != "character" && mode(ldep) != "name")
+ ldep <- as.character(substitute(ldep))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+
+ new("vglmff",
+ blurb=c("Bivariate logistic extreme value distribution\n\n",
+ "Link: ", "loc1, ", namesof("scale1", lscale), ", shape1, ",
+ "loc2, ", namesof("scale2", lscale), ", shape2, ",
+ namesof("dep", ldep), "\n"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(cbind(rbind(diag(3),diag(3),matrix(0,1,3)),
+ ei(7,7)), x, .exchangeable, constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable=exchangeable, .zero=zero ))),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ nz <- y > 0
+ devi <- - (y - mu)
+ devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
+ 2 * sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ M = if(is.matrix(y)) ncol(y) else 1
+ if(M != 1) stop("response must be a vector or a one-column matrix")
+ predictors.names = c("loc1", namesof("scale1", lscale), "shape1",
+ "loc2", namesof("scale2", lscale), "shape2",
+ namesof("dep", ldep))
+ if(!length(etastart)) {
+ etastart <- theta2eta(mu, link=.link)
+ }
+ }), list( .link=link, .estimated.dispersion=estimated.dispersion) )),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mu = eta2theta(eta, link=.link)
+ mu
+ }, list( .link=link) )),
+ last=eval(substitute(expression({
+ misc$link = c(loc1="identity", scale1= .lscale, shape1="identity",
+ loc2="identity", scale2= .lscale, shape2="identity",
+ dep= .ldep)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list( .lscale=lscale, .ldep=ldep) )),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, link=.link)
+ }, list( .link=link) )),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ pdf = deriv3( ~ w * ifelse(1+xi1*(y1-mu1)/sigma1>0, y1, 0),
+ name=c("y1"), hessian= TRUE)
+ pdf = deriv3( ~ w * exp(-((pmax(0,1+xi1*(y1-mu1)/sigma1)^(-1/xi1))^(1/dep) +
+ (pmax(0,1+xi2*(y2-mu2)/sigma2)^(-1/xi2))^(1/dep))^(dep)),
+ name=c("y1","y2"), hessian= TRUE)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ pdf
+
+ },
+ vfamily="bvevd.log.qn",
+ deriv=eval(substitute(expression({
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+
+ derivnew =
+ if(.link == "loge" && (any(mu < .Machine$double.eps))) {
+ w * (y - mu)
+ } else {
+ lambda <- mu
+ dl.dlambda <- (y-lambda) / lambda
+ dlambda.deta <- dtheta.deta(theta=lambda, link=.link)
+ w * dl.dlambda * dlambda.deta
+ }
+ derivnew
+ }), list( .link=link) )),
+ weight=eval(substitute(expression({
+ if( .wwts == "qn") {
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold-derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ }
+ wznew
+ }), list( .lscale=lscale, .ldep=ldep) )))
+}
+
+
+
+
+rgev = function(n, location=0, scale=1, shape=0) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(location))
+ stop("bad input for argument argument \"location\"")
+ if(!is.Numeric(shape)) stop("bad input for argument argument \"shape\"")
+ ans = numeric(n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase)
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((-log(runif(n-nscase)))^(-shape[!scase]) -1) / shape[!scase]
+ if(nscase)
+ ans[scase] = rgumbel(nscase, location[scase], scale[scase])
+ ans
+}
+
+dgev = function(x, location=0, scale=1, shape=0) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ n = max(length(x), length(location), length(scale), length(shape))
+ ans = rep(as.numeric(NA), len=n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n); x = rep(x-location, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase) {
+ zedd = pmax(0,(1+shape*x/scale))
+ ans[!scase] = exp(-zedd[!scase]^(-1/shape[!scase])) *
+ zedd[!scase]^(-1/shape[!scase] -1) / scale[!scase]
+ }
+ if(nscase)
+ ans[scase] = dgumbel(x[scase], location[scase], scale[scase])
+ ans
+}
+
+pgev = function(q, location=0, scale=1, shape=0) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ n = max(length(q), length(location), length(scale), length(shape))
+ ans = numeric(n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n); q = rep(q-location, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase) {
+ zedd = pmax(0,(1+shape*q/scale))
+ ans[!scase] = exp(-zedd[!scase]^(-1/shape[!scase]))
+ }
+ if(nscase)
+ ans[scase] = pgumbel(q[scase], location[scase], scale[scase])
+ ans
+}
+
+qgev = function(p, location=0, scale=1, shape=0) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
+ if(!is.Numeric(location)) stop("bad input for argument \"location\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ n = max(length(p), length(location), length(scale), length(shape))
+ ans = numeric(n)
+ shape = rep(shape, len=n); location = rep(location, len=n);
+ scale = rep(scale, len=n); p = rep(p, len=n)
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if(n - nscase) {
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((-log(p[!scase]))^(-shape[!scase]) -1) / shape[!scase]
+ }
+ if(nscase)
+ ans[scase] = qgumbel(p[scase], location[scase], scale[scase])
+ ans
+}
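+
+# Editor's note (illustrative sketch, not upstream code): qgev() inverts
+# pgev() away from the shape==0 boundary.
+if(FALSE) {
+    set.seed(5)  # hypothetical seed
+    x = rgev(5, location=0, scale=1, shape=0.2)
+    all.equal(qgev(pgev(x, shape=0.2), shape=0.2), x)  # TRUE
+}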
+
+
+
+egumbel = function(llocation="identity",
+ lscale="loge",
+ elocation = list(),
+ escale = list(),
+ iscale=NULL,
+ R=NA, percentiles=c(95,99),
+ mpv=FALSE, zero=NULL)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.logical(mpv) || length(mpv) != 1)
+ stop("bad input for argument \"mpv\"")
+ if(length(percentiles) &&
+ (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ stop("bad input for argument \"percentiles\"")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(length(iscale) && !is.Numeric(iscale, posit=TRUE))
+ stop("bad input for argument \"iscale\"")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.list(escale)) escale = list()
+
+ new("vglmff",
+ blurb=c("Gumbel distribution (univariate response)\n\n",
+ "Links: ",
+ namesof("location", llocation, earg= elocation, tag= TRUE), ", ",
+ namesof("scale", lscale, earg= escale , tag= TRUE), "\n",
+ "Mean: location + scale*0.5772..\n",
+ "Variance: pi^2 * scale^2 / 6"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = cbind(y)
+ if(ncol(y) > 1)
+ stop("Use gumbel() to handle multivariate responses")
+ if(min(y) <= 0)
+ stop("all response values must be positive")
+ predictors.names = c(namesof("location", .llocation, earg= .elocation, tag= FALSE),
+ namesof("scale", .lscale, earg= .escale , tag= FALSE))
+
+ extra$R = .R
+ extra$mpv = .mpv
+ extra$percentiles = .percentiles
+
+ if(!length(etastart)) {
+ sc.init = if(is.Numeric( .iscale, posit=TRUE))
+ .iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
+ sc.init = rep(sc.init, len=n)
+ Euler = 0.57721566490153286
+ loc.init = (y - sc.init * Euler)
+ etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation),
+ theta2eta(sc.init, .lscale, earg= .escale ))
+ }
+ }), list( .llocation=llocation, .lscale=lscale, .iscale=iscale,
+ .elocation=elocation, .escale=escale,
+ .R=R, .mpv=mpv, .percentiles=percentiles ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sigma = eta2theta(eta[,2], .lscale, earg= .escale )
+ Euler = 0.57721566490153286
+ Percentiles = extra$percentiles
+ mpv = extra$mpv
+ lp = length(Percentiles) # may be 0
+ if(!lp) return(loc + sigma * Euler)
+ mu = matrix(as.numeric(NA), nrow(eta), lp + mpv)
+ Rvec = extra$R
+ if(1 <= lp)
+ for(i in 1:lp) {
+ ci = if(is.Numeric(Rvec)) Rvec * (1 - Percentiles[i] / 100) else
+ -log( Percentiles[i] / 100)
+ mu[,i] = loc - sigma * log(ci)
+ }
+ if(mpv)
+ mu[,ncol(mu)] = loc - sigma * log(log(2))
+ dmn2 = if(lp>=1) paste(as.character(Percentiles), "%", sep="") else NULL
+ if(mpv)
+ dmn2 = c(dmn2, "MPV")
+ dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
+ mu
+ }, list( .llocation=llocation, .lscale=lscale,
+ .elocation=elocation, .escale=escale ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation, scale = .lscale)
+ misc$earg= list(location= .elocation, scale= .escale)
+ misc$true.mu = !length( .percentiles) # @fitted is a true mu only if no percentiles
+ misc$R = .R
+ misc$mpv = .mpv
+ misc$percentiles = .percentiles
+ }), list( .llocation=llocation, .lscale=lscale, .mpv=mpv,
+ .elocation=elocation, .escale=escale,
+ .R=R, .percentiles=percentiles ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ zedd = (y-loc) / sc
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(sc) - zedd - exp(-zedd)))
+ }, list( .llocation=llocation, .lscale=lscale,
+ .elocation=elocation, .escale=escale ))),
+ vfamily="egumbel",
+ deriv=eval(substitute(expression({
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation)
+ sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ zedd = (y-loc) / sc
+ temp2 = 1 - exp(-zedd)
+ dl.dloc = temp2 / sc
+ dl.dsc = -1/sc + temp2 * zedd / sc
+ dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation)
+ dsc.deta = dtheta.deta(sc, .lscale, earg= .escale )
+ w * cbind(dl.dloc * dloc.deta, dl.dsc * dsc.deta)
+ }), list( .llocation=llocation, .lscale=lscale,
+ .elocation=elocation, .escale=escale ))),
+ weight=expression({
+ digamma1 = digamma(1)
+ ed2l.dsc2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
+ ed2l.dloc2 = 1 / sc^2
+ ed2l.dscloc = -(1 + digamma1) / sc^2
+ wz = matrix(as.numeric(NA), n, dimm(M=2))
+ wz[,iam(1,1,M)] = ed2l.dloc2 * dloc.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsc2 * dsc.deta^2
+ wz[,iam(1,2,M)] = ed2l.dscloc * dloc.deta * dsc.deta
+ w * wz
+ }))
+}
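+
+# Editor's sketch (not upstream code): a minimal egumbel() fit via vglm()
+# on simulated data; rgumbel() is defined earlier in this file, and the
+# simulated response is effectively always positive, as egumbel() requires.
+if(FALSE) {
+    set.seed(1)
+    gdata = data.frame(y = rgumbel(500, location=10, scale=2))
+    fit = vglm(y ~ 1, egumbel(percentiles=c(95, 99)), data=gdata)
+    coef(fit, matrix=TRUE) # intercepts for location and log(scale)
+}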
+
+
+
+
+cgumbel = function(llocation="identity",
+ lscale="loge",
+ elocation = list(),
+ escale = list(), iscale=NULL,
+ mean=TRUE, percentiles=NULL, zero=2)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.logical(mean) || length(mean) != 1)
+ stop("mean must be a single logical value")
+ if(!mean && (!is.Numeric(percentiles, posit=TRUE) ||
+ any(percentiles>=100)))
+ stop("valid percentiles values must be given when mean=FALSE")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.list(elocation)) elocation = list()
+ if(!is.list(escale)) escale = list()
+
+ new("vglmff",
+ blurb=c("Censored Gumbel distribution\n\n",
+ "Links: ",
+ namesof("location", llocation, earg= elocation, tag= TRUE), ", ",
+ namesof("scale", lscale, earg= escale, tag= TRUE),
+ "\n",
+ "Mean: location + scale*0.5772..\n",
+ "Variance: pi^2 * scale^2 / 6"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = cbind(y)
+ if(ncol(y) > 1)
+ stop("Use gumbel.block() to handle multivariate responses")
+ if(any(y <= 0))
+ stop("all response values must be positive")
+
+ if(!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len=n)
+ if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len=n)
+ if(any(extra$rightcensored & extra$leftcensored))
+ stop("some observations are both right and left censored!")
+
+ predictors.names =
+ c(namesof("location", .llocation, earg= .elocation, tag= FALSE),
+ namesof("scale", .lscale, earg= .escale , tag= FALSE))
+ if(!length(etastart)) {
+ sc.init = if(is.Numeric( .iscale, posit=TRUE))
+ .iscale else 1.1 * sqrt(var(y) * 6 ) / pi
+ sc.init = rep(sc.init, len=n)
+ Euler = 0.57721566490153286
+ loc.init = (y - sc.init * Euler)
+ loc.init[loc.init <= 0] = min(y)
+ etastart = cbind(theta2eta(loc.init, .llocation, earg= .elocation ),
+ theta2eta(sc.init, .lscale, earg= .escale ))
+ }
+ }), list( .lscale=lscale, .iscale=iscale,
+ .llocation = llocation,
+ .elocation = elocation, .escale = escale ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation )
+ sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ Euler = 0.57721566490153286
+ if(.mean) loc + sc * Euler else {
+ lp = length(.percentiles) # 0 if NULL
+ mu = matrix(as.numeric(NA), nrow(eta), lp)
+ for(i in 1:lp) {
+ ci = -log( .percentiles[i] / 100)
+ mu[,i] = loc - sc * log(ci)
+ }
+ dmn2 = paste(as.character(.percentiles), "%", sep="")
+ dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
+ mu
+ }
+ }, list( .lscale=lscale, .percentiles=percentiles,
+ .llocation = llocation,
+ .elocation = elocation, .escale = escale ,
+ .mean=mean ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation, scale = .lscale)
+ misc$earg= list(location= .elocation, scale= .escale )
+ misc$true.mu = .mean # if FALSE then @fitted is not a true mu
+ misc$percentiles = .percentiles
+ }), list( .lscale=lscale, .mean=mean,
+ .llocation = llocation,
+ .elocation = elocation, .escale = escale ,
+ .percentiles=percentiles ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation )
+ sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ zedd = (y-loc) / sc
+
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cen0 = !cenL & !cenU # uncensored obsns
+ Fy = exp(-exp(-zedd))
+ ell1 = -log(sc[cen0]) - zedd[cen0] - exp(-zedd[cen0])
+ ell2 = log(Fy[cenL])
+ ell3 = log(1 - Fy[cenU])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+ }, list( .lscale=lscale,
+ .llocation = llocation,
+ .elocation = elocation, .escale = escale ))),
+ vfamily="cgumbel",
+ deriv=eval(substitute(expression({
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cen0 = !cenL & !cenU # uncensored obsns
+
+ loc = eta2theta(eta[,1], .llocation, earg= .elocation )
+ sc = eta2theta(eta[,2], .lscale, earg= .escale )
+ zedd = (y-loc) / sc
+ temp2 = 1 - exp(-zedd)
+ dl.dloc = temp2 / sc
+ dl.dsc = -1/sc + temp2 * zedd / sc
+ dloc.deta = dtheta.deta(loc, .llocation, earg= .elocation )
+ dsc.deta = dtheta.deta(sc, .lscale, earg= .escale )
+
+ ezedd = exp(-zedd)
+ Fy = exp(-ezedd)
+ dFy.dloc = -ezedd * Fy / sc
+ dFy.dsc = zedd * dFy.dloc # -zedd * exp(-zedd) * Fy / sc
+ if(any(cenL)) {
+ dl.dloc[cenL] = -ezedd[cenL] / sc[cenL]
+ dl.dsc[cenL] = -zedd[cenL] * ezedd[cenL] / sc[cenL]
+ }
+ if(any(cenU)) {
+ dl.dloc[cenU] = -dFy.dloc[cenU] / (1-Fy[cenU])
+ dl.dsc[cenU] = -dFy.dsc[cenU] / (1-Fy[cenU])
+ }
+ w * cbind(dl.dloc * dloc.deta, dl.dsc * dsc.deta)
+ }), list( .lscale=lscale,
+ .llocation = llocation,
+ .elocation = elocation, .escale = escale ))),
+ weight=expression({
+ A1 = ifelse(cenL, Fy, 0)
+ A3 = ifelse(cenU, 1-Fy, 0)
+ A2 = 1 - A1 - A3 # Middle; uncensored
+ digamma1 = digamma(1)
+ ed2l.dsc2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
+ ed2l.dloc2 = 1 / sc^2
+ ed2l.dlocsc = -(1 + digamma1) / sc^2
+ wz = matrix(as.numeric(NA), n, dimm(M=2))
+ wz[,iam(1,1,M)] = A2 * ed2l.dloc2 * dloc.deta^2
+ wz[,iam(2,2,M)] = A2 * ed2l.dsc2 * dsc.deta^2
+ wz[,iam(1,2,M)] = A2 * ed2l.dlocsc * dloc.deta * dsc.deta
+ d2l.dloc2 = -ezedd / sc^2
+ d2l.dsc2 = (2 - zedd) * zedd * ezedd / sc^2
+ d2l.dlocsc = (1 - zedd) * ezedd / sc^2
+ wz[,iam(1,1,M)]=wz[,iam(1,1,M)]-A1^2 * d2l.dloc2 * dloc.deta^2
+ wz[,iam(2,2,M)]=wz[,iam(2,2,M)]-A1^2 * d2l.dsc2 * dsc.deta^2
+ wz[,iam(1,2,M)]=wz[,iam(1,2,M)]-A1^2 * d2l.dlocsc * dloc.deta * dsc.deta
+ d2Fy.dloc2 = dFy.dloc * dl.dloc + Fy * d2l.dloc2
+ d2Fy.dsc2 = dFy.dsc * dl.dsc + Fy * d2l.dsc2
+ d2Fy.dlocsc = dFy.dsc * dl.dloc + Fy * d2l.dlocsc
+ d2l.dloc2 = -((1-Fy) * d2Fy.dloc2 - dFy.dloc^2) / (1-Fy)^2
+ d2l.dsc2 = -((1-Fy) * d2Fy.dsc2 - dFy.dsc^2) / (1-Fy)^2
+ d2l.dlocsc =-((1-Fy) * d2Fy.dlocsc - dFy.dloc * dFy.dsc) / (1-Fy)^2
+ wz[,iam(1,1,M)]=wz[,iam(1,1,M)]-A3^2 * d2l.dloc2 * dloc.deta^2
+ wz[,iam(2,2,M)]=wz[,iam(2,2,M)]-A3^2 * d2l.dsc2 * dsc.deta^2
+ wz[,iam(1,2,M)]=wz[,iam(1,2,M)]-A3^2 * d2l.dlocsc * dloc.deta * dsc.deta
+ w * wz
+ }))
+}
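+
+# Editor's sketch (not upstream code): cgumbel() reads its censoring
+# indicators from the 'extra' argument of vglm(); the detection limit U
+# and the data below are hypothetical.
+if(FALSE) {
+    set.seed(1)
+    gdata = data.frame(y = rgumbel(200, location=10, scale=2))
+    U = 14 # hypothetical upper detection limit
+    rcen = gdata$y > U
+    gdata$y[rcen] = U # right-censored values recorded at the limit
+    fit = vglm(y ~ 1, cgumbel, data=gdata,
+               extra=list(leftcensored=rep(FALSE, 200), rightcensored=rcen))
+    coef(fit, matrix=TRUE)
+}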
+
+
+
+dfrechet = function(x, location=0, scale=1, shape) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
+ if(!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+ rzedd = scale / (x - location)
+ ans = rzedd^2 * shape * exp(-(rzedd^shape)) * rzedd^(shape-1) / scale
+ ans[x <= location] = 0
+ ans
+}
+
+pfrechet = function(q, location=0, scale=1, shape) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
+ if(!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+ rzedd = scale / (q - location)
+ ans = exp(-(rzedd^shape))
+ ans[q <= location] = 0
+ ans
+}
+
+qfrechet = function(p, location=0, scale=1, shape) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1)) stop("0 < p < 1 is required")
+ if(!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
+ if(!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+ location + scale * (-log(p))^(-1/shape)
+}
+
+rfrechet = function(n, location=0, scale=1, shape) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("scale must be positive")
+ if(!is.Numeric(shape, posit=TRUE)) stop("shape must be positive")
+ location + scale * (-log(runif(n)))^(-1/shape)
+}
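+
+# Editor's sketch (not upstream code): qfrechet() and pfrechet() should
+# invert one another; a quick self-check:
+if(FALSE) {
+    qq = qfrechet(c(0.25, 0.5, 0.75), location=1, scale=2, shape=3)
+    stopifnot(all.equal(pfrechet(qq, location=1, scale=2, shape=3),
+                        c(0.25, 0.5, 0.75)))
+}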
+
+frechet2.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+frechet2 = function(location=0,
+ lscale="loge",
+ lshape="loglog",
+ escale = list(),
+ eshape = list(),
+ iscale=NULL, ishape=3,
+ zero=NULL)
+{
+ if(!is.Numeric(location))
+ stop("bad input for argument \"location\"")
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape <- as.character(substitute(lshape))
+ if(!is.list(escale)) escale = list()
+ if(!is.list(eshape)) eshape = list()
+
+ new("vglmff",
+ blurb=c("2-parameter Frechet Distribution\n",
+ "Links: ",
+ namesof("scale", link=lscale, earg=escale ), ", ",
+ namesof("shape", link=lshape, earg=eshape )),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .lscale, earg=.escale, short=TRUE),
+ namesof("shape", .lshape, earg=.eshape, short=TRUE))
+ extra$location = rep( .location, len=n) # stored here
+ if(!length(etastart)) {
+ # Initial values for limiting case as xi --> 0, r_i==1
+ locinit = extra$location
+ if(any(y <= locinit))
+ stop("initial values for location are out of range")
+ shape.init = if(length( .ishape)) rep( .ishape, len=n) else {
+ rep(3.0, len=n) # variance exists if shape>2
+ }
+ Scale.init = if(length( .iscale)) rep( .iscale, len=n) else {
+ if(all(shape.init > 1))
+ abs( (y-locinit+0.001) / (gamma(1-1/shape.init)) ) else
+ rep( 1.0, len=n)
+ }
+ etastart = cbind(theta2eta(Scale.init, .lscale, earg=.escale ),
+ theta2eta(shape.init, .lshape, earg=.eshape ))
+ }
+ }), list( .lscale=lscale, .lshape=lshape,
+ .escale = escale, .eshape= eshape,
+ .location=location, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ loc = extra$location
+ Scale = eta2theta(eta[,1], .lscale, earg= .escale )
+ shape = eta2theta(eta[,2], .lshape, earg= .eshape )
+ ans = rep(as.numeric(NA), len=length(shape))
+ ok = shape > 1
+ ans[ok] = loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
+ ans
+ }, list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ))),
+ last=eval(substitute(expression({
+ misc$links <- c("scale"= .lscale, "shape"= .lshape)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ loc = extra$location
+ Scale = eta2theta(eta[,1], .lscale, earg= .escale )
+ shape = eta2theta(eta[,2], .lshape, earg= .eshape )
+ rzedd = Scale / (y-loc)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) + log(Scale) - 2 * log(y-loc) -
+ rzedd^shape + (shape-1) * log(rzedd)))
+ }, list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ))),
+ vfamily=c("frechet2", "vextremes"),
+ deriv=eval(substitute(expression({
+ loc = extra$location
+ Scale = eta2theta(eta[,1], .lscale, earg= .escale )
+ shape = eta2theta(eta[,2], .lshape, earg= .eshape )
+ rzedd = Scale / (y-loc) # reciprocal of zedd
+ dl.dloc = (shape+1)/(y-loc) - (shape / (y-loc)) * (rzedd)^shape
+ dl.dScale = shape * (1-rzedd^shape) / Scale
+ dl.dshape = 1/shape + log(rzedd) * (1 - rzedd^shape)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w *
+ cbind(dl.dScale * dtheta.deta(Scale, .lscale, earg= .escale ),
+ dl.dshape * dtheta.deta(shape, .lshape, earg= .eshape ))
+ derivnew
+ }), list( .lscale=lscale, .lshape=lshape,
+ .escale=escale, .eshape=eshape ))),
+ weight=eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list( .lscale=lscale, .lshape=lshape ))))
+}
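+
+# Editor's sketch (not upstream code): frechet2() treats the location
+# parameter as known and fixed; a minimal fit on simulated data, assuming
+# vglm() is available:
+if(FALSE) {
+    set.seed(1)
+    fdata = data.frame(y = rfrechet(400, location=0, scale=2, shape=4))
+    fit = vglm(y ~ 1, frechet2(location=0), data=fdata, trace=TRUE)
+    coef(fit, matrix=TRUE) # intercepts for log(scale) and loglog(shape)
+}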
+
+
+
+frechet3.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+
+frechet3= function(anchor=NULL,
+ ldifference="loge",
+ lscale="loge",
+ lshape="loglog",
+ edifference = list(),
+ escale = list(),
+ eshape = list(),
+ ilocation=NULL, iscale=NULL, ishape=3, zero=NULL,
+ effpos = .Machine$double.eps^0.75)
+{
+ if(mode(ldifference) != "character" && mode(ldifference) != "name")
+ ldifference <- as.character(substitute(ldifference))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape <- as.character(substitute(lshape))
+ if(!is.Numeric(ishape, allo=1, posi=TRUE)) stop("bad input for argument \"ishape\"")
+ if(!is.Numeric(effpos, allo=1) || effpos < 0) stop("bad input for argument \"effpos\"")
+ if(!is.list(edifference)) edifference = list()
+ if(!is.list(escale)) escale = list()
+ if(!is.list(eshape)) eshape = list()
+
+ new("vglmff",
+ blurb=c("3-parameter Frechet Distribution\n",
+ "Links: ",
+ namesof("difference", link=ldifference, earg=edifference), ", ",
+ namesof("scale", link=lscale, earg=escale), ", ",
+ namesof("shape", link=lshape, earg=eshape)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names =
+ c(namesof("difference", .ldifference, earg= .edifference, short=TRUE),
+ namesof("scale", .lscale, earg= .escale, short=TRUE),
+ namesof("shape", .lshape, earg= .eshape, short=TRUE))
+ anchorpt = if(is.Numeric( .anchor, allow=1)) .anchor else min(y)
+ if(min(y) < anchorpt) stop("anchor point is too large")
+ extra$LHSanchor = anchorpt
+ if(!length(etastart)) {
+ # Initial values for limiting case as xi --> 0, r_i==1
+ locinit = if(length( .ilocation)) rep( .ilocation, len=n) else
+ rep(anchorpt - 0.01*diff(range(y)), len=n)
+ if(any(y <= locinit))
+ stop("initial values for location are out of range")
+ if(any(anchorpt <= locinit))
+ stop("require anchor point > initial location parameter value")
+ shape.init = if(length( .ishape)) rep( .ishape, len=n) else {
+ rep(3.0, len=n) # variance exists if shape>2
+ }
+ Scale.init = if(length( .iscale)) rep( .iscale, len=n) else {
+ if(all(shape.init > 1))
+ abs( (y-locinit+0.001) / (gamma(1-1/shape.init)) ) else
+ rep( 1.0, len=n)
+ }
+ etastart = cbind(theta2eta(anchorpt - locinit, .ldifference, earg= .edifference ),
+ theta2eta(Scale.init, .lscale, earg= .escale ),
+ theta2eta(shape.init, .lshape, earg= .eshape ))
+ }
+ }), list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
+ .edifference=edifference, .escale=escale, .eshape=eshape,
+ .anchor=anchor,
+ .ilocation=ilocation, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ loc = extra$LHSanchor - eta2theta(eta[,1], .ldifference, earg= .edifference)
+ Scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ shape = eta2theta(eta[,3], .lshape, earg= .eshape )
+ ans = rep(as.numeric(NA), len=length(shape))
+ ok = shape > 1
+ ans[ok] = loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
+ ans
+ }, list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
+ .edifference=edifference, .escale=escale, .eshape=eshape ))),
+ last=eval(substitute(expression({
+ misc$links <- c("difference"= .ldifference, "scale"= .lscale,
+ "shape"= .lshape)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
+ .edifference=edifference, .escale=escale, .eshape=eshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ loc = extra$LHSanchor - eta2theta(eta[,1], .ldifference, earg= .edifference)
+ Scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ shape = eta2theta(eta[,3], .lshape, earg= .eshape )
+ rzedd = Scale / (y-loc)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) + log(Scale) - 2 * log(y-loc) -
+ rzedd^shape + (shape-1) * log(rzedd)))
+ }, list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
+ .edifference=edifference, .escale=escale, .eshape=eshape ))),
+ vfamily=c("frechet3", "vextremes"),
+ deriv=eval(substitute(expression({
+ difference = eta2theta(eta[,1], .ldifference, earg= .edifference )
+ Scale = eta2theta(eta[,2], .lscale, earg= .escale )
+ shape = eta2theta(eta[,3], .lshape, earg= .eshape )
+ loc = extra$LHSanchor - difference
+ extra$location = loc # Store the location parameter estimate here
+ rzedd = Scale / (y-loc) # reciprocal of zedd
+ dl.dloc = (shape+1)/(y-loc) - (shape / (y-loc)) * (rzedd)^shape
+ dl.ddiff = -dl.dloc
+ dl.dScale = shape * (1-rzedd^shape) / Scale
+ dl.dshape = 1/shape + log(rzedd) * (1 - rzedd^shape)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w *
+ cbind(dl.ddiff * dtheta.deta(difference, .ldifference, earg= .edifference ),
+ dl.dScale * dtheta.deta(Scale, .lscale, earg= .escale ),
+ dl.dshape * dtheta.deta(shape, .lshape, earg= .eshape ))
+ derivnew
+ }), list( .ldifference=ldifference, .lscale=lscale, .lshape=lshape,
+ .edifference=edifference, .escale=escale, .eshape=eshape ))),
+ weight=eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M, effpos = .effpos,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list( .effpos=effpos ))))
+}
+
+
+recnormal1.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+recnormal1 = function(lmean="identity", lsd="loge",
+ imean=NULL, isd=NULL, method.init=1, zero=NULL)
+{
+
+ if(mode(lmean) != "character" && mode(lmean) != "name")
+ lmean = as.character(substitute(lmean))
+ if(mode(lsd) != "character" && mode(lsd) != "name")
+ lsd = as.character(substitute(lsd))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
+ method.init > 3.5)
+ stop("argument \"method.init\" must be 1 or 2 or 3")
+
+ new("vglmff",
+ blurb=c("Upper record values from a univariate normal distribution\n\n",
+ "Links: ",
+ namesof("mean", lmean, tag= TRUE), "; ",
+ namesof("sd", lsd, tag= TRUE),
+ "\n",
+ "Variance: sd^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("mean", .lmean, tag= FALSE),
+ namesof("sd", .lsd, tag= FALSE))
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(any(diff(y) <= 0))
+ stop("response values must be strictly increasing")
+ if(any(w != 1))
+ warning("weights should have unit values only")
+ if(!length(etastart)) {
+ mean.init = if(length( .imean)) rep( .imean, len=n) else {
+ if(.lmean == "loge") pmax(1/1024, min(y)) else min(y)}
+ sd.init = if(length( .isd)) rep( .isd, len=n) else {
+ if(.method.init == 1) 1*(sd(y)) else
+ if(.method.init == 2) 5*(sd(y)) else
+ .5*(sd(y))
+ }
+ etastart = cbind(theta2eta(rep(mean.init, len=n), .lmean),
+ theta2eta(rep(sd.init, len=n), .lsd))
+ }
+ }), list( .lmean=lmean, .lsd=lsd, .imean=imean, .isd=isd,
+ .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmean)
+ }, list( .lmean=lmean ))),
+ last=eval(substitute(expression({
+ misc$link = c("mu"= .lmean, "sd"= .lsd)
+ misc$expected = FALSE
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ sd = eta2theta(eta[,2], .lsd)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ zedd = (y - mu) / sd
+ NN = nrow(eta)
+ sum(w * (-log(sd) - 0.5 * zedd^2)) -
+ sum(w[-NN] * (log(1 - pnorm(zedd[-NN]))))
+ }
+ }, list( .lsd=lsd ))),
+ vfamily=c("recnormal1"),
+ deriv=eval(substitute(expression({
+ NN = nrow(eta)
+ mymu = eta2theta(eta[,1], .lmean)
+ sd = eta2theta(eta[,2], .lsd)
+ zedd = (y - mymu) / sd
+ temp200 = dnorm(zedd) / (1-pnorm(zedd))
+ dl.dmu = (zedd - temp200) / sd
+ dl.dmu[NN] = zedd[NN] / sd[NN]
+ dl.dsd = (-1 + zedd^2 - zedd * temp200) / sd
+ dl.dsd[NN] = (-1 + zedd[NN]^2) / sd[NN]
+ dmu.deta = dtheta.deta(mymu, .lmean)
+ dsd.deta = dtheta.deta(sd, .lsd)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
+ derivnew
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ weight=expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }))
+}
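+
+# Editor's sketch (not upstream code): recnormal1() expects the strictly
+# increasing sequence of upper record values; one way to extract records
+# from a simulated normal sample:
+if(FALSE) {
+    set.seed(1)
+    x = rnorm(2000, mean=10, sd=3)
+    recs = x[x > cummax(c(-Inf, x[-length(x)]))] # strictly increasing records
+    fit = vglm(recs ~ 1, recnormal1, trace=TRUE)
+    coef(fit, matrix=TRUE)
+}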
+
+
+
+recexp1.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+recexp1 = function(lrate="loge", irate=NULL, method.init=1)
+{
+
+ if(mode(lrate) != "character" && mode(lrate) != "name")
+ lrate = as.character(substitute(lrate))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, positi=TRUE) ||
+ method.init > 3.5)
+ stop("argument \"method.init\" must be 1 or 2 or 3")
+
+ new("vglmff",
+ blurb=c("Upper record values from a 1-parameter exponential distribution\n\n",
+ "Links: ",
+ namesof("rate", lrate, tag= TRUE),
+ "\n",
+ "Variance: 1/rate^2"),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("rate", .lrate, tag= FALSE))
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(any(diff(y) <= 0))
+ stop("response values must be strictly increasing")
+ if(any(w != 1))
+ warning("weights should have unit values only")
+ if(!length(etastart)) {
+ rate.init = if(length( .irate)) rep( .irate, len=n) else {
+ init.rate =
+ if(.method.init == 1) length(y) / y[length(y),1] else
+ if(.method.init == 2) 1/mean(y) else 1/median(y)
+ if(.lrate == "loge") pmax(1/1024, init.rate) else init.rate}
+ etastart = cbind(theta2eta(rep(rate.init, len=n), .lrate))
+ }
+ }), list( .lrate=lrate, .irate=irate, .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, .lrate)
+ }, list( .lrate=lrate ))),
+ last=eval(substitute(expression({
+ misc$link = c("rate"= .lrate)
+ misc$expected = TRUE
+ }), list( .lrate=lrate ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ rate = eta2theta(eta, .lrate)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ NN = length(eta)
+ y = cbind(y)
+ sum(w * log(rate)) - w[NN] * rate[NN] * y[NN,1]
+ }
+ }, list( .lrate=lrate ))),
+ vfamily=c("recexp1"),
+ deriv=eval(substitute(expression({
+ NN = length(eta)
+ rate = c(eta2theta(eta, .lrate))
+ dl.drate = 1 / rate
+ dl.drate[NN] = 1/ rate[NN] - y[NN,1]
+ drate.deta = dtheta.deta(rate, .lrate)
+ w * cbind(dl.drate * drate.deta)
+ }), list( .lrate=lrate ))),
+ weight=expression({
+ ed2l.drate2 = -1 / rate^2
+ wz = -w * drate.deta^2 * ed2l.drate2
+ wz
+ }))
+}
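+
+# Editor's sketch (not upstream code): the analogous fit for upper record
+# values from an exponential sample:
+if(FALSE) {
+    set.seed(2)
+    x = rexp(5000, rate=0.5)
+    recs = x[x > cummax(c(-Inf, x[-length(x)]))]
+    fit = vglm(recs ~ 1, recexp1, trace=TRUE)
+    Coef(fit) # estimate of 'rate'
+}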
+
+
+
+if(FALSE)
+exbilogi <- function(zero=c(3,6,7),
+ llocation="identity",
+ lscale="loge",
+ lshape="identity",
+ ldependency="logit",
+ percentiles=c(95,99),
+ ishape1=-0.1, ishape2=-0.1, idependency=1)
+{
+ stop("not working")
+
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+
+ if(mode(ldependency) != "character" && mode(ldependency) != "name")
+ ldependency = as.character(substitute(ldependency))
+ if(!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100)
+ stop("bad input for argument \"percentiles\"")
+
+ new("vglmff",
+ blurb=c("Extreme Bivariate Logistic Distribution\n",
+ "Links: ",
+ namesof("loc1", link=llocation), ", ",
+ namesof("scale1", link=lscale), ", ",
+ namesof("shape1", link=lshape), ", ",
+ namesof("loc2", link=llocation), ", ",
+ namesof("scale2", link=lscale), ", ",
+ namesof("shape2", link=lshape), ", ",
+ namesof("dependency", link=ldependency)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("loc1", .llocation, short=TRUE),
+ namesof("scale1", .lscale, short=TRUE),
+ namesof("shape1", .lshape, short=TRUE),
+ namesof("loc2", .llocation, short=TRUE),
+ namesof("scale2", .lscale, short=TRUE),
+ namesof("shape2", .lshape, short=TRUE),
+ namesof("dependency", .ldependency, short=TRUE))
+ if(ncol(as.matrix(y)) != 2)
+ stop("the response must be a two-column matrix")
+
+ if(!length(etastart)) {
+ Euler = 0.57721566490153286
+ # Initial values for limiting case as xi --> 0, r_i==1
+ sig1.init = sqrt(6 * var(y[,1]))/pi
+ mu1.init = median(y[,1]) - Euler * sig1.init
+ xi1.init = if(length(.ishape1)) .ishape1 else 0.1
+ sig2.init = sqrt(6 * var(y[,2]))/pi
+ mu2.init = median(y[,2]) - Euler * sig2.init
+ xi2.init = if(length(.ishape2)) .ishape2 else 0.1
+ alpha.init = if(length(.idependency)) .idependency else 1
+ etastart = cbind(rep(theta2eta(mu1.init, .llocation), nrow(y)),
+ theta2eta(sig1.init, .lscale),
+ theta2eta(xi1.init, .lshape),
+ rep(theta2eta(mu2.init, .llocation), nrow(y)),
+ theta2eta(sig2.init, .lscale),
+ theta2eta(xi2.init, .lshape),
+ theta2eta(alpha.init, .ldependency))
+ bad1 = 1 + xi1.init*(y[,1]-mu1.init)/sig1.init <= 0
+ bad2 = 1 + xi2.init*(y[,2]-mu2.init)/sig2.init <= 0
+ if(any(bad1, na.rm = TRUE) || any(bad2, na.rm = TRUE))
+ stop(paste(sum(bad1, na.rm = TRUE) + sum(bad2, na.rm = TRUE),
+ "observations violating",
+ "boundary constraints while initializing"))
+ }
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .ldependency=ldependency,
+ .ishape1=ishape1, .ishape2=ishape2, .idependency=idependency ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mmus = eta2theta(eta[,c(1,4)], .llocation)
+ sigmas = eta2theta(eta[,c(2,5)], .lscale)
+ xis = eta2theta(eta[,c(3,6)], .lshape)
+ alpha = eta2theta(eta[,7], .ldependency)
+ cent = .percentiles
+ lp = length(cent)
+ fv = matrix(as.numeric(NA), nrow(eta), lp)
+ dimnames(fv) = list(dimnames(eta)[[1]],
+ paste(as.character(.percentiles), "%", sep=""))
+ fv
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .ldependency=ldependency, .percentiles=percentiles ))),
+ last=eval(substitute(expression({
+ misc$links = c(loc1 = .llocation, scale1 = .lscale, shape1 = .lshape,
+ loc2 = .llocation, scale2 = .lscale, shape2 = .lshape,
+ dependency= .ldependency)
+ misc$true.mu = FALSE # @fitted.value is not a true mu
+ misc$percentiles = .percentiles
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .ldependency=ldependency, .percentiles=percentiles ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals=FALSE, eta, extra=NULL) {
+ mmus = eta2theta(eta[,c(1,4)], .llocation)
+ sigmas = eta2theta(eta[,c(2,5)], .lscale)
+ xis = eta2theta(eta[,c(3,6)], .lshape)
+ alpha = eta2theta(eta[,7], .ldependency)
+ zedds = (y - mmus) / sigmas
+ V = (zedds[,1]^(1/alpha) + zedds[,2]^(1/alpha))^alpha
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(1-alpha) - log(alpha) -
+ log(sigmas[,1]) - log(sigmas[,2]) +
+ (xis[,1] + 1/alpha) * log(zedds[,1]) +
+ (xis[,2] + 1/alpha) * log(zedds[,2]) +
+ (1 - 2/alpha) * log(V) - V))
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .ldependency=ldependency ))),
+ vfamily=c("exbilogi", "vextremes"),
+ deriv=eval(substitute(expression({
+ mmus = eta2theta(eta[,c(1,4)], .llocation)
+ sigmas = eta2theta(eta[,c(2,5)], .lscale)
+ xis = eta2theta(eta[,c(3,6)], .lshape)
+ alpha = eta2theta(eta[,7], .ldependency)
+ zedds = (y - mmus) / sigmas
+ Zee = (1 + zedds)^(-1/xis)
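+ # Editor's note: the three dZee.* assignments below are unfinished
+ # placeholders; the chained assignment leaves them all equal to A, which
+ # is one reason this family is disabled via stop("not working") above.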
+ dZee.dmus =
+ dZee.dsigmas =
+ dZee.dxis =
+ A = (zedds[,1]^(1/alpha) + zedds[,2]^(1/alpha))
+ V = A^alpha
+ dV.dalpha = V * (log(A) - (zedds[,1]^(1/alpha) *log(zedds[,1]) +
+ zedds[,2]^(1/alpha) *log(zedds[,2])) / (alpha*A))
+ temp400 = (1/alpha + xis)
+ temp300 = ((1-2/alpha)/V - 1)
+
+ dzedds.dmus = Zee^(1+xis) / sigmas
+ dzedds.dsigmas = (y- zedds) * Zee^(1+xis) / sigmas
+ dzedds.dxis = Zee * (log(Zee)/xis - zedds/(1+xis*(y-mmus)/sigmas)) / xis
+ dV.dmus = alpha * V^(1-1/alpha) * dZee.dmus
+ dV.dsigmas = alpha * V^(1-1/alpha) * dZee.dsigmas
+ dV.dxis = alpha * V^(1-1/alpha) * dZee.dxis
+
+ dl.dalpha = -1/(1-alpha) - 1/alpha - (log(zedds[,1]) + log(zedds[,2])) /
+ alpha^2 + 2*log(V)/alpha^2 + temp300 * dV.dalpha
+ dl.dmus = temp400 * dzedds.dmus / zedds + temp300 * dV.dmus
+ dl.dsigmas = -1/sigmas + temp400 * dzedds.dsigmas / zedds +
+ temp300 * dV.dsigmas
+ dl.dxis = log(zedds) + temp400 * dzedds.dxis / zedds + temp300 * dV.dxis
+ dmus.dtheta = dtheta.deta(mmus, .llocation)
+ dsigmas.dtheta = dtheta.deta(sigmas, .lscale)
+ dxis.dtheta = dtheta.deta(xis, .lshape)
+
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dmus[,1] * dmus.dtheta[,1],
+ dl.dsigmas[,1] * dsigmas.dtheta[,1],
+ dl.dxis[,1] * dxis.dtheta[,1],
+ dl.dmus[,2] * dmus.dtheta[,2],
+ dl.dsigmas[,2] * dsigmas.dtheta[,2],
+ dl.dxis[,2] * dxis.dtheta[,2],
+ dl.dalpha * dtheta.deta(alpha, .ldependency))
+ derivnew
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .ldependency=ldependency ))),
+ weight=eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list( .llocation=llocation ))))
+}
+
+
+
+
diff --git a/R/family.functions.q b/R/family.functions.q
new file mode 100644
index 0000000..9975da5
--- /dev/null
+++ b/R/family.functions.q
@@ -0,0 +1,274 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+fill =
+fill1 = fill2 = fill3 =
+function(x, values=0, ncolx=ncol(x)) {
+ x = as.matrix(x)
+ ans = matrix(values, nrow=nrow(x), ncol=ncolx)
+ ans
+}
+
+
+extract.arg <- function(a) {
+ s <- substitute(a)
+ as.character(s)
+}
+
+
+
+remove.arg <- function(string)
+{
+
+ nc <- nchar(string)
+ bits <- substring(string, 1:nc, 1:nc)
+ b1 <- (1:nc)[bits=="("]
+ b1 <- if(length(b1)) b1[1]-1 else nc
+ if(b1==0)
+ return("")
+ string <- paste(bits[1:b1], collapse="")
+ string
+}
+
+
+add.arg <- function(string, arg.string)
+{
+
+ if(arg.string=="")
+ return(string)
+ nc <- nchar(string)
+ lastc <- substring(string, nc, nc)
+ if(lastc == ")")
+ {
+ if(substring(string, nc-1, nc-1) == "(")
+ {
+ paste(substring(string, 1, nc-2), "(", arg.string, ")", sep="")
+ } else
+ paste(substring(string, 1, nc-1), ", ", arg.string, ")", sep="")
+ } else
+ paste(string, "(", arg.string, ")", sep="")
+}
+
+
+get.arg <- function(string)
+{
+
+ nc <- nchar(string)
+ bits <- substring(string, 1:nc, 1:nc)
+ b1 <- (1:nc)[bits=="("]
+ b2 <- (1:nc)[bits==")"]
+ b1 <- if(length(b1)) min(b1) else return("") # stop("no \"(\" in string")
+ b2 <- if(length(b2)) max(b2) else return("") # stop("no \")\" in string")
+ if(b2-b1==1) "" else paste(bits[(1+b1):(b2-1)], collapse="")
+}
+
+
+
+
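+# Editor's note: ei() is defined twice; the second definition below, based
+# on diag(), overwrites the first at load time.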
+ei <- function(i,n)
+ cbind(as.numeric((1:n)==i))
+
+ei = function(i, n)
+ diag(n)[,i,drop=FALSE]
+
+eij = function(i, n) {
+ temp = matrix(0, n, 1)
+ if(length(i))
+ temp[i,] = 1
+ temp
+}
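+
+# Editor's sketch (not upstream code): ei() and eij() build unit column
+# vectors, e.g.
+if(FALSE) {
+    ei(2, 4) # 4 x 1 matrix with a 1 in row 2
+    eij(c(1, 3), 4) # 4 x 1 matrix with 1s in rows 1 and 3
+}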
+
+
+dneg.binomial <- function(x, k, prob)
+{
+
+ care.exp(x * log(1-prob) + k * log(prob) + lgamma(x+k) - lgamma(k) -
+ lgamma(x+1))
+}
+
+
+tapplymat1 <- function(mat, function.arg=c("cumsum", "diff", "cumprod"))
+{
+
+
+ if(!missing(function.arg))
+ function.arg <- as.character(substitute(function.arg))
+ function.arg <- match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
+
+ type <- switch(function.arg,
+ cumsum=1,
+ diff=2,
+ cumprod=3,
+ stop("function.arg not matched"))
+
+ if(!is.matrix(mat))
+ mat <- as.matrix(mat)
+ nr <- nrow(mat)
+ nc <- ncol(mat)
+ fred <- dotC(name="tapplymat1", mat=as.double(mat),
+ as.integer(nr), as.integer(nc), as.integer(type))
+
+ dim(fred$mat) <- c(nr, nc)
+ dimnames(fred$mat) <- dimnames(mat)
+ switch(function.arg,
+ cumsum=fred$mat,
+ diff=fred$mat[,-1,drop=FALSE],
+ cumprod=fred$mat)
+}
+
+
+
+matrix.power <- function(wz, M, power, fast=TRUE)
+{
+
+
+
+
+ n <- nrow(wz)
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ dimm.value <- if(is.matrix(wz)) ncol(wz) else 1
+ if(dimm.value > M*(M+1)/2)
+ stop("too many columns")
+
+
+ if(M == 1 || dimm.value==M) {
+ WW <- wz^power # May contain NAs
+ return(t(WW))
+ }
+
+ if(fast) {
+ k <- veigen(t(wz), M=M) # matrix.arg)
+ evals <- k$values # M x n
+ evects <- k$vectors # M x M x n
+ } else {
+ stop("sorry, can't handle matrix-band form yet")
+ k <- unlist(apply(wz,3,eigen), use.names=FALSE)
+ dim(k) <- c(M,M+1,n)
+ evals <- k[,1,,drop=TRUE] # M x n
+ evects <- k[,-1,,drop=TRUE] # M x M x n
+ }
+
+ temp <- evals^power # Some values may be NAs
+
+
+ index <- as.vector( matrix(1, 1, M) %*% is.na(temp) )
+
+
+ index <- index == 0 # zz <=?
+ if(!all(index)) {
+ warning(paste("Some weight matrices have negative",
+ "eigenvalues. They\nwill be assigned NAs"))
+ temp[,!index] <- 1 # zz; Replace by a pos value
+ }
+
+ WW <- mux55(evects, temp, M=M)
+ WW[,!index] <- NA
+ WW
+}
+
+
+
+rss.vgam <- function(z, wz, M)
+{
+
+
+ if(M==1)
+ return(sum(c(wz) * c(z^2)))
+ wzz <- mux22(t(wz), z, M, as.mat=TRUE) # else mux2(wz, z)
+ ans <- sum(wzz * z)
+ ans
+}
+
+wweighted.mean <- function(y, w = NULL, matrix.arg=TRUE)
+{
+ if(!matrix.arg)
+ stop("currently, matrix.arg must be TRUE")
+ y <- as.matrix(y)
+ M <- ncol(y)
+ n <- nrow(y)
+ if(M==1) {
+ if(missing(w)) mean(y) else sum(w * y)/sum(w)
+ } else {
+ if(missing(w)) y %*% rep(1, n) else {
+ numer <- mux22(t(w), y, M, as.matrix=TRUE) # matrix.arg=matrix.arg,
+ numer <- t(numer) %*% rep(1, n)
+ denom <- t(w) %*% rep(1, n)
+ denom <- matrix(denom, 1, length(denom))
+ if(matrix.arg)
+ denom <- m2adefault(denom, M=M)[,,1]
+ c(solve(denom, numer))
+ }
+ }
+}
+
+
+
+
+veigen <- function(x, M)
+{
+
+
+ n <- ncol(x)
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ dimm.value <- nrow(x) # usually M or M(M+1)/2
+
+ z <- dotFortran(name="veigen",
+ as.integer(M),
+ as.integer(n),
+ as.double(x),
+ values = double(M * n),
+ as.integer(1),
+ vectors = double(M*M*n),
+ double(M),
+ double(M),
+ wk=double(M*M),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(dimm.value),
+ error.code = integer(1))
+
+ if(z$error.code)
+ stop(paste("Eigen algorithm (rs) returned error code", z$error.code))
+ ord <- M:1
+ dim(z$values) <- c(M,n)
+ z$values <- z$values[ord,,drop=FALSE]
+ dim(z$vectors) <- c(M,M,n)
+ z$vectors <- z$vectors[,ord,,drop=FALSE]
+ return(list(values = z$values, vectors = z$vectors))
+}
+
+
+
+
+
+ima <- function(j,k,M)
+{
+ if(length(M)>1 || M<=0 || j<=0 || k<=0 || j>M || k>M)
+ stop("input wrong")
+ m <- diag(M)
+ m[col(m)<=row(m)] <- 1:(M*(M+1)/2)
+ if(j>=k) m[j,k] else m[k,j]
+}
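+
+# Editor's sketch (not upstream code): ima() returns the position of
+# element (j,k) in the column-major packing of an M x M lower triangle;
+# for M=3 the packed order is (1,1),(2,1),(3,1),(2,2),(3,2),(3,3):
+if(FALSE) {
+    stopifnot(ima(2, 1, 3) == 2, ima(2, 2, 3) == 4, ima(3, 3, 3) == 6)
+}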
+
+
+
+checkwz <- function(wz, M, trace=FALSE, wzepsilon=.Machine$double.eps^0.75) {
+ if(wzepsilon > 0.5) warning("wzepsilon is probably too large")
+ if(!is.matrix(wz)) wz = as.matrix(wz)
+ if((temp <- sum(wz[,1:M,drop=FALSE] < wzepsilon)))
+ warning(paste(temp, "elements replaced by", signif(wzepsilon, 5)))
+ wz[,1:M] = pmax(wzepsilon, wz[,1:M])
+ wz
+}
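+
+# Editor's sketch (not upstream code): checkwz() floors working-weight
+# diagonals that fall below wzepsilon, warning about how many were replaced:
+if(FALSE) {
+    wz = cbind(c(1, 1e-20, -2), c(3, 4, 5)) # two diagonal elements per row (M=2)
+    checkwz(wz, M=2)
+}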
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.genetic.q b/R/family.genetic.q
new file mode 100644
index 0000000..2dabad3
--- /dev/null
+++ b/R/family.genetic.q
@@ -0,0 +1,571 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+G1G2G3 <- function(link="logit", ip1=NULL, ip2=NULL, iF=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("G1/G2/G3 zzphenotype\n\n",
+ "Links: ",
+ namesof("p1", link), ", ",
+ namesof("p2", link), ", ",
+ namesof("f", link, tag=FALSE),
+ "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- c(namesof("p1", .link, tag=FALSE),
+ namesof("p2", .link, tag=FALSE),
+ namesof("f", .link, tag=FALSE))
+ if(is.null(etastart)) {
+ p1 <- if(is.numeric(.ip1)) rep(.ip1, n) else
+ sqrt(mustart[,1])
+ f <- if(is.numeric(.iF)) rep(.iF, n) else
+ rep(0.01, n) # close to zero
+ p2 <- if(is.numeric(.ip2)) rep(.ip2, n) else
+ mustart[,2] / (sqrt(mustart[,1]) * 2)
+ if(any(p1 <= 0) || any(p1 >= 1))
+ stop("bad initial value for p1")
+ if(any(p2 <= 0) || any(p2 >= 1))
+ stop("bad initial value for p2")
+ etastart <- cbind(theta2eta(p1, .link),
+ theta2eta(p2, .link),
+ theta2eta(f, .link))
+ }
+ }), list(.link=link, .ip1=ip1, .ip2=ip2, .iF=iF))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ p1 <- eta2theta(eta[,1], link=.link)
+ p2 <- eta2theta(eta[,2], link=.link)
+ p3 <- 1-p1-p2
+ f <- eta2theta(eta[,3], link=.link)
+ cbind("G1/G1"=f*p1+(1-f)*p1^2,
+ "G1/G2"=2*p1*p2*(1-f),
+ "G1/G3"=2*p1*p3*(1-f),
+ "G2/G2"=f*p2+(1-f)*p2^2,
+ "G2/G3"=2*p2*p3*(1-f),
+ "G3/G3"=f*p3+(1-f)*p3^2)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c(p1= .link, p2= .link, f= .link)
+ }), list(.link=link))),
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("G1G2G3", "vgenetic"),
+ deriv=eval(substitute(expression({
+ p1 <- eta2theta(eta[,1], link=.link)
+ p2 <- eta2theta(eta[,2], link=.link)
+ p3 <- 1-p1-p2
+ f <- eta2theta(eta[,3], link=.link)
+ dP1 <- cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1), 0,
+ -2*(1-f)*p2, -f - 2*p3*(1-f))
+ dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
+ 2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
+ dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
+ p3*(1-p3))
+ dl1 <- apply(y * dP1 / mu, 1, sum)
+ dl2 <- apply(y * dP2 / mu, 1, sum)
+ dl3 <- apply(y * dP3 / mu, 1, sum)
+ dPP.deta <- dtheta.deta(cbind(p1,p2,f), link=.link)
+ w * cbind(dPP.deta[,1] * dl1, dPP.deta[,2] * dl2,
+ dPP.deta[,3] * dl3)
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ dPP <- array(c(dP1,dP2,dP3), c(n,6,3))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ for(i1 in 1:M)
+ for(i2 in i1:M) {
+ index <- iam(i1,i2,M)
+ wz[,index] <- apply(dPP[,,i1,drop=TRUE] * dPP[,,i2,drop=TRUE] /
+ mu, 1, sum) * dPP.deta[,i1] * dPP.deta[,i2]
+ }
+ w * wz
+ }), list(.link=link))))
+}
+
+
+
+AAaa.nohw <- function(link="logit", ipA=NULL, iF=NULL)
+{
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
+ "Links: ",
+ namesof("pA", link), ", ",
+ namesof("f", "identity", tag=FALSE),
+ "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- c(namesof("pA", .link, tag=FALSE),
+ namesof("f", "identity", tag=FALSE))
+ if(is.null(etastart)) {
+ pA <- if(is.numeric(.ipA)) rep(.ipA, n) else
+ c(sqrt(mustart[,1] - mustart[,2]/2))
+ f <- if(is.numeric(.iF)) rep(.iF, n) else
+ rep(0.01, n) # 1- mustart[,2]/(2*pA*(1-pA))
+ if(any(pA <= 0) || any(pA >= 1))
+ stop("bad initial value for pA")
+ etastart <- cbind(theta2eta(pA, .link),
+ theta2eta(f, "identity"))
+ }
+ }), list(.link=link, .ipA=ipA, .iF=iF))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ pA <- eta2theta(eta[,1], link=.link)
+ f <- eta2theta(eta[,2], link="identity")
+ cbind(AA=pA^2+pA*(1-pA)*f, Aa=2*pA*(1-pA)*(1-f),
+ aa=(1-pA)^2 + pA*(1-pA)*f)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c(pA= .link, f= "identity")
+ }), list(.link=link))),
+ link=eval(substitute(function(mu, extra=NULL){
+ pA <- sqrt(mu[,1] - mu[,2]/2)
+ f <- 1- mu[,2]/(2*pA*(1-pA))
+ cbind(theta2eta(pA, .link),
+ theta2eta(f, "identity"))
+ }, list(.link=link))),
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("AAaa.nohw", "vgenetic"),
+ deriv=eval(substitute(expression({
+ pA <- eta2theta(eta[,1], link=.link)
+ f <- eta2theta(eta[,2], link="identity")
+ dP1 <- cbind(f + 2*pA*(1-f), 2*(1-f)*(1-2*pA), -2*(1-pA) +f*(1-2*pA))
+ dP2 <- cbind(pA*(1-pA), -2*pA*(1-pA), pA*(1-pA))
+ dl1 <- apply(y * dP1 / mu, 1, sum)
+ dl2 <- apply(y * dP2 / mu, 1, sum)
+ dPP.deta <- dtheta.deta(pA, link=.link)
+ w * cbind(dPP.deta * dl1, dl2)
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ dPP <- array(c(dP1,dP2), c(n,3,2))
+ dPP.deta <- cbind(dtheta.deta(pA, link=.link),
+ dtheta.deta(f, link="identity"))
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ for(i1 in 1:M)
+ for(i2 in i1:M) {
+ index <- iam(i1,i2,M)
+ wz[,index] <- apply(dPP[,,i1,drop=TRUE] * dPP[,,i2,drop=TRUE] /
+ mu, 1, sum) * dPP.deta[,i1] * dPP.deta[,i2]
+ }
+ w * wz
+ }), list(.link=link))))
+}
+
+
+AB.Ab.aB.ab2 <- function(link="logit", init.p=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("AB-Ab-aB-ab2 phenotype\n\n",
+ "Links: ",
+ namesof("p", link),
+ "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- namesof("p", .link, tag=FALSE)
+
+ if(is.null(etastart)) {
+ p.init <- if(is.numeric(.init.p)) rep(.init.p, n) else
+ c(1 - 2 * sqrt(mustart[,4]))
+ etastart <- theta2eta(p.init, .link)
+ }
+ }), list(.link=link, .init.p=init.p))),
+ inverse=eval(substitute(function(eta,extra=NULL){
+ p <- eta2theta(eta, link=.link)
+ cbind("A-B-"=(2+(1-p)^2),
+ "A-bb"=(1-(1-p)^2),
+ "aaB-"=(1-(1-p)^2),
+ "aabb"=(1-p)^2) / 4
+ }, list(.link=link) )),
+ last=eval(substitute(expression({
+ misc$link <- c(p = .link)
+ }), list(.link=link) )),
+ link=eval(substitute(function(mu, extra=NULL){
+ p <- 1 - 2 * sqrt(mu[,4])
+ theta2eta(p, .link)
+ }, list(.link=link) )),
+ loglikelihood= function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("AB.Ab.aB.ab2", "vgenetic"),
+ deriv=eval(substitute(expression({
+ pp <- eta2theta(eta, link=.link)
+ dP1 <- cbind(-0.5*(1-pp), 0.5*(1-pp), 0.5*(1-pp), -0.5*(1-pp))
+ dl1 <- apply(y * dP1 / mu, 1, sum)
+ dPP.deta <- dtheta.deta(pp, link=.link)
+ w * dPP.deta * dl1
+ }), list(.link=link) )),
+ weight=eval(substitute(expression({
+ wz <- apply(dP1 * dP1 / mu, 1, sum) * dPP.deta^2
+ w * wz
+ }), list(.link=link) )))
+}
+
+
+
+A1A2A3 <- function(link="logit", ip1=NULL, ip2=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("A1A2A3 Allele System (A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
+ "Links: ",
+ namesof("p1",link), ", ",
+ namesof("p2", link, tag=FALSE),
+ "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- c(namesof("p_A",.link,tag=FALSE),
+ namesof("p_B",.link,tag=FALSE))
+ if(is.null(etastart)) {
+ p1 <- if(is.numeric(.ip1)) rep(.ip1, n) else
+ c(sqrt(mustart[,1]))
+ p2 <- if(is.numeric(.ip2)) rep(.ip2, n) else
+ c(sqrt(mustart[,3]))
+ etastart <- cbind(theta2eta(p1,.link), theta2eta(p2,.link))
+ }
+ }), list(.link=link, .ip1=ip1, .ip2=ip2))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ p1 <- eta2theta(eta[,1], link=.link)
+ p2 <- eta2theta(eta[,2], link=.link)
+ qq <- 1-p1-p2
+ cbind(A1A1=p1*p1, A1A2=2*p1*p2, A2A2=p2*p2, A1A3=2*p1*qq,
+ A2A3=2*p2*qq, A3A3=qq*qq)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c(p1= .link, p2= .link)
+ }), list(.link=link))),
+ link=eval(substitute(function(mu, extra=NULL){
+ p1 <- sqrt(mu[,1])
+ p2 <- sqrt(mu[,3])
+ qq <- 1-p1-p2
+ cbind(theta2eta(p1,.link), theta2eta(p2,.link))
+ }, list(.link=link))),
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("A1A2A3", "vgenetic"),
+ deriv=eval(substitute(expression({
+ p1 <- eta2theta(eta[,1], link=.link)
+ p2 <- eta2theta(eta[,2], link=.link)
+ dl.dp1 <- (2*y[,1]+y[,2]+y[,4])/p1 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
+ dl.dp2 <- (2*y[,3]+y[,2]+y[,5])/p2 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
+ dp1.deta <- dtheta.deta(p1, link=.link)
+ dp2.deta <- dtheta.deta(p2, link=.link)
+ w * cbind(dl.dp1 * dp1.deta, dl.dp2 * dp2.deta)
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ qq <- 1-p1-p2
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ ed2l.dp12 <- 2 * (1/p1 + 1/qq)
+ ed2l.dp22 <- 2 * (1/p2 + 1/qq)
+ ed2l.dp1dp2 <- 2 / qq
+ wz[,iam(1,1,M)] <- dp1.deta^2 * ed2l.dp12
+ wz[,iam(2,2,M)] <- dp2.deta^2 * ed2l.dp22
+ wz[,iam(1,2,M)] <- ed2l.dp1dp2 * dp1.deta * dp2.deta
+ w * wz
+ }), list(.link=link))))
+}
+
+
+
+
+MNSs <- function(link="logit", imS=NULL, ims=NULL, inS=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
+ "Links: ",
+ namesof("mS",link), ", ",
+ namesof("ms",link), ", ",
+ namesof("nS", link, tag=FALSE),
+ "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- c(namesof("mS",.link,tag=FALSE),
+ namesof("ms",.link,tag=FALSE),
+ namesof("nS",.link,tag=FALSE))
+ if(is.null(etastart)) {
+ ms <- if(is.numeric(.ims)) rep(.ims, n) else
+ c(sqrt(mustart[,2]))
+ ns <- c(sqrt(mustart[,6]))
+ nS <- if(is.numeric(.inS)) rep(.inS, n) else
+ c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
+ mS <- if(is.numeric(.imS)) rep(.imS, n) else
+ 1-ns-ms-nS
+ etastart <- cbind(theta2eta(mS,.link),
+ theta2eta(ms,.link),
+ theta2eta(nS,.link))
+ }
+ }), list(.link=link, .imS=imS, .ims=ims, .inS=inS))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ mS <- eta2theta(eta[,1], link=.link)
+ ms <- eta2theta(eta[,2], link=.link)
+ nS <- eta2theta(eta[,3], link=.link)
+ ns <- 1-mS-ms-nS
+ cbind(MS=mS^2+2*mS*ms, Ms=ms^2, MNS=2*(mS*nS+ms*nS+mS*ns),
+ MNs=2*ms*ns, NS=nS^2 + 2*nS*ns, Ns=ns^2)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c(mS= .link, ms= .link, nS= .link)
+ }), list(.link=link))),
+ link=eval(substitute(function(mu, extra=NULL){
+ ms <- sqrt(mu[,2])
+ ns <- sqrt(mu[,6])
+ nS <- c(-ns + sqrt(ns^2 + mu[,5])) # solve the same quadratic as in @initialize
+ mS <- 1-ns-ms-nS
+ cbind(theta2eta(mS,.link),
+ theta2eta(ms,.link),
+ theta2eta(nS,.link))
+ }, list(.link=link))),
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("MNSs", "vgenetic"),
+ deriv=eval(substitute(expression({
+ mS <- eta2theta(eta[,1], link=.link)
+ ms <- eta2theta(eta[,2], link=.link)
+ nS <- eta2theta(eta[,3], link=.link)
+ ns <- 1-mS-ms-nS
+ dP1 <- cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
+ dP2 <- cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
+ dP3 <- cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
+ dl1 <- apply(y * dP1 / mu, 1, sum)
+ dl2 <- apply(y * dP2 / mu, 1, sum)
+ dl3 <- apply(y * dP3 / mu, 1, sum)
+ dPP.deta <- dtheta.deta(cbind(mS,ms,nS), link=.link)
+ w * dPP.deta * cbind(dl1, dl2, dl3)
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ dPP <- array(c(dP1,dP2,dP3), c(n,6,3))
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ for(i1 in 1:M)
+ for(i2 in i1:M) {
+ index <- iam(i1,i2,M)
+ wz[,index] <- apply(dPP[,,i1,drop=TRUE] * dPP[,,i2,drop=TRUE] /
+ mu, 1, sum) * dPP.deta[,i1] * dPP.deta[,i2]
+ }
+ w * wz
+ }), list(.link=link))))
+}
+
+
+ABO <- function(link="logit", ir=NULL, ip=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
+ "Links: ",
+ namesof("p",link), ", ",
+ namesof("q", link, tag=FALSE),
+ "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- c(namesof("p_A",.link,tag=FALSE), namesof("p_B",.link,tag=FALSE))
+ if(is.null(etastart)) {
+ r <- if(is.numeric(.ir)) rep(.ir, n) else
+ c(sqrt(mustart[,4]))
+ p <- if(is.numeric(.ip)) rep(.ip, n) else
+ c(1-sqrt(mustart[,2]+mustart[,4]))
+ q <- 1-p-r
+ etastart <- cbind(theta2eta(p,.link), theta2eta(q,.link))
+ }
+ }), list(.link=link, .ir=ir, .ip=ip))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ p <- eta2theta(eta[,1], link=.link)
+ q <- eta2theta(eta[,2], link=.link)
+ r <- 1-p-q
+ cbind(A=p*(p+2*r), B=q*(q+2*r), AB=2*p*q, O=r*r)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c(p = .link, q = .link)
+ }), list(.link=link))),
+ link=eval(substitute(function(mu, extra=NULL){
+ r <- sqrt(mu[,4])
+ p1 <- ( (1-r)+sqrt((1-r)^2 + 2*mu[,3]) )/2
+ p2 <- ( (1-r)-sqrt((1-r)^2 + 2*mu[,3]) )/2
+ index <- p2 >= 0 & p2 <= 1
+ p <- p1
+ p[index] <- p2[index]
+ q <- 1-p-r
+ cbind(theta2eta(p,.link), theta2eta(q,.link))
+ }, list(.link=link))),
+
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("ABO", "vgenetic"),
+ deriv=eval(substitute(expression({
+ p <- eta2theta(eta[,1], link=.link)
+ q <- eta2theta(eta[,2], link=.link)
+ r <- 1-p-q
+ pbar <- 2*r+p
+ qbar <- 2*r+q
+ na <- y[,1]
+ nb <- y[,2]
+ nab <- y[,3]
+ no <- y[,4]
+ dl.dp <- (na+nab)/p - na/pbar - 2*nb/qbar - 2*no/r
+ dl.dq <- (nb+nab)/q - 2*na/pbar - nb/qbar - 2*no/r
+ dp.deta <- dtheta.deta(p, link=.link)
+ dq.deta <- dtheta.deta(q, link=.link)
+ w * cbind(dl.dp * dp.deta, dl.dq * dq.deta)
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ ed2l.dp2 <- w * (1 + 2/p + 4*q/qbar + p/pbar)
+ ed2l.dq2 <- w * (1 + 2/q + 4*p/pbar + q/qbar)
+ ed2l.dpdq <- 2 * w * (1 + q/qbar + p/pbar)
+ wz[,iam(1,1,M)] <- dp.deta^2 * ed2l.dp2
+ wz[,iam(2,2,M)] <- dq.deta^2 * ed2l.dq2
+ wz[,iam(1,2,M)] <- ed2l.dpdq * dp.deta * dq.deta
+ wz
+ }), list(.link=link))))
+}
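+
+# Editor's sketch (not upstream code): a classic ABO fit from phenotype
+# counts, supplied as a one-row counts matrix (the counts here are
+# hypothetical):
+if(FALSE) {
+    ymat = cbind(A=725, B=258, AB=72, O=1073)
+    fit = vglm(ymat ~ 1, ABO(link="logit"), trace=TRUE)
+    Coef(fit) # estimated allele frequencies p and q
+}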
+
+
+
+
+AB.Ab.aB.ab <- function(link="logit", init.p=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("AB-Ab-aB-ab phenotype\n\n",
+ "Links: ", namesof("p", link, tag=TRUE), "\n",
+ "Variance: multinomial type variance"),
+
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- namesof("p", .link, tag=FALSE)
+ if(is.null(etastart)) {
+ p <- if(is.numeric(.init.p)) rep(.init.p,n) else
+ c(sqrt(4*mustart[,4]))
+ etastart <- cbind(theta2eta(p, .link))
+ }
+ }), list(.link=link, .init.p=init.p))),
+ inverse=eval(substitute(function(eta,extra=NULL){
+ p <- eta2theta(eta, link=.link)
+ pp4 <- p*p/4
+ cbind(AB=0.5+pp4, Ab=0.25-pp4, aB=0.25-pp4, ab=pp4)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c(p = .link)
+ }), list(.link=link))),
+ link=eval(substitute(function(mu, extra=NULL){
+ p <- sqrt(4* mu[,4])
+ theta2eta(p, .link)
+ }, list(.link=link))),
+
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("AB.Ab.aB.ab", "vgenetic"),
+ deriv=eval(substitute(expression({
+ pp <- eta2theta(eta, link=.link)
+ p2 <- pp*pp
+ nAB <- w*y[,1]
+ nAb <- w*y[,2]
+ naB <- w*y[,3]
+ nab <- w*y[,4]
+ dl.dp <- 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
+ dp.deta <- dtheta.deta(pp, link=.link)
+ dl.dp * dp.deta
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ ed2l.dp2 <- 4 * w * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
+ wz <- cbind((dp.deta^2) * ed2l.dp2)
+ wz
+ }), list(.link=link))))
+}
+
+
+
+AA.Aa.aa <- function(link="logit", init.pA=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("AA-Aa-aa phenotype\n\n",
+ "Links: ", namesof("p_A",link), "\n",
+ "Variance: multinomial type variance"),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- namesof("p_A", .link, tag=FALSE)
+ if(is.null(etastart)) {
+ pA <- if(is.numeric(.init.pA)) rep(.init.pA, n) else
+ c(sqrt(mustart[,1]))
+ etastart <- cbind(theta2eta(pA, .link))
+ }
+ }), list(.link=link, .init.pA=init.pA))),
+ inverse=eval(substitute(function(eta,extra=NULL){
+ pA <- eta2theta(eta, link=.link)
+ pp <- pA*pA
+ cbind(AA=pp, Aa=2*pA*(1-pA), aa=(1-pA)^2)
+ }, list(.link=link))),
+ last=eval(substitute(expression({
+ misc$link <- c("p_A" = .link)
+ }), list(.link=link))),
+ link=eval(substitute(function(mu, extra=NULL){
+ pA <- sqrt(mu[,1])
+ theta2eta(pA, .link)
+ }, list(.link=link))),
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum((w*y)*log(mu)),
+ vfamily=c("AA.Aa.aa", "vgenetic"),
+ deriv=eval(substitute(expression({
+ pA <- eta2theta(eta, link=.link)
+ nAA <- w*y[,1]
+ nAa <- w*y[,2]
+ naa <- w*y[,3]
+ dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
+ dpA.deta <- dtheta.deta(pA, link=.link)
+ dl.dpA * dpA.deta
+ }), list(.link=link))),
+ weight=eval(substitute(expression({
+ d2l.dp2 <- (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
+ wz <- cbind((dpA.deta^2) * d2l.dp2)
+ wz
+ }), list(.link=link))))
+}
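+
+# Usage sketch (hypothetical genotype counts): under Hardy-Weinberg
+# proportions the three class probabilities are pA^2, 2*pA*(1-pA) and
+# (1-pA)^2, so only the allele frequency pA is estimated.
+#   ymat <- cbind(AA=53, Aa=95, aa=38)   # assumed example data
+#   fit <- vglm(ymat ~ 1, family=AA.Aa.aa(link="logit"))
+#   Coef(fit)   # estimate of pA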
+
+
+
diff --git a/R/family.glmgam.q b/R/family.glmgam.q
new file mode 100644
index 0000000..e940675
--- /dev/null
+++ b/R/family.glmgam.q
@@ -0,0 +1,790 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+quasiff = function(link,
+ variance=c("mu", "mu(1-mu)"),
+ power.variance=1,
+ dispersion=0)
+{
+ warning("link=powl doesn't work yet")
+ estimated.dispersion <- dispersion==0
+
+ if(mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+
+
+ if(mode(variance) != "character" && mode(variance) != "name")
+ variance <- as.character(substitute(variance))
+ variance <- match.arg(variance, c("mu", "mu(1-mu)"))[1]
+
+ result <-
+ new("vglmff",
+ blurb=c("Quasi family\n\n",
+ "Link: ", namesof("mu", link), "\n",
+ "Variance: ", ifelse(power.variance==1, variance,
+ paste(variance, "^", power.variance, sep=""))),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ pow <- extra$power.variance
+ thing <- extra$variance
+ if(thing=="mu" && (pow==1 || pow==2))
+ stop("this deviance function not right")
+
+ devy <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
+ devmu <- y * mu^(1-pow) / (1-pow) - mu^(2-pow) / (2-pow)
+ devi <- 2 * (devy - devmu)
+ if(residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ extra$link <- .link
+ extra$variance <- .variance
+ extra$power.variance <- .power.variance
+
+ if(.variance=="mu(1-mu)") {
+ delete.zero.colns <- TRUE
+ eval(process.categorical.data.vgam)
+
+ mustart <- mustart[,1]
+ y <- y[,1]
+ } else {
+ mustart <- y + 0.167 * (y == 0)
+ }
+
+
+ }), list( .link=link, .variance=variance, .power.variance=power.variance ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, link= .link)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ dpar <- .dispersion
+ if(!dpar) {
+ temp <- w * dmu.deta^2
+ dpar <- sum( w * (y-mu)^2 * wz / temp ) / (length(mu) - ncol(x))
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$power.variance <- .power.variance
+ misc$link = c("mu" = .link )
+ }), list( .dispersion=dispersion, .estimated.dispersion=estimated.dispersion,
+ .link=link, .power.variance=power.variance ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, link= .link)
+ }, list( .link=link ))),
+ vfamily="quasiff",
+ deriv=eval(substitute(expression({
+ pow <- extra$power.variance
+ thing <- extra$variance
+ dQ.dmu <- if(thing=="mu") (y-mu)/mu^pow else (y-mu)/(mu*(1-mu))^pow
+ dmu.deta <- dtheta.deta(theta=mu, link= .link)
+ w * dQ.dmu * dmu.deta
+ }), list( .link=link, .power.variance=power.variance ))),
+ weight=eval(substitute(expression({
+ d2Q.dmu2 <- if(thing=="mu") 1 / mu^pow else
+ 1 / (mu*(1-mu))^pow
+ w * dmu.deta^2 * d2Q.dmu2
+ }), list( .link=link, .power.variance=power.variance ))))
+
+ if(variance=="mu") {
+ if(power.variance==1)
+ result@deviance <- poissonff()@deviance
+ if(power.variance==2)
+ result@deviance <- gammaff()@deviance
+ } else {
+ result@deviance <- if(power.variance==1) binomialff()@deviance else NULL
+ }
+
+ result
+}
+
+
+
+binomialff <- function(link="logit",
+ dispersion=1, mv=FALSE, onedpar=!mv,
+ parallel = FALSE, earg=NULL,
+ zero=NULL)
+
+{
+
+
+ estimated.dispersion <- dispersion==0
+ if(mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=if(mv) c("Multivariate Binomial model\n\n",
+ "Link: ", namesof("mu[,j]", link), "\n",
+ "Variance: mu[,j]*(1-mu[,j])") else
+ c("Binomial model\n\n",
+ "Link: ", namesof("mu", link), "\n",
+ "Variance: mu*(1-mu)"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ deviance=function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ devy <- y
+ nz <- y != 0
+ devy[nz] <- y[nz] * log(y[nz])
+ nz <- (1 - y) != 0
+ devy[nz] <- devy[nz] + (1 - y[nz]) * log(1 - y[nz])
+ devmu <- y * log(mu) + (1 - y) * log(1 - mu)
+ if(any(small <- mu * (1 - mu) < .Machine$double.eps)) {
+ warning("fitted values close to 0 or 1")
+ smu <- mu[small]
+ sy <- y[small]
+ smu <- ifelse(smu < .Machine$double.eps,
+ .Machine$double.eps, smu)
+ onemsmu <- ifelse((1 - smu) < .Machine$double.eps,
+ .Machine$double.eps, 1 - smu)
+ devmu[small] <- sy * log(smu) + (1 - sy) * log(onemsmu)
+ }
+ devi <- 2 * (devy - devmu)
+ if(residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ if(is.R()) {
+ assign("CQO.FastAlgorithm", ( .link=="logit" || .link=="cloglog"),
+ envir=VGAMenv)
+ assign("modelno", if( .link=="logit") 1 else
+ if( .link=="cloglog") 4 else NULL, envir=VGAMenv)
+ } else {
+ CQO.FastAlgorithm <<- ( .link == "logit" || .link=="cloglog")
+ modelno <<- if( .link=="logit") 1 else if( .link=="cloglog") 4 else NULL
+ }
+ if(.mv) {
+ y = as.matrix(y)
+ M = ncol(y)
+ if(!all(y == 0 | y == 1))
+ stop("response must contain 0's and 1's only")
+ dn2 = if(is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if(length(dn2)) {
+ paste("E[", dn2, "]", sep="")
+ } else {
+ paste("mu", 1:M, sep="")
+ }
+ predictors.names = namesof(if(M>1) dn2 else "mu", .link, short=TRUE)
+
+ mustart = (0.5 + w * y) / (1 + w)
+ } else {
+ NCOL = function (x)
+ if(is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+
+ if(NCOL(y) == 1) {
+ if(is.factor(y)) y = y != levels(y)[1]
+ nn = rep(1, n)
+ if(!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ mustart = (0.5 + w * y) / (1 + w)
+ no.successes = w * y
+ if(any(abs(no.successes - round(no.successes)) > 0.001))
+ stop("Number of successes must be integer-valued")
+ } else if(NCOL(y) == 2) {
+ if(any(abs(y - round(y)) > 0.001))
+ stop("Count data must be integer-valued")
+ nn = y[,1] + y[,2]
+ y = ifelse(nn > 0, y[,1]/nn, 0)
+ w = w * nn
+ mustart = (0.5 + nn * y) / (1 + nn)
+ } else
+ stop("Response not of the right form")
+ predictors.names = namesof("mu", .link, short=TRUE)
+ }
+ }), list( .link=link, .mv=mv ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mu = eta2theta(eta, link= .link, earg = .earg)
+ mu
+ }, list( .link=link, .earg = earg ))),
+ last=eval(substitute(expression({
+ if(is.R()) {
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
+ if(exists("modelno", envir = VGAMenv))
+ rm("modelno", envir = VGAMenv)
+ } else {
+ while(exists("CQO.FastAlgorithm"))
+ remove("CQO.FastAlgorithm")
+ while(exists("modelno"))
+ remove("modelno")
+ }
+ dpar <- .dispersion
+ if(!dpar) {
+ temp87 = (y-mu)^2 * wz /
+ (dtheta.deta(mu, link= .link, earg = .earg )^2) # w cancel
+ if(.mv && ! .onedpar) {
+ dpar = rep(as.numeric(NA), len=M)
+ temp87 = cbind(temp87)
+ nrow.mu = if(is.matrix(mu)) nrow(mu) else length(mu)
+ for(i in 1:M)
+ dpar[i] = sum(temp87[,i]) / (nrow.mu - ncol(x))
+ if(is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
+ names(dpar) = dimnames(y)[[2]]
+ } else
+ dpar = sum(temp87) / (length(mu) - ncol(x))
+ }
+ misc$mv = .mv
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 1
+ misc$earg = .earg
+ misc$estimated.dispersion <- .estimated.dispersion
+
+ misc$link = rep( .link, length=M)
+ names(misc$link) = if(M>1) dn2 else "mu"
+ misc$expected = TRUE
+ }), list( .dispersion=dispersion, .estimated.dispersion=estimated.dispersion,
+ .onedpar=onedpar, .link=link, .mv=mv, .earg = earg ))),
+ link=eval(substitute(function(mu, extra=NULL)
+ theta2eta(mu, .link, earg = .earg )
+ , list( .link=link, .earg = earg ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ if(residuals) w*(y/mu - (1-y)/(1-mu)) else
+ sum(w*(y*log(mu) + (1-y)*log(1-mu)))
+ },
+ vfamily=c("binomialff", "vcategorical"),
+ deriv=eval(substitute(expression({
+ if( .link == "logit") {
+ w * (y - mu)
+ } else if( .link == "cloglog") {
+ mu.use = mu
+ smallno = 100 * .Machine$double.eps
+ mu.use[mu.use < smallno] = smallno
+ mu.use[mu.use > 1 - smallno] = 1 - smallno
+ -w * (y - mu) * log(1 - mu.use) / mu.use
+ } else
+ w * dtheta.deta(mu, link= .link, earg = .earg )* (y/mu - 1)/(1-mu)
+ }), list( .link=link, .earg = earg ))),
+ weight=eval(substitute(expression({
+ tmp100 = mu*(1-mu)
+
+ tmp200 = if( .link == "logit") {
+ cbind(w * tmp100)
+ } else if( .link == "cloglog") {
+ cbind(w * (1-mu.use) * (log(1-mu.use))^2 / mu.use )
+ } else {
+ cbind(w * dtheta.deta(mu, link= .link, earg = .earg)^2 / tmp100)
+ }
+ for(ii in 1:M) {
+ index200 = !is.finite(tmp200[,ii]) |
+ (abs(tmp200[,ii]) < .Machine$double.eps)
+ if(any(index200)) {
+ tmp200[index200,ii] = .Machine$double.eps # Diagonal 0's are bad
+ }
+ }
+ tmp200
+ }), list( .link=link, .earg = earg ))))
+}
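+
+# Usage sketch (simulated data, not from the package): binomialff() accepts a
+# 0/1 vector, a two-column (successes, failures) matrix, or, with mv=TRUE, a
+# matrix of independent binary responses.
+#   set.seed(1)                                  # assumed example
+#   n <- 100; x2 <- runif(n)
+#   y <- rbinom(n, size=1, prob=plogis(1 + 2*x2))
+#   fit <- vglm(y ~ x2, family=binomialff(link="logit"))
+#   coef(fit, matrix=TRUE)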
+
+
+
+gammaff <- function(link=c("nreciprocal", "reciprocal", "loge", "identity"),
+ dispersion=0)
+{
+ estimated.dispersion <- dispersion==0
+ if(mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+ link = match.arg(link, c("nreciprocal","reciprocal","loge","identity"))[1]
+
+ new("vglmff",
+ blurb=c("Gamma distribution\n\n",
+ "Link: ", namesof("mu", link), "\n",
+ "Variance: mu^2 / k"),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ devi <- -2 * w * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
+ if(residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ mustart <- y + 0.167 * (y == 0)
+ M = if(is.matrix(y)) ncol(y) else 1
+ dn2 = if(is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if(length(dn2)) {
+ paste("E[", dn2, "]", sep="")
+ } else {
+ paste("mu", 1:M, sep="")
+ }
+ predictors.names = namesof(if(M>1) dn2 else "mu", .link, short=TRUE)
+ if(!length(etastart))
+ etastart <- theta2eta(mustart, link= .link)
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, link= .link)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ dpar <- .dispersion
+ if(!dpar) {
+ if(M == 1) {
+ temp = w * dmu.deta^2
+ dpar = sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
+ } else {
+ dpar = rep(0, len=M)
+ for(spp in 1:M) {
+ temp = w * dmu.deta[,spp]^2
+ dpar[spp] = sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) /
+ (length(mu[,spp]) - ncol(x))
+ }
+ }
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$link = rep( .link, length=M)
+ misc$expected = TRUE
+ names(misc$link) = if(M>1) paste("mu", 1:M, sep="") else "mu"
+ }), list( .dispersion=dispersion,
+ .estimated.dispersion=estimated.dispersion,
+ .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, link= .link)
+ }, list( .link=link ))),
+ vfamily="gammaff",
+ deriv=eval(substitute(expression({
+ dl.dmu = (y-mu) / mu^2
+ dmu.deta = dtheta.deta(theta=mu, link= .link)
+ w * dl.dmu * dmu.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ d2l.dmu2 = 1 / mu^2
+ w * dmu.deta^2 * d2l.dmu2
+ }), list( .link=link ))))
+}
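+
+# Usage sketch (simulated data): the gamma shape parameter enters only via
+# the dispersion, which is estimated after the fit as in quasi-likelihood.
+#   set.seed(2)                                  # assumed example
+#   y <- rgamma(100, shape=5, rate=5/3)          # mean 3, dispersion 1/5
+#   fit <- vglm(y ~ 1, family=gammaff(link="loge"))
+#   fit@misc$dispersion                          # should be near 0.2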
+
+
+
+inverse.gaussianff <- function(link="natural.ig", dispersion=0)
+{
+ estimated.dispersion <- dispersion==0
+ warning("@deviance() not finished")
+ warning("needs checking, but I'm sure it works")
+
+ if(mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Inverse Gaussian distribution\n\n",
+ "Link: ", namesof("mu", link), "\n",
+ "Variance: mu^3 /k"),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ pow <- 3 # Use Quasi()$deviance with pow==3
+ devy <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
+ devmu <- y * mu^(1-pow) / (1-pow) - mu^(2-pow) / (2-pow)
+ devi <- 2 * (devy - devmu)
+ if(residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ mu <- y + 0.167 * (y == 0)
+ if(!length(etastart))
+ etastart <- theta2eta(mu, link= .link)
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, link= .link)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ dpar <- .dispersion
+ if(!dpar) {
+ temp <- w * dmu.deta^2
+ dpar <- sum( w * (y-mu)^2 * wz / temp ) / (length(mu) - ncol(x))
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$link = rep( .link, length=M)
+ names(misc$link) = if(M>1) paste("mu", 1:M, sep="") else "mu"
+ }), list( .dispersion=dispersion,
+ .estimated.dispersion=estimated.dispersion,
+ .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, link= .link)
+ }, list( .link=link ))),
+ vfamily="inverse.gaussianff",
+ deriv=eval(substitute(expression({
+ dl.dmu <- (y-mu) / mu^3
+ dmu.deta <- dtheta.deta(theta=mu, link= .link)
+ w * dl.dmu * dmu.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ d2l.dmu2 <- 1 / mu^3
+ w * dmu.deta^2 * d2l.dmu2
+ }), list( .link=link ))))
+}
+
+
+
+
+dinv.gaussian = function(x, mu, lambda) {
+ if(any(mu <=0)) stop("mu must be positive")
+ if(any(lambda <=0)) stop("lambda must be positive")
+ ans = x
+ mu = rep(mu, len=length(x))
+ lambda = rep(lambda, len=length(x))
+ ans[x <= 0] = 0
+ bb = x > 0
+ ans[bb] = sqrt(lambda[bb]/(2*pi*x[bb]^3)) *
+ exp(-lambda[bb]*(x[bb]-mu[bb])^2/(2*mu[bb]^2*x[bb]))
+ ans
+}
+
+
+pinv.gaussian = function(q, mu, lambda) {
+ if(any(mu <=0)) stop("mu must be positive")
+ if(any(lambda <=0)) stop("lambda must be positive")
+ ans = q
+ mu = rep(mu, len=length(q))
+ lambda = rep(lambda, len=length(q))
+ ans[q <= 0] = 0
+ bb = q > 0
+ ans[bb] = pnorm(sqrt(lambda[bb]/q[bb])*(q[bb]/mu[bb]-1)) +
+ exp(2*lambda[bb]/mu[bb]) *
+ pnorm(-sqrt(lambda[bb]/q[bb])*(q[bb]/mu[bb]+1))
+ ans
+}
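+
+# Consistency check sketch for the two functions above: numerically
+# integrating dinv.gaussian() should reproduce pinv.gaussian().
+#   integrate(dinv.gaussian, lower=0, upper=3, mu=1, lambda=2)$value
+#   pinv.gaussian(3, mu=1, lambda=2)   # should agree to integration tolerance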
+
+inv.gaussianff <- function(lmu="loge",
+ llambda="loge",
+ ilambda=1,
+ zero=NULL)
+{
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu <- as.character(substitute(lmu))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda <- as.character(substitute(llambda))
+
+ new("vglmff",
+ blurb=c("Inverse Gaussian distribution\n\n",
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-mu)^2/(2*mu^2*y)), y&lambda>0",
+ "Link: ", namesof("mu", lmu), ", ",
+ namesof("lambda", llambda), "\n",
+ "Mean: ", "mu\n",
+ "Variance: mu^3 / lambda"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(any(y <= 0)) stop("Require the response to have positive values")
+ predictors.names = c(namesof("mu", .lmu, short= TRUE),
+ namesof("lambda", .llambda, short= TRUE))
+ if(!length(etastart)) {
+ initmu = y + 1/8
+ initlambda = rep(if(length( .ilambda)) .ilambda else 1, len=n)
+ etastart = cbind(theta2eta(initmu, link=.lmu),
+ theta2eta(initlambda, link=.llambda))
+ }
+ }), list( .lmu=lmu, .llambda=llambda,
+ .ilambda=ilambda ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], link=.lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ misc$link = c(mu = .lmu, lambda = .llambda)
+ }), list( .lmu=lmu, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ lambda <- eta2theta(eta[,2], link=.llambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(0.5 * log(lambda / (2 * pi * y^3)) -
+ lambda *(y-mu)^2 / (2*mu^2 * y)))
+ }, list( .llambda=llambda ))),
+ vfamily="inv.gaussianff",
+ deriv=eval(substitute(expression({
+ lambda <- eta2theta(eta[,2], link=.llambda)
+ dl.dmu = lambda * (y-mu) / mu^3
+ dl.dlambda <- 0.5 / lambda - (y-mu)^2 / (2 * mu^2 * y)
+ dmu.deta <- dtheta.deta(theta=mu, link=.lmu)
+ dlambda.deta <- dtheta.deta(theta=lambda, link=.llambda)
+ w * cbind(dl.dmu * dmu.deta, dl.dlambda * dlambda.deta)
+ }), list( .lmu=lmu, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ d2l.dmu2 = lambda / mu^3
+ d2l.dlambda2 = 0.5 / (lambda^2)
+ w * cbind(dmu.deta^2 * d2l.dmu2, dlambda.deta^2 * d2l.dlambda2)
+ }), list( .lmu=lmu, .llambda=llambda ))))
+}
+
+
+
+poissonff <- function(link="loge",
+ dispersion=1, onedpar=FALSE,
+ parallel=FALSE, zero=NULL)
+{
+
+ estimated.dispersion <- dispersion==0
+ if(mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Poisson distribution\n\n",
+ "Link: ", namesof("mu", link), "\n",
+ "Variance: mu"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ nz <- y > 0
+ devi <- - (y - mu)
+ devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
+ 2 * sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ if(is.R()) assign("CQO.FastAlgorithm",
+ ( .link == "loge"), envir = VGAMenv) else
+ CQO.FastAlgorithm <<- ( .link == "loge")
+ M = if(is.matrix(y)) ncol(y) else 1
+ dn2 = if(is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if(length(dn2)) {
+ paste("E[", dn2, "]", sep="")
+ } else {
+ paste("mu", 1:M, sep="")
+ }
+ predictors.names = namesof(if(M>1) dn2 else "mu", .link, short=TRUE)
+ mu = pmax(y, 0.167) # y + 0.167 * (y == 0)
+ if(!length(etastart))
+ etastart <- theta2eta(mu, link= .link)
+ }), list( .link=link, .estimated.dispersion=estimated.dispersion ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mu = eta2theta(eta, link= .link)
+ mu
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ if(is.R()) {
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
+ } else {
+ while(exists("CQO.FastAlgorithm"))
+ remove("CQO.FastAlgorithm")
+ }
+ dpar <- .dispersion
+ if(!dpar) {
+ temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link= .link)^2) # w cancel
+ if(M > 1 && ! .onedpar) {
+ dpar = rep(as.numeric(NA), len=M)
+ temp87 = cbind(temp87)
+ nrow.mu = if(is.matrix(mu)) nrow(mu) else length(mu)
+ for(i in 1:M)
+ dpar[i] = sum(temp87[,i]) / (nrow.mu - ncol(x))
+ if(is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
+ names(dpar) = dimnames(y)[[2]]
+ } else
+ dpar = sum(temp87) / (length(mu) - ncol(x))
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 1
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$expected = TRUE
+ misc$link = rep( .link, length=M)
+ names(misc$link) = if(M>1) dn2 else "mu"
+ }), list( .dispersion=dispersion, .estimated.dispersion=estimated.dispersion,
+ .onedpar=onedpar, .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, link= .link)
+ }, list( .link=link ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ if(residuals) w*(y/mu - 1) else sum(w*(-mu + y*log(mu) - lgamma(y+1)))
+ },
+ vfamily="poissonff",
+ deriv=eval(substitute(expression({
+ if( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ w * (y - mu)
+ } else {
+ lambda <- mu
+ dl.dlambda <- (y-lambda) / lambda
+ dlambda.deta <- dtheta.deta(theta=lambda, link= .link)
+ w * dl.dlambda * dlambda.deta
+ }
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ if( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ tmp600 = mu
+ tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
+ w * tmp600
+ } else {
+ d2l.dlambda2 = 1 / lambda
+ w * dlambda.deta^2 * d2l.dlambda2
+ }
+ }), list( .link=link ))))
+}
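+
+# Usage sketch (simulated counts):
+#   set.seed(3)                                  # assumed example
+#   n <- 200; x2 <- runif(n)
+#   y <- rpois(n, lambda=exp(1 - x2))
+#   fit <- vglm(y ~ x2, family=poissonff(link="loge"))
+#   coef(fit, matrix=TRUE)                       # estimates on the log scale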
+
+
+quasibinomialff = function(link = "logit", mv = FALSE, onedpar = !mv,
+ parallel = FALSE, zero = NULL) {
+ dispersion = 0 # Estimated; this is the only difference from binomialff()
+ ans =
+ binomialff(link = link, dispersion=dispersion, mv=mv, onedpar=onedpar,
+ parallel=parallel, zero=zero)
+ ans at vfamily = "quasibinomialff"
+ ans
+}
+
+quasipoissonff = function(link = "loge", onedpar = FALSE, parallel = FALSE,
+ zero = NULL) {
+ dispersion = 0 # Estimated; this is the only difference from poissonff()
+ ans =
+ poissonff(link = link, dispersion=dispersion, onedpar=onedpar,
+ parallel=parallel, zero=zero)
+ ans at vfamily = "quasipoissonff"
+ ans
+}
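+
+# The two wrappers above differ from binomialff() and poissonff() only in
+# forcing dispersion=0, so the dispersion parameter is estimated in @last and
+# stored in misc$dispersion; for overdispersed data fit@misc$dispersion will
+# exceed 1.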
+
+
+
+
+
+poissonqn.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+
+poissonqn <- function(link="loge",
+ dispersion=1, onedpar=FALSE,
+ parallel=FALSE, zero=NULL,
+ wwts=c("expected","observed","qn"))
+{
+ estimated.dispersion <- dispersion==0
+ if(mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+ if(mode(wwts) != "character" && mode(wwts) != "name")
+ wwts <- as.character(substitute(wwts))
+ wwts <- match.arg(wwts, c("expected","observed","qn"))[1]
+
+ new("vglmff",
+ blurb=c("Poisson distribution\n\n",
+ "Link: ", namesof("mu", link), "\n",
+ "Variance: mu"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ nz <- y > 0
+ devi <- - (y - mu)
+ devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
+ 2 * sum(w * devi)
+ },
+ initialize=eval(substitute(expression({
+ M = if(is.matrix(y)) ncol(y) else 1
+ dn2 = if(is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if(length(dn2)) {
+ paste("E[", dn2, "]", sep="")
+ } else {
+ paste("mu", 1:M, sep="")
+ }
+ predictors.names = namesof(if(M>1) dn2 else "mu", .link, short=TRUE)
+ mu = pmax(y, 0.167) # y + 0.167 * (y == 0)
+ if(!length(etastart))
+ etastart <- theta2eta(mu, link= .link)
+ }), list( .link=link, .estimated.dispersion=estimated.dispersion ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, link= .link)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ dpar <- .dispersion
+ if(!dpar) {
+ temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link= .link)^2) # w cancel
+ if(M > 1 && ! .onedpar) {
+ dpar = rep(as.numeric(NA), len=M)
+ temp87 = cbind(temp87)
+ nrow.mu = if(is.matrix(mu)) nrow(mu) else length(mu)
+ for(i in 1:M)
+ dpar[i] = sum(temp87[,i]) / (nrow.mu - ncol(x))
+ if(is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
+ names(dpar) = dimnames(y)[[2]]
+ } else
+ dpar = sum(temp87) / (length(mu) - ncol(x))
+ }
+ misc$BFGS = TRUE
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 1
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$expected = FALSE
+ misc$link = rep( .link, length=M)
+ names(misc$link) = if(M>1) dn2 else "mu"
+ }), list( .dispersion=dispersion,
+ .estimated.dispersion=estimated.dispersion,
+ .onedpar=onedpar, .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, link= .link)
+ }, list( .link=link ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ if(residuals) w*(y/mu - 1) else sum(w*(-mu + y*log(mu) - lgamma(y+1)))
+ },
+ vfamily="poissonqn",
+ deriv=eval(substitute(expression({
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+
+ derivnew =
+ if( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ w * (y - mu)
+ } else {
+ lambda <- mu
+ dl.dlambda <- (y-lambda) / lambda
+ dlambda.deta <- dtheta.deta(theta=lambda, link= .link)
+ w * dl.dlambda * dlambda.deta
+ }
+ derivnew
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ if( .wwts == "qn") {
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold-derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ } else if( .wwts == "expected") {
+ wznew = if( .link == "loge") {
+ tmp600 = mu
+ tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
+ w * tmp600
+ } else {
+ d2l.dlambda2 = 1 / lambda
+ w * dlambda.deta^2 * d2l.dlambda2
+ }
+ } else {
+ wznew = if( .link == "loge") {
+ tmp600 = y
+ tmp600[y < .Machine$double.eps] = sqrt(.Machine$double.eps)
+ w * tmp600
+ } else {
+ stop("this is not programmed in yet")
+ }
+ }
+ wznew
+ }), list( .wwts=wwts, .link=link ))))
+}
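+
+# poissonqn() mirrors poissonff() but lets the working weights be the
+# expected information, the observed information, or BFGS quasi-Newton
+# updates (argument wwts); the updates are done by qnupdate() above, which
+# is why poissonqn.control() forces save.weight=TRUE.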
+
+
diff --git a/R/family.loglin.q b/R/family.loglin.q
new file mode 100644
index 0000000..070f565
--- /dev/null
+++ b/R/family.loglin.q
@@ -0,0 +1,226 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+loglinb2 <- function(exchangeable=FALSE, zero=NULL)
+{
+
+ new("vglmff",
+ blurb=c("Log-linear model for binary data\n\n",
+ "Links: ",
+ "Identity: u_1, u_2, u_{12}",
+ "\n"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x,
+ .exchangeable, constraints, intercept.apply=TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.exchangeable=exchangeable, .zero=zero))),
+ initialize=expression({
+
+ y <- as.matrix(y)
+ predictors.names <- c("u1", "u2", "u12")
+ if(ncol(y) != 2)
+ stop("ncol(y) must be = 2")
+
+ if(is.null(mustart)) {
+ mustart <- matrix(as.numeric(NA), nrow(y), 4)
+ mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2]), w)
+ mustart[,2] <- weighted.mean((1-y[,1])*y[,2], w)
+ mustart[,3] <- weighted.mean(y[,1]*(1-y[,2]), w)
+ mustart[,4] <- weighted.mean(y[,1]*y[,2], w)
+ if(any(mustart==0))
+ stop("some combinations of the response not realized")
+ }
+ }),
+ inverse= function(eta, extra=NULL) {
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u12 <- eta[,3]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
+ cbind("00"=1/denom,
+ "01"=exp(u2) / denom,
+ "10"=exp(u1) / denom,
+ "11"=exp(u1+u2+u12) / denom)
+ },
+ last=expression({
+ misc$link = c("u1" = "identity", "u2" = "identity", "u12" = "identity")
+ }),
+ link= function(mu, extra=NULL) {
+ u0 <- log(mu[,1])
+ u2 <- log(mu[,2]) - u0
+ u1 <- log(mu[,3]) - u0
+ u12 <- log(mu[,4]) - u0 - u1 - u2
+ cbind(u1, u2, u12)
+ },
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u12 <- eta[,3]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
+ u0 <- -log(denom)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(u0 + u1*y[,1] + u2*y[,2] + u12*y[,1]*y[,2]))
+ },
+ vfamily=c("loglinb2"),
+ deriv=expression({
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u12 <- eta[,3]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
+ du0.du1 <- -(exp(u1) + exp(u1 + u2 + u12)) / denom
+ du0.du2 <- -(exp(u2) + exp(u1 + u2 + u12)) / denom
+ du0.du12 <- -exp(u1 + u2 + u12) / denom
+ w * cbind(du0.du1 + y[,1],
+ du0.du2 + y[,2],
+ du0.du12 + y[,1]*y[,2])
+ }),
+ weight=expression({
+ d2u0.du1.2 <- -(exp(u1) + exp(u1 + u2 + u12)) * (1+exp(u2)) / denom^2
+ d2u0.du22 <- -(exp(u2) + exp(u1 + u2 + u12)) * (1+exp(u1)) / denom^2
+ d2u0.du122 <- -exp(u1 + u2 + u12) * (1+exp(u1)+exp(u2)) / denom^2
+ d2u0.du1u2 <- -(exp(u1 + u2 + u12) - exp(u1 + u2)) / denom^2
+ d2u0.du1u3 <- -(1 + exp(u2)) * exp(u1 + u2 + u12) / denom^2
+ d2u0.du2u3 <- -(1 + exp(u1)) * exp(u1 + u2 + u12) / denom^2
+
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[,iam(1,1,M)] <- -d2u0.du1.2
+ wz[,iam(2,2,M)] <- -d2u0.du22
+ wz[,iam(3,3,M)] <- -d2u0.du122
+ wz[,iam(1,2,M)] <- -d2u0.du1u2
+ wz[,iam(1,3,M)] <- -d2u0.du1u3
+ wz[,iam(2,3,M)] <- -d2u0.du2u3
+ w * wz
+ }))
+}
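+
+# Usage sketch (simulated bivariate binary data): the response is a
+# two-column 0/1 matrix and the fitted values are the four joint
+# probabilities "00", "01", "10", "11".
+#   set.seed(4)                                  # assumed example
+#   n <- 100; y1 <- rbinom(n, 1, 0.5); y2 <- rbinom(n, 1, 0.5)
+#   fit <- vglm(cbind(y1, y2) ~ 1, family=loglinb2)
+#   head(fitted(fit))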
+
+
+loglinb3 <- function(exchangeable=FALSE, zero=NULL)
+{
+
+ new("vglmff",
+ blurb=c("Log-linear model for trivariate binary data\n\n",
+ "Links: ",
+ "Identity: u1, u2, u3, u12, u13, u23",
+ "\n"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
+ .exchangeable, constraints, intercept.apply=TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.exchangeable=exchangeable, .zero=zero))),
+ initialize=expression({
+ y <- as.matrix(y)
+ predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
+ if(ncol(y) != 3)
+ stop("ncol(y) must be = 3")
+ extra$my.expression <- expression({
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u3 <- eta[,3]
+ u12 <- eta[,4]
+ u13 <- eta[,5]
+ u23 <- eta[,6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
+ })
+ extra$deriv.expression <- expression({
+ allterms <- exp(u1+u2+u3+u12+u13+u23)
+ A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) + allterms
+ A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) + allterms
+ A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) + allterms
+ A12 <- exp(u1 + u2 + u12) + allterms
+ A13 <- exp(u1 + u3 + u13) + allterms
+ A23 <- exp(u2 + u3 + u23) + allterms
+ })
+ if(!length(mustart)) {
+ mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
+ mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2])*(1-y[,3]), w)
+ mustart[,2] <- weighted.mean((1-y[,1])*(1-y[,2])*y[,3], w)
+ mustart[,3] <- weighted.mean((1-y[,1])*y[,2]*(1-y[,3]), w)
+ mustart[,4] <- weighted.mean((1-y[,1])*y[,2]*y[,3], w)
+ mustart[,5] <- weighted.mean(y[,1]*(1-y[,2])*(1-y[,3]), w)
+ mustart[,6] <- weighted.mean(y[,1]*(1-y[,2])*y[,3], w)
+ mustart[,7] <- weighted.mean(y[,1]*y[,2]*(1-y[,3]), w)
+ mustart[,8] <- weighted.mean(y[,1]*y[,2]*y[,3], w)
+ if(any(mustart==0))
+ stop("some combinations of the response not realized")
+ }
+ }),
+ inverse= function(eta, extra=NULL) {
+ eval(extra$my.expression)
+ cbind("000"=1,
+ "001"=exp(u3),
+ "010"=exp(u2),
+ "011"=exp(u2+u3+u23),
+ "100"=exp(u1),
+ "101"=exp(u1+u3+u13),
+ "110"=exp(u1+u2+u12),
+ "111"=exp(u1+u2+u3+u12+u13+u23)) / denom
+ },
+ last=expression({
+ misc$link = rep("identity", length=M)
+ names(misc$link) = predictors.names
+ }),
+ link= function(mu, extra=NULL) {
+ u0 <- log(mu[,1])
+ u3 <- log(mu[,2]) - u0
+ u2 <- log(mu[,3]) - u0
+ u23 <- log(mu[,4]) - u0 - u2 - u3
+ u1 <- log(mu[,5]) - u0
+ u13 <- log(mu[,6]) - u0 - u1 - u3
+ u12 <- log(mu[,7]) - u0 - u1 - u2
+ cbind(u1, u2, u3, u12, u13, u23)
+ },
+ loglikelihood=function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ eval(extra$my.expression)
+ u0 <- -log(denom)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(u0 + u1*y[,1] + u2*y[,2] + u3*y[,3] +u12*y[,1]*y[,2] +
+ u13*y[,1]*y[,3] + u23*y[,2]*y[,3]))
+ },
+ vfamily=c("loglinb3"),
+ deriv=expression({
+ eval(extra$my.expression)
+ eval(extra$deriv.expression)
+ w * cbind(-A1/denom + y[,1],
+ -A2/denom + y[,2],
+ -A3/denom + y[,3],
+ -A12/denom + y[,1]*y[,2],
+ -A13/denom + y[,1]*y[,3],
+ -A23/denom + y[,2]*y[,3])
+ }),
+ weight=expression({
+ u0 <- -log(denom)
+ dA2.du1 <- exp(u1 + u2 + u12) + allterms
+ dA3.du1 <- exp(u1 + u3 + u13) + allterms
+ dA3.du2 <- exp(u2 + u3 + u23) + allterms
+ wz <- matrix(as.numeric(NA), n, dimm(6))
+ expu0 <- exp(u0)
+ wz[,iam(1,1,M)] <- A1 * (1 - expu0 * A1)
+ wz[,iam(2,2,M)] <- A2 * (1 - expu0 * A2)
+ wz[,iam(3,3,M)] <- A3 * (1 - expu0 * A3)
+ wz[,iam(1,2,M)] <- (dA2.du1 - expu0 * A1 * A2)
+ wz[,iam(1,3,M)] <- (dA3.du1 - expu0 * A1 * A3)
+ wz[,iam(2,3,M)] <- (dA3.du2 - expu0 * A2 * A3)
+ wz[,iam(4,4,M)] <- A12 * (1 - expu0 * A12)
+ wz[,iam(5,5,M)] <- A13 * (1 - expu0 * A13)
+ wz[,iam(6,6,M)] <- A23 * (1 - expu0 * A23)
+ wz[,iam(4,6,M)] <- (allterms - expu0 * A12 * A23)
+ wz[,iam(5,6,M)] <- (allterms - expu0 * A13 * A23)
+ wz[,iam(4,5,M)] <- (allterms - expu0 * A12 * A13)
+ wz[,iam(1,4,M)] <- A12 * (1 - expu0 * A1)
+ wz[,iam(1,5,M)] <- A13 * (1 - expu0 * A1)
+ wz[,iam(1,6,M)] <- (allterms - expu0 * A1 * A23)
+ wz[,iam(2,4,M)] <- A12 * (1 - expu0 * A2)
+ wz[,iam(2,5,M)] <- (allterms - expu0 * A2 * A13)
+ wz[,iam(2,6,M)] <- A23 * (1 - expu0 * A2)
+ wz[,iam(3,4,M)] <- (allterms - expu0 * A3 * A12)
+ wz[,iam(3,5,M)] <- A13 * (1 - expu0 * A3)
+ wz[,iam(3,6,M)] <- A23 * (1 - expu0 * A3)
+ wz <- w * expu0 * wz
+ wz
+ }))
+}
+
diff --git a/R/family.mixture.q b/R/family.mixture.q
new file mode 100644
index 0000000..7de28d1
--- /dev/null
+++ b/R/family.mixture.q
@@ -0,0 +1,290 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+mix2normal1.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+mix2normal1 = function(lphi="logit",
+ lmu="identity",
+ lsd="loge",
+ iphi=0.5, imu1=NULL, imu2=NULL, isd1=NULL, isd2=NULL,
+ qmu=c(0.2, 0.8),
+ esd=FALSE,
+ zero=1)
+{
+ if(mode(lphi) != "character" && mode(lphi) != "name")
+ lphi = as.character(substitute(lphi))
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lsd) != "character" && mode(lsd) != "name")
+ lsd = as.character(substitute(lsd))
+ if(!is.Numeric(qmu, allow=2, positive=TRUE) || any(qmu >= 1))
+ stop("bad input for argument \"qmu\"")
+ if(length(iphi) && (!is.Numeric(iphi, allow=1, positive=TRUE) || iphi>= 1))
+ stop("bad input for argument \"iphi\"")
+ if(length(imu1) && !is.Numeric(imu1))
+ stop("bad input for argument \"imu1\"")
+ if(length(imu2) && !is.Numeric(imu2))
+ stop("bad input for argument \"imu2\"")
+ if(length(isd1) && !is.Numeric(isd1, positive=TRUE))
+ stop("bad input for argument \"isd1\"")
+ if(length(isd2) && !is.Numeric(isd2, positive=TRUE))
+ stop("bad input for argument \"isd2\"")
+
+ new("vglmff",
+ blurb=c("Mixture of two univariate normals\n\n",
+ "Links: ",
+ namesof("phi",lphi), ", ",
+ namesof("mu1", lmu, tag=FALSE), ", ",
+ namesof("sd1", lsd, tag=FALSE), ", ",
+ namesof("mu2", lmu, tag=FALSE), ", ",
+ namesof("sd2", lsd, tag=FALSE), "\n",
+ "Mean: phi*mu1 + (1-phi)*mu2\n",
+ "Variance: phi*sd1^2 + (1-phi)*sd2^2 + phi*(1-phi)*(mu1-mu2)^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(rbind(diag(4), c(0,0,1,0)), x, .esd,
+ constraints, int=TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero, .esd=esd))),
+ initialize=eval(substitute(expression({
+ if(ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or one-column matrix")
+ predictors.names = c(namesof("phi", .lphi, tag=FALSE),
+ namesof("mu1", .lmu, tag=FALSE),
+ namesof("sd1", .lsd, tag=FALSE),
+ namesof("mu2", .lmu, tag=FALSE),
+ namesof("sd2", .lsd, tag=FALSE))
+ if(!length(etastart)) {
+ qy = quantile(y, prob= .qmu)
+ init.phi = if(length(.iphi)) rep(.iphi, length=n) else {
+ 0.5
+ }
+ init.mu1 = if(length(.imu1)) rep(.imu1, length=n) else {
+ rep(qy[1], length=n)
+ }
+ init.mu2 = if(length(.imu2)) rep(.imu2, length=n) else {
+ rep(qy[2], length=n)
+ }
+ ind.1 = if(init.mu1[1] < init.mu2[1]) 1:round(n* init.phi[1]) else
+ round(n* init.phi[1]):n
+ ind.2 = if(init.mu1[1] < init.mu2[1]) round(n* init.phi[1]):n else
+ 1:round(n* init.phi[1])
+ sorty = sort(y)
+ init.sd1 = if(length(.isd1)) rep(.isd1, length=n) else {
+ sd(sorty[ind.1])
+ }
+ init.sd2 = if(length(.isd2)) rep(.isd2, length=n) else {
+ sd(sorty[ind.2])
+ }
+ etastart = cbind(theta2eta(init.phi, .lphi),
+ theta2eta(init.mu1, .lmu),
+ theta2eta(init.sd1, .lsd),
+ theta2eta(init.mu2, .lmu),
+ theta2eta(init.sd2, .lsd))
+ }
+ }), list(.lphi=lphi, .lmu=lmu, .iphi=iphi, .imu1=imu1, .imu2=imu2,
+ .lsd=lsd, .isd1=isd1, .isd2=isd2, .qmu=qmu))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ phi = eta2theta(eta[,1], link= .lphi)
+ mu1 = eta2theta(eta[,2], link= .lmu)
+ mu2 = eta2theta(eta[,4], link= .lmu)
+ phi*mu1 + (1-phi)*mu2
+ }, list(.lphi=lphi, .lmu=lmu))),
+ last=eval(substitute(expression({
+ misc$links = if( .esd) c("phi"= .lphi, "mu1"= .lmu, "sd"= .lsd,
+ "mu2"= .lmu) else c("phi"= .lphi, "mu1"= .lmu,
+ "sd1"= .lsd, "mu2"= .lmu, "sd2"= .lsd)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list(.lphi=lphi, .lmu=lmu, .lsd=lsd, .esd=esd))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ phi = eta2theta(eta[,1], link= .lphi)
+ mu1 = eta2theta(eta[,2], link= .lmu)
+ sd1 = eta2theta(eta[,3], link= .lsd)
+ mu2 = eta2theta(eta[,4], link= .lmu)
+ sd2 = eta2theta(eta[,5], link= .lsd)
+ f1 = dnorm(y, mean=mu1, sd=sd1)
+ f2 = dnorm(y, mean=mu2, sd=sd2)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * log(phi*f1 + (1-phi)*f2))
+ }, list(.lphi=lphi, .lmu=lmu, .lsd=lsd))),
+ vfamily=c("mix2normal1"),
+ deriv=eval(substitute(expression({
+ phi = eta2theta(eta[,1], link= .lphi)
+ mu1 = eta2theta(eta[,2], link= .lmu)
+ sd1 = eta2theta(eta[,3], link= .lsd)
+ mu2 = eta2theta(eta[,4], link= .lmu)
+ sd2 = eta2theta(eta[,5], link= .lsd)
+ f1 = dnorm(y, mean=mu1, sd=sd1)
+ f2 = dnorm(y, mean=mu2, sd=sd2)
+ pdf = phi*f1 + (1-phi)*f2
+ df1.dmu1 = (y-mu1) * f1 / sd1^2
+ df2.dmu2 = (y-mu2) * f2 / sd2^2
+ dl.dphi = (f1-f2) / pdf
+ dl.dmu1 = phi * df1.dmu1 / pdf
+ dl.dmu2 = (1-phi) * df2.dmu2 / pdf
+ dl.dsd1 = phi * f1 * (((y-mu1)/sd1)^2 - 1) / (sd1 * pdf)
+ dl.dsd2 = (1-phi) * f2 * (((y-mu2)/sd2)^2 - 1) / (sd2 * pdf)
+ dphi.deta = dtheta.deta(phi, link= .lphi)
+ dmu1.deta = dtheta.deta(mu1, link= .lmu)
+ dmu2.deta = dtheta.deta(mu2, link= .lmu)
+ dsd1.deta = dtheta.deta(sd1, link= .lsd)
+ dsd2.deta = dtheta.deta(sd2, link= .lsd)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dphi * dphi.deta,
+ dl.dmu1 * dmu1.deta,
+ dl.dsd1 * dsd1.deta,
+ dl.dmu2 * dmu2.deta,
+ dl.dsd2 * dsd2.deta)
+ derivnew
+ }), list(.lphi=lphi, .lmu=lmu, .lsd=lsd))),
+ weight = eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list(.lphi=lphi, .lmu=lmu))))
+}
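+
+# Usage sketch (simulated two-component data): the quasi-Newton (BFGS)
+# updates in @weight need the previous working weights, which is why
+# mix2normal1.control() forces save.weight=TRUE.
+#   set.seed(5)                                  # assumed example
+#   y <- c(rnorm(70, mean=4, sd=1), rnorm(30, mean=10, sd=2))
+#   fit <- vglm(y ~ 1, family=mix2normal1(qmu=c(0.2, 0.8)), trace=TRUE)
+#   coef(fit, matrix=TRUE)   # phi, mu1, sd1, mu2, sd2 on their link scales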
+
+
+
+
+mix2poisson.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+
+mix2poisson = function(lphi="logit", llambda="loge",
+ iphi=0.5, il1=NULL, il2=NULL,
+ qmu=c(0.2, 0.8), zero=1)
+{
+ if(mode(lphi) != "character" && mode(lphi) != "name")
+ lphi = as.character(substitute(lphi))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(!is.Numeric(qmu, allow=2, positive=TRUE) || any(qmu >= 1))
+ stop("bad input for argument \"qmu\"")
+ if(length(iphi) && (!is.Numeric(iphi, allow=1, positive=TRUE) || iphi>= 1))
+ stop("bad input for argument \"iphi\"")
+ if(length(il1) && !is.Numeric(il1))
+ stop("bad input for argument \"il1\"")
+ if(length(il2) && !is.Numeric(il2))
+ stop("bad input for argument \"il2\"")
+
+ new("vglmff",
+ blurb=c("Mixture of two univariate normals\n\n",
+ "Links: ",
+ namesof("phi",lphi), ", ",
+ namesof("lambda1", llambda, tag=FALSE), ", ",
+ namesof("lambda2", llambda, tag=FALSE), "\n",
+ "Mean: phi*lambda1 + (1-phi)*lambda2\n",
+ "Variance: phi*lambda1^2 + (1-phi)*lambda2^2 + phi*(1-phi)*(lambda1-lambda2)^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ if(ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or one-column matrix")
+ predictors.names = c(namesof("phi", .lphi, tag=FALSE),
+ namesof("lambda1", .llambda, tag=FALSE),
+ namesof("lambda2", .llambda, tag=FALSE))
+ if(!length(etastart)) {
+ qy = quantile(y, prob= .qmu)
+ init.phi = if(length(.iphi)) rep(.iphi, length=n) else {
+ 0.5
+ }
+ init.lambda1 = if(length(.il1)) rep(.il1, length=n) else {
+ rep(qy[1], length=n)
+ }
+ init.lambda2 = if(length(.il2)) rep(.il2, length=n) else {
+ rep(qy[2], length=n)
+ }
+ etastart = cbind(theta2eta(init.phi, .lphi),
+ theta2eta(init.lambda1, .llambda),
+ theta2eta(init.lambda2, .llambda))
+ }
+ }), list(.lphi=lphi, .llambda=llambda, .iphi=iphi, .il1=il1, .il2=il2,
+ .qmu=qmu))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ phi = eta2theta(eta[,1], link= .lphi)
+ lambda1 = eta2theta(eta[,2], link= .llambda)
+ lambda2 = eta2theta(eta[,3], link= .llambda)
+ phi*lambda1 + (1-phi)*lambda2
+ }, list(.lphi=lphi, .llambda=llambda))),
+ last=eval(substitute(expression({
+ misc$links = c("phi"= .lphi, "lambda1"= .llambda, "lambda2"= .llambda)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list(.lphi=lphi, .llambda=llambda))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ phi = eta2theta(eta[,1], link= .lphi)
+ lambda1 = eta2theta(eta[,2], link= .llambda)
+ lambda2 = eta2theta(eta[,3], link= .llambda)
+ f1 = dpois(y, lam=lambda1)
+ f2 = dpois(y, lam=lambda2)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * log(phi*f1 + (1-phi)*f2))
+ }, list(.lphi=lphi, .llambda=llambda))),
+ vfamily=c("mix2poisson"),
+ deriv=eval(substitute(expression({
+ phi = eta2theta(eta[,1], link= .lphi)
+ lambda1 = eta2theta(eta[,2], link= .llambda)
+ lambda2 = eta2theta(eta[,3], link= .llambda)
+ f1 = dpois(x=y, lam=lambda1)
+ f2 = dpois(x=y, lam=lambda2)
+ pdf = phi*f1 + (1-phi)*f2
+ df1.dlambda1 = dpois(y-1, lam=lambda1) - f1
+ df2.dlambda2 = dpois(y-1, lam=lambda2) - f2
+ dl.dphi = (f1-f2) / pdf
+ dl.dlambda1 = phi * df1.dlambda1 / pdf
+ dl.dlambda2 = (1-phi) * df2.dlambda2 / pdf
+ dphi.deta = dtheta.deta(phi, link= .lphi)
+ dlambda1.deta = dtheta.deta(lambda1, link= .llambda)
+ dlambda2.deta = dtheta.deta(lambda2, link= .llambda)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dphi * dphi.deta,
+ dl.dlambda1 * dlambda1.deta,
+ dl.dlambda2 * dlambda2.deta)
+ derivnew
+ }), list(.lphi=lphi, .llambda=llambda))),
+ weight = eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list(.lphi=lphi, .llambda=llambda))))
+}
+
+
diff --git a/R/family.nonlinear.q b/R/family.nonlinear.q
new file mode 100644
index 0000000..e6c1c64
--- /dev/null
+++ b/R/family.nonlinear.q
@@ -0,0 +1,152 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+vnonlinear.control <- function(regressor, save.weight=TRUE, ...)
+{
+
+
+
+ list(regressor=regressor,
+ save.weight=as.logical(save.weight)[1])
+}
+
+
+micmen <- function(rpar=0.001, divisor=10,
+ init1=NULL, init2=NULL,
+ link1="identity",
+ link2="identity",
+ dispersion=0,
+ zero=NULL)
+{
+
+
+ estimated.dispersion <- dispersion==0
+
+ if(mode(link1) != "character" && mode(link1) != "name")
+ link1 <- as.character(substitute(link1))
+ if(mode(link2) != "character" && mode(link2) != "name")
+ link2 <- as.character(substitute(link2))
+
+ new("vglmff",
+ blurb=c("Michaelis-Menton regression model\n",
+ "Y_i=theta1 * x_i / (theta2 + x_i) + e_i\n\n",
+ "Links: ",
+ namesof("theta1", link1), ", ",
+ namesof("theta2", link2),
+ "\n",
+ "Variance: constant"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M=2)
+ }), list(.zero=zero))),
+ deviance=function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ M <- if(is.matrix(y)) ncol(y) else 1
+ if(residuals) {
+ if(M>1) NULL else (y-mu) * sqrt(w)
+ } else
+ rss.vgam(y-mu, w, M=M)
+ },
+ initialize=eval(substitute(expression({
+ uvec = control$regressor # This is the regressor
+ extra$uvec = uvec # Needed for @inverse
+
+ predictors.names <- c(namesof("theta1", .link1, tag=FALSE),
+ namesof("theta2", .link2, tag=FALSE))
+
+ if(length(mustart) || length(coefstart))
+ stop("can't handle mustart or coefstart")
+ if(!length(etastart)) {
+ index <- (1:n)[uvec>quantile(uvec, prob=.85)]
+ init1 <- median(y[index])
+ init2 <- median(init1*uvec/y - uvec)
+
+ if(length(.init1)) init1 = .init1
+ if(length(.init2)) init2 = .init2
+
+ etastart = cbind(rep(theta2eta(init1, .link1), len=n),
+ rep(theta2eta(init2, .link2), len=n))
+ } else {
+ stop("can't handle etastart or mustart")
+ }
+
+ }), list(.init1=init1, .init2=init2,
+ .link1=link1, .link2=link2))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ theta1 <- eta2theta(eta[,1], .link1)
+ theta2 <- eta2theta(eta[,2], .link2)
+ theta1 * extra$uvec / (theta2 + extra$uvec)
+ }, list(.link1=link1, .link2=link2))),
+ last=eval(substitute(expression({
+ misc$link <- c(theta1= .link1, theta2= .link2)
+ misc$rpar <- rpar
+ fit$df.residual <- n - rank # Not n.big - rank
+ fit$df.total <- n # Not n.big
+
+ dpar <- .dispersion
+ if(!dpar) {
+ dpar <- sum(w * (y-mu)^2) / (n - p.big)
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+ }), list(.link1=link1, .link2=link2, .dispersion=dispersion,
+ .estimated.dispersion=estimated.dispersion))),
+ summary.dispersion=FALSE,
+ vfamily=c("micmen","vnonlinear"),
+ deriv=eval(substitute(expression({
+ if(iter>1) {
+ rpar = max(rpar / .divisor, 1000 * .Machine$double.eps)
+ } else {
+ rpar = .rpar
+ d3 = deriv3(~ theta1 * uvec / (theta2 + uvec),
+ c("theta1","theta2"), hessian=FALSE)
+ }
+
+ theta1 <- eta2theta(eta[,1], .link1)
+ theta2 <- eta2theta(eta[,2], .link2)
+
+ if(TRUE) {
+ dmus.dthetas = attr(eval(d3), "gradient")
+ } else {
+ dmu.dtheta1 <- uvec / (theta2 + uvec)
+ dmu.dtheta2 <- -theta1 * uvec / (uvec + theta2)^2
+ dmus.dthetas = cbind(dmu.dtheta1, dmu.dtheta2)
+ }
+
+ dthetas.detas = cbind(dtheta.deta(theta1, .link1),
+ dtheta.deta(theta2, .link2))
+
+ if(TRUE) {
+ index = iam(NA, NA, M=M, both=TRUE)
+ temp = dmus.dthetas * dthetas.detas
+ if(M>1)
+ temp[,2:M] = temp[,2:M] + sqrt(rpar)
+ w * (y-mu) * temp
+ } else {
+ w * (y-mu) *
+ cbind(dmus.dthetas[,1] * dthetas.detas[,1],
+ dmus.dthetas[,2] * dthetas.detas[,2] + sqrt(rpar))
+ }
+ }), list(.link1=link1, .link2=link2, .rpar=rpar, .divisor=divisor))),
+ weight=eval(substitute(expression({
+ if(TRUE) {
+ wz = dmus.dthetas[,index$row] * dmus.dthetas[,index$col] *
+ dthetas.detas[,index$row] * dthetas.detas[,index$col]
+ if(M>1)
+ wz[,2:M] = wz[,2:M] + rpar
+ } else {
+ wz = cbind((dmus.dthetas[,1] * dthetas.detas[,1])^2,
+ (dmus.dthetas[,2] * dthetas.detas[,2])^2 + rpar,
+ dmus.dthetas[,1] * dmus.dthetas[,2] *
+ dthetas.detas[,1] * dthetas.detas[,2])
+ }
+ w * wz
+ }), list(.link1=link1, .link2=link2))))
+}
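+
+# Usage sketch: the regressor x is not given in the formula but passed to
+# vnonlinear.control() through vglm()'s ... as the regressor argument.
+#   conc <- c(0.02, 0.06, 0.11, 0.22, 0.56, 1.10)      # assumed substrate values
+#   vel  <- 200 * conc / (0.1 + conc) + rnorm(6, sd=4) # simulated velocities
+#   fit <- vglm(vel ~ 1, micmen, regressor=conc, trace=TRUE)
+#   coef(fit, matrix=TRUE)   # theta1 (asymptote) and theta2 (half-saturation)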
+
+
diff --git a/R/family.normal.q b/R/family.normal.q
new file mode 100644
index 0000000..6002954
--- /dev/null
+++ b/R/family.normal.q
@@ -0,0 +1,471 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+VGAM.weights.function = function(w, M, n) {
+ ncolw = ncol(as.matrix(w))
+ if(ncolw == 1) {
+ wz = matrix(w, nrow=n, ncol=M) # w_i * diag(M)
+ } else if(ncolw == M) {
+ wz = as.matrix(w)
+ } else if(ncolw < M && M > 1) {
+ stop("ambiguous input for weights")
+ } else if(ncolw > M*(M+1)/2) {
+ stop("too many columns")
+ } else {
+ wz = as.matrix(w)
+ }
+ wz
+}
+
+
+
+
+
+
+
+
+gaussianff = function(dispersion=0, parallel=FALSE, zero=NULL)
+{
+ if(!is.Numeric(dispersion, allow=1) || dispersion < 0)
+ stop("bad input for argument 'dispersion'")
+ estimated.dispersion = dispersion==0
+
+ new("vglmff",
+ blurb=c("Vector linear/additive model\n",
+ "Links: identity for Y1,...,YM"),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ deviance=function(mu, y, w, residuals= FALSE, eta, extra=NULL) {
+ M = if(is.matrix(y)) ncol(y) else 1
+ n = if(is.matrix(y)) nrow(y) else length(y)
+ wz = VGAM.weights.function(w=w, M=M, n=n)
+ if(residuals) {
+ if(M > 1) {
+ U <- vchol(wz, M=M, n=n)
+ temp = mux22(U, y-mu, M=M, upper=TRUE, as.matrix=TRUE)
+ dimnames(temp) = dimnames(y)
+ temp
+ } else (y-mu) * sqrt(wz)
+ } else
+ rss.vgam(y-mu, wz=wz, M=M)
+ },
+ initialize=eval(substitute(expression({
+ if(is.R())
+ assign("CQO.FastAlgorithm", TRUE, envir = VGAMenv) else
+ CQO.FastAlgorithm <<- TRUE
+ if(any(function.name == c("cqo","cao")) &&
+ (length( .zero ) || (is.logical( .parallel ) && .parallel )))
+ stop("cannot handle non-default arguments for cqo() and cao()")
+
+ M = if(is.matrix(y)) ncol(y) else 1
+ dy = dimnames(y)
+ predictors.names = if(!is.null(dy[[2]])) dy[[2]] else
+ paste("Y",1:M,sep="")
+ if(!length(etastart))
+ etastart = 0 * y
+ }), list( .parallel=parallel, .zero=zero ))),
+ inverse=function(eta, extra=NULL) eta,
+ last=eval(substitute(expression({
+ dy = dimnames(y)
+ if(!is.null(dy[[2]]))
+ dimnames(fit$fitted.values) = dy
+ dpar = .dispersion
+ if(!dpar) {
+ wz = VGAM.weights.function(w=w, M=M, n=n)
+ temp = rss.vgam(y-mu, wz=wz, M=M)
+ dpar = temp / (length(y) - ncol(xbig.save))
+ }
+ misc$dispersion = dpar
+ misc$default.dispersion = 0
+ misc$estimated.dispersion = .estimated.dispersion
+ misc$link = rep("identity", length=M)
+ names(misc$link) = predictors.names
+
+ if(is.R()) {
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
+ } else {
+ while(exists("CQO.FastAlgorithm"))
+ remove("CQO.FastAlgorithm")
+ }
+ }), list( .dispersion=dispersion,
+ .estimated.dispersion=estimated.dispersion ))),
+ loglikelihood= function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ M = if(is.matrix(y)) ncol(y) else 1
+ n = if(is.matrix(y)) nrow(y) else length(y)
+ wz = VGAM.weights.function(w=w, M=M, n=n)
+ temp = rss.vgam(y, wz=wz, M=M)
+ -0.5 * temp
+ },
+ link=function(mu, extra=NULL) mu,
+ vfamily="gaussianff",
+ deriv=expression({
+ wz = VGAM.weights.function(w=w, M=M, n=n)
+ mux22(cc=t(wz), xmat=y-mu, M=M, as.mat=TRUE)
+ }),
+ weight= expression({
+ wz
+ }))
+}
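+
+# Usage sketch (simulated data): with identity links for every column,
+# gaussianff() fits a multivariate linear model by least squares.
+#   set.seed(6)                                  # assumed example
+#   n <- 50; x2 <- runif(n)
+#   ymat <- cbind(y1 = 1 + 2*x2 + rnorm(n), y2 = -1 + x2 + rnorm(n))
+#   fit <- vglm(ymat ~ x2, family=gaussianff)
+#   coef(fit, matrix=TRUE)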
+
+
+
+
+
+
+dposnorm = function(x, mean=0, sd=1) {
+ L = max(length(x), length(mean), length(sd))
+ x = rep(x, len=L); mean = rep(mean, len=L); sd = rep(sd, len=L);
+ ifelse(x < 0, 0, dnorm(x=x, mean=mean, sd=sd) /
+ (1-pnorm(q=0, mean=mean, sd=sd)))
+}
+
+pposnorm = function(q, mean=0, sd=1) {
+ L = max(length(q), length(mean), length(sd))
+ q = rep(q, len=L); mean = rep(mean, len=L); sd = rep(sd, len=L);
+ ifelse(q < 0, 0, (pnorm(q=q, mean=mean, sd=sd) -
+ pnorm(q=0, mean=mean, sd=sd)) /
+ (1-pnorm(q=0, mean=mean, sd=sd)))
+}
+
+qposnorm = function(p, mean=0, sd=1) {
+ if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+ stop("bad input for argument \"p\"")
+ qnorm(p=p+(1-p)*pnorm(0, mean=mean, sd=sd), mean=mean, sd=sd)
+}
+
+rposnorm = function(n, mean=0, sd=1) {
+ if(!is.Numeric(n, integ=TRUE, posit=TRUE))
+ stop("bad input for argument \"n\"")
+ y = rnorm(n, mean=mean, sd=sd)
+ mean = rep(mean, length=n)
+ sd = rep(sd, length=n)
+ repeat {
+ index = y < 0
+ if(any(index)) {
+ y[index] = rnorm(n=sum(index), mean=mean[index], sd=sd[index])
+ } else break
+ }
+ y
+}
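+
+# Consistency check sketch for the four positive-normal functions above:
+#   pp <- pposnorm(1.5, mean=1, sd=2)
+#   qposnorm(pp, mean=1, sd=2)                        # should return 1.5
+#   integrate(dposnorm, 0, Inf, mean=1, sd=2)$value   # should be close to 1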
+
+posnormal1 = function(lmean="identity", lsd="loge",
+ imean=NULL, isd=NULL, zero=NULL)
+{
+ if(mode(lmean) != "character" && mode(lmean) != "name")
+ lmean = as.character(substitute(lmean))
+ if(mode(lsd) != "character" && mode(lsd) != "name")
+ lsd = as.character(substitute(lsd))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(length(isd) && !is.Numeric(isd, posit=TRUE))
+ stop("bad input for argument \"isd\"")
+
+ new("vglmff",
+ blurb=c("Positive (univariate) normal distribution\n\n",
+ "Links: ",
+ namesof("mean", lmean, tag= TRUE), "; ",
+ namesof("sd", lsd, tag= TRUE)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("mean", .lmean, tag= FALSE),
+ namesof("sd", .lsd, tag= FALSE))
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(min(y) <= 0)
+ stop("response must be positive")
+ if(!length(etastart)) {
+ init.me = if(length( .imean)) rep( .imean, len=n) else NULL
+ init.sd = if(length( .isd )) rep( .isd , len=n) else NULL
+ if(!length(init.me)) init.me = rep(quantile(y, probs=0.40), len=n)
+ if(!length(init.sd)) init.sd = rep(sd(y)*1.2, len=n)
+ etastart = cbind(theta2eta(init.me, .lmean),
+ theta2eta(init.sd, .lsd))
+ }
+ }), list( .lmean=lmean, .lsd=lsd, .imean=imean, .isd=isd ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], .lmean)
+ mysd = eta2theta(eta[,2], .lsd)
+ mymu + mysd * dnorm(-mymu/mysd) / (1-pnorm(-mymu/mysd))
+ }, list( .lmean=lmean, .lsd=lsd ))),
+ last=eval(substitute(expression({
+ misc$link = c("mean"= .lmean, "sd"= .lsd) # zz mu or mean ?
+ misc$expected = TRUE
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], .lmean)
+ mysd = eta2theta(eta[,2], .lsd)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R())
+ sum(w*(dnorm(y, m=mymu, sd=mysd, log=TRUE) -
+ pnorm(-mymu/mysd, log=TRUE, lower.tail=FALSE))) else
+ sum(w*(-log(mysd)-0.5*((y-mymu)/mysd)^2 -log(1-pnorm(-mymu/mysd))))
+ }
+ }, list( .lmean=lmean, .lsd=lsd ))),
+ vfamily=c("posnormal1"),
+ deriv=eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean)
+ mysd = eta2theta(eta[,2], .lsd)
+ zedd = (y-mymu) / mysd
+ temp7 = dnorm(-mymu/mysd)
+ temp8 = if(is.R()) pnorm(-mymu/mysd, low=FALSE) else 1-pnorm(-mymu/mysd)
+ temp8 = temp8 * mysd
+ dl.dmu = zedd / mysd - temp7 / temp8
+ dl.dsd = (mymu*temp7/temp8 + zedd^2 - 1) / mysd
+ dmu.deta = dtheta.deta(mymu, .lmean)
+ dsd.deta = dtheta.deta(mysd, .lsd)
+ w * cbind(dl.dmu * dmu.deta,
+ dl.dsd * dsd.deta)
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ ed2l.dmu2 = (1 - temp7*mymu/temp8) / mysd^2 - (temp7/temp8)^2
+ ed2l.dmusd = (temp7 /(mysd * temp8)) * (1 + (mymu/mysd)^2 +
+ mymu*temp7 / temp8)
+ ed2l.dsd2 = 2 / mysd^2 - (temp7 * mymu /(mysd^2 * temp8)) *
+ (1 + (mymu/mysd)^2 + mymu*temp7/temp8)
+ wz[,iam(1,1,M)] = ed2l.dmu2 * dmu.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsd2 * dsd.deta^2
+ wz[,iam(1,2,M)] = ed2l.dmusd * dsd.deta * dmu.deta
+ w * wz
+ }), list( .lmean=lmean, .lsd=lsd ))))
+}
+
+
+
+dbetanorm = function(x, shape1, shape2, mean=0, sd=1, log.arg=FALSE) {
+ if(!is.logical(log.arg) || length(log.arg)!=1)
+ stop("bad input for argument \"log.arg\"")
+ ans =
+ if(is.R() && log.arg) {
+ dnorm(x=x, mean=mean, sd=sd, log=TRUE) +
+ (shape1-1) * pnorm(q=x, mean=mean, sd=sd, log=TRUE) +
+ (shape2-1) * pnorm(q=x, mean=mean, sd=sd, lower=FALSE, log=TRUE) -
+ lbeta(shape1, shape2)
+ } else {
+ dnorm(x=x, mean=mean, sd=sd) *
+ pnorm(q=x, mean=mean, sd=sd)^(shape1-1) *
+ pnorm(q=x, mean=mean, sd=sd, lower=FALSE)^(shape2-1) /
+ beta(shape1, shape2)
+ }
+ if(!is.R() && log.arg) ans = log(ans)
+ ans
+}
+
+pbetanorm = function(q, shape1, shape2, mean=0, sd=1,
+ lower.tail=TRUE, log.p=FALSE) {
+ pbeta(q=pnorm(q=q, mean=mean, sd=sd), shape1=shape1, shape2=shape2,
+ lower.tail = lower.tail, log.p = log.p)
+}
+
+qbetanorm = function(p, shape1, shape2, mean=0, sd=1) {
+ if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+ stop("bad input for argument \"p\"")
+ qnorm(p=qbeta(p=p, shape1=shape1, shape2=shape2), mean=mean, sd=sd)
+}
+
+rbetanorm = function(n, shape1, shape2, mean=0, sd=1) {
+ if(!is.Numeric(n, integ=TRUE, posit=TRUE))
+ stop("bad input for argument \"n\"")
+ qnorm(p=qbeta(p=runif(n), shape1=shape1, shape2=shape2), mean=mean, sd=sd)
+}
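+
+# A quick sanity check, assuming standard d/p/q/r semantics: the quantile
+# and distribution functions should be inverses, and the density should
+# integrate to one.
+# p = c(0.25, 0.5, 0.75)
+# max(abs(pbetanorm(qbetanorm(p, 2, 3), 2, 3) - p)) # ~ 0
+# integrate(dbetanorm, -Inf, Inf, shape1=2, shape2=3)$value # ~ 1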
+
+
+
+tikuv = function(d, lmean="identity", lsigma="loge",
+ isigma=NULL, zero=2)
+{
+ if(mode(lmean) != "character" && mode(lmean) != "name")
+ lmean = as.character(substitute(lmean))
+ if(mode(lsigma) != "character" && mode(lsigma) != "name")
+ lsigma = as.character(substitute(lsigma))
+ if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
+ max(zero) > 2))
+ stop("bad input for argument \"zero\"")
+ if(!is.Numeric(d, allow=1) || max(d) >= 2)
+ stop("bad input for argument \"d\"")
+
+ new("vglmff",
+ blurb=c("Short-tailed symmetric [Tiku and Vaughan (1999)] distribution\n",
+ "Link: ",
+ namesof("mean", lmean), ", ",
+ namesof("sigma", lsigma),
+ "\n",
+ "\n",
+ "Mean: mean"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("the response must be a vector or one-column matrix")
+ predictors.names = c(namesof("mean", .lmean, tag= FALSE),
+ namesof("sigma", .lsigma, tag= FALSE))
+ if(!length(etastart)) {
+ sigma.init = if(length(.isigma)) rep(.isigma, length=n) else {
+ hh = 2 - .d
+ KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ K2 = 1 + 3/hh + 15/(4*hh^2)
+ rep(sqrt(var(y) / (KK*K2)), len=n)
+ }
+ mean.init = rep(weighted.mean(y, w), len=n)
+ etastart = cbind(theta2eta(mean.init, .lmean),
+ theta2eta(sigma.init, .lsigma))
+ }
+ }),list( .lmean=lmean, .lsigma=lsigma, .isigma=isigma, .d=d ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmean)
+ }, list( .lmean=lmean ))),
+ last=eval(substitute(expression({
+ misc$link = c("mean"= .lmean, "sigma"= .lsigma)
+ misc$expected = TRUE
+ misc$d = .d
+ }), list( .lmean=lmean, .lsigma=lsigma, .d=d ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], .lmean)
+ sigma = eta2theta(eta[,2], .lsigma)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ zedd = (y - mymu) / sigma
+ hh = 2 - .d
+ sum(w * (-log(sigma) + 2 * log(1 + 0.5*zedd^2 / hh) - 0.5*zedd^2))
+ }
+ }, list( .lmean=lmean, .lsigma=lsigma, .d=d ))),
+ vfamily=c("tikuv"),
+ deriv=eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean)
+ sigma = eta2theta(eta[,2], .lsigma)
+ dmu.deta = dtheta.deta(mymu, .lmean)
+ dsigma.deta = dtheta.deta(sigma, .lsigma)
+ zedd = (y - mymu) / sigma
+ hh = 2 - .d
+ gzedd = zedd / (1 + 0.5*zedd^2 / hh)
+ dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
+ dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
+ w * cbind(dl.dmu * dmu.deta, dl.dsigma * dsigma.deta)
+ }), list( .lmean=lmean, .lsigma=lsigma, .d=d ))),
+ weight=eval(substitute(expression({
+ ayy = 1 / (2*hh)
+ Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
+ Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
+ ed2l.dmymu2 = Dnos / sigma^2
+ ed2l.dnu2 = Dstar / sigma^2
+ wz = matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz[,iam(1,1,M)] = ed2l.dmymu2 * dmu.deta^2
+ wz[,iam(2,2,M)] = ed2l.dnu2 * dsigma.deta^2
+ w * wz
+ }), list( .lmean=lmean, .lsigma=lsigma ))))
+}
+
+
+dtikuv = function(x, d, mean=0, sigma=1) {
+ if(!is.Numeric(d, allow=1) || max(d) >= 2)
+ stop("bad input for argument \"d\"")
+ L = max(length(x), length(mean), length(sigma))
+ x = rep(x, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
+ hh = 2 - d
+ KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ dnorm(x=x, mean=mean, sd=sigma) * KK * (1 + ((x-mean)/sigma)^2 / (2*hh))^2
+}
+
+
+ptikuv = function(q, d, mean=0, sigma=1) {
+ if(!is.Numeric(d, allow=1) || max(d) >= 2)
+ stop("bad input for argument \"d\"")
+ L = max(length(q), length(mean), length(sigma))
+ q = rep(q, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
+ zedd1 = 0.5 * ((q - mean) / sigma)^2
+ ans = q*0 + 0.5
+ hh = 2 - d
+ KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ if(any(lhs <- q < mean)) {
+ ans[lhs] = ( KK/(2*sqrt(pi))) * (
+ gamma(0.5) * (1 - pgamma(zedd1[lhs], 0.5)) +
+ 2 * gamma(1.5) * (1 - pgamma(zedd1[lhs], 1.5)) / hh +
+ gamma(2.5) * (1 - pgamma(zedd1[lhs], 2.5)) / hh^2)
+ }
+ if(any(rhs <- q > mean)) {
+ ans[rhs] = 1.0 - Recall(q=(2*mean[rhs]-q[rhs]), d=d,
+ mean=mean[rhs], sigma=sigma[rhs])
+ }
+ ans
+}
+
+
+qtikuv = function(p, d, mean=0, sigma=1, ...) {
+ if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(d, allow=1) || max(d) >= 2)
+ stop("bad input for argument \"d\"")
+ if(!is.Numeric(mean))
+ stop("bad input for argument \"mean\"")
+ if(!is.Numeric(sigma))
+ stop("bad input for argument \"sigma\"")
+ L = max(length(p), length(mean), length(sigma))
+ p = rep(p, len=L); mean = rep(mean, len=L); sigma = rep(sigma, len=L);
+ ans = rep(0.0, len=L)
+ myfun = function(x, d, mean=0, sigma=1, p)
+ ptikuv(q=x, d=d, mean=mean, sigma=sigma) - p
+ for(i in 1:L) {
+ Lower = ifelse(p[i] <= 0.5, mean[i] - 3 * sigma[i], mean[i])
+ while(ptikuv(q=Lower, d=d, mean=mean[i], sigma=sigma[i]) > p[i])
+ Lower = Lower - sigma[i]
+ Upper = ifelse(p[i] >= 0.5, mean[i] + 3 * sigma[i], mean[i])
+ while(ptikuv(q=Upper, d=d, mean=mean[i], sigma=sigma[i]) < p[i])
+ Upper = Upper + sigma[i]
+ ans[i] = uniroot(f=myfun, lower=Lower, upper=Upper,
+ d=d, mean=mean[i], sigma=sigma[i], p=p[i], ...)$root
+ }
+ ans
+}
+
+
+rtikuv = function(n, d, mean=0, sigma=1, Smallno=1.0e-6) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(d, allow=1) || max(d) >= 2)
+ stop("bad input for argument \"d\"")
+ if(!is.Numeric(mean, allow=1))
+ stop("bad input for argument \"mean\"")
+ if(!is.Numeric(sigma, allow=1))
+ stop("bad input for argument \"sigma\"")
+ if(!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument \"Smallno\"")
+ ans = rep(0.0, len=n)
+
+ ptr1 = 1; ptr2 = 0
+ hh = 2 - d
+ KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ ymax = ifelse(hh < 2,
+ dtikuv(x=mean + sigma*sqrt(4 - 2*hh), d=d, m=mean, s=sigma),
+ KK / (sqrt(2 * pi) * sigma))
+ while(ptr2 < n) {
+ Lower = mean - 5 * sigma
+ while(ptikuv(q=Lower, d=d, mean=mean, sigma=sigma) > Smallno)
+ Lower = Lower - sigma
+ Upper = mean + 5 * sigma
+ while(ptikuv(q=Upper, d=d, mean=mean, sigma=sigma) < 1-Smallno)
+ Upper = Upper + sigma
+ x = runif(2*n, min=Lower, max=Upper)
+ index = runif(2*n, max=ymax) < dtikuv(x,d=d,m=mean,s=sigma)
+ sindex = sum(index)
+ if(sindex) {
+ ptr2 = min(n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 = ptr2 + 1
+ }
+ }
+ ans
+}
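+
+# A quick sanity check, assuming d=1 (so h = 2-d = 1) is a valid choice:
+# integrate(dtikuv, -Inf, Inf, d=1)$value # ~ 1
+# ptikuv(qtikuv(p=0.75, d=1), d=1) # ~ 0.75
+# mean(rtikuv(1000, d=1)) # ~ 0; the density is symmetric about 'mean'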
+
+
+
diff --git a/R/family.positive.q b/R/family.positive.q
new file mode 100644
index 0000000..78519b4
--- /dev/null
+++ b/R/family.positive.q
@@ -0,0 +1,359 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+posnegbinomial = function(lmunb = "loge", lk = "loge",
+ ik = NULL, zero = -2, cutoff = 0.995,
+ method.init=1)
+{
+ if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+ stop("range error in the argument \"cutoff\"")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 3) stop("argument \"method.init\" must be 1, 2 or 3")
+ if(length(ik) && !is.Numeric(ik, posit=TRUE))
+ stop("bad input for argument \"ik\"")
+
+ if(mode(lmunb) != "character" && mode(lmunb) != "name")
+ lmunb = as.character(substitute(lmunb))
+ if(mode(lk) != "character" && mode(lk) != "name")
+ lk = as.character(substitute(lk))
+
+ new("vglmff",
+ blurb=c("Positive-negative binomial distribution\n\n",
+ "Links: ",
+ namesof("munb", lmunb), ", ",
+ namesof("k", lk), "\n",
+ "Mean: munb / (1 - (k/(k+munb))^k)"),
+ constraints=eval(substitute(expression({
+ temp752 = .zero
+ if(length(temp752) && all(temp752 == -2))
+ temp752 = 2*(1:ncol(y))
+ constraints = cm.zero.vgam(constraints, x, temp752, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(any(y==0)) stop("there are zero values in the response")
+ y = as.matrix(y)
+ M = 2 * ncol(y)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ predictors.names = c(namesof(if(NOS==1) "munb" else
+ paste("munb", 1:NOS, sep=""), .lmunb, tag= FALSE),
+ namesof(if(NOS==1) "k" else paste("k", 1:NOS, sep=""),
+ .lk, tag= FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M=2)]
+ if(!length(etastart)) {
+ if( .method.init == 3) {
+ mu.init = y + 1/8
+ } else {
+ mu.init = y
+ for(iii in 1:ncol(y))
+ mu.init[,iii] = if( .method.init == 2)
+ weighted.mean(y[,iii], w=w) else
+ median(rep(y[,iii], w)) + 1/8
+ }
+ if( is.Numeric( .ik )) {
+ kmat0 = matrix( .ik, nr=n, nc=NOS, byrow=TRUE)
+ } else {
+ kmat0 = matrix(0, nr=n, nc=NOS)
+ Loglikfun = function(y, munb, kmat, w) {
+ p0 = (kmat / (kmat + munb))^kmat
+ sum(w * (y * log(munb/(munb+kmat)) + kmat*log(kmat/(munb+kmat)) +
+ lgamma(y+kmat) - lgamma(kmat) - lgamma(y+1) -
+ (if(is.R()) log1p(-p0) else log(1 - p0)))) }
+
+ k.grid = rvar = 2^((-3):6)
+ for(spp. in 1:NOS) {
+ for(ii in 1:length(k.grid))
+ rvar[ii] = Loglikfun(y=y[,spp.], munb=mu.init[,spp.],
+ kmat=k.grid[ii], w=w)
+ try.this = k.grid[rvar == max(rvar)]
+ kmat0[,spp.] = try.this
+ }
+ }
+ p00 = (kmat0 / (kmat0 + mu.init))^kmat0
+ etastart = cbind(theta2eta(mu.init*(1-p00), .lmunb),
+ theta2eta(kmat0, .lk))
+ etastart = etastart[,interleave.VGAM(M, M=2),drop=FALSE]
+ }
+ }), list( .lmunb=lmunb, .lk=lk, .ik=ik, .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ NOS = ncol(eta) / 2
+ munb = eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmunb)
+ kmat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lk)
+ p0 = (kmat / (kmat + munb))^kmat
+ munb / (1 - p0)
+ }, list( .lk=lk, .lmunb=lmunb ))),
+ last=eval(substitute(expression({
+ temp0303 = c(rep(.lmunb, length=NOS), rep(.lk, length=NOS))
+ names(temp0303) = c(if(NOS==1) "munb" else paste("munb", 1:NOS, sep=""),
+ if(NOS==1) "k" else paste("k", 1:NOS, sep=""))
+ temp0303 = temp0303[interleave.VGAM(M, M=2)]
+ misc$link = temp0303 # Already named
+ misc$cutoff = .cutoff
+ misc$method.init = .method.init
+ }), list( .lmunb=lmunb, .lk=lk, .cutoff=cutoff,
+ .method.init=method.init ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ NOS = ncol(eta) / 2
+ munb = eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmunb)
+ kmat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lk)
+ p0 = (kmat / (kmat + munb))^kmat
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (y * log(munb/(munb+kmat)) + kmat*log(kmat/(munb+kmat)) +
+ lgamma(y+kmat) - lgamma(kmat) - lgamma(y+1) -
+ (if(is.R()) log1p(-p0) else log(1 - p0))))
+ }, list( .lmunb=lmunb, .lk=lk ))),
+ vfamily=c("posnegbinomial"),
+ deriv=eval(substitute(expression({
+ NOS= extra$NOS
+ munb = eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmunb)
+ kmat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lk)
+ d3 = deriv3(~ -log(1 - (kmat. /(kmat. + munb. ))^kmat. ),
+ c("munb.", "kmat."), hessian= TRUE) # Extra term
+ dl0.dthetas = array(NA, c(n, NOS, 2))
+ d2l0.dthetas2 = array(NA, c(n, NOS, 3)) # matrix-band format
+ for(spp. in 1:NOS) {
+ kmat. = kmat[,spp.]
+ munb. = munb[,spp.]
+ eval.d3 = eval(d3) # Evaluated for one species
+ dl0.dthetas[,spp.,1] = attr(eval.d3, "gradient")[,1]
+ dl0.dthetas[,spp.,2] = attr(eval.d3, "gradient")[,2]
+ d2l0.dthetas2[,spp.,1] = attr(eval.d3, "hessian")[,1,1]
+ d2l0.dthetas2[,spp.,2] = attr(eval.d3, "hessian")[,2,2]
+ d2l0.dthetas2[,spp.,3] = attr(eval.d3, "hessian")[,1,2]
+ }
+
+ NOS = ncol(eta) / 2
+ dl.dmunb = y/munb - (y+kmat)/(kmat+munb) + dl0.dthetas[,,1]
+ dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(munb+kmat) + 1 +
+ log(kmat/(kmat+munb)) + dl0.dthetas[,,2]
+ dmunb.deta = dtheta.deta(munb, .lmunb)
+ dk.deta = dtheta.deta(kmat, .lk)
+ myderiv = w * cbind(dl.dmunb * dmunb.deta, dl.dk * dk.deta)
+ myderiv[,interleave.VGAM(M, M=2)]
+ }), list( .lmunb=lmunb, .lk=lk ))),
+ weight=eval(substitute(expression({
+ wz = matrix(0, n, 4*NOS-1) # wz is no longer 'diagonal'
+ p0 = (kmat / (kmat + munb))^kmat
+ ed2l.dmunb2 = (1/munb - (munb + kmat*(1-p0))/(munb+kmat)^2) / (1-p0) -
+ d2l0.dthetas2[,,1]
+ fred = dotFortran(name="enbin8",
+ ans=double(n*NOS),
+ as.double(kmat),
+ as.double(kmat/(munb+kmat)),
+ as.double(.cutoff),
+ as.integer(n), ok=as.integer(1), as.integer(NOS),
+ sumpdf=double(1), macheps=as.double(.Machine$double.eps))
+ if(fred$ok != 1)
+ stop("error in Fortran subroutine exnbin")
+ dim(fred$ans) = c(n, NOS)
+ ed2l.dk2 = -fred$ans/(1-p0) - 1/kmat + 1/(kmat+munb) -
+ munb * p0 / ((1-p0)*(munb+kmat)^2) - d2l0.dthetas2[,,2]
+ wz[,2*(1:NOS)-1] = dmunb.deta^2 * ed2l.dmunb2
+ wz[,2*(1:NOS)] = dk.deta^2 * ed2l.dk2
+ wz[,2*NOS+2*(1:NOS)-1] = -d2l0.dthetas2[,,3] * dmunb.deta * dk.deta
+ w * wz
+ }), list( .cutoff=cutoff ))))
+}
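+
+# A minimal fitting sketch, assuming the usual vglm() calling convention;
+# zero-truncating rnbinom() draws by rejection gives valid test data.
+# y = rnbinom(500, mu=4, size=2); y = y[y > 0]
+# fit = vglm(y ~ 1, posnegbinomial(), trace=TRUE)
+# Coef(fit) # munb and k on the original scale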
+
+
+
+dpospois = function(x, lambda) {
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ L = max(length(x), length(lambda))
+ x = rep(x, len=L); lambda = rep(lambda, len=L);
+ ans = ifelse(x==0, 0, dpois(x, lambda) / (1 - exp(-lambda)))
+ ans
+}
+
+
+ppospois = function(q, lambda) {
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ L = max(length(q), length(lambda))
+ q = rep(q, len=L); lambda = rep(lambda, len=L);
+ ifelse(q<1, 0, (ppois(q, lambda) - exp(-lambda)) / (1 - exp(-lambda)))
+}
+
+qpospois = function(p, lambda) {
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("bad input for argument \"p\"")
+ qpois(p * (1 - exp(-lambda)) + exp(-lambda), lambda)
+}
+
+
+rpospois = function(n, lambda) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ ans = rpois(n, lambda)
+ lambda = rep(lambda, len=n)
+ index = ans == 0
+ while(any(index)) {
+ more = rpois(sum(index), lambda[index])
+ ans[index] = more
+ index = ans == 0
+ }
+ ans
+}
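+
+# A quick sanity check, assuming standard d/p/q/r semantics; the mean of
+# the positive Poisson is lambda/(1-exp(-lambda)).
+# lambda = 2
+# sum(dpospois(1:100, lambda) * (1:100)) # ~ lambda/(1-exp(-lambda))
+# ppospois(qpospois(p=0.9, lambda), lambda) # >= 0.9 (discrete CDF)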
+
+
+
+
+pospoisson = function(link="loge")
+{
+ if(!missing(link))
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Positive-Poisson distribution\n\n",
+ "Links: ",
+ namesof("lambda", link, tag=FALSE),
+ "\n"),
+ initialize=eval(substitute(expression({
+ y = as.matrix(y)
+ predictors.names = namesof(if(ncol(y)==1) "lambda"
+ else paste("lambda", 1:ncol(y), sep=""), .link, tag=FALSE)
+ if(!length(etastart))
+ etastart = theta2eta(y / (1-exp(-y)), .link)
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ lambda = eta2theta(eta, .link)
+ lambda / (1-exp(-lambda))
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(lambda = .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
+ lambda = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(1-exp(-lambda)) - lambda + y*log(lambda)))
+ }, list( .link=link ))),
+ vfamily=c("pospoisson"),
+ deriv=eval(substitute(expression({
+ lambda = eta2theta(eta, .link)
+ dl.dlambda = y/lambda - 1 - 1/(exp(lambda)-1)
+ dlambda.deta = dtheta.deta(lambda, .link)
+ w * dl.dlambda * dlambda.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ temp = exp(lambda)
+ ed2l.dlambda2 = -temp * (1/lambda - 1/(temp-1)) / (temp-1)
+ wz = -w * (dlambda.deta^2) * ed2l.dlambda2
+ wz
+ }), list( .link=link ))))
+}
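+
+# A minimal fitting sketch, assuming the usual vglm() calling convention,
+# with data simulated from rpospois() above.
+# pdat = data.frame(y = rpospois(200, lambda=3))
+# fit = vglm(y ~ 1, pospoisson(), data=pdat, trace=TRUE)
+# Coef(fit) # lambda-hat, should be near 3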
+
+
+
+posbinomial = function(link="logit")
+{
+
+ if(!missing(link))
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Positive-Binomial distribution\n\n",
+ "Links: ",
+ namesof("p", link, tag=FALSE), "\n"),
+ initialize=eval(substitute(expression({
+ eval(binomialff(link= .link)@initialize)
+ predictors.names = namesof("p", .link, tag = FALSE)
+ if(length(extra)) extra$w = w else extra = list(w=w)
+ if(!length(etastart))
+ etastart = cbind(theta2eta(mustart, .link))
+ }), list( .link = link ))),
+ inverse=eval(substitute(function(eta, extra=NULL){
+ theta = eta2theta(eta, .link)
+ theta/(1-(1-theta)^(extra$w))},
+ list(.link=link ))),
+ last=eval(substitute(expression({
+ extra$w = NULL # Kill it off
+ misc$link = c(p = .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE,eta,extra=NULL) {
+ yi = round(y*w)
+ theta = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(yi*log(theta)+(w-yi)*log(1-theta)-log(1-(1-theta)^w))
+ }, list( .link=link ))),
+ vfamily=c("posbinomial"),
+ deriv=eval(substitute(expression({
+ yi = round(y*w)
+ theta = eta2theta(eta, .link)
+ dldtheta = yi/theta-(w-yi)/(1-theta)-w*(1-theta)^(w-1) /
+ (1-(1-theta)^w)
+ dthetadeta = dtheta.deta(theta, .link)
+ dldtheta * dthetadeta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ temp = 1 - (1-theta)^w
+ temp2 = (1-theta)^2
+ ed2ldtheta2 = -w/(theta*temp) - w/temp2 + w*theta/(temp2*temp) +
+ w*(w-1)* (1-theta)^(w-2) /temp +
+ w^2 * temp2^(w-1) / temp^2
+ wz = -(dthetadeta^2) * ed2ldtheta2
+ wz
+ }), list( .link=link ))))
+}
+
+
+
+dposbinom = function(x, size, prob, log = FALSE) {
+ if(!is.Numeric(prob, positive=TRUE))
+ stop("no zero or non-numeric values allowed for argument \"prob\"")
+ L = max(length(x), length(size), length(prob))
+ x = rep(x, len=L); size = rep(size, len=L); prob = rep(prob, len=L);
+ if(log) ifelse(x==0, -Inf, dbinom(x=x, size=size, prob=prob, log=TRUE) -
+ log(1 - (1-prob)^size)) else
+ ifelse(x==0, 0, dbinom(x=x, size=size, prob=prob) /
+ (1 - (1-prob)^size))
+}
+
+
+pposbinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE) {
+ if(!is.Numeric(prob, positive=TRUE))
+ stop("no zero or non-numeric values allowed for argument \"prob\"")
+ L = max(length(q), length(size), length(prob))
+ q = rep(q, len=L); size = rep(size, len=L); prob = rep(prob, len=L);
+ ifelse(q<1, 0, (pbinom(q=q, size=size, prob=prob, lower.tail=lower.tail,
+ log.p=log.p) - (1-prob)^size) / (1 - (1-prob)^size))
+}
+
+qposbinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE) {
+ if(!is.Numeric(prob, positive=TRUE))
+ stop("no zero or non-numeric values allowed for argument \"prob\"")
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("bad input for argument \"p\"")
+ qbinom(p=p * (1 - (1-prob)^size) + (1-prob)^size, size=size, prob=prob,
+ lower.tail=lower.tail, log.p=log.p)
+}
+
+
+rposbinom = function(n, size, prob) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(prob, positive=TRUE))
+ stop("no zero or non-numeric values allowed for argument \"prob\"")
+ ans = rbinom(n=n, size=size, prob=prob)
+ index = ans == 0
+ size = rep(size, len=length(ans))
+ prob = rep(prob, len=length(ans))
+ while(any(index)) {
+ more = rbinom(n=sum(index), size[index], prob=prob[index])
+ ans[index] = more
+ index = ans == 0
+ }
+ ans
+}
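+
+# A quick sanity check, assuming standard d/p/q/r semantics:
+# sum(dposbinom(1:10, size=10, prob=0.2)) # ~ 1
+# min(rposbinom(1000, size=10, prob=0.2)) # >= 1; zeros are rejected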
+
+
+
+
diff --git a/R/family.qreg.q b/R/family.qreg.q
new file mode 100644
index 0000000..a813294
--- /dev/null
+++ b/R/family.qreg.q
@@ -0,0 +1,909 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+lms.bcn.control <-
+lms.bcg.control <-
+lms.yjn.control <- function(trace=TRUE, ...)
+ list(trace=trace)
+
+
+
+
+
+lms.bcn <- function(percentiles=c(25,50,75),
+ zero=NULL,
+ link.sigma="loge",
+ link.mu="identity",
+ dfmu.init=4,
+ dfsigma.init=2,
+ init.lambda=1,
+ init.sigma=NULL)
+{
+ if(mode(link.sigma) != "character" && mode(link.sigma) != "name")
+ link.sigma <- as.character(substitute(link.sigma))
+ if(mode(link.mu) != "character" && mode(link.mu) != "name")
+ link.mu <- as.character(substitute(link.mu))
+
+ new("vglmff",
+ blurb=c("LMS Quantile Regression (Box-Cox transformation to normality)\n",
+ "Links: ",
+ "lambda",
+ ", ",
+ namesof("mu", link=link.mu),
+ ", ",
+ namesof("sigma", link=link.sigma)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ if(any(y<0, na.rm = TRUE))
+ stop("negative responses not allowed")
+
+ predictors.names <- c("lambda",
+ namesof("mu", .link.mu, short= TRUE),
+ namesof("sigma", .link.sigma, short= TRUE))
+
+ if(!length(etastart)) {
+
+ fit500=vsmooth.spline(x=x[,min(ncol(x),2)],y=y,w=w, df= .dfmu.init)
+ fv.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
+
+ lambda.init = if(is.Numeric( .init.lambda)) .init.lambda else 1.0
+ sigma.init <- if(is.null(.init.sigma)) {
+ myratio <- ((y/fv.init)^lambda.init - 1) / lambda.init
+ if(is.Numeric( .dfsigma.init)) {
+ fit600 = vsmooth.spline(x=x[,min(ncol(x),2)], y=myratio^2,
+ w=w, df= .dfsigma.init)
+ sqrt(c(abs(predict(fit600, x=x[,min(ncol(x),2)])$y)))
+ } else
+ sqrt(var(myratio))
+ } else .init.sigma
+
+ etastart <- cbind(lambda.init,
+ theta2eta(fv.init, .link.mu),
+ theta2eta(sigma.init, .link.sigma))
+ }
+ }), list(.link.sigma=link.sigma,
+ .link.mu=link.mu,
+ .dfmu.init=dfmu.init,
+ .dfsigma.init=dfsigma.init,
+ .init.lambda=init.lambda,
+ .init.sigma=init.sigma))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta[,2] <- eta2theta(eta[,2], .link.mu)
+ eta[,3] <- eta2theta(eta[,3], .link.sigma)
+ qtplot.lms.bcn(percentiles= .percentiles, eta=eta)
+ }, list(.percentiles=percentiles,
+ .link.mu=link.mu,
+ .link.sigma=link.sigma))),
+ last=eval(substitute(expression({
+ misc$percentiles <- .percentiles
+ misc$links <- c(lambda = "identity", mu = .link.mu, sigma = .link.sigma)
+ misc$true.mu <- FALSE # $fitted is not a true mu
+ if(control$cdf) {
+ post$cdf = cdf.lms.bcn(y, eta0=matrix(c(lambda,mymu,sigma),
+ ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
+ }
+ }), list(.percentiles=percentiles,
+ .link.mu=link.mu,
+ .link.sigma=link.sigma))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
+ lambda <- eta[,1]
+ mu <- eta2theta(eta[,2], .link.mu)
+ sigma <- eta2theta(eta[,3], .link.sigma)
+ z <- ((y/mu)^lambda - 1) / (lambda * sigma)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (lambda * log(y/mu) - log(sigma) - 0.5*z^2))
+ }, list(.link.sigma=link.sigma, .link.mu=link.mu))),
+ vfamily=c("lms.bcn", "lmscreg"),
+ deriv=eval(substitute(expression({
+ lambda <- eta[,1]
+ mymu <- eta2theta(eta[,2], .link.mu)
+ sigma <- eta2theta(eta[,3], .link.sigma)
+ z <- ((y/mymu)^lambda - 1) / (lambda * sigma)
+ z2m1 <- z * z - 1
+ d1 <- z*(z - log(y/mymu) / sigma) / lambda - z2m1 * log(y/mymu)
+ d2 <- z / (mymu * sigma) + z2m1 * lambda / mymu
+ d2 <- d2 * dtheta.deta(mymu, .link.mu)
+ d3 <- z2m1 / sigma
+ d3 <- d3 * dtheta.deta(sigma, .link.sigma)
+ w * cbind(d1, d2, d3)
+ }), list(.link.sigma=link.sigma, .link.mu=link.mu))),
+ weight=eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, 6)
+ wz[,iam(1,1,M)] <- (7 * sigma^2 / 4)
+ wz[,iam(2,2,M)] <- (1 + 2*(lambda * sigma)^2) / (mymu*sigma)^2 *
+ dtheta.deta(mymu, .link.mu)^2
+ wz[,iam(3,3,M)] <- (2 / sigma^2) *
+ dtheta.deta(sigma, .link.sigma)^2
+ wz[,iam(1,2,M)] <- (-1 / (2 * mymu)) *
+ dtheta.deta(mymu, .link.mu)
+ wz[,iam(1,3,M)] <- (lambda * sigma) *
+ dtheta.deta(sigma, .link.sigma)
+ wz[,iam(2,3,M)] <- (2 * lambda / (mymu * sigma)) *
+ dtheta.deta(sigma, .link.sigma) *
+ dtheta.deta(mymu, .link.mu)
+ wz * w
+ }), list(.link.sigma=link.sigma, .link.mu=link.mu))))
+}
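+
+# A minimal usage sketch, assuming vgam() with a smooth term and that the
+# bminz data frame has 'age' and 'BMI' columns (as in demo/lmsqreg.R):
+# fit = vgam(BMI ~ s(age, df=4), lms.bcn(zero=1), data=bminz)
+# head(fitted(fit)) # one column per requested percentile
+# qtplot(fit) # quantile plot of the fitted percentile curves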
+
+
+
+lms.bcg <- function(percentiles=c(25,50,75),
+ zero=NULL,
+ link.sigma="loge",
+ link.mu="identity",
+ dfmu.init=4,
+ dfsigma.init=2,
+ init.lambda=1,
+ init.sigma=NULL)
+{
+ if(mode(link.sigma) != "character" && mode(link.sigma) != "name")
+ link.sigma <- as.character(substitute(link.sigma))
+ if(mode(link.mu) != "character" && mode(link.mu) != "name")
+ link.mu <- as.character(substitute(link.mu))
+
+ new("vglmff",
+ blurb=c("LMS Quantile Regression (Box-Cox transformation to a Gamma distribution)\n",
+ "Links: ",
+ "lambda",
+ ", ",
+ namesof("mu", link=link.mu),
+ ", ",
+ namesof("sigma", link=link.sigma)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ if(any(y<0, na.rm = TRUE))
+ stop("negative responses not allowed")
+
+ predictors.names <- c("lambda",
+ namesof("mu", .link.mu, short= TRUE),
+ namesof("sigma", .link.sigma, short= TRUE))
+
+ if(!length(etastart)) {
+
+ fit500=vsmooth.spline(x=x[,min(ncol(x),2)],y=y,w=w, df= .dfmu.init)
+ fv.init = c(predict(fit500, x=x[,min(ncol(x),2)])$y)
+
+ lambda.init = if(is.Numeric( .init.lambda)) .init.lambda else 1.0
+
+ sigma.init <- if(is.null(.init.sigma)) {
+ myratio=((y/fv.init)^lambda.init-1)/lambda.init #~(0,var=sigma^2)
+ if(is.numeric( .dfsigma.init) && is.finite( .dfsigma.init)) {
+ fit600 = vsmooth.spline(x=x[,min(ncol(x),2)],
+ y=(myratio)^2,
+ w=w, df= .dfsigma.init)
+ sqrt(c(abs(predict(fit600, x=x[,min(ncol(x),2)])$y)))
+ } else
+ sqrt(var(myratio))
+ } else .init.sigma
+
+ etastart <- cbind(lambda.init,
+ theta2eta(fv.init, .link.mu),
+ theta2eta(sigma.init, .link.sigma))
+ }
+ }), list(.link.sigma=link.sigma,
+ .link.mu=link.mu,
+ .dfmu.init=dfmu.init,
+ .dfsigma.init=dfsigma.init,
+ .init.lambda=init.lambda,
+ .init.sigma=init.sigma))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta[,2] <- eta2theta(eta[,2], .link.mu)
+ eta[,3] <- eta2theta(eta[,3], .link.sigma)
+ qtplot.lms.bcg(percentiles= .percentiles, eta=eta)
+ }, list(.percentiles=percentiles,
+ .link.mu=link.mu,
+ .link.sigma=link.sigma))),
+ last=eval(substitute(expression({
+ misc$percentiles <- .percentiles
+ misc$links = c(lambda = "identity", mu = .link.mu, sigma = .link.sigma)
+ misc$true.mu <- FALSE # $fitted is not a true mu
+ if(control$cdf) {
+ post$cdf = cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
+ ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
+ }
+ }), list(.percentiles=percentiles,
+ .link.mu=link.mu,
+ .link.sigma=link.sigma))),
+
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
+ lambda <- eta[,1]
+ mu <- eta2theta(eta[,2], .link.mu)
+ sigma <- eta2theta(eta[,3], .link.sigma)
+ g <- (y/mu)^lambda
+ theta <- 1 / (sigma * lambda)^2
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(abs(lambda)) + theta*(log(theta)+log(g)-g) -
+ lgamma(theta) - log(y)))
+ }, list(.link.sigma=link.sigma, .link.mu=link.mu))),
+ vfamily=c("lms.bcg", "lmscreg"),
+ deriv=eval(substitute(expression({
+ lambda <- eta[,1]
+ mymu <- eta2theta(eta[,2], .link.mu)
+ sigma <- eta2theta(eta[,3], .link.sigma)
+
+ g <- (y/mymu)^lambda
+ theta <- 1 / (sigma * lambda)^2
+ dd <- digamma(theta)
+
+ dl.dlambda <- (1 + 2*theta*(dd+g-1-log(theta) -
+ 0.5 * (g+1)*log(g))) / lambda
+ dl.dmu <- lambda * theta * (g-1) / mymu
+ dl.dsigma <- 2*theta*(dd+g-log(theta * g)-1) / sigma
+ dsigma.deta <- dtheta.deta(sigma, link=.link.sigma)
+
+ cbind(dl.dlambda,
+ dl.dmu * dtheta.deta(mymu, link= .link.mu),
+ dl.dsigma * dsigma.deta) * w
+ }), list(.link.sigma=link.sigma, .link.mu=link.mu))),
+ weight=eval(substitute(expression({
+ tt <- trigamma(theta)
+
+ wz <- matrix(0, n, 6)
+
+ if(TRUE) {
+ part2 <- dd + 2/theta - 2*log(theta)
+ wz[,iam(1,1,M)] <- (1 + theta*(tt*(1+4*theta) - 4*(1+1/theta) -
+ log(theta)*(2/theta - log(theta)) + dd*part2)) / lambda^2
+ } else {
+ temp <- mean( g*(log(g))^2 )
+ wz[,iam(1,1,M)] <- (4*theta*(theta*tt-1) -1+ theta*temp)/lambda^2
+ }
+
+ wz[,iam(2,2,M)] <- 1 / (mymu*sigma)^2 *
+ dtheta.deta(mymu, .link.mu)^2
+ wz[,iam(3,3,M)] <- (4*theta*(theta*tt-1) / sigma^2) *
+ dtheta.deta(sigma, .link.sigma)^2
+ wz[,iam(1,2,M)] <- -theta * (dd + 1/theta - log(theta)) / mymu
+ wz[,iam(1,2,M)] <- wz[,iam(1,2,M)] *
+ dtheta.deta(mymu, .link.mu)
+ wz[,iam(1,3,M)] <- 2 * theta^1.5 * (2 * theta * tt - 2 -
+ 1/theta) * dtheta.deta(sigma, .link.sigma)
+ wz * w
+ }), list(.link.sigma=link.sigma, .link.mu=link.mu))))
+}
+
+
+
+
+
+
+dy.dyj <- function(psi, lambda, epsilon=sqrt(.Machine$double.eps)) {
+
+ L = max(length(psi), length(lambda))
+ psi = rep(psi, len=L); lambda = rep(lambda, len=L);
+ ifelse(psi>0, (1 + psi * lambda)^(1/lambda - 1),
+ (1 - (2-lambda) * psi)^((lambda - 1)/(2-lambda)))
+}
+
+dyj.dy <- function(y, lambda) {
+ L = max(length(y), length(lambda))
+ y = rep(y, len=L); lambda = rep(lambda, len=L);
+ ifelse(y>0, (1 + y)^(lambda - 1), (1 - y)^(1 - lambda))
+}
+
+yeo.johnson <- function(y, lambda, derivative=0,
+ epsilon=sqrt(.Machine$double.eps), inverse= FALSE)
+{
+
+ if(length(derivative)!=1 || derivative<0 || round(derivative)!=derivative)
+ stop("derivative must be a non-negative integer")
+ ans <- y
+
+ if((length(epsilon) != 1) || (epsilon <= 0))
+ stop("epsilon must be a single positive number")
+ lambda <- rep(lambda, len=length(y)) # lambda may be of length 1
+
+ if(inverse) {
+ if(derivative!=0)
+ stop("derivative must 0 when inverse=TRUE")
+ index <- y >= 0 & abs(lambda) > epsilon
+ ans[index] <- (y[index]*lambda[index] + 1)^(1/lambda[index]) - 1
+ index <- y >= 0 & abs(lambda) <= epsilon
+ ans[index] <- exp(y[index]) - 1
+ index <- y < 0 & abs(lambda-2) > epsilon
+ ans[index] <- 1-(-(2-lambda[index])*y[index]+1)^(1/(2-lambda[index]))
+ index <- y < 0 & abs(lambda-2) <= epsilon
+ ans[index] <- 1 - exp(-y[index])
+ return(ans)
+ }
+ if(derivative==0) {
+ index <- y >= 0 & abs(lambda) > epsilon
+ ans[index] <- ((y[index]+1)^(lambda[index]) - 1) / lambda[index]
+ index <- y >= 0 & abs(lambda) <= epsilon
+ ans[index] <- log(y[index]+1)
+ index <- y < 0 & abs(lambda-2) > epsilon
+ ans[index] <- -((-y[index]+1)^(2-lambda[index]) - 1) / (2-lambda[index])
+ index <- y < 0 & abs(lambda-2) <= epsilon
+ ans[index] <- -log(-y[index]+1)
+ } else {
+ psi <- yeo.johnson(y, lambda, derivative=derivative-1,
+ epsilon=epsilon, inverse=inverse)
+ index <- y >= 0 & abs(lambda) > epsilon
+ ans[index] <- ( (y[index]+1)^(lambda[index]) *
+ (log(y[index]+1))^(derivative) - derivative *
+ psi[index] ) / lambda[index]
+ index <- y >= 0 & abs(lambda) <= epsilon
+ ans[index] <- (log(y[index]+1))^(derivative + 1) / (derivative + 1)
+ index <- y < 0 & abs(lambda-2) > epsilon
+ ans[index] <- -( (-y[index]+1)^(2-lambda[index]) *
+ (-log(-y[index]+1))^(derivative) - derivative *
+ psi[index] ) / (2-lambda[index])
+ index <- y < 0 & abs(lambda-2) <= epsilon
+ ans[index] <- (-log(-y[index]+1))^(derivative + 1) / (derivative + 1)
+ }
+ ans
+}
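+
+# A quick sanity check, assuming lambda=0.5: the transform and its inverse
+# should compose to the identity on both sides of the origin.
+# y = seq(-3, 3, by=0.5)
+# psi = yeo.johnson(y, lambda=0.5)
+# max(abs(yeo.johnson(psi, lambda=0.5, inverse=TRUE) - y)) # ~ 0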
+
+
+dpsi.dlambda.yjn = function(psi, lambda, mymu, sigma, derivative=0, small=1e-8) {
+
+ answer = matrix(as.numeric(NA), length(mymu), derivative+1)
+
+ if(length(psi) < length(lambda))
+ psi = rep(psi, length=length(lambda))
+
+ CC = psi >= 0
+ BB = ifelse(CC, lambda, -2+lambda)
+ AA = 1 + psi * BB
+
+ if(derivative>0) {
+ answer[,1:derivative] =
+ Recall(psi=psi, lambda=lambda, mymu=mymu, sigma=sigma,
+ derivative=derivative-1, small=small)
+ temp8 = answer[,derivative] * derivative
+ } else {
+ temp8 = 0
+ }
+
+ answer[,1+derivative] =
+ (AA * (log(AA)/BB)^derivative -
+ temp8) / BB
+
+
+
+ pos = (CC & abs(lambda) <= small) | (!CC & abs(lambda-2) <= small)
+ if(any(pos))
+ answer[pos,1+derivative] = (answer[pos,1]^(1+derivative))/(derivative+1)
+
+
+
+ answer
+}
+
+gh.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+
+
+ if(length(derivmat)) {
+ ((derivmat[,2]/sigma)^2 + sqrt(2) * z * derivmat[,3] / sigma) / sqrt(pi)
+ } else {
+ # Long-winded way
+ psi = mymu + sqrt(2) * sigma * z
+ (1 / sqrt(pi)) *
+ (dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]^2 +
+ (psi - mymu) *
+ dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=2)[,3]) / sigma^2
+ }
+}
+
+gh.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+ if(length(derivmat)) {
+ (-derivmat[,2]) / (sqrt(pi) * sigma^2)
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ (1 / sqrt(pi)) *
+ (- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]) / sigma^2
+ }
+}
+
+gh.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+ if(length(derivmat)) {
+ sqrt(8 / pi) * (-derivmat[,2]) * z / sigma^2
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ (1 / sqrt(pi)) *
+ (-2 * dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]) *
+ (psi - mymu) / sigma^3
+ }
+}
+
+
+glag.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+
+
+ if(length(derivmat)) {
+ derivmat[,4] * (derivmat[,2]^2 + sqrt(2) * sigma * z * derivmat[,3])
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ discontinuity = -mymu / (sqrt(2) * sigma)
+ (1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
+ (1 / sqrt(pi)) *
+ (dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]^2 +
+ (psi - mymu) *
+ dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=2)[,3]) / sigma^2
+ }
+}
+
+glag.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+ discontinuity = -mymu / (sqrt(2) * sigma)
+ if(length(derivmat)) {
+ derivmat[,4] * (-derivmat[,2])
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ (1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
+ (1 / sqrt(pi)) *
+ (- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]) / sigma^2
+ }
+}
+
+glag.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+ if(length(derivmat)) {
+ derivmat[,4] * (-derivmat[,2]) * sqrt(8) * z
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ discontinuity = -mymu / (sqrt(2) * sigma)
+ (1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
+ (1 / sqrt(pi)) *
+ (-2 * dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]) *
+ (psi - mymu) / sigma^3
+ }
+}
+
+
+gleg.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+
+
+
+
+ if(length(derivmat)) {
+ derivmat[,4] * (derivmat[,2]^2 + sqrt(2) * sigma * z * derivmat[,3])
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ (exp(-z^2) / sqrt(pi)) *
+ (dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]^2 +
+ (psi - mymu) *
+ dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=2)[,3]) / sigma^2
+ }
+}
+
+gleg.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+ if(length(derivmat)) {
+ derivmat[,4] * (- derivmat[,2])
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ (exp(-z^2) / sqrt(pi)) *
+ (- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]) / sigma^2
+ }
+}
+
+gleg.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat=NULL) {
+ if(length(derivmat)) {
+ derivmat[,4] * (-derivmat[,2]) * sqrt(8) * z
+ } else {
+ psi = mymu + sqrt(2) * sigma * z
+ (exp(-z^2) / sqrt(pi)) *
+ (-2 * dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=1)[,2]) *
+ (psi - mymu) / sigma^3
+ }
+}
+
+
+lms.yjn <- function(percentiles=c(25,50,75),
+ zero=NULL,
+ link.lambda="identity",
+ link.sigma="loge",
+ dfmu.init=4,
+ dfsigma.init=2,
+ init.lambda=1.0,
+ init.sigma=NULL,
+ rule=c(10,5),
+ yoffset=NULL,
+ diagW=FALSE, iters.diagW=6)
+{
+
+
+
+ if(mode(link.sigma) != "character" && mode(link.sigma) != "name")
+ link.sigma <- as.character(substitute(link.sigma))
+ if(mode(link.lambda) != "character" && mode(link.lambda) != "name")
+ link.lambda <- as.character(substitute(link.lambda))
+
+ rule = rule[1] # Number of points (common) for all the quadrature schemes
+ if(rule != 5 && rule != 10)
+ stop("only rule=5 or 10 is supported")
+
+ new("vglmff",
+ blurb=c("LMS Quantile Regression (Yeo-Johnson transformation to normality)\n",
+ "Links: ",
+ namesof("lambda", link=link.lambda),
+ ", mu, ",
+ namesof("sigma", link=link.sigma)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ predictors.names <- c(namesof("lambda", .link.lambda, short= TRUE),
+ "mu", namesof("sigma", .link.sigma, short= TRUE))
+
+ y.save <- y
+ yoff = if(is.Numeric( .yoffset)) .yoffset else -median(y)
+ extra$yoffset = yoff
+ y <- y + yoff
+
+ if(!length(etastart)) {
+
+ lambda.init = if(is.Numeric( .init.lambda)) .init.lambda else 1.0
+
+ y.tx = yeo.johnson(y, lambda.init)
+ fit700=vsmooth.spline(x=x[,min(ncol(x),2)],y=y.tx,w=w, df= .dfmu.init)
+ fv.init = c(predict(fit700, x=x[,min(ncol(x),2)])$y)
+
+ sigma.init = if(!is.Numeric(.init.sigma)) {
+ if(is.Numeric( .dfsigma.init)) {
+ fit710 = vsmooth.spline(x=x[,min(ncol(x),2)],
+ y=(y.tx - fv.init)^2,
+ w=w, df= .dfsigma.init)
+ sqrt(c(abs(predict(fit710,
+ x=x[,min(ncol(x),2)])$y)))
+ } else {
+ sqrt( sum( w * (y.tx - fv.init)^2 ) / sum(w) )
+ }
+ } else
+ .init.sigma
+
+ etastart <- cbind(theta2eta(lambda.init, .link.lambda),
+ fv.init,
+ theta2eta(sigma.init, .link.sigma))
+
+ }
+ }), list(.link.sigma=link.sigma,
+ .link.lambda=link.lambda,
+ .dfmu.init=dfmu.init,
+ .dfsigma.init=dfsigma.init,
+ .init.lambda=init.lambda,
+ .yoffset=yoffset,
+ .init.sigma=init.sigma))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta[,1] = eta2theta(eta[,1], .link.lambda)
+ eta[,3] = eta2theta(eta[,3], .link.sigma)
+ qtplot.lms.yjn(percentiles= .percentiles, eta=eta, yoffset= extra$yoff)
+ }, list(.percentiles=percentiles,
+ .link.lambda=link.lambda,
+ .link.sigma=link.sigma))),
+ last=eval(substitute(expression({
+ misc$percentiles <- .percentiles
+ misc$links = c(lambda= .link.lambda, mu= "identity", sigma= .link.sigma)
+ misc$true.mu <- FALSE # $fitted is not a true mu
+ misc[["yoffset"]] = extra$yoff # zz Splus6.0 bug: sometimes the name is lost
+
+ y <- y.save # Restore back the value; to be attached to object
+
+ if(control$cdf) {
+ post$cdf = cdf.lms.yjn(y + misc$yoffset,
+ eta0=matrix(c(lambda,mymu,sigma),
+ ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
+ }
+ }), list(.percentiles=percentiles,
+ .link.lambda=link.lambda,
+ .link.sigma=link.sigma))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
+ lambda <- eta2theta(eta[,1], .link.lambda)
+ mu <- eta[,2]
+ sigma <- eta2theta(eta[,3], .link.sigma)
+ psi <- yeo.johnson(y, lambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
+ (lambda-1) * sign(y) * log(abs(y)+1)))
+ }, list(.link.sigma=link.sigma, .link.lambda=link.lambda))),
+ vfamily=c("lms.yjn", "lmscreg"),
+ deriv=eval(substitute(expression({
+ lambda <- eta2theta(eta[,1], .link.lambda)
+ mymu <- eta[,2]
+ sigma <- eta2theta(eta[,3], .link.sigma)
+
+ psi <- yeo.johnson(y, lambda)
+ d1 <- yeo.johnson(y, lambda, deriv=1)
+ AA = (psi - mymu) / sigma
+
+ dl.dlambda <- -AA * d1 /sigma + sign(y) * log(abs(y)+1)
+ dl.dmu <- AA / sigma
+ dl.dsigma <- (AA^2 -1) / sigma
+ dlambda.deta <- dtheta.deta(lambda, link=.link.lambda)
+ dsigma.deta <- dtheta.deta(sigma, link=.link.sigma)
+
+ cbind(dl.dlambda * dlambda.deta,
+ dl.dmu,
+ dl.dsigma * dsigma.deta) * w
+ }), list(.link.sigma=link.sigma, .link.lambda=link.lambda))),
+ weight=eval(substitute(expression({
+ wz <- matrix(0, n, 6)
+
+
+ wz[,iam(2,2,M)] = 1 / sigma^2
+ wz[,iam(3,3,M)] = 2 * wz[,iam(2,2,M)] # 2 / sigma^2
+
+
+ if(.rule == 10) {
+ glag.abs=c(0.13779347054,0.729454549503,1.80834290174,3.40143369785,
+ 5.55249614006,8.33015274676,11.8437858379,16.2792578314,
+ 21.996585812, 29.9206970123)
+ glag.wts = c(0.308441115765, 0.401119929155, 0.218068287612,
+ 0.0620874560987, 0.00950151697517, 0.000753008388588,
+ 2.82592334963e-5,
+ 4.24931398502e-7, 1.83956482398e-9, 9.91182721958e-13)
+ } else {
+ glag.abs = c(0.2635603197180449, 1.4134030591060496, 3.5964257710396850,
+ 7.0858100058570503, 12.6408008442729685)
+ glag.wts=c(5.217556105826727e-01,3.986668110832433e-01,7.594244968176882e-02,
+ 3.611758679927785e-03, 2.336997238583738e-05)
+ }
+
+ if(.rule == 10) {
+ sgh.abs = c(0.03873852801690856, 0.19823332465268367, 0.46520116404433082,
+ 0.81686197962535023, 1.23454146277833154, 1.70679833036403172,
+ 2.22994030591819214, 2.80910399394755972, 3.46387269067033854,
+ 4.25536209637269280)
+ sgh.wts=c(9.855210713854302e-02,2.086780884700499e-01,2.520517066468666e-01,
+ 1.986843323208932e-01,9.719839905023238e-02,2.702440190640464e-02,
+ 3.804646170194185e-03, 2.288859354675587e-04, 4.345336765471935e-06,
+ 1.247734096219375e-08)
+ } else {
+ sgh.abs = c(0.1002421519682381, 0.4828139660462573, 1.0609498215257607,
+ 1.7797294185202606, 2.6697603560875995)
+ sgh.wts=c(0.2484061520284881475,0.3923310666523834311,0.2114181930760276606,
+ 0.0332466603513424663, 0.0008248533445158026)
+ }
+
+ if(.rule == 10) {
+ gleg.abs = c(-0.973906528517, -0.865063366689, -0.679409568299,
+ -0.433395394129, -0.148874338982)
+ gleg.abs = c(gleg.abs, rev(-gleg.abs))
+ gleg.wts = c(0.0666713443087, 0.149451349151, 0.219086362516,
+ 0.26926671931, 0.295524224715)
+ gleg.wts = c(gleg.wts, rev(gleg.wts))
+ } else {
+ gleg.abs = c(-0.9061798459386643,-0.5384693101056820, 0,
+ 0.5384693101056828, 0.9061798459386635)
+ gleg.wts=c(0.2369268850561853,0.4786286704993680,0.5688888888888889,
+ 0.4786286704993661, 0.2369268850561916)
+ }
+
+
+ discontinuity = -mymu/(sqrt(2)*sigma) # Needs to be near 0, eg within 4
+
+
+ LL = pmin(discontinuity, 0)
+ UU = pmax(discontinuity, 0)
+ if(FALSE) {
+ AA = (UU-LL)/2
+ for(kk in 1:length(gleg.wts)) {
+ temp1 = AA * gleg.wts[kk]
+ abscissae = (UU+LL)/2 + AA * gleg.abs[kk]
+ psi = mymu + sqrt(2) * sigma * abscissae
+ temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=2)
+ temp9 = cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
+
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp1 *
+ gleg.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + temp1 *
+ gleg.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1,3,M)] = wz[,iam(1,3,M)] + temp1 *
+ gleg.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
+ }
+ } else {
+ temp9 = dotFortran(name="yjngintf", as.double(LL), as.double(UU),
+ as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
+ as.integer(length(gleg.abs)), as.double(lambda),
+ as.double(mymu), as.double(sigma), answer=double(3*n),
+ eps=as.double(1.0e-5))$ans #zz adjust eps for more accuracy
+ dim(temp9) = c(3,n)
+ wz[,iam(1,1,M)] = temp9[1,]
+ wz[,iam(1,2,M)] = temp9[2,]
+ wz[,iam(1,3,M)] = temp9[3,]
+ }
+
+
+
+ for(kk in 1:length(sgh.wts)) {
+
+ abscissae = sign(-discontinuity) * sgh.abs[kk]
+ psi = mymu + sqrt(2) * sigma * abscissae # abscissae = z
+ temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=2)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + sgh.wts[kk] *
+ gh.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + sgh.wts[kk] *
+ gh.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1,3,M)] = wz[,iam(1,3,M)] + sgh.wts[kk] *
+ gh.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
+ }
+
+ temp1 = exp(-discontinuity^2)
+ for(kk in 1:length(glag.wts)) {
+ abscissae = sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
+ psi = mymu + sqrt(2) * sigma * abscissae
+ temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative=2)
+ temp9 = cbind(temp9,
+ 1 / (2 * sqrt((abscissae-discontinuity^2)^2 + discontinuity^2) *
+ sqrt(pi) * sigma^2))
+ temp7 = temp1 * glag.wts[kk]
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp7 *
+ glag.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + temp7 *
+ glag.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1,3,M)] = wz[,iam(1,3,M)] + temp7 *
+ glag.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
+ }
+
+ wz[,iam(1,1,M)] <- wz[,iam(1,1,M)] * dlambda.deta^2
+ wz[,iam(1,2,M)] <- wz[,iam(1,2,M)] * dlambda.deta
+ wz[,iam(1,3,M)] <- wz[,iam(1,3,M)] * dsigma.deta * dlambda.deta
+ if( .diagW && iter <= .iters.diagW) {
+ wz[,iam(1,2,M)] = wz[,iam(1,3,M)] = 0
+ }
+ wz[,iam(2,3,M)] <- wz[,iam(2,3,M)] * dsigma.deta
+ wz[,iam(3,3,M)] <- wz[,iam(3,3,M)] * dsigma.deta^2
+
+ wz = wz * w
+ wz
+ }), list(.link.sigma=link.sigma,
+ .rule=rule,
+ .diagW=diagW,
+ .iters.diagW=iters.diagW,
+ .link.lambda=link.lambda))))
+}
+
+
+
+lmscreg.control <- function(cdf= TRUE, at.arg=NULL, x0=NULL, ...)
+{
+
+ if(!is.logical(cdf)) {
+ warning("\"cdf\" is not logical; using TRUE instead")
+ cdf = TRUE
+ }
+ list(cdf=cdf, at.arg=at.arg, x0=x0)
+}
+
+
+
+
+
+if(FALSE)
+lms.yjn1 = function(percentiles=c(25,50,75),
+ zero=NULL,
+ link.lambda="identity",
+ dfmu.init=4,
+ dfsigma.init=2,
+ init.lambda=1.0,
+ yoffset=NULL)
+{
+ if(mode(link.lambda) != "character" && mode(link.lambda) != "name")
+ link.lambda <- as.character(substitute(link.lambda))
+
+ new("vglmff",
+ blurb=c("LMS Quantile Regression (Yeo-Johnson transformation to normality)\n",
+ "Links: ",
+ namesof("lambda", link=link.lambda)),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero=zero))),
+ initialize=eval(substitute(expression({
+ predictors.names <- c(namesof("lambda", .link.lambda, short= TRUE))
+
+ y.save <- y
+ yoff = if(is.Numeric( .yoffset)) .yoffset else -median(y)
+ extra$yoffset = yoff
+ y <- y + yoff
+
+ if(!length(etastart)) {
+
+ lambda.init = if(is.Numeric( .init.lambda)) .init.lambda else 1.0
+
+ y.tx = yeo.johnson(y, lambda.init)
+ fit700=vsmooth.spline(x=x[,min(ncol(x),2)],y=y.tx,w=w, df= .dfmu.init)
+ fv.init = c(predict(fit700, x=x[,min(ncol(x),2)])$y)
+ extra$mymu = fv.init
+
+ sigma.init = if(TRUE) {
+ if(is.Numeric( .dfsigma.init)) {
+ fit710 = vsmooth.spline(x=x[,min(ncol(x),2)],
+ y=(y.tx - fv.init)^2,
+ w=w, df= .dfsigma.init)
+ sqrt(c(abs(predict(fit710,
+ x=x[,min(ncol(x),2)])$y)))
+ } else {
+ sqrt( sum( w * (y.tx - fv.init)^2 ) / sum(w) )
+ }
+ } else
+ 1
+ extra$sigma = sigma.init
+
+ etastart <- cbind(theta2eta(lambda.init, .link.lambda))
+ }
+ }), list(.link.lambda=link.lambda,
+ .dfmu.init=dfmu.init,
+ .dfsigma.init=dfsigma.init,
+ .init.lambda=init.lambda,
+ .yoffset=yoffset))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta = eta2theta(eta, .link.lambda)
+ eta2 = extra$mymu
+ eta3 = extra$sigma
+ qtplot.lms.yjn(percentiles= .percentiles,
+ eta=cbind(c(eta),eta2,eta3), yoffset= extra$yoff)
+ }, list(.percentiles=percentiles,
+ .link.lambda=link.lambda))),
+ last=eval(substitute(expression({
+ misc$percentiles <- .percentiles
+ misc$links <- c(lambda = .link.lambda)
+ misc$true.mu <- FALSE # $fitted is not a true mu
+ misc[["yoffset"]] = extra$yoff # zz Splus6.0 bug: sometimes the name is lost
+
+ y <- y.save # Restore back the value; to be attached to object
+
+ if(control$cdf) {
+ post$cdf = cdf.lms.yjn(y + misc$yoffset,
+ eta0=matrix(c(lambda,mymu,sigma),
+ ncol=3, dimnames=list(dimnames(x)[[1]], NULL)))
+ }
+ }), list(.percentiles=percentiles,
+ .link.lambda=link.lambda))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w, residuals= FALSE, eta, extra=NULL) {
+ lambda <- eta2theta(eta, .link.lambda)
+ mu <- extra$mymu
+ sigma <- extra$sigma
+ psi <- yeo.johnson(y, lambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2))
+ }, list(.link.lambda=link.lambda))),
+ vfamily=c("lms.yjn", "lmscreg"),
+ deriv=eval(substitute(expression({
+ lambda <- eta2theta(eta, .link.lambda)
+ psi <- yeo.johnson(y, lambda)
+
+ fit8 <- vsmooth.spline(x=x[,min(ncol(x),2)],y=psi,w=w, df= .dfmu.init)
+ mymu = c(predict(fit8, x=x[,min(ncol(x),2)])$y)
+ extra$mymu = mymu
+ fit8 <- vsmooth.spline(x=x[,min(ncol(x),2)],y=(psi-mymu)^2,w=w,
+ df= .dfsigma.init)
+ sigma = sqrt(c(predict(fit8, x=x[,min(ncol(x),2)])$y))
+ extra$sigma = sigma
+
+ d1 <- yeo.johnson(y, lambda, deriv=1)
+ AA = (psi - mymu) / sigma
+
+ dl.dlambda = -AA * d1 / sigma
+ warning("dl.dlambda is wrong")
+ dlambda.deta <- dtheta.deta(lambda, link=.link.lambda)
+ cbind(dl.dlambda * dlambda.deta) * w
+ }), list(.dfmu.init=dfmu.init,
+ .dfsigma.init=dfsigma.init,
+ .link.lambda=link.lambda))),
+ weight=eval(substitute(expression({
+ wz = (d1 / sigma)^2 # Approximate
+ wz = ifelse(wz <= 0, 1.0e-9, wz)
+ wz = wz * w
+ wz
+ }), list(.link.lambda=link.lambda))))
+}
+
+
+
diff --git a/R/family.rcqo.q b/R/family.rcqo.q
new file mode 100644
index 0000000..73b41e7
--- /dev/null
+++ b/R/family.rcqo.q
@@ -0,0 +1,346 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+rcqo <- function(n, p, S,
+ Rank = 1,
+ family = c("poisson", "negbinomial", "binomial-poisson",
+ "Binomial-negbinomial", "ordinal-poisson",
+ "Ordinal-negbinomial","gamma2"),
+ EqualMaxima = FALSE,
+ EqualTolerances = TRUE,
+ ESOptima = FALSE,
+ loabundance = if(EqualMaxima) hiabundance else 10,
+ hiabundance = 100,
+ sdlv = c(1.5/2^(0:3))[1:Rank],
+ sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * sdlv,
+ sdTolerances = 0.25,
+ Kvector = 1,
+ Shape = 1,
+ sqrt = FALSE,
+ Log = FALSE,
+ rhox = 0.5,
+ breaks = 4, # ignored unless family="ordinal"
+ seed = NULL,
+ Crow1positive=TRUE
+ ) {
+ family = match.arg(family, c("poisson","negbinomial", "binomial-poisson",
+ "Binomial-negbinomial", "ordinal-poisson",
+ "Ordinal-negbinomial","gamma2"))[1]
+ if(!is.Numeric(n, integer=TRUE, posit=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(p, integer=TRUE, posit=TRUE, allow=1) || p < 1 + Rank)
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(S, integer=TRUE, posit=TRUE, allow=1))
+ stop("bad input for argument \"S\"")
+ if(!is.Numeric(Rank, integer=TRUE, posit=TRUE, allow=1) || Rank > 4)
+ stop("bad input for argument \"Rank\"")
+ if(!is.Numeric(Kvector, posit=TRUE))
+ stop("bad input for argument \"Kvector\"")
+ if(!is.Numeric(rhox) || abs(rhox) >= 1)
+ stop("bad input for argument \"rhox\"")
+ if(length(seed) && !is.Numeric(seed, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"seed\"")
+ if(!is.logical(EqualTolerances) || length(EqualTolerances)>1)
+ stop("bad input for argument \"EqualTolerances)\"")
+ if(!is.logical(sqrt) || length(sqrt)>1)
+ stop("bad input for argument \"sqrt)\"")
+ if(family != "negbinomial" && sqrt)
+ warning("argument \"sqrt\" is used only with family=\"negbinomial\"")
+ if(!EqualTolerances && !is.Numeric(sdTolerances, posit=TRUE))
+ stop("bad input for argument \"sdTolerances\"")
+ if(!is.Numeric(loabundance, posit=TRUE))
+ stop("bad input for argument \"loabundance\"")
+ if(!is.Numeric(sdlv, posit=TRUE))
+ stop("bad input for argument \"sdlv\"")
+ if(!is.Numeric(sdOptima, posit=TRUE))
+ stop("bad input for argument \"sdOptima\"")
+ if(EqualMaxima && loabundance != hiabundance)
+ stop(paste("arguments \"loabundance)\" and \"hiabundance)\" must",
+ "be equal when EqualTolerances=TRUE"))
+ if(any(loabundance > hiabundance))
+ stop("loabundance > hiabundance is not allowed")
+ if(!is.logical(Crow1positive)) {
+ stop("bad input for argument \"Crow1positive)\"")
+ } else {
+ Crow1positive = rep(Crow1positive, len=Rank)
+ }
+ Shape = rep(Shape, len=S)
+ sdlv = rep(sdlv, len=Rank)
+ sdOptima = rep(sdOptima, len=Rank)
+ sdTolerances = rep(sdTolerances, len=Rank)
+ AA = sdOptima / 3^0.5
+ if(Rank > 1 && any(diff(sdlv) > 0))
+ stop("argument \"sdlv)\" must be a vector with decreasing values")
+
+ if(length(seed)) set.seed(seed)
+ V = matrix(rhox, p-1, p-1)
+ diag(V) = 1
+ L = chol(V)
+ xmat = matrix(rnorm(n*(p-1)), n, p-1) %*% L
+ xmat = scale(xmat, center=TRUE)
+ xnames = paste("x", 2:p, sep="")
+ dimnames(xmat) = list(as.character(1:n), xnames)
+ ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
+ lvmat = cbind(xmat %*% ccoefs)
+ if(Rank > 1) {
+ Rmat = chol(var(lvmat))
+ iRmat = solve(Rmat)
+ lvmat = lvmat %*% iRmat # var(lvmat) == diag(Rank)
+ ccoefs = ccoefs %*% iRmat
+ }
+ for(r in 1:Rank)
+ if(( Crow1positive[r] && ccoefs[1,r] < 0) ||
+ (!Crow1positive[r] && ccoefs[1,r] > 0)) {
+ ccoefs[,r] = -ccoefs[,r]
+ lvmat[,r] = -lvmat[,r]
+ }
+
+ for(r in 1:Rank) {
+ sdlvr = sd(lvmat[,r])
+ lvmat[,r] = lvmat[,r] * sdlv[r] / sdlvr
+ ccoefs[,r] = ccoefs[,r] * sdlv[r] / sdlvr
+ }
+ if(ESOptima) {
+ if(!is.Numeric(S^(1/Rank), integ=TRUE) || S^(1/Rank) < 2)
+ stop("S^(1/Rank) must be an integer greater or equal to 2")
+ if(Rank == 1) {
+ optima = matrix(as.numeric(NA), S, Rank)
+ for(r in 1:Rank) {
+ optima[,r] = seq(-AA, AA, len=S^(1/Rank))
+ }
+ } else if(Rank == 2) {
+ optima = expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
+ lv2=seq(-AA[2], AA[2], len=S^(1/Rank)))
+ } else if(Rank == 3) {
+ optima = expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
+ lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
+ lv3=seq(-AA[3], AA[3], len=S^(1/Rank)))
+ } else {
+ optima = expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
+ lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
+ lv3=seq(-AA[3], AA[3], len=S^(1/Rank)),
+ lv4=seq(-AA[4], AA[4], len=S^(1/Rank)))
+ }
+ if(Rank > 1)
+ optima = matrix(unlist(optima), S, Rank) # Make sure it is a matrix
+ } else {
+ optima = matrix(1, S, Rank)
+ for(r in 1:Rank) {
+ optima[,r] = rnorm(n=S, sd=sdOptima[r])
+ }
+ }
+ for(r in 1:Rank)
+ optima[,r] = optima[,r] * sdOptima[r] / sd(optima[,r])
+
+ ynames = paste("y", 1:S, sep="")
+ Kvector = rep(Kvector, len=S)
+ names(Kvector) = ynames
+ lvnames = if(Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ Tols = if(EqualTolerances) matrix(1, S, Rank) else {
+ temp = matrix(1, S, Rank)
+ if(S > 1)
+ for(r in 1:Rank) {
+ temp[-1,r] = rnorm(S-1, mean=1, sd=sdTolerances[r])
+ if(any(temp[,r] <= 0)) stop("negative tolerances!")
+ temp[,r] = temp[,r]^2 # Tolerance matrix = var-cov matrix
+ }
+ temp
+ }
+
+ dimnames(Tols) = list(ynames, lvnames)
+ dimnames(ccoefs) = list(xnames, lvnames)
+ dimnames(optima) = list(ynames, lvnames)
+ loeta = log(loabundance) # May be a vector
+ hieta = log(hiabundance)
+ logmaxima = runif(S, min=loeta, max=hieta) # loeta and hieta may be vector
+ names(logmaxima) = ynames
+ etamat = matrix(logmaxima,n,S,byrow=TRUE) # eta=log(mu) only; intercept term
+ for(jay in 1:S) {
+ optmat = matrix(optima[jay,], nrow=n, ncol=Rank, byrow=TRUE)
+ tolmat = matrix(Tols[jay,], nrow=n, ncol=Rank, byrow=TRUE)
+ temp = cbind((lvmat - optmat) / tolmat)
+ for(r in 1:Rank)
+ etamat[,jay]=etamat[,jay]-0.5*(lvmat[,r] - optmat[jay,r])*temp[,r]
+ }
+
+ rootdist = switch(family,
+ "poisson"=1, "binomial-poisson"=1, "ordinal-poisson"=1,
+ "negbinomial"=2, "Binomial-negbinomial"=2, "Ordinal-negbinomial"=2,
+ "gamma2"=3)
+ if(rootdist == 1) {
+ ymat = matrix(rpois(n*S, lam=exp(etamat)), n, S)
+ } else if(rootdist == 2) {
+ mKvector = matrix(Kvector, n, S, byrow=TRUE)
+ ymat = matrix(rnbinom(n=n*S, mu=exp(etamat), size=mKvector),n,S)
+ if(sqrt) ymat = ymat^0.5
+ } else if(rootdist == 3) {
+ Shape = matrix(Shape, n, S, byrow=TRUE)
+ ymat = matrix(rgamma(n*S, shape=Shape, scale=exp(etamat)/Shape),n,S)
+ if(Log) ymat = log(ymat)
+ } else stop("'rootdist' unmatched")
+
+ tmp1 = NULL
+ if(any(family == c("ordinal-poisson","Ordinal-negbinomial"))) {
+ tmp1 = cut(c(ymat), breaks=breaks, labels=NULL) #To get attributes(tmp1)
+ ymat = cut(c(ymat), breaks=breaks, labels=FALSE)
+ dim(ymat) = c(n,S)
+ }
+ if(any(family == c("binomial-poisson","Binomial-negbinomial")))
+ ymat = 0 + (ymat > 0)
+
+ myform = as.formula(paste(paste("cbind(",
+ paste(paste("y",1:S,sep=""), collapse=","),
+ ") ~ ", sep=""),
+ paste(paste("x",2:p,sep=""), collapse="+"), sep=""))
+
+ dimnames(ymat) = list(as.character(1:n), ynames)
+ ans = data.frame(xmat, ymat)
+ attr(ans, "ccoefficients") = ccoefs
+ attr(ans, "Crow1positive") = Crow1positive
+ attr(ans, "family") = family
+ attr(ans, "formula") = myform # Useful for running cqo() on the data
+ attr(ans, "Rank") = Rank
+ attr(ans, "family") = family
+ attr(ans, "Kvector") = Kvector
+ attr(ans, "logmaxima") = logmaxima
+ attr(ans, "loabundance") = loabundance
+ attr(ans, "hiabundance") = hiabundance
+ attr(ans, "optima") = optima
+ attr(ans, "Log") = Log
+ attr(ans, "lv") = lvmat
+ attr(ans, "EqualTolerances") = EqualTolerances
+ attr(ans, "EqualMaxima") = EqualMaxima || all(loabundance == hiabundance)
+ attr(ans, "ESOptima") = ESOptima
+ attr(ans, "seed") = seed
+ attr(ans, "sdTolerances") = sdTolerances
+ attr(ans, "sdlv") = sdlv
+ attr(ans, "sdOptima") = sdOptima
+ attr(ans, "Shape") = Shape
+ attr(ans, "sqrt") = sqrt
+ attr(ans, "tolerances") = Tols^0.5 # Like a standard deviation
+ attr(ans, "breaks") = if(length(tmp1)) attributes(tmp1) else breaks
+ ans
+}
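+
+# A minimal usage sketch, assuming the usual cqo() calling convention and
+# the poissonff family; the stored formula attribute saves retyping it.
+# mydata = rcqo(n=100, p=5, S=7, seed=123)
+# myfit = cqo(attr(mydata, "formula"), family=poissonff, data=mydata)
+# Coef(myfit) # compare with attr(mydata, "ccoefficients") etc.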
+
+
+
+
+if(FALSE)
+dcqo <- function(x, p, S,
+ family = c("poisson", "binomial", "negbinomial", "ordinal"),
+ Rank = 1,
+ EqualTolerances = TRUE,
+ EqualMaxima = FALSE,
+ EquallySpacedOptima = FALSE,
+ loabundance = if(EqualMaxima) 100 else 10,
+ hiabundance = 100,
+ sdTolerances = 1,
+ sdOptima = 1,
+ nlevels = 4, # ignored unless family="ordinal"
+ seed = NULL
+ ) {
+ warning("12/6/06; needs a lot of work based on rcqo()")
+
+
+ if(mode(family) != "character" && mode(family) != "name")
+ family = as.character(substitute(family))
+ family = match.arg(family, c("poisson", "binomial", "negbinomial", "ordinal"))[1]
+ if(!is.Numeric(p, integer=TRUE, posit=TRUE, allow=1) || p < 2)
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(S, integer=TRUE, posit=TRUE, allow=1))
+ stop("bad input for argument \"S\"")
+ if(!is.Numeric(Rank, integer=TRUE, posit=TRUE, allow=1))
+ stop("bad input for argument \"Rank\"")
+ if(length(seed) && !is.Numeric(seed, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"seed\"")
+ if(!is.logical(EqualTolerances) || length(EqualTolerances)>1)
+ stop("bad input for argument \"EqualTolerances)\"")
+ if(EqualMaxima && loabundance != hiabundance)
+ stop(paste("\"loabundance)\" and \"hiabundance)\" must",
+ "be equal when EqualTolerances=TRUE"))
+ if(length(seed)) set.seed(seed)
+
+ xmat = matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
+ paste("x", 2:p, sep="")))
+ ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
+ lvmat = xmat %*% ccoefs
+ optima = matrix(rnorm(Rank*S, sd=sdOptima), S, Rank)
+ Tols = if(EqualTolerances) matrix(1, S, Rank) else
+ matrix(rnorm(Rank*S, mean=1, sd=1), S, Rank)
+ loeta = log(loabundance)
+ hieta = log(hiabundance)
+ logmaxima = runif(S, min=loeta, max=hieta)
+
+ etamat = matrix(logmaxima, n, S, byrow=TRUE) # eta=log(mu) only; intercept term
+ for(jay in 1:S) {
+ optmat = matrix(optima[jay,], n, Rank, byrow=TRUE)
+ tolmat = matrix(Tols[jay,], n, Rank, byrow=TRUE)
+ temp = cbind((lvmat - optmat) * tolmat)
+ for(r in 1:Rank)
+ etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] * (lvmat[,r] - optmat[jay,r])
+ }
+
+ ymat = if(family == "negbinomial") {
+
+
+
+ } else {
+ matrix(rpois(n*S, lam=exp(etamat)), n, S)
+ }
+ if(family == "binomial")
+ ymat = 0 + (ymat > 0)
+
+ dimnames(ymat) = list(as.character(1:n), paste("y", 1:S, sep=""))
+ ans = data.frame(xmat, ymat)
+ attr(ans, "ccoefficients") = ccoefs
+ attr(ans, "family") = family
+ ans
+}
+
+
+
+
+
+getInitVals = function(gvals, llfun, ...) {
+ LLFUN = match.fun(llfun)
+ ff = function(myx, ...) LLFUN(myx, ...)
+ objFun = gvals
+ for(ii in 1:length(gvals))
+ objFun[ii] = ff(myx=gvals[ii], ...)
+ try.this = gvals[objFun == max(objFun)] # Usually scalar, maybe vector
+ try.this
+}
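+
+
+# A small usage sketch for getInitVals(): grid-search a log-likelihood
+# llfun over the candidate values gvals and keep the maximiser(s).
+# The exponential log-likelihood below is illustrative only.
+if(FALSE) {
+    yy = rexp(50, rate=2)
+    llexp = function(myx, yy) sum(dexp(yy, rate=myx, log=TRUE))
+    getInitVals(gvals=seq(0.1, 5, by=0.1), llfun=llexp, yy=yy)
+}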
+
+
+
+
+
+
+
+
+
+campp = function(q, size, prob, mu) {
+ if (!missing(mu)) {
+ if(!missing(prob))
+ stop("'prob' and 'mu' both specified")
+ prob <- size/(size + mu)
+ } else {
+ mu <- size * (1 - prob) / prob # the formula below needs mu
+ }
+ K = (1/3) * ((9*q+8)/(q+1) - ((9*size-1)/size) * (mu/(q+1))^(1/3)) /
+ sqrt( (1/size) * (mu/(q+1))^(2/3) + 1 / (q+1)) # Note the +, not -
+ pnorm(K)
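+
+
+# Quick sanity check (illustrative): the Camp-Paulson approximation
+# above should be close to the exact negative binomial CDF.
+if(FALSE) {
+    qq = 0:20
+    max(abs(campp(qq, size=4, mu=5) - pnbinom(qq, size=4, mu=5)))
+}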
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.rrr.q b/R/family.rrr.q
new file mode 100644
index 0000000..be82fe0
--- /dev/null
+++ b/R/family.rrr.q
@@ -0,0 +1,3092 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+replace.constraints <- function(Blist, cm, index)
+{
+
+ for(i in index)
+ Blist[[i]] = cm
+ Blist
+}
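+
+
+# Tiny illustration of replace.constraints(): overwrite the constraint
+# matrices at positions `index' of the list Blist with a common cm.
+if(FALSE) {
+    Blist = list(diag(3), diag(3), diag(3))
+    replace.constraints(Blist, cm=matrix(1, 3, 1), index=2:3)
+}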
+
+
+valt.control <- function(
+ Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
+ 60, 80, 100, 125, 2^(8:12)),
+ Criterion = c("rss", "coefficients"),
+ Linesearch= FALSE, Maxit=7,
+ Suppress.warning=TRUE,
+ Tolerance=1e-7, ...)
+{
+
+ if(mode(Criterion) != "character" && mode(Criterion) != "name")
+ Criterion <- as.character(substitute(Criterion))
+ Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
+
+ list(Alphavec=Alphavec,
+ Criterion = Criterion,
+ Linesearch=Linesearch,
+ Maxit=Maxit,
+ Suppress.warning=Suppress.warning,
+ Tolerance=Tolerance)
+
+}
+
+
+qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
+ Rank = ncol(numat)
+ moff = NULL
+ ans = if(Quadratic) {
+ index = iam(NA, NA, M=Rank, diagonal=TRUE, both=TRUE)
+ temp1 = cbind(numat[,index$row] * numat[,index$col])
+ if(ITolerances) {
+ moff = 0
+ for(ii in 1:Rank)
+ moff = moff - 0.5 * temp1[,ii]
+ }
+ cbind(numat, if(ITolerances) NULL else temp1)
+ } else
+ as.matrix(numat)
+ list(matrix=if(Aoffset>0) ans else ans[,-(1:Rank),drop=FALSE],
+ offset = moff)
+}
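+
+
+# qrrvglm.xprod() appends the quadratic terms of the latent variables:
+# the extra columns are the products numat[,r] * numat[,s] for r <= s
+# (indexed via iam()); with ITolerances the -0.5 * sum-of-squares is
+# returned as an offset instead. An illustrative call:
+if(FALSE) {
+    numat = matrix(rnorm(10), 5, 2)
+    qrrvglm.xprod(numat, Aoffset=2, Quadratic=TRUE, ITolerances=FALSE)
+}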
+
+
+valt <- function(x, z, U, Rank=1,
+ Blist=NULL,
+ Cinit=NULL,
+ Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
+ 60, 80, 100, 125, 2^(8:12)),
+ Criterion=c("rss", "coefficients"),
+ Crow1positive = rep(TRUE, len=Rank),
+ colx1.index,
+ Linesearch=FALSE,
+ Maxit=20,
+ Structural.zero=NULL,
+ SD.Cinit=0.02,
+ Suppress.warning= FALSE,
+ Tolerance=1e-6,
+ trace= FALSE,
+ xij=NULL)
+{
+
+
+
+
+
+ if(mode(Criterion) != "character" && mode(Criterion) != "name")
+ Criterion <- as.character(substitute(Criterion))
+ Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
+
+ if(any(diff(Alphavec)) <= 0)
+ stop("Alphavec must be an increasing sequence")
+
+ if(!is.matrix(z))
+ z <- as.matrix(z)
+ n <- nrow(z)
+ M <- ncol(z)
+ if(!is.matrix(x))
+ x <- as.matrix(x)
+
+ colx2.index = (1:ncol(x))[-colx1.index]
+ p1 = length(colx1.index)
+ p2 = length(colx2.index)
+ p = p1 + p2
+ if(!p2) stop("p2, the dimension of vars for reduced-rank regn, must be > 0")
+
+ if(!length(Blist)) {
+ Blist = replace.constraints(vector("list", p), diag(M), 1:p)
+ }
+
+ dU <- dim(U)
+ if(dU[2] != n)
+ stop("input unconformable")
+
+ cmat2 = replace.constraints(vector("list", Rank+p1),
+ if(length(Structural.zero))
+ diag(M)[,-Structural.zero,drop=FALSE] else diag(M), 1:Rank)
+ if(p1)
+ for(k in 1:p1)
+ cmat2[[Rank+k]] <- Blist[[colx1.index[k]]]
+
+ if(is.null(Cinit))
+ Cinit <- matrix(rnorm(p2*Rank, sd=SD.Cinit), p2, Rank)
+
+ fit <- list(rss=0) # Only for initial old.crit below
+
+ C <- Cinit # This is input for the main iter loop
+ old.crit <- switch(Criterion, coefficients=C, rss=fit$rss)
+
+ recover = 0 # Allow a few iterations between different line searches
+ for(iter in 1:Maxit) {
+ iter.save <- iter
+
+ lv.mat <- x[,colx2.index,drop=FALSE] %*% C
+ new.lv.model.matrix = cbind(lv.mat, if(p1) x[,colx1.index] else NULL)
+ fit = vlm.wfit(new.lv.model.matrix, z, Blist=cmat2, U=U,
+ matrix.out=TRUE, XBIG=FALSE, rss=FALSE, qr=FALSE, xij=xij)
+ A <- t(fit$mat.coef[1:Rank,,drop=FALSE])
+
+ cmat1 = replace.constraints(Blist, A, colx2.index)
+ fit = vlm.wfit(x, z, Blist=cmat1, U=U,
+ matrix.out=TRUE, XBIG= FALSE, rss=TRUE, qr= FALSE, xij=xij)
+ C = fit$mat.coef[colx2.index,,drop=FALSE] %*% A %*% solve(t(A) %*% A)
+
+ numat = x[,colx2.index,drop=FALSE] %*% C
+ evnu = eigen(var(numat))
+ temp7 = if(Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ evnu$vector %*% evnu$value^(-0.5)
+ C = C %*% temp7
+ A = A %*% t(solve(temp7))
+ temp8 = crow1C(cmat=C, Crow1positive, amat=A)
+ C = temp8$cmat
+ A = temp8$amat
+
+
+ ratio=switch(Criterion,
+ coefficients=max(abs(C - old.crit) / (Tolerance+abs(C))),
+ rss=max(abs(fit$rss - old.crit) / (Tolerance+fit$rss)))
+
+ if(trace) {
+ cat(" Alternating iteration", iter,
+ " ratio =", format(ratio), "\n")
+ if(!is.null(fit$rss))
+ cat(" rss =", fit$rss, "\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+
+ if(ratio < Tolerance) {
+ if(!Linesearch || (Linesearch && iter >= 3)) break
+ } else if(iter == Maxit && !Suppress.warning) {
+ warning("did not converge")
+ }
+
+ fini.linesearch = FALSE
+ if(Linesearch && iter - recover >= 2) {
+ xnew <- C
+
+ direction1 <- (xnew-xold) # / sqrt(1 + sum((xnew-xold)^2))
+ ftemp <- fit$rss # Most recent objective function
+ use.alpha <- 0 # The current step relative to (xold, yold)
+ for(itter in 1:length(Alphavec)) {
+ CC <- xold + Alphavec[itter] * direction1
+
+ try.lv.mat <- x[,colx2.index,drop=FALSE] %*% CC
+ try.new.lv.model.matrix = cbind(try.lv.mat,
+ if(p1) x[,colx1.index] else NULL)
+
+ try = vlm.wfit(try.new.lv.model.matrix, z, Blist=cmat2, U=U,
+ matrix.out=TRUE, XBIG= FALSE, rss=TRUE, qr= FALSE,
+ xij=xij)
+ if(try$rss < ftemp) {
+ use.alpha <- Alphavec[itter]
+ fit <- try
+ ftemp <- try$rss
+ C <- CC
+ A = t(fit$mat.coef[1:Rank,,drop=FALSE])
+ lv.mat <- x[,colx2.index,drop=FALSE] %*% C
+ recover = iter # Give it some alt'g iterations to recover
+ } else {
+ if(trace && use.alpha>0) {
+ cat(" Finished line search using Alpha =",
+ use.alpha, "\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ fini.linesearch = TRUE
+ }
+ if(fini.linesearch) break
+ } # End of itter loop
+ }
+
+ xold <- C # Don't take care of drift
+ old.crit <- switch(Criterion, coefficients=C, rss=fit$rss)
+ } # End of iter loop
+
+ list(A=A, C=C, fitted=fit$fitted, new.coeffs = fit$coef, rss=fit$rss)
+}
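+
+
+# valt() alternates two weighted least-squares steps: with C fixed,
+# regress z on the latent variables lv = X2 %*% C to update A; with A
+# fixed, update C via C = B %*% A %*% solve(t(A) %*% A). A standalone
+# sketch of the same idea for an unweighted, identity-link fit:
+if(FALSE) {
+    set.seed(1)
+    n = 200; p2 = 5; M = 4; Rank = 1
+    X2 = matrix(rnorm(n*p2), n, p2)
+    Z = X2 %*% matrix(rnorm(p2), p2, 1) %*% matrix(rnorm(M), 1, M) +
+        0.1 * matrix(rnorm(n*M), n, M)
+    C = matrix(rnorm(p2*Rank), p2, Rank)
+    for(iter in 1:20) {
+        lv = X2 %*% C
+        A = matrix(t(coef(lm(Z ~ lv - 1))), M, Rank) # update A, C fixed
+        B = coef(lm(Z ~ X2 - 1))                     # p2 x M, unconstrained
+        C = B %*% A %*% solve(t(A) %*% A)            # update C, A fixed
+    }
+    sum((Z - X2 %*% C %*% t(A))^2) # residual sum of squares
+}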
+
+
+
+lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign=TRUE,
+ no.thrills=FALSE)
+{
+
+ Rank = control$Rank
+ colx1.index = control$colx1.index
+ Quadratic = control$Quadratic
+ Dzero = control$Dzero
+ Corner = control$Corner
+ ITolerances = control$ITolerances
+
+ M = nrow(Blist[[1]])
+ p1 = length(colx1.index)
+ combine2 = c(control$Structural.zero,
+ if(Corner) control$Index.corner else NULL)
+
+ Qoffset = if(Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
+ NoA = length(combine2) == M # No unknown parameters in A
+ cmat2 = if(NoA) {
+ Aoffset = 0
+ vector("list", Aoffset+Qoffset+p1)
+ } else {
+ Aoffset = Rank
+ replace.constraints(vector("list", Aoffset+Qoffset+p1),
+ if(length(combine2)) diag(M)[,-combine2,drop=FALSE] else diag(M),
+ 1:Rank) # If Corner then doesn't contain \bI_{Rank}
+ }
+ if(Quadratic && !ITolerances)
+ cmat2 = replace.constraints(cmat2,
+ if(control$EqualTolerances)
+ matrix(1, M, 1) - eij(Dzero, M) else {
+ if(length(Dzero)) diag(M)[,-Dzero,drop=FALSE] else diag(M)},
+ Aoffset + (1:Qoffset))
+ if(p1)
+ for(k in 1:p1)
+ cmat2[[Aoffset+Qoffset+k]] <- Blist[[colx1.index[k]]]
+ if(!no.thrills) {
+ i63 = iam(NA, NA, M=Rank, both=TRUE)
+ names(cmat2) = c(
+ if(NoA) NULL else paste("(lv", 1:Rank, ")", sep=""),
+ if(Quadratic && Rank==1 && !ITolerances)
+ "(lv^2)" else
+ if(Quadratic && Rank>1 && !ITolerances)
+ paste("(lv", i63$row, ifelse(i63$row==i63$col, "^2",
+ paste("*lv", i63$col, sep="")), ")", sep="") else NULL,
+ if(p1) names(colx1.index) else NULL)
+ }
+
+ lv.mat = x[,control$colx2.index,drop=FALSE] %*% C
+
+
+ tmp900 = qrrvglm.xprod(lv.mat, Aoffset, Quadratic, ITolerances)
+ new.lv.model.matrix = cbind(tmp900$matrix,
+ if(p1) x[,colx1.index] else NULL)
+ if(!no.thrills)
+ dimnames(new.lv.model.matrix) = list(dimnames(x)[[1]], names(cmat2))
+
+ if(assign) {
+ asx = attr(x, "assign")
+ asx = vector("list", ncol(new.lv.model.matrix))
+ names(asx) = names(cmat2) # wrong zz
+ for(i in 1:length(names(asx))) {
+ asx[[i]] = i
+ }
+ attr(new.lv.model.matrix, "assign") = asx
+ }
+
+ if(no.thrills)
+ list(new.lv.model.matrix = new.lv.model.matrix, constraints = cmat2,
+ offset = tmp900$offset) else
+ list(new.lv.model.matrix = new.lv.model.matrix, constraints = cmat2,
+ NoA = NoA, Aoffset = Aoffset, lv.mat = lv.mat,
+ offset = tmp900$offset)
+}
+
+valt.2iter <- function(x, z, U, Blist, A, control) {
+
+
+ cmat1 = replace.constraints(Blist, A, control$colx2.index)
+ fit <- vlm.wfit(x, z, Blist=cmat1, U=U, matrix.out=TRUE,
+ XBIG= FALSE, rss=TRUE, qr= FALSE, xij=control$xij)
+ C = fit$mat.coef[control$colx2.index,,drop=FALSE] %*% A %*% solve(t(A) %*% A)
+
+ list(A=A, C=C, fitted=fit$fitted, new.coeffs = fit$coef,
+ Blist=cmat1, rss=fit$rss)
+}
+
+
+
+valt.1iter = function(x, z, U, Blist, C, control, lp.names=NULL, nice31=FALSE,
+ MSratio = 1) {
+
+ Rank = control$Rank
+ Quadratic = control$Quadratic
+ Index.corner = control$Index.corner
+ p1 = length(control$colx1.index)
+ M = ncol(zedd <- as.matrix(z))
+ NOS = M / MSratio
+ Corner = control$Corner
+ ITolerances = control$ITolerances
+
+ Qoffset = if(Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
+ tmp833 = lm2qrrvlm.model.matrix(x=x, Blist=Blist, C=C, control=control)
+ new.lv.model.matrix = tmp833$new.lv.model.matrix
+ cmat2.save = cmat2 = tmp833$constraints # Doesn't contain \bI_{Rank}
+ lv.mat = tmp833$lv.mat
+ if(Corner)
+ zedd[,Index.corner] = zedd[,Index.corner] - lv.mat
+
+ if(nice31 && MSratio == 1) {
+ fit = list(mat.coef = NULL, fitted.values = NULL, rss = 0)
+
+ cmat2 = NULL # for vlm.wfit
+
+ i5 = rep(0, len=MSratio)
+ for(ii in 1:NOS) {
+ i5 = i5 + 1:MSratio
+
+ tmp100 = vlm.wfit(new.lv.model.matrix, zedd[,i5,drop=FALSE],
+ Blist=cmat2, U=U[i5,,drop=FALSE],
+ matrix.out=TRUE, XBIG=FALSE, rss=TRUE, qr=FALSE,
+ Eta.range = control$Eta.range,
+ xij=control$xij, lp.names=lp.names[i5])
+ fit$rss = fit$rss + tmp100$rss
+ fit$mat.coef = cbind(fit$mat.coef, tmp100$mat.coef)
+ fit$fitted.values = cbind(fit$fitted.values, tmp100$fitted.values)
+ }
+ } else {
+ fit = vlm.wfit(new.lv.model.matrix, zedd, Blist=cmat2, U=U,
+ matrix.out=TRUE, XBIG= FALSE, rss=TRUE, qr= FALSE,
+ Eta.range = control$Eta.range,
+ xij=control$xij, lp.names=lp.names)
+ }
+ A = if(tmp833$NoA) matrix(0, M, Rank) else
+ t(fit$mat.coef[1:Rank,,drop=FALSE])
+ if(Corner)
+ A[Index.corner,] = diag(Rank)
+
+ B1=if(p1) fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),,drop=FALSE] else NULL
+ fv = as.matrix(fit$fitted.values)
+ if(Corner)
+ fv[,Index.corner] = fv[,Index.corner] + lv.mat
+ Dmat = if(Quadratic) {
+ if(ITolerances) {
+ tmp800 = matrix(0, M, Rank*(Rank+1)/2)
+ tmp800[if(MSratio==2) c(TRUE,FALSE) else TRUE,1:Rank] = -0.5
+ tmp800
+ } else
+ t(fit$mat.coef[(tmp833$Aoffset+1):
+ (tmp833$Aoffset+Qoffset),,drop=FALSE])
+ } else
+ NULL
+
+ list(Amat=A, B1=B1, Cmat=C, Dmat=Dmat, fitted=if(M==1) c(fv) else fv,
+ new.coeffs = fit$coef, constraints=cmat2, rss=fit$rss,
+ offset = if(length(tmp833$offset)) tmp833$offset else NULL)
+}
+
+
+
+
+
+rrr.init.expression <- expression({
+ if(backchat || control$Quadratic)
+ copyxbig <- TRUE
+
+ modelno = switch(family@vfamily[1], "poissonff"=2,
+ "quasipoissonff"=2, "quasipoisson"=2,
+ "binomialff"=1, "quasibinomialff"=1,
+ "quasibinomial"=1, "negbinomial"=3,
+ "gamma2"=5, "gaussianff"=8,
+ 0) # stop("can't fit this model using fast algorithm")
+ if(modelno == 1) modelno = get("modelno", envir = VGAMenv)
+ rrcontrol$modelno = control$modelno = modelno
+ if(modelno==3 || modelno==5) {
+
+
+ M = 2 * ifelse(is.matrix(y), ncol(y), 1)
+ control$Structural.zero =
+ rrcontrol$Structural.zero = seq(from=2, to=M, by=2) # Handles A
+ control$Dzero =
+ rrcontrol$Dzero = seq(from=2, to=M, by=2) # Handles D
+
+ }
+
+
+})
+
+
+
+rrr.alternating.expression <- expression({
+
+ alt <- valt(x, z, U, Rank=Rank,
+ Blist=Blist,
+ Cinit=rrcontrol$Cinit,
+ Criterion=rrcontrol$Criterion,
+ colx1.index=rrcontrol$colx1.index,
+ Linesearch=rrcontrol$Linesearch,
+ Maxit=rrcontrol$Maxit,
+ Structural.zero=rrcontrol$Structural.zero,
+ SD.Cinit=rrcontrol$SD.Cinit,
+ Suppress.warning=rrcontrol$Suppress.warning,
+ Tolerance=rrcontrol$Tolerance,
+ trace=trace,
+ xij=control$xij) # This is subject to drift in A and C
+
+ ans2 = rrr.normalize(rrcontrol=rrcontrol, A=alt$A, C=alt$C, x=x)
+
+ Amat = ans2$A # Fed into Blist below (in rrr.end.expression)
+ tmp.fitted = alt$fitted # Also fed; was alt2$fitted
+
+ rrcontrol$Cinit <- ans2$C # For next valt() call
+
+ eval(rrr.end.expression) # Put Amat into Blist, and create new z
+})
+
+ adjust.Dmat.expression = expression({
+ if(length(Dmat)) {
+ ind0 = iam(NA, NA, both= TRUE, M=Rank)
+ for(kay in 1:M) {
+ elts = Dmat[kay,,drop=FALSE] # Manual recycling
+ if(length(elts) < Rank)
+ elts = matrix(elts, 1, Rank)
+ Dk = m2adefault(elts, M=Rank)[,,1]
+ Dk = matrix(Dk, Rank, Rank)
+ Dk = t(Mmat) %*% Dk %*% Mmat # 22/8/03; Not diagonal in general
+ Dmat[kay,] = Dk[cbind(ind0$row.index[1:ncol(Dmat)],
+ ind0$col.index[1:ncol(Dmat)])]
+ }
+ }})
+
+rrr.normalize = function(rrcontrol, A, C, x, Dmat=NULL) {
+
+
+
+ colx2.index = rrcontrol$colx2.index
+ Rank = rrcontrol$Rank
+ Index.corner = rrcontrol$Index.corner
+ M = nrow(A)
+ C.old = C
+
+ if(rrcontrol$Corner) {
+ tmp87 = A[Index.corner,,drop=FALSE]
+ Mmat <- solve(tmp87) # The normalizing matrix
+ C <- C %*% t(tmp87)
+ A <- A %*% Mmat
+ A[Index.corner,] <- diag(Rank) # Make sure
+ eval(adjust.Dmat.expression)
+ }
+
+ if(rrcontrol$Svd.arg) {
+ temp = svd(C %*% t(A))
+ if(!is.matrix(temp$v))
+ temp$v = as.matrix(temp$v)
+ C = temp$u[,1:Rank,drop=FALSE] %*%
+ diag(temp$d[1:Rank]^(1-rrcontrol$Alpha), nrow=Rank)
+ A = diag(temp$d[1:Rank]^(rrcontrol$Alpha), nrow=Rank) %*%
+ t(temp$v[,1:Rank,drop=FALSE])
+ A = t(A)
+ Mmat = t(C.old) %*% C.old %*% solve(t(C) %*% C.old)
+ eval(adjust.Dmat.expression)
+ }
+
+ if(rrcontrol$Uncor) {
+ lv.mat <- x[,colx2.index,drop=FALSE] %*% C
+ var.lv.mat <- var(lv.mat)
+ UU = chol(var.lv.mat)
+ Ut <- solve(UU)
+ Mmat <- t(UU)
+ C <- C %*% Ut
+ A <- A %*% t(UU)
+ eval(adjust.Dmat.expression)
+ }
+
+
+ if(rrcontrol$Quadratic) {
+ Mmat = diag(Rank)
+ for(LV in 1:Rank)
+ if(( rrcontrol$Crow1positive[LV] && C[1,LV] < 0) ||
+ (!rrcontrol$Crow1positive[LV] && C[1,LV] > 0)) {
+ C[,LV] = -C[,LV]
+ A[,LV] = -A[,LV]
+ Mmat[LV,LV] = -1
+ }
+ eval(adjust.Dmat.expression) # Using Mmat above
+ }
+
+
+ list(Amat=A, Cmat=C, Dmat=Dmat)
+}
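+
+
+# The corner-constraint branch above leaves the reduced-rank part
+# A %*% t(C) unchanged: post-multiplying A by Mmat = solve(A[Index.corner,])
+# and C by t(A[Index.corner,]) cancels in the product, while forcing the
+# corner rows of the new A to be the identity. A quick numerical check:
+if(FALSE) {
+    set.seed(1)
+    M = 4; p2 = 3; Rank = 2; Index.corner = 1:Rank
+    A = matrix(rnorm(M*Rank), M, Rank)
+    C = matrix(rnorm(p2*Rank), p2, Rank)
+    tmp87 = A[Index.corner,,drop=FALSE]
+    Anew = A %*% solve(tmp87)
+    Cnew = C %*% t(tmp87)
+    all.equal(A %*% t(C), Anew %*% t(Cnew)) # TRUE
+    round(Anew[Index.corner,], 8) # identity matrix
+}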
+
+
+rrr.end.expression = expression({
+
+ if(is.R()) {
+ if(exists(".VGAM.etamat", envir = VGAMenv))
+ rm(".VGAM.etamat", envir = VGAMenv)
+ } else {
+ while(exists(".VGAM.etamat", inherits=TRUE))
+ rm(".VGAM.etamat", inherits=TRUE)
+ }
+
+
+ if(control$Quadratic) {
+ if(!length(extra)) extra=list()
+ extra$Cmat = Cmat # Saves the latest iteration
+ extra$Dmat = Dmat # Not the latest iteration
+ extra$B1 = B1.save # Not the latest iteration (not good)
+ } else {
+ Blist = replace.constraints(Blist.save, Amat, colx2.index)
+ }
+
+ xbig.save = if(control$Quadratic) {
+ tmp300 = lm2qrrvlm.model.matrix(x=x, Blist=Blist.save,
+ C=Cmat, control=control)
+ lv.mat = tmp300$lv.mat # Needed at the top of new.s.call
+
+ lm2vlm.model.matrix(tmp300$new.lv.model.matrix,B.list,xij=control$xij)
+ } else {
+ lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ }
+
+
+ fv <- tmp.fitted # Contains \bI \bnu
+ eta <- fv + offset
+ if(FALSE && control$Rank == 1) {
+ ooo = order(lv.mat[,1])
+ }
+ mu <- family at inverse(eta, extra)
+
+ if(any(is.na(mu)))
+ warning("there are NAs in mu")
+
+ deriv.mu <- eval(family at deriv)
+ wz <- eval(family at weight)
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset # Contains \bI \bnu
+
+
+
+})
+
+
+
+rrr.derivative.expression <- expression({
+
+
+
+
+
+
+ which.optimizer = if(is.R()) {
+ if(control$Quadratic && control$FastAlgorithm) {
+ "BFGS"
+ } else {
+ if(iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
+ }
+ } else "Quasi-Newton"
+ if(trace && control$OptimizeWrtC) {
+ cat("\n\n")
+ cat("Using", which.optimizer, "\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+
+ constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
+ nice31 = (!control$EqualTol || control$ITolerances) &&
+ all(trivial.constraints(constraints))
+
+ theta0 <- c(Cmat) # zz; Possibly bad because of normalization?
+ if(is.R()) assign(".VGAM.dot.counter", 0, envir = VGAMenv) else
+ .VGAM.dot.counter <<- 0
+if(control$OptimizeWrtC) {
+ if(is.R()) {
+ if(control$Quadratic && control$FastAlgorithm) {
+ if(iter == 2) {
+ if(is.R()) {
+ if(exists(".VGAM.etamat", envir = VGAMenv))
+ rm(".VGAM.etamat", envir = VGAMenv)
+ } else {
+ if(exists(".VGAM.etamat", inherits=TRUE))
+ rm(".VGAM.etamat", inherits=TRUE)
+ }
+ }
+ if(iter > 2 && !quasi.newton$convergence) {
+ if(is.R()) {
+ if(zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+ ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
+ ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
+ ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
+ }
+ } else {
+ if(zthere <- exists(".VGAM.z")) {
+ ..VGAM.z = .VGAM.z
+ ..VGAM.U = .VGAM.U
+ ..VGAM.beta = .VGAM.beta
+ }
+ }
+ if(zthere) {
+ z = matrix(..VGAM.z, n, M) # minus any offset
+ U = matrix(..VGAM.U, M, n)
+ }
+
+ }
+
+ if(iter == 2 || quasi.newton$convergence) {
+ NOS = ifelse(modelno==3 || modelno==5, M/2, M)
+
+ canfitok = if(is.R())
+ (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAMenv)) else
+ (exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
+ if(!canfitok)
+ stop("can't fit this model using fast algorithm")
+ p2star = if(nice31)
+ ifelse(control$ITolerances, Rank, Rank+0.5*Rank*(Rank+1)) else
+ (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
+ p1star = if(nice31) p1 * ifelse(modelno==3 || modelno==5,2,1) else
+ (ncol(xbig.save) - p2star)
+ xbig.save1 = if(p1star > 0) xbig.save[,-(1:p2star)] else NULL
+ quasi.newton = optim(par=Cmat, fn=callcqof,
+ gr=if(control$GradientFunction) calldcqof else NULL,
+ method=which.optimizer,
+ control=list(fnscale=1,trace=as.integer(control$trace),
+ parscale=rep(control$Parscale, len=length(Cmat)),
+ maxit=250),
+ etamat=eta, xmat=x, ymat=y, wvec=w,
+ xbig.save1 = if(nice31) NULL else xbig.save1,
+ modelno=modelno, Control=control,
+ n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
+
+
+ if(is.R()) {
+ if(zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+ ..VGAM.z = get(".VGAM.z", envir = VGAMenv)
+ ..VGAM.U = get(".VGAM.U", envir = VGAMenv)
+ ..VGAM.beta = get(".VGAM.beta", envir = VGAMenv)
+ }
+ } else {
+ if(zthere <- exists(".VGAM.z")) {
+ ..VGAM.z = .VGAM.z
+ ..VGAM.U = .VGAM.U
+ ..VGAM.beta = .VGAM.beta
+ }
+ }
+ if(zthere) {
+ z = matrix(..VGAM.z, n, M) # minus any offset
+ U = matrix(..VGAM.U, M, n)
+ }
+ } else {
+ if(is.R()) {
+ if(exists(".VGAM.offset", envir = VGAMenv))
+ rm(".VGAM.offset", envir = VGAMenv)
+ } else {
+ while(exists(".VGAM.offset", inherits=TRUE))
+ rm(".VGAM.offset", inherits=TRUE)
+ }
+ }
+ } else {
+ use.reltol = if(length(rrcontrol$Reltol) >= iter)
+ rrcontrol$Reltol[iter] else rev(rrcontrol$Reltol)[1]
+ quasi.newton <-
+ optim(par=theta0,
+ fn=rrr.derivC.rss,
+ method=which.optimizer,
+ control=list(fnscale=rrcontrol$Fnscale,
+ maxit=rrcontrol$Maxit,
+ abstol=rrcontrol$Abstol,
+ reltol=use.reltol),
+ U=U, z= if(control$ITolerances) z+offset else z,
+ M=M, xmat=x, # varbix2=varbix2,
+ Blist=Blist, rrcontrol=rrcontrol)
+ }
+ } else {
+ quasi.newton <-
+ nlminb(start=theta0,
+ objective=rrr.derivC.rss,
+ control = nlminb.control(x.tol = rrcontrol$X.tol,
+ eval.max = rrcontrol$Eval.max,
+ iter.max = rrcontrol$Iter.max,
+ abs.tol = rrcontrol$Abs.tol,
+ rel.tol = rrcontrol$Rel.tol,
+ step.min = rrcontrol$Step.min,
+ rel.err = rrcontrol$Rel.err),
+ U=U, z= if(control$ITolerances) z+offset else z,
+ M=M, xmat=x, # varbix2=varbix2,
+ Blist=Blist, rrcontrol=rrcontrol)
+ }
+
+
+
+ Cmat = matrix(quasi.newton$par, p2, Rank, byrow=FALSE)
+
+ if(Rank > 1 && rrcontrol$ITolerances) {
+ numat = x[,rrcontrol$colx2.index,drop=FALSE] %*% Cmat
+ evnu = eigen(var(numat))
+ Cmat = Cmat %*% evnu$vector
+ numat = x[,rrcontrol$colx2.index,drop=FALSE] %*% Cmat
+ offset = if(Rank > 1) -0.5*apply(numat^2, 1, sum) else -0.5*numat^2
+ }
+}
+
+
+ alt = valt.1iter(x=x, z=z, U=U, Blist=Blist, C=Cmat, nice31=nice31,
+ control=rrcontrol, lp.names=predictors.names)
+
+
+ if(length(alt$offset))
+ offset = alt$offset
+
+ B1.save = alt$B1 # Put later into extra
+ tmp.fitted = alt$fitted # contains \bI_{Rank} \bnu if Corner
+
+ if(modelno!=33 && control$OptimizeWrtC)
+ alt = rrr.normalize(rrcontrol=rrcontrol, A=alt$Amat, C=alt$Cmat,
+ x=x, Dmat=alt$Dmat)
+
+ if(trace && control$OptimizeWrtC) {
+ cat("\n")
+ cat(which.optimizer, "using",
+ if(is.R()) "optim():" else "nlminb():", "\n")
+ cat("Objective =", if(is.R())
+ quasi.newton$value else format(quasi.newton$objective), "\n")
+ cat("Parameters (= c(C)) = ", if(length(quasi.newton$par) < 5)
+ "" else "\n")
+ cat(if(is.R()) alt$Cmat else format(alt$Cmat), fill=TRUE)
+ cat("\n")
+ if(!is.R())
+ cat("Gradient norm =", format(quasi.newton$grad.norm), "\n")
+ cat("Number of function evaluations =", if(is.R())
+ quasi.newton$count[1] else quasi.newton$f.evals, "\n")
+ if(!is.R())
+ cat("Number of gradient evaluations =", quasi.newton$g.evals, "\n")
+ if(length(quasi.newton$message))
+ cat("Message =", quasi.newton$message, "\n")
+ cat("\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+
+
+
+ Amat = alt$Amat # Needed in rrr.end.expression
+ Cmat = alt$Cmat # Needed in rrr.end.expression if Quadratic
+ Dmat = alt$Dmat # Put later into extra
+
+ eval(rrr.end.expression) # Put Amat into Blist, and create new z
+})
+
+
+rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
+ omit.these=NULL) {
+
+ if(rrcontrol$trace) {
+ cat(".")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ alreadyThere = if(is.R())
+ exists(".VGAM.dot.counter", envir = VGAMenv) else
+ exists(".VGAM.dot.counter")
+ if(alreadyThere) {
+ if(is.R()) {
+ VGAM.dot.counter = get(".VGAM.dot.counter", envir = VGAMenv)
+ VGAM.dot.counter = VGAM.dot.counter + 1
+ assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAMenv)
+ } else {
+ .VGAM.dot.counter <<- .VGAM.dot.counter + 1
+ VGAM.dot.counter = .VGAM.dot.counter # keep the local copy in sync
+ }
+ if(VGAM.dot.counter > max(50, options()$width - 5)) {
+ if(rrcontrol$trace) {
+ cat("\n")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ if(is.R()) assign(".VGAM.dot.counter", 0, envir = VGAMenv) else
+ .VGAM.dot.counter <<- 0
+ }
+ }
+
+ Cmat = matrix(theta, length(rrcontrol$colx2.index), rrcontrol$Rank)
+
+
+ tmp700 = lm2qrrvlm.model.matrix(x=xmat, Blist=Blist,
+ no.thrills = !rrcontrol$Corner,
+ C=Cmat, control=rrcontrol, assign= FALSE)
+ Blist = tmp700$constraints # Doesn't contain \bI_{Rank} \bnu
+
+ if(rrcontrol$Corner) {
+ z = as.matrix(z) # should actually call this zedd
+ z[,rrcontrol$Index.corner] = z[,rrcontrol$Index.corner] - tmp700$lv.mat
+ }
+
+ if(length(tmp700$offset)) z = z - tmp700$offset
+
+
+ vlm.wfit(x=tmp700$new.lv.model.matrix, z=z,
+ Blist=Blist, ncolx=ncol(xmat), U=U, only.rss=TRUE,
+ matrix.out= FALSE, XBIG= FALSE, rss= TRUE, qr= FALSE,
+ Eta.range = rrcontrol$Eta.range,
+ xij=rrcontrol$xij)$rss
+}
+
+
+
+rrvglm.optim.control = function(Fnscale=1,
+ Maxit=100,
+ Switch.optimizer=3,
+ Abstol= -Inf,
+ Reltol=sqrt(.Machine$double.eps),
+ ...)
+{
+
+
+
+
+ list(Fnscale=Fnscale,
+ Maxit=Maxit,
+ Switch.optimizer=Switch.optimizer,
+ Abstol=Abstol,
+ Reltol=Reltol)
+}
+
+
+
+if(is.R())
+nlminbcontrol = function(Abs.tol = 10^(-6),
+ Eval.max=91,
+ Iter.max=91,
+ Rel.err = 10^(-6),
+ Rel.tol = 10^(-6),
+ Step.min = 10^(-6),
+ X.tol = 10^(-6),
+ ...)
+{
+
+
+ list(Abs.tol = Abs.tol,
+ Eval.max=Eval.max,
+ Iter.max = Iter.max,
+ Rel.err=Rel.err,
+ Rel.tol=Rel.tol,
+ Step.min=Step.min,
+ X.tol=X.tol)
+}
+
+
+
+
+Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
+
+
+ if(length(varlvI) != 1 || !is.logical(varlvI))
+ stop("\"varlvI\" must be TRUE or FALSE")
+ if(length(reference) > 1) stop("\"reference\" must be of length 0 or 1")
+ if(length(reference) && is.Numeric(reference))
+ if(!is.Numeric(reference, allow=1, integ=TRUE))
+ stop("bad input for argument \"reference\"")
+ if(!is.logical(ConstrainedQO <- object@control$ConstrainedQO))
+ stop("can't determine whether the model is constrained or not")
+ ocontrol = object@control
+ coef.object = object@coefficients
+ Rank = ocontrol$Rank
+ M = object@misc$M
+ NOS = if(length(object@y)) ncol(object@y) else M
+ MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ Quadratic = if(ConstrainedQO) ocontrol$Quadratic else TRUE
+ if(!Quadratic) stop("object is not a quadratic ordination object")
+ p1 = length(ocontrol$colx1.index)
+ p2 = length(ocontrol$colx2.index)
+ Index.corner = ocontrol$Index.corner
+ Structural.zero = ocontrol$Structural.zero
+ EqualTolerances = ocontrol$EqualTolerances
+ Dzero = ocontrol$Dzero
+ Corner = if(ConstrainedQO) ocontrol$Corner else FALSE
+ estITol = if(ConstrainedQO) object@control$ITolerances else FALSE
+ modelno = object@control$modelno # 1,2,3,4,5,6,7 or 0
+ combine2 = c(Structural.zero, if(Corner) Index.corner else NULL)
+ NoA = length(combine2) == M # A is fully known # doesn't handle !Corner yet
+
+ Qoffset = if(Quadratic) ifelse(estITol, 0, sum(1:Rank)) else 0
+
+ ynames = object@misc$ynames
+ if(!length(ynames)) ynames = object@misc$predictors.names
+ if(!length(ynames)) ynames = paste("Y", 1:NOS, sep="")
+ lp.names = object@misc$predictors.names
+ if(!length(lp.names)) lp.names = NULL
+
+ dzero.vector = rep(FALSE, length=M)
+ if(length(Dzero))
+ dzero.vector[Dzero] = TRUE
+ names(dzero.vector) = ynames
+ lv.names = if(Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+
+ td.expression = expression({
+ Tolerance = Darray = m2adefault(Dmat, M=Rank)
+ for(i in 1:M)
+ if(length(Dzero) && any(Dzero == i)) {
+ Tolerance[,,i] = NA # Darray[,,i] == O
+ bellshaped[i] = FALSE
+ } else {
+ Tolerance[,,i] = -0.5 * solve(Darray[,,i])
+ bellshaped[i] = all(eigen(Tolerance[,,i])$values > 0)
+ }
+ optimum = matrix(as.numeric(NA),Rank,M) # dimnames=list(lv.names,ynames)
+ for(i in 1:M)
+ if(bellshaped[i])
+ optimum[,i] = Tolerance[,,i] %*% cbind(Amat[i,])
+ })
+ Amat = object@extra$Amat # M x Rank
+ Cmat = object@extra$Cmat # p2 x Rank
+ Dmat = object@extra$Dmat #
+ B1 = object@extra$B1 #
+ bellshaped = rep(FALSE, length=M)
+
+ if(is.character(reference)) {
+ reference = (1:NOS)[reference == ynames]
+ if(length(reference) != 1)
+ stop("could not match argument \"reference\" with any response")
+ }
+ ptr1 = 1
+ candidates = if(length(reference)) reference else {
+ if(length(ocontrol$Dzero)) (1:M)[-ocontrol$Dzero] else (1:M)}
+ repeat {
+ if(ptr1 > 0) {
+ this.spp = candidates[ptr1]
+ }
+ elts = Dmat[this.spp,,drop=FALSE]
+ if(length(elts) < Rank)
+ elts = matrix(elts, 1, Rank)
+ Dk = m2adefault(elts, M=Rank)[,,1] # Hopefully negative-def
+ temp400 = eigen(Dk)
+ ptr1 = ptr1 + 1
+ if(all(temp400$value < 0)) break
+ if(ptr1 > length(candidates)) break
+ }
+ if(all(temp400$value < 0)) {
+ temp1tol = -0.5 * solve(Dk)
+ dim(temp1tol) = c(Rank,Rank)
+ Mmat = t(chol(temp1tol))
+ if(ConstrainedQO) {
+ temp900 = solve(t(Mmat))
+ Cmat = Cmat %*% temp900
+ Amat = Amat %*% Mmat
+ }
+ if(length(Cmat)) {
+ temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat=Amat)
+ Cmat = temp800$cmat
+ Amat = temp800$amat
+ }
+ eval(adjust.Dmat.expression)
+ eval(td.expression)
+ } else {
+ if(length(reference) == 1)
+ stop(paste("tolerance matrix specified by \"reference\"",
+ "is not positive-definite")) else
+ warning(paste("could not find any positive-definite",
+ "tolerance matrix"))
+ }
+
+
+ if(ConstrainedQO)
+ if(Rank > 1) {
+ if(!length(xmat <- object at x)) stop("cannot obtain the model matrix")
+ numat = xmat[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ evnu = eigen(var(numat))
+ Mmat = solve(t(evnu$vector))
+ Cmat = Cmat %*% evnu$vector # == Cmat %*% solve(t(Mmat))
+ Amat = Amat %*% Mmat
+ temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat=Amat)
+ Cmat = temp800$cmat
+ Amat = temp800$amat
+ eval(adjust.Dmat.expression)
+ eval(td.expression)
+ }
+
+
+ if(ConstrainedQO)
+ if(varlvI) {
+ if(!length(xmat <- object at x)) stop("cannot obtain the model matrix")
+ numat = xmat[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ sdnumat = sd(numat)
+ Mmat = if(Rank > 1) diag(sdnumat) else matrix(sdnumat, 1, 1)
+ Cmat = Cmat %*% solve(t(Mmat))
+ Amat = Amat %*% Mmat
+ temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat=Amat)
+ Cmat = temp800$cmat
+ Amat = temp800$amat
+ eval(adjust.Dmat.expression)
+ eval(td.expression)
+ }
+
+
+ cx1i = ocontrol$colx1.index
+ maximum = if(length(cx1i)==1 && names(cx1i)=="(Intercept)") {
+ eta.temp = B1
+ for(i in 1:M)
+ eta.temp[i] = eta.temp[i] +
+ Amat[i,,drop=FALSE] %*% optimum[,i,drop=FALSE] +
+ t(optimum[,i,drop=FALSE]) %*%
+ Darray[,,i,drop= TRUE] %*% optimum[,i,drop=FALSE]
+ mymax = object@family@inverse(rbind(eta.temp), extra=object@extra)
+ c(mymax) # Convert from matrix to vector
+ } else {
+ 5 * rep(as.numeric(NA), len=M) # Make "numeric"
+ }
+ names(maximum) = ynames
+
+ lv.mat = if(ConstrainedQO) {
+ object@x[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ } else {
+ object@lv
+ }
+
+ dimnames(Amat) = list(lp.names, lv.names)
+ if(ConstrainedQO)
+ dimnames(Cmat) = list(names(ocontrol$colx2.index), lv.names)
+ if(!length(xmat <- object at x)) stop("cannot obtain the model matrix")
+ dimnames(lv.mat) = list(dimnames(xmat)[[1]], lv.names)
+
+ ans =
+ new(Class=if(ConstrainedQO) "Coef.qrrvglm" else "Coef.uqo",
+ A=Amat, B1=B1, Constrained=ConstrainedQO, D=Darray,
+ NOS = NOS, Rank = Rank,
+ lv = lv.mat,
+ lvOrder = lv.mat,
+ Optimum=optimum,
+ OptimumOrder=optimum,
+ bellshaped=bellshaped,
+ Dzero=dzero.vector,
+ Maximum = maximum,
+ Tolerance=Tolerance)
+ if(ConstrainedQO) {ans@C = Cmat} else {Cmat = NULL}
+
+ for(r in 1:Rank)
+ ans@OptimumOrder[r,] = order(ans@Optimum[r,])
+ for(r in 1:Rank)
+ ans@lvOrder[,r] = order(ans@lv[,r])
+
+ if(length(object@misc$estimated.dispersion) &&
+ object@misc$estimated.dispersion) {
+ p = length(object@coefficients)
+ n = object@misc$n
+ M = object@misc$M
+ NOS = if(length(object@y)) ncol(object@y) else M
+ pstar = if(ConstrainedQO) (p + length(Cmat)) else
+ p + n*Rank # Adjustment; not sure about UQO
+ adjusted.dispersion = object@misc$dispersion * (n*M - p) /
+ (n*M - pstar)
+ ans@dispersion = adjusted.dispersion
+ }
+
+ if(MSratio > 1) {
+ keepIndex = seq(from=1, to=M, by=MSratio)
+ ans@Dzero = ans@Dzero[keepIndex]
+ ans@Optimum = ans@Optimum[,keepIndex,drop=FALSE]
+ ans@Tolerance = ans@Tolerance[,,keepIndex,drop=FALSE]
+ ans@bellshaped = ans@bellshaped[keepIndex]
+ names(ans@Dzero) = ynames
+ } else {
+ dimnames(ans@D) = list(lv.names, lv.names, ynames)
+ }
+ names(ans@bellshaped) = ynames
+ dimnames(ans@Optimum) = list(lv.names, ynames)
+ dimnames(ans@Tolerance) = list(lv.names, lv.names, ynames)
+ ans
+}
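+
+
+# For Rank = 1 the quadratic predictor is eta = b + a*nu + d*nu^2 with
+# d < 0, so the optimum is -a/(2*d) and the squared tolerance is
+# -1/(2*d); td.expression above computes exactly this via
+# Tolerance = -0.5 * solve(D) and optimum = Tolerance %*% A[i,].
+# A quick numerical check of these identities:
+if(FALSE) {
+    a = 1.5; d = -0.25
+    Tol2 = -0.5 / d # squared tolerance
+    opt = Tol2 * a # optimum, equals -a/(2*d)
+    nu = seq(-5, 10, len=2001)
+    eta = 2 + a*nu + d*nu^2
+    c(opt, nu[which.max(eta)]) # approximately equal
+}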
+
+
+setClass("Coef.rrvglm", representation(
+ "A" = "matrix",
+ "B1" = "matrix",
+ "C" = "matrix",
+ "Rank" = "numeric",
+ "colx1.index" = "numeric",
+ "colx2.index" = "numeric",
+ "Atilde" = "matrix"))
+
+setClass("Coef.uqo", representation(
+ "A" = "matrix",
+ "B1" = "matrix",
+ "Constrained" = "logical",
+ "D" = "array",
+ "NOS" = "numeric",
+ "Rank" = "numeric",
+ "lv" = "matrix",
+ "lvOrder" = "matrix",
+ "Maximum" = "numeric",
+ "Optimum" = "matrix",
+ "OptimumOrder" = "matrix",
+ "bellshaped" = "logical",
+ "dispersion" = "numeric",
+ "Dzero" = "logical",
+ "Tolerance" = "array"))
+
+setClass("Coef.qrrvglm", representation("Coef.uqo",
+ "C" = "matrix"))
+
+printCoef.qrrvglm = function(x, ...) {
+
+ object = x
+ Rank = object@Rank
+ M = nrow(object@A)
+ NOS = object@NOS
+ iii = matrix(as.numeric(NA), NOS, Rank)
+ if(Rank == 1) { # || object@Diagonal
+ for(i in 1:NOS) {
+ fred = if(Rank>1) diag(object@Tolerance[,,i,drop=FALSE]) else
+ object@Tolerance[,,i]
+ if(all(fred > 0))
+ iii[i,] = sqrt(fred)
+ }
+ dimnames(iii) = list(dimnames(object@Tolerance)[[3]],
+ if(Rank==1) "lv" else
+ paste("Tolerance", dimnames(iii)[[2]], sep=""))
+ } else {
+ for(i in 1:NOS) {
+ fred = eigen(object@Tolerance[,,i])
+ if(all(fred$value > 0))
+ iii[i,] = sqrt(fred$value)
+ }
+ dimnames(iii) = list(dimnames(object@Tolerance)[[3]],
+ paste("tol", 1:Rank, sep=""))
+ }
+
+ dimnames(object@A) = list(dimnames(object@A)[[1]],
+ if(Rank > 1) paste("A", dimnames(object@A)[[2]], sep=".") else "A")
+
+ Maximum = if(length(object@Maximum)) cbind(Maximum=object@Maximum) else NULL
+ if(length(Maximum) && length(iii) && Rank==1)
+ Maximum[is.na(iii),] = NA
+
+ optmat = cbind(t(object@Optimum))
+ dimnames(optmat) = list(dimnames(optmat)[[1]],
+ if(Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep=".")
+ else "Optimum")
+ if(length(optmat) && length(iii) && Rank==1)
+ optmat[is.na(iii),] = NA
+
+ if( object@Constrained ) {
+ cat("\nC matrix (constrained/canonical coefficients)\n")
+ print(object@C, ...)
+ }
+ cat("\nB1 and A matrices\n")
+ print(cbind(t(object@B1),
+ A=object@A), ...)
+ cat("\nOptima and maxima\n")
+ print(cbind(Optimum=optmat,
+ Maximum), ...)
+ if(Rank > 1) { # !object@Diagonal && Rank > 1
+ cat("\nTolerances\n") } else
+ cat("\nTolerance\n")
+ print(iii, ...)
+
+ cat("\nStandard deviation of the latent variables (site scores)\n")
+ print(sd(object@lv))
+ invisible(object)
+}
+
+
+ setMethod("show", "Coef.qrrvglm", function(object)
+ printCoef.qrrvglm(object))
+ setMethod("print", "Coef.qrrvglm", function(x, ...)
+ printCoef.qrrvglm(x, ...))
+ setMethod("summary", "qrrvglm", function(object, ...)
+ summary.qrrvglm(object, ...))
+
+predict.qrrvglm <- function(object,
+ newdata=NULL,
+ type=c("link", "response", "lv", "terms"),
+ se.fit=FALSE,
+ deriv=0,
+ dispersion=NULL,
+ extra=object@extra,
+ varlvI = FALSE, reference = NULL, ...)
+{
+ if(se.fit)
+ stop("can't handle se.fit==TRUE yet")
+ if(deriv != 0)
+ stop("derivative is not equal to 0")
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("link", "response", "lv", "terms"))[1]
+ if(type=="lv")
+ stop("can't handle type='lv' yet")
+ if(type=="terms")
+ stop("can't handle type='terms' yet")
+
+ na.act = object@na.action
+ object@na.action = list()
+ M = object@misc$M
+ Rank = object@control$Rank
+
+ if(length(newdata)) {
+ p1 = length(object@control$colx1.index)
+ Coefs = Coef(object, varlvI = varlvI, reference = reference)
+ temptype = ifelse(type=="link", "response", type[1])
+
+ conmat = replace.constraints(vector("list", object@misc$p),
+ diag(M), object@control$colx1.index)
+ conmat = replace.constraints(conmat, Coefs@A,
+ object@control$colx2.index)
+ names(conmat) = object@misc$colnames.x
+ object@constraints = conmat # To get RR-VGLM type eta
+
+ newcoefs = lm2vlm.model.matrix(object@x, conmat, xij=object@control$xij)
+ newcoefs = dimnames(newcoefs)[[2]]
+ newcoefs = coef(object)[newcoefs] # Fixes up order of coefficients
+ c.index = is.na(newcoefs) # Indices corresponding to C matrix
+ newcoefs[c.index] = t(Coefs@C) # Fill in the rest (C) of the outerprod
+
+ object@coefficients = newcoefs
+
+ pvlm = predict.vlm(object, newdata=newdata, type=temptype,
+ se.fit=se.fit, deriv=deriv,
+ dispersion=dispersion, extra=extra, ...)
+
+ newcoefs[c.index] = 0 # Trick; set C==0 here to give B1 terms only
+ object at coefficients = newcoefs
+ pvlm1 = if(!length(object at control$colx1.index)) 0 else
+ predict.vlm(object, newdata=newdata, type=temptype,
+ se.fit=se.fit, deriv=deriv,
+ dispersion=dispersion, extra=extra, ...)
+
+ lvmat = if(object@control$Corner)
+ (pvlm - pvlm1)[,object@control$Index.corner,drop=FALSE] else
+ stop("corner constraints needed")
+
+ for(j in 1:M)
+ pvlm[,j] = pvlm[,j] + (if(Rank==1) (lvmat^2 * Coefs@D[,,j]) else
+ (((lvmat %*% Coefs@D[,,j]) * lvmat) %*% rep(1, Rank)))
+ } else {
+ pvlm = predict.vglm(object, type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion,
+ extra=extra, ...)
+ }
+
+ pred = switch(type,
+ response={ fv = if(length(newdata)) object@family@inverse(pvlm, extra) else
+ pvlm
+ if(M > 1 && is.matrix(fv)) {
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ }
+ fv
+ },
+ link = pvlm,
+ terms=stop("failure here"))
+
+ if(!length(newdata) && length(na.act)) {
+ if(se.fit) {
+ pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit = napredict(na.act[[1]], pred$se.fit)
+ } else {
+ pred = napredict(na.act[[1]], pred)
+ }
+ }
+ pred
+}
+
+setMethod("predict", "qrrvglm", function(object, ...)
+ predict.qrrvglm(object, ...))
+
+coefqrrvglm = function(object, matrix.out = FALSE,
+ label = TRUE, compress = TRUE) {
+ if(matrix.out)
+ stop("currently can't handle matrix.out=TRUE")
+ coefvlm(object, matrix.out = matrix.out, label = label, compress = compress)
+}
+
+
+
+residualsqrrvglm <- function(object,
+ type = c("deviance", "pearson", "working", "response", "ldot"),
+ matrix.arg= TRUE) {
+ stop("this function hasn't been written yet")
+
+}
+
+setMethod("residuals", "qrrvglm", function(object, ...)
+ residualsqrrvglm(object, ...))
+
+
+
+
+printrrvglm <- function(x, ...)
+{
+ if(!is.null(cl <- x at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+ coef <- x at coefficients
+ if(any(nas <- is.na(coef))) {
+ if(is.null(names(coef)))
+ names(coef) <- paste("b", 1:length(coef), sep = "")
+ cat("\nCoefficients: (", sum(nas),
+ " not defined because of singularities)\n", sep = "")
+ } else
+ cat("\nCoefficients:\n")
+
+ if(FALSE) {
+ Rank <- x at Rank
+ if(!length(Rank))
+ Rank <- sum(!nas)
+ }
+
+ if(FALSE) {
+ nobs <- if(length(x at df.total)) x at df.total else length(x at residuals)
+ rdf <- x at df.residual
+ if(!length(rdf))
+ rdf <- nobs - Rank
+ }
+ cat("\n")
+
+ if(length(deviance(x)))
+ cat("Residual Deviance:", format(deviance(x)), "\n")
+ if(length(logLik(x)))
+ cat("Log-likelihood:", format(logLik(x)), "\n")
+
+ if(length(x@criterion)) {
+ ncrit <- names(x@criterion)
+ for(i in ncrit)
+ if(i!="loglikelihood" && i!="deviance")
+ cat(paste(i, ":", sep=""), format(x@criterion[[i]]), "\n")
+ }
+
+ invisible(x)
+}
+
+
+
+
+setMethod("print", "rrvglm", function(x, ...) printrrvglm(x, ...))
+
+ setMethod("show", "rrvglm", function(object) printrrvglm(object))
+
+
+
+
+rrvglm.control.Gaussian <- function(backchat= FALSE, half.stepsizing= FALSE,
+ save.weight= TRUE, ...)
+{
+
+ list(backchat= FALSE, half.stepsizing= FALSE,
+ save.weight=as.logical(save.weight)[1])
+}
+
+
+
+summary.rrvglm <- function(object, correlation= FALSE,
+ dispersion=NULL, digits=NULL,
+ numerical= TRUE,
+ h.step = 0.0001,
+ kill.all= FALSE, omit13= FALSE, fixA= FALSE, ...)
+{
+
+
+
+
+
+ if(!is.Numeric(h.step, allow=1) || abs(h.step)>1)
+ stop("bad input for \"h.step\"")
+
+ if(!object@control$Corner)
+ stop("this function works with corner constraints only")
+
+ if(is.null(dispersion))
+ dispersion <- object at misc$dispersion
+
+ newobject <- object
+ class(newobject) <- "vglm" # 6/2/02; For Splus6
+ stuff <- summaryvglm(newobject, correlation=correlation,
+ dispersion=dispersion)
+
+ answer <-
+ new(Class="summary.rrvglm",
+ object,
+ call=stuff@call,
+ coef3=stuff@coef3,
+ cov.unscaled=stuff@cov.unscaled,
+ correlation=stuff@correlation,
+ df=stuff@df,
+ pearson.resid=stuff@pearson.resid,
+ sigma=stuff@sigma)
+
+
+ if(is.numeric(stuff@dispersion))
+ slot(answer, "dispersion") = stuff@dispersion
+
+
+
+ tmp5 <- get.rrvglm.se1(object, omit13=omit13,
+ numerical=numerical, h.step=h.step,
+ kill.all=kill.all, fixA=fixA, ...)
+ if(any(diag(tmp5$cov.unscaled) <= 0) ||
+ any(eigen(tmp5$cov.unscaled)$value <= 0)) {
+ warning("cov.unscaled is not positive definite")
+ }
+
+ answer at cov.unscaled <- tmp5$cov.unscaled
+
+ od <- if(is.numeric(object at misc$disper)) object at misc$disper else
+ object at misc$default.disper
+ if(is.numeric(dispersion)) {
+ if(is.numeric(od) && dispersion!=od)
+ warning("dispersion != object at misc$dispersion; using the former")
+ } else {
+ dispersion <- if(is.numeric(od)) od else 1
+ }
+
+ tmp8 = object@misc$M - object@control$Rank -
+ length(object@control$Structural.zero)
+ answer@df[1] <- answer@df[1] + tmp8 * object@control$Rank
+ answer@df[2] <- answer@df[2] - tmp8 * object@control$Rank
+ if(dispersion==0) {
+ dispersion <- tmp5$rss / answer@df[2] # Estimate
+ }
+
+ answer@coef3 <- get.rrvglm.se2(answer@cov.unscaled, dispersion=dispersion,
+ coefficients=tmp5$coefficients)
+
+ answer@dispersion <- dispersion
+ answer@sigma <- dispersion^0.5
+
+
+ answer
+}
+
+
+
+
+
+printsummary.rrvglm <- function(x, digits=NULL, quote= TRUE, prefix="")
+{
+
+
+ printsummary.vglm(x, digits = digits, quote = quote, prefix = prefix)
+
+
+ invisible(x)
+}
+
+
+
+get.rrvglm.se1 <- function(fit, omit13= FALSE, kill.all= FALSE,
+ numerical= TRUE,
+ fixA= FALSE, h.step=0.0001,
+ trace.arg= FALSE, ...) {
+
+
+
+
+ if(length(fit@control$Nested) && fit@control$Nested)
+ stop("sorry, can't handle nested models yet")
+
+ Structural.zero = fit@control$Structural.zero
+
+
+ if(!length(fit@x))
+ stop("fit@x is empty. Run rrvglm(... , x= TRUE)")
+
+ colx1.index = fit@control$colx1.index
+ colx2.index = fit@control$colx2.index
+ Blist <- fit@constraints
+ ncolBlist <- unlist(lapply(Blist, ncol))
+
+ p1 = length(colx1.index)
+ p2 = length(colx2.index)
+
+ Rank <- fit at control$Rank # fit at misc$Nested.Rank
+
+ Amat <- fit at constraints[[colx2.index[1]]]
+ Bmat <- if(p1) coef(fit, mat= TRUE)[colx1.index,,drop=FALSE] else NULL
+ C.try <- coef(fit, mat= TRUE)[colx2.index,,drop=FALSE]
+ Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
+
+ x1mat <- if(p1) fit at x[,colx1.index,drop=FALSE] else NULL
+ x2mat <- fit at x[,colx2.index,drop=FALSE]
+
+ wz <- weights(fit, type="w") # old: wweights(fit) #fit at weights
+ if(!length(wz))
+ stop("can't get fit at weights")
+
+ M <- fit at misc$M
+ n <- fit at misc$n
+ Index.corner <- fit at control$Index.corner # used to be (1:Rank);
+ zmat <- fit at predictors + fit at residuals
+ theta <- c(Amat[-c(Index.corner,Structural.zero),])
+ if(fit at control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=fit at control$wzepsilon)
+ U <- vchol(wz, M=M, n=n, silent= TRUE)
+
+ if(numerical) {
+ delct.da <- num.deriv.rrr(fit, M=M, r=Rank,
+ x1mat=x1mat, x2mat=x2mat, p2=p2,
+ Index.corner, Aimat=Amat, Bmat=Bmat, Cimat=Cmat,
+ h.step=h.step, colx2.index=colx2.index,
+ xij=fit@control$xij,
+ Structural.zero=Structural.zero)
+ } else {
+ delct.da <- dctda.fast.only(theta=theta, wz=wz, U=U, zmat, M=M, r=Rank,
+ x1mat=x1mat, x2mat=x2mat,
+ p2=p2, Index.corner, Aimat=Amat,
+ Bmat=Bmat, Cimat=Cmat,
+ xij=fit@control$xij,
+ Structural.zero=Structural.zero)
+ }
+
+
+ newobject <- fit
+ class(newobject) <- "vglm" # 6/2/02; For Splus6
+ sfit2233 <- summaryvglm(newobject)
+ d8 <- dimnames(sfit2233 at cov.unscaled)[[1]]
+ cov2233 <- solve(sfit2233 at cov.unscaled) # Includes any intercepts
+ dimnames(cov2233) = list(d8, d8)
+
+ log.vec33 = NULL
+ nassign = names(fit@constraints)
+ choose.from = varassign(fit@constraints, nassign)
+ for(ii in nassign)
+ if(any(ii== names(colx2.index))) {
+ log.vec33 = c(log.vec33, choose.from[[ii]])
+ }
+ cov33 = cov2233[ log.vec33, log.vec33, drop=FALSE] # r*p2 by r*p2
+ cov23 = cov2233[-log.vec33, log.vec33, drop=FALSE]
+ cov22 = cov2233[-log.vec33,-log.vec33, drop=FALSE]
+
+
+ lv.mat <- x2mat %*% Cmat
+ offs = matrix(0, n, M) # The "0" handles Structural.zero's
+ offs[,Index.corner] = lv.mat
+ if(M == (Rank+length(Structural.zero)))
+ stop("can't handle full-rank models yet")
+ cm = matrix(0, M, M-Rank-length(Structural.zero))
+ cm[-c(Index.corner,Structural.zero),] = diag(M-Rank-length(Structural.zero))
+
+ Blist = vector("list", length(colx1.index)+1)
+ names(Blist) = c(names(colx1.index), "I(lv.mat)")
+ for(ii in names(colx1.index))
+ Blist[[ii]] = fit@constraints[[ii]]
+ Blist[["I(lv.mat)"]] = cm
+
+
+ if(p1) {
+ ooo = fit@assign
+ bb = NULL
+ for(ii in 1:length(ooo)) {
+ if(any(ooo[[ii]][1] == colx1.index))
+ bb = c(bb, names(ooo)[ii])
+ }
+
+ has.intercept = any(bb=="(Intercept)")
+ bb[bb=="(Intercept)"] = "1"
+ if(p1>1)
+ bb = paste(bb, collapse="+")
+ if(has.intercept) {
+ bb = paste("zmat - offs ~ ", bb, " + I(lv.mat)", collapse=" ")
+ } else {
+ bb = paste("zmat - offs ~ -1 + ", bb, " + I(lv.mat)", collapse=" ")
+ }
+ bb = as.formula(bb)
+ } else {
+ bb = as.formula("zmat - offs ~ -1 + I(lv.mat)")
+ }
+
+
+ if(( is.R() && fit at misc$dataname == "list") ||
+ (!is.R() && fit at misc$dataname == "sys.parent")) {
+ dspec = FALSE
+ } else {
+ if(is.R()) {
+ mytext1 = "exists(x=fit at misc$dataname, envir = VGAMenv)"
+ myexp1 = parse(text=mytext1)
+ is.there = eval(myexp1)
+ bbdata= if(is.there) get(fit at misc$dataname, envir=VGAMenv) else
+ get(fit at misc$dataname)
+ } else {
+ bbdata = get(fit at misc$dataname)
+ }
+ dspec = TRUE
+ }
+
+ if(!is.R()) {
+ assign("zmat", zmat, frame = 1)
+ assign("offs", offs, frame = 1)
+ assign("wz", wz, frame = 1)
+ assign("lv.mat", lv.mat, frame = 1)
+ }
+
+ fit1122 <- if(dspec) vlm(bb,
+ constraint=Blist, crit="d", weight=wz, data=bbdata,
+ save.weight= TRUE, smart= FALSE, trace=trace.arg, x= TRUE) else
+ vlm(bb,
+ constraint=Blist, crit="d", weight=wz,
+ save.weight= TRUE, smart= FALSE, trace=trace.arg, x= TRUE)
+
+
+
+ sfit1122 <- summaryvlm(fit1122)
+ d8 <- dimnames(sfit1122 at cov.unscaled)[[1]]
+ cov1122 <- solve(sfit1122 at cov.unscaled)
+ dimnames(cov1122) = list(d8, d8)
+
+ lcs = length(coef(sfit1122))
+ log.vec11 = (lcs-(M-Rank-length(Structural.zero))*Rank+1):lcs
+ cov11 = cov1122[log.vec11, log.vec11, drop=FALSE]
+ cov12 = cov1122[ log.vec11, -log.vec11, drop=FALSE]
+ cov22 = cov1122[-log.vec11, -log.vec11, drop=FALSE]
+ cov13 = delct.da %*% cov33 # zz; this always seems to be negative
+
+
+ if(omit13)
+ cov13 = cov13 * 0 # zero it
+
+ if(kill.all) {
+ cov13 = cov13 * 0 # zero it
+ if(fixA) {
+ cov12 = cov12 * 0 # zero it
+ } else {
+ cov23 = cov23 * 0 # zero it
+ }
+ }
+
+ cov13 = -cov13 # Richards (1961)
+
+ if(fixA) {
+ cov.unscaled <- rbind(cbind(cov1122, rbind(cov13, cov23)),
+ cbind(t(cov13), t(cov23), cov33))
+ } else {
+ cov.unscaled <- rbind(cbind(cov11, cov12, cov13),
+ cbind(rbind(t(cov12), t(cov13)), cov2233))
+ }
+
+ ans <- solve(cov.unscaled)
+
+ # Get all the coefficients
+ acoefs <- c(fit1122 at coefficients[log.vec11], fit at coefficients)
+ dimnames(ans) = list(names(acoefs), names(acoefs))
+ list(cov.unscaled=ans, coefficients=acoefs, rss=sfit1122@rss)
+}
+
+
+
+get.rrvglm.se2 <- function(cov.unscaled, dispersion=1, coefficients) {
+
+ d8 <- dimnames(cov.unscaled)[[1]]
+ ans <- matrix(coefficients, length(coefficients), 3)
+ ans[,2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
+ ans[,3] <- ans[,1] / ans[,2]
+ dimnames(ans) <- list(d8, c("Value", "Std. Error", "t value"))
+ ans
+}
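+
+
+# get.rrvglm.se2() just assembles a Wald-type table from a covariance
+# matrix: Std. Error = sqrt(dispersion * diag(cov.unscaled)) and
+# t value = Value / Std. Error. An illustrative call:
+if(FALSE) {
+    V = matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2,
+               dimnames=list(c("b1","b2"), c("b1","b2")))
+    get.rrvglm.se2(V, dispersion=1, coefficients=c(0.5, -1.2))
+}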
+
+
+
+num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
+ p2, Index.corner, Aimat, Bmat, Cimat,
+ h.step=0.0001, colx2.index,
+ xij=NULL, Structural.zero=NULL)
+{
+
+ nn <- nrow(x2mat)
+ if(nrow(Cimat)!=p2 || ncol(Cimat)!=r)
+ stop("Cimat wrong shape")
+
+ dct.da <- matrix(as.numeric(NA), (M-r-length(Structural.zero))*r, r*p2)
+
+ if((length(Index.corner) + length(Structural.zero)) == M)
+ stop("can't handle full rank models yet")
+ cbindex = (1:M)[-c(Index.corner, Structural.zero)]
+
+ ptr = 1
+ for(s in 1:r)
+ for(tt in cbindex) {
+ small.Blist = vector("list", p2)
+ pAmat = Aimat
+ pAmat[tt,s] = pAmat[tt,s] + h.step # Perturb it
+ for(ii in 1:p2)
+ small.Blist[[ii]] = pAmat
+
+ offset = if(length(fit@offset)) fit@offset else 0
+ if(all(offset==0)) offset = 0
+ neweta = x1mat %*% Bmat + x2mat %*% Cimat %*% t(pAmat)
+ fit@predictors = neweta
+
+
+ newmu <- fit at family@inverse(neweta, fit at extra)
+ fit at fitted.values = newmu
+
+ fred = weights(fit, type="w", deriv= TRUE, ignore.slot= TRUE)
+ if(!length(fred))
+ stop("can't get @weights and $deriv from object")
+ wz = fred$weights
+ deriv.mu <- fred$deriv
+
+ U <- vchol(wz, M=M, n=nn, silent= TRUE)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=nn)
+ newzmat <- neweta + vbacksub(U, tvfor, M=M, n=nn) - offset
+
+ newfit = vlm.wfit(x=x2mat, z=newzmat - x1mat %*% Bmat,
+ Blist=small.Blist, U = U,
+ matrix.out = FALSE, XBIG = FALSE,
+ rss = TRUE, qr = FALSE, x.ret = FALSE, offset = NULL,
+ xij=xij)
+ dct.da[ptr,] <- (newfit$coef - t(Cimat)) / h.step
+ ptr = ptr + 1
+ }
+
+ dct.da
+}
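+
+
+# num.deriv.rrr() uses one-sided (forward) differences with step
+# h.step: each free element of A is perturbed in turn and the change
+# in the refitted C is divided by h.step. The same idea in miniature:
+if(FALSE) {
+    f = function(a) c(a^2, sin(a)) # toy vector-valued analogue of C(A)
+    h.step = 0.0001
+    (f(2 + h.step) - f(2)) / h.step # approximates c(2*2, cos(2))
+}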
+
+
+
+
+dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
+ p2, Index.corner, Aimat, Bmat, Cimat,
+ xij=NULL,
+ Structural.zero=NULL)
+{
+
+
+ if(length(Structural.zero))
+ stop("can't handle Structural.zero in dctda.fast.only()")
+
+ nn <- nrow(x2mat)
+ if(nrow(Cimat)!=p2 || ncol(Cimat)!=r)
+ stop("Cimat wrong shape")
+
+ fred <- kronecker(matrix(1,1,r), x2mat)
+ fred <- kronecker(fred, matrix(1,M,1))
+ barney <- kronecker(Aimat, matrix(1,1,p2))
+ barney <- kronecker(matrix(1,nn,1), barney)
+
+ temp <- array(t(barney*fred), c(p2*r, M, nn))
+ temp <- aperm(temp, c(2,1,3)) # M by p2*r by nn
+ temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp <- m2adefault(temp, M=p2*r) # Note M != M here!
+ G <- solve(apply(temp,1:2,sum)) # p2*r by p2*r
+
+ dc.da <- array(NA, c(p2, r, M, r)) # different from other functions
+ if(length(Index.corner) == M)
+ stop("can't handle full rank models yet")
+ cbindex <- (1:M)[-Index.corner] # complement of Index.corner
+ resid2 <- if(length(x1mat))
+ mux22(t(wz), zmat - x1mat %*% Bmat, M=M, upper= FALSE, as.mat= TRUE) else
+ mux22(t(wz), zmat , M=M, upper= FALSE, as.mat= TRUE)
+
+ for(s in 1:r)
+ for(tt in cbindex) {
+ fred <- t(x2mat) * matrix(resid2[,tt], p2, nn, byrow= TRUE) # p2 * nn
+ temp2 <- kronecker(ei(s,r), apply(fred,1,sum))
+ for(k in 1:r) {
+ Wiak <- mux22(t(wz), matrix(Aimat[,k], nn, M, byrow= TRUE),
+ M=M, upper= FALSE, as.mat= TRUE) # nn * M
+ wxx <- Wiak[,tt] * x2mat
+ blocki <- t(x2mat) %*% wxx
+ temp4a <- blocki %*% Cimat[,k]
+ if(k==1) {
+ temp4b <- blocki %*% Cimat[,s]
+ }
+ temp2 = temp2 - kronecker(ei(s,r), temp4a) -
+ kronecker(ei(k,r), temp4b)
+ }
+ dc.da[,,tt,s] <- G %*% temp2
+ }
+ ans1 <- dc.da[,,cbindex,,drop=FALSE] # p2 x r x (M-r) x r
+ ans1 <- aperm(ans1, c(2,1,3,4)) # r x p2 x (M-r) x r
+
+ ans1 <- matrix(c(ans1), r*p2, (M-r)*r)
+ ans1 <- t(ans1)
+ ans1
+}
+
+
+
+dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
+ intercept= TRUE, xij=NULL)
+{
+
+
+
+ nn <- nrow(xmat)
+
+ Aimat <- matrix(as.numeric(NA), M, r)
+ Aimat[Index.corner,] <- diag(r)
+ Aimat[-Index.corner,] <- theta # [-(1:M)]
+
+ if(intercept) {
+ Blist <- vector("list", pp+1)
+ Blist[[1]] <- diag(M)
+ for(i in 2:(pp+1))
+ Blist[[i]] <- Aimat
+ } else {
+ Blist <- vector("list", pp)
+ for(i in 1:(pp))
+ Blist[[i]] <- Aimat
+ }
+
+ coeffs <- vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
+ xij=xij)$mat.coef
+ c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
+
+
+ int.vec <- if(intercept) c3[,1] else 0 # \boldeta_0
+ Cimat <- if(intercept) t(c3[Index.corner,-1,drop=FALSE]) else
+ t(c3[Index.corner,,drop=FALSE])
+ if(nrow(Cimat)!=pp || ncol(Cimat)!=r)
+ stop("Cimat wrong shape")
+
+ fred <- kronecker(matrix(1,1,r), if(intercept) xmat[,-1,drop=FALSE] else xmat)
+ fred <- kronecker(fred, matrix(1,M,1))
+ barney <- kronecker(Aimat, matrix(1,1,pp))
+ barney <- kronecker(matrix(1,nn,1), barney)
+
+ temp <- array(t(barney*fred), c(r*pp,M,nn))
+ temp <- aperm(temp, c(2,1,3))
+ temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp <- m2adefault(temp, M=r*pp) # Note M != M here!
+ G <- solve(apply(temp,1:2,sum))
+
+ dc.da <- array(NA, c(pp,r,M,r)) # different from other functions
+ cbindex <- (1:M)[-Index.corner]
+ resid2 <- mux22(t(wz), z - matrix(int.vec, nn, M, byrow= TRUE), M=M,
+ upper= FALSE, as.mat= TRUE) # mat= TRUE,
+
+ for(s in 1:r)
+ for(tt in cbindex) {
+ fred <- (if(intercept) t(xmat[,-1,drop=FALSE]) else
+ t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
+ temp2 <- kronecker(ei(s,r), apply(fred,1,sum))
+
+ temp4 <- rep(0,pp)
+ for(k in 1:r) {
+ Wiak <- mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
+ M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
+ wxx <- Wiak[,tt] * (if(intercept) xmat[,-1,drop=FALSE] else xmat)
+ blocki <- (if(intercept) t(xmat[,-1,drop=FALSE]) else t(xmat)) %*% wxx
+ temp4 <- temp4 + blocki %*% Cimat[,k]
+ }
+ dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
+ }
+ ans1 <- dc.da[,,cbindex,,drop=FALSE] # pp x r x (M-r) x r
+ ans1 <- aperm(ans1, c(2,1,3,4)) # r x pp x (M-r) x r
+
+ ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow= TRUE)
+
+
+ detastar.da <- array(0,c(M,r,r,nn))
+ for(s in 1:r)
+ for(j in 1:r) {
+ t1 <- t(dc.da[,j,,s])
+ t1 <- matrix(t1, M, pp)
+ detastar.da[,j,s,] <- t1 %*% (if(intercept)
+ t(xmat[,-1,drop=FALSE]) else t(xmat))
+ }
+
+ etastar <- (if(intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
+ eta <- matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
+
+ sumWinv <- solve((m2adefault(t(apply(wz, 2, sum)), M=M))[,,1])
+
+ deta0.da <- array(0,c(M,M,r))
+ AtWi <- kronecker(matrix(1,nn,1), Aimat)
+ AtWi <- mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
+ AtWi <- array(t(AtWi), c(r,M,nn))
+ for(ss in 1:r) {
+ temp90 <- (m2adefault(t(apply(etastar[,ss]*wz,2,sum)), M=M))[,,1] #MxM
+ temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
+ temp93 <- mux7(temp92, AtWi)
+ temp91 <- apply(temp93, 1:2, sum) # M x M
+ deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
+ }
+ ans2 <- deta0.da[-(1:r),,,drop=FALSE] # (M-r) x M x r
+ ans2 <- aperm(ans2, c(1,3,2)) # (M-r) x r x M
+ ans2 <- matrix(c(ans2), (M-r)*r, M)
+
+ list(dc.da=ans1, dint.da=ans2)
+}
+
+
+
+rrr.deriv.rss <- function(theta, wz, U, z, M, r, xmat,
+ pp, Index.corner, intercept= TRUE,
+ xij=NULL)
+{
+
+ Amat <- matrix(as.numeric(NA), M, r)
+ Amat[Index.corner,] <- diag(r)
+ Amat[-Index.corner,] <- theta # [-(1:M)]
+
+ if(intercept) {
+ Blist <- vector("list", pp+1)
+ Blist[[1]] <- diag(M)
+ for(i in 2:(pp+1))
+ Blist[[i]] <- Amat
+ } else {
+ Blist <- vector("list", pp)
+ for(i in 1:(pp))
+ Blist[[i]] <- Amat
+ }
+
+ vlm.wfit(xmat, z, Blist, U=U, matrix.out= FALSE, rss= TRUE, xij=xij)$rss
+}
+
+
+
+
+rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
+ pp, Index.corner, intercept= TRUE, xij=NULL)
+{
+
+
+
+
+ nn <- nrow(xmat)
+
+ Aimat <- matrix(as.numeric(NA), M, r)
+ Aimat[Index.corner,] <- diag(r)
+ Aimat[-Index.corner,] <- theta # [-(1:M)]
+
+ if(intercept) {
+ Blist <- vector("list", pp+1)
+ Blist[[1]] <- diag(M)
+ for(i in 2:(pp+1))
+ Blist[[i]] <- Aimat
+ } else {
+ Blist <- vector("list", pp)
+ for(i in 1:(pp))
+ Blist[[i]] <- Aimat
+ }
+
+ coeffs <- vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
+ xij=xij)$mat.coef
+ c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
+
+
+ int.vec <- if(intercept) c3[,1] else 0 # \boldeta_0
+ Cimat <- if(intercept) t(c3[Index.corner,-1,drop=FALSE]) else
+ t(c3[Index.corner,,drop=FALSE])
+ if(nrow(Cimat)!=pp || ncol(Cimat)!=r)
+ stop("Cimat wrong shape")
+
+ fred <- kronecker(matrix(1,1,r), if(intercept) xmat[,-1,drop=FALSE] else xmat)
+ fred <- kronecker(fred, matrix(1,M,1))
+ barney <- kronecker(Aimat, matrix(1,1,pp))
+ barney <- kronecker(matrix(1,nn,1), barney)
+
+ temp <- array(t(barney*fred), c(r*pp,M,nn))
+ temp <- aperm(temp, c(2,1,3))
+ temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp <- m2adefault(temp, M=r*pp) # Note: this "M" is r*pp, not the original M
+ G <- solve(apply(temp,1:2,sum))
+
+ dc.da <- array(NA,c(pp,r,r,M))
+ cbindex <- (1:M)[-Index.corner]
+ resid2 <- mux22(t(wz), z - matrix(int.vec,nn,M,byrow= TRUE), M=M,
+ upper= FALSE, as.mat= TRUE) # mat= TRUE,
+
+ for(s in 1:r)
+ for(tt in cbindex) {
+ fred <- (if(intercept) t(xmat[,-1,drop=FALSE]) else
+ t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
+ temp2 <- kronecker(ei(s,r), apply(fred,1,sum))
+
+ temp4 <- rep(0,pp)
+ for(k in 1:r) {
+ Wiak <- mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
+ M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
+ wxx <- Wiak[,tt] * (if(intercept) xmat[,-1,drop=FALSE] else xmat)
+ blocki <- (if(intercept) t(xmat[,-1,drop=FALSE]) else t(xmat)) %*% wxx
+ temp4 <- temp4 + blocki %*% Cimat[,k]
+ }
+ dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
+ }
+
+ detastar.da <- array(0,c(M,r,r,nn))
+ for(s in 1:r)
+ for(j in 1:r) {
+ t1 <- t(dc.da[,j,s,])
+ t1 <- matrix(t1, M, pp)
+ detastar.da[,j,s,] <- t1 %*% (if(intercept)
+ t(xmat[,-1,drop=FALSE]) else t(xmat))
+ }
+
+ etastar <- (if(intercept) xmat[,-1,drop=FALSE] else xmat) %*% Cimat
+ eta <- matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
+
+ sumWinv <- solve((m2adefault(t(apply(wz, 2, sum)), M=M))[,,1])
+
+ deta0.da <- array(0,c(M,M,r))
+
+ AtWi <- kronecker(matrix(1,nn,1), Aimat)
+ AtWi <- mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
+ AtWi <- array(t(AtWi), c(r,M,nn))
+
+ for(ss in 1:r) {
+ temp90 <- (m2adefault(t(apply(etastar[,ss]*wz,2,sum)), M=M))[,,1] # M x M
+ temp92 <- array(detastar.da[,,ss,],c(M,r,nn))
+ temp93 <- mux7(temp92,AtWi)
+ temp91 <- apply(temp93,1:2,sum) # M x M
+ deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
+ }
+
+ ans <- matrix(0,M,r)
+ fred <- mux22(t(wz), z-eta, M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
+ fred.array <- array(t(fred %*% Aimat),c(r,1,nn))
+ for(s in 1:r) {
+ a1 <- apply(fred %*% t(deta0.da[,,s]),2,sum)
+ a2 <- apply(fred * etastar[,s],2,sum)
+ temp92 <- array(detastar.da[,,s,],c(M,r,nn))
+ temp93 <- mux7(temp92, fred.array)
+ a3 <- apply(temp93,1:2,sum)
+ ans[,s] <- a1 + a2 + a3
+ }
+
+ ans <- -2 * c(ans[cbindex,])
+
+ ans
+}
+
+
+
+
+vellipse = function(R, ratio=1, orientation=0, center=c(0,0), N=300) {
+ if(length(center) != 2) stop("center must be of length 2")
+ theta = 2*pi*(0:N)/N
+ x1 = R*cos(theta)
+ y1 = ratio*R*sin(theta)
+ x = center[1] + cos(orientation)*x1 - sin(orientation)*y1
+ y = center[2] + sin(orientation)*x1 + cos(orientation)*y1
+ cbind(x, y)
+}
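+
+# Example (a sketch, not upstream code): vellipse() returns N+1 points
+# on an ellipse of "radius" R, axis ratio `ratio`, rotated by
+# `orientation` radians about `center`:
+# plot(vellipse(R=1), type="l", asp=1)
+# lines(vellipse(R=1, ratio=0.5, orientation=pi/4, center=c(1,2)))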
+
+
+biplot.qrrvglm = function(x, ...) {
+ stop("biplot.qrrvglm has been replaced by the function lvplot.qrrvglm")
+}
+
+
+lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
+ add= FALSE, plot.it= TRUE, rug= TRUE, y = FALSE,
+ type=c("fitted.values", "predictors"),
+ xlab=paste("Latent Variable", if(Rank==1) "" else " 1", sep=""),
+ ylab=if(Rank==1) switch(type, predictors="Predictors",
+ fitted.values="Fitted values") else "Latent Variable 2",
+ pcex=par()$cex, pcol=par()$col, pch=par()$pch,
+ llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
+ label.arg= FALSE, adj.arg=-0.1,
+ ellipse=0.95, Absolute= FALSE,
+ elty=par()$lty, ecol=par()$col, elwd=par()$lwd, egrid=200,
+ chull.arg= FALSE, clty=2, ccol=par()$col, clwd=par()$lwd,
+ cpch = " ",
+ C = FALSE,
+ OriginC = c("origin","mean"),
+ Clty=par()$lty, Ccol=par()$col, Clwd=par()$lwd,
+ Ccex=par()$cex, Cadj.arg=-0.1, stretchC=1,
+ sites= FALSE, spch=NULL, scol=par()$col, scex=par()$cex,
+ sfont=par()$font,
+ check.ok = TRUE, ...)
+{
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("fitted.values", "predictors"))[1]
+
+ if(is.numeric(OriginC)) OriginC = rep(OriginC, len=2) else {
+ if(mode(OriginC) != "character" && mode(OriginC) != "name")
+ OriginC <- as.character(substitute(OriginC))
+ OriginC <- match.arg(OriginC, c("origin","mean"))[1]
+ }
+
+ if(length(ellipse) > 1) stop("ellipse must be of length 1 or 0")
+ if(is.logical(ellipse)) {ellipse = if(ellipse) 0.95 else NULL}
+
+ Rank <- object@control$Rank
+ if(Rank > 2)
+ stop("can only handle rank 1 or 2 models")
+ M = object@misc$M
+ NOS = ncol(object@y)
+ MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ n = object@misc$n
+ colx2.index = object@control$colx2.index
+ cx1i = object@control$colx1.index
+ if(check.ok)
+ if(!(length(cx1i)==1 && names(cx1i)=="(Intercept)"))
+ stop("latent variable plots are available only for Norrr = ~ 1 models")
+
+ Coef.list = Coef(object, varlvI = varlvI, reference = reference)
+ if( C) Cmat = Coef.list@C
+ nustar = Coef.list@lv # n x Rank
+
+ if(!plot.it) return(nustar)
+
+ r.curves = slot(object, type) # n times M (\boldeta or \boldmu)
+ if(!add) {
+ if(Rank==1) {
+ matplot(nustar,
+ if( y && type=="fitted.values") object@y else r.curves,
+ type="n", xlab=xlab, ylab=ylab, ...)
+ } else { # Rank==2
+ matplot(c(Coef.list@Optimum[1,], nustar[,1]),
+ c(Coef.list@Optimum[2,], nustar[,2]),
+ type="n", xlab=xlab, ylab=ylab, ...)
+ }
+ }
+
+ if((length(pch) != 1 && length(pch) != ncol(r.curves)) ||
+ (length(pcol) != 1 && length(pcol) != ncol(r.curves)) ||
+ (length(pcex) != 1 && length(pcex) != ncol(r.curves)))
+ stop("pch, pcol and pcex must be of length 1 or ncol(r.curves)")
+
+ pch <- rep(pch, leng=ncol(r.curves))
+ pcol <- rep(pcol, leng=ncol(r.curves))
+ pcex <- rep(pcex, leng=ncol(r.curves))
+ llty <- rep(llty, leng=ncol(r.curves))
+ lcol <- rep(lcol, leng=ncol(r.curves))
+ llwd <- rep(llwd, leng=ncol(r.curves))
+ elty <- rep(elty, leng=ncol(r.curves))
+ ecol <- rep(ecol, leng=ncol(r.curves))
+ elwd <- rep(elwd, leng=ncol(r.curves))
+ adj.arg <- rep(adj.arg, leng=ncol(r.curves))
+ if( C ) {
+ Clwd <- rep(Clwd, leng=nrow(Cmat))
+ Clty <- rep(Clty, leng=nrow(Cmat))
+ Ccol <- rep(Ccol, leng=nrow(Cmat))
+ Cadj.arg <- rep(Cadj.arg, leng=nrow(Cmat))
+ Ccex <- rep(Ccex, leng=nrow(Cmat))
+ }
+
+ if(Rank==1) {
+ for(i in 1:ncol(r.curves)) {
+ xx = nustar
+ yy = r.curves[,i]
+ o = sort.list(xx)
+ xx = xx[o]
+ yy = yy[o]
+ lines(xx, yy, col=lcol[i], lwd=llwd[i], lty=llty[i])
+ if( y && type=="fitted.values") {
+ ypts = object@y
+ if(ncol(as.matrix(ypts)) == ncol(r.curves))
+ points(xx, ypts[o,i], col=pcol[i], cex=pcex[i], pch=pch[i])
+ }
+ }
+ if(rug) rug(xx)
+ } else {
+ for(i in 1:ncol(r.curves))
+ points(Coef.list@Optimum[1,i], Coef.list@Optimum[2,i],
+ col=pcol[i], cex=pcex[i], pch=pch[i])
+ if(label.arg) {
+ for(i in 1:ncol(r.curves))
+ text(Coef.list@Optimum[1,i], Coef.list@Optimum[2,i],
+ labels=(dimnames(Coef.list@Optimum)[[2]])[i],
+ adj=adj.arg[i], col=pcol[i], cex=pcex[i])
+ }
+ if(chull.arg) {
+ hull = chull(nustar[,1], nustar[,2])
+ hull = c(hull, hull[1])
+ lines(nustar[hull,1], nustar[hull,2], type="b", pch=cpch,
+ lty=clty, col=ccol, lwd=clwd)
+ }
+ if(length(ellipse)) {
+ ellipse.temp = if(ellipse > 0) ellipse else 0.95
+ if(ellipse < 0 && (!object@control$EqualTolerances || varlvI))
+ stop(paste("an equal-tolerances assumption and varlvI=FALSE",
+ "are needed for \"ellipse\" < 0"))
+ if( check.ok ) {
+ colx1.index = object@control$colx1.index
+ if(!(length(colx1.index)==1 &&
+ names(colx1.index)=="(Intercept)"))
+ stop("can only plot ellipses for intercept-only models")
+ }
+ for(i in 1:ncol(r.curves)) {
+ cutpoint = object@family@link(if(Absolute) ellipse.temp
+ else Coef.list@Maximum[i] * ellipse.temp,
+ extra=object@extra)
+ if(MSratio > 1)
+ cutpoint = cutpoint[1,1]
+
+ cutpoint = object@family@link(Coef.list@Maximum[i],
+ extra=object@extra) - cutpoint
+ if(is.finite(cutpoint) && cutpoint > 0) {
+ Mmat = diag(rep(ifelse(object@control$Crow1positive, 1, -1),
+ len=Rank))
+ etoli = eigen(t(Mmat) %*% Coef.list@Tolerance[,,i] %*% Mmat)
+ A=ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
+ B=ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
+ if(ellipse < 0) A = B = -ellipse / 2
+
+ theta.angle = asin(etoli$vector[2,1]) *
+ ifelse(object@control$Crow1positive[2], 1, -1)
+ if(object@control$Crow1positive[1])
+ theta.angle = pi - theta.angle
+ if(all(is.finite(c(A,B))))
+ lines(vellipse(R=2*A, ratio=B/A, orientation=theta.angle,
+ center=Coef.list@Optimum[,i], N=egrid),
+ lwd=elwd[i], col=ecol[i], lty=elty[i])
+ }
+ }
+ }
+
+ if( C ) {
+ if(is.character(OriginC) && OriginC=="mean")
+ OriginC = c(mean(nustar[,1]), mean(nustar[,2]))
+ if(is.character(OriginC) && OriginC=="origin")
+ OriginC = c(0,0)
+ for(i in 1:nrow(Cmat))
+ arrows(x0=OriginC[1], y0=OriginC[2],
+ x1=OriginC[1] + stretchC*Cmat[i,1],
+ y1=OriginC[2] + stretchC*Cmat[i,2],
+ lty=Clty[i], col=Ccol[i], lwd=Clwd[i])
+ if(label.arg) {
+ temp200 = dimnames(Cmat)[[1]]
+ for(i in 1:nrow(Cmat))
+ text(OriginC[1] + stretchC*Cmat[i,1],
+ OriginC[2] + stretchC*Cmat[i,2], col=Ccol[i],
+ labels=temp200[i], adj=Cadj.arg[i], cex=Ccex[i])
+ }
+ }
+ if(sites) {
+ text(nustar[,1], nustar[,2], adj=0.5,
+ labels=if(is.null(spch)) dimnames(nustar)[[1]] else
+ rep(spch, length=nrow(nustar)), col=scol, cex=scex, font=sfont)
+ }
+ }
+ invisible(nustar)
+}
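+
+# Usage sketch (hedged; assumes a rank-2 CQO fit to the hspider data
+# shipped with this package -- the exact call is illustrative only):
+# p2 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~ WaterCon +
+# BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+# fam=poissonff, data=hspider, Rank=2)
+# lvplot(p2, label.arg=TRUE, ellipse=0.95, C=TRUE, chull.arg=TRUE)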
+
+
+
+lvplot.rrvglm = function(object,
+ A=TRUE,
+ C=TRUE,
+ scores=FALSE, plot.it= TRUE,
+ groups=rep(1,n),
+ gapC=sqrt(sum(par()$cxy^2)), scaleA=1,
+ xlab="Latent Variable 1",
+ ylab="Latent Variable 2",
+ Alabels= if(length(object@misc$predictors.names))
+ object@misc$predictors.names else paste("LP", 1:M, sep=""),
+ Aadj=par()$adj,
+ Acex=par()$cex,
+ Acol=par()$col,
+ Apch=NULL,
+ Clabels=dimnames(Cmat)[[1]],
+ Cadj=par()$adj,
+ Ccex=par()$cex,
+ Ccol=par()$col,
+ Clty=par()$lty,
+ Clwd=par()$lwd,
+ chull.arg=FALSE,
+ ccex=par()$cex,
+ ccol=par()$col,
+ clty=par()$lty,
+ clwd=par()$lwd,
+ spch=NULL,
+ scex=par()$cex,
+ scol=par()$col,
+ slabels=dimnames(x2mat)[[1]],
+ ...)
+{
+
+
+ if(object@control$Rank != 2 && plot.it)
+ stop("can only handle rank-2 models")
+ M = object@misc$M
+ n = object@misc$n
+ colx1.index = object@control$colx1.index
+ colx2.index = object@control$colx2.index
+ p1 = length(colx1.index)
+ Coef.list = Coef(object)
+ Amat = Coef.list@A
+ Cmat = Coef.list@C
+
+ Amat = Amat * scaleA
+ dimnames(Amat) = list(object@misc$predictors.names, NULL)
+ Cmat = Cmat / scaleA
+
+ if(!length(object@x)) {
+ object@x = model.matrixvlm(object, type="lm")
+ }
+ x2mat = object@x[,colx2.index,drop=FALSE]
+ nuhat = x2mat %*% Cmat
+ if(!plot.it) return(as.matrix(nuhat))
+
+ index.nosz = 1:M # index of no structural zeros; zz
+ allmat = rbind(if(A) Amat else NULL,
+ if(C) Cmat else NULL,
+ if(scores) nuhat else NULL)
+
+ plot(allmat[,1], allmat[,2], type="n",
+ xlab=xlab, ylab=ylab, ...) # xlim etc. supplied through ...
+
+ if(A) {
+ Aadj = rep(Aadj, len=length(index.nosz))
+ Acex = rep(Acex, len=length(index.nosz))
+ Acol = rep(Acol, len=length(index.nosz))
+ if(length(Alabels) != M) stop(paste("Alabels must be of length", M))
+ if(length(Apch)) {
+ Apch = rep(Apch, len=length(index.nosz))
+ for(i in index.nosz)
+ points(Amat[i,1],Amat[i,2],pch=Apch[i],cex=Acex[i],col=Acol[i])
+ } else {
+ for(i in index.nosz)
+ text(Amat[i,1], Amat[i,2], Alabels[i], cex=Acex[i],
+ col=Acol[i], adj=Aadj[i])
+ }
+ }
+
+ if(C) {
+ p2 = nrow(Cmat)
+ gapC = rep(gapC, len=p2)
+ Cadj = rep(Cadj, len=p2)
+ Ccex = rep(Ccex, len=p2)
+ Ccol = rep(Ccol, len=p2)
+ Clwd = rep(Clwd, len=p2)
+ Clty = rep(Clty, len=p2)
+ if(length(Clabels) != p2)
+ stop(paste("length(Clabels) must be equal to", p2))
+ for(i in 1:p2) {
+ if(is.R()) arrows(0, 0, Cmat[i,1], Cmat[i,2],
+ lwd=Clwd[i], lty=Clty[i], col=Ccol[i]) else
+ arrows(0,0,Cmat[i,1],Cmat[i,2],open=TRUE,
+ lwd=Clwd[i], lty=Clty[i], col=Ccol[i])
+ const = 1 + gapC[i] / sqrt(Cmat[i,1]^2 + Cmat[i,2]^2)
+ text(const*Cmat[i,1], const*Cmat[i,2], Clabels[i], cex=Ccex[i],
+ adj=Cadj[i], col=Ccol[i])
+ }
+ }
+
+ if(scores) {
+ ugrp = unique(groups)
+ nlev = length(ugrp) # number of groups
+ clty = rep(clty, len=nlev)
+ clwd = rep(clwd, len=nlev)
+ ccol = rep(ccol, len=nlev)
+ if(length(spch))
+ spch = rep(spch, len=n)
+ scol = rep(scol, len=n)
+ scex = rep(scex, len=n)
+ for(i in ugrp) {
+ gp = groups==i
+ if(nlev > 1 && (length(unique(spch[gp])) != 1 ||
+ length(unique(scol[gp])) != 1 ||
+ length(unique(scex[gp])) != 1))
+ warning(paste("spch/scol/scex is different for individuals",
+ "from the same group"))
+
+ temp = nuhat[gp,,drop=FALSE]
+ if(length(spch)) {
+ points(temp[,1], temp[,2], cex=scex[gp], pch=spch[gp],
+ col=scol[gp])
+ } else {
+ text(temp[,1], temp[,2], label=slabels, cex=scex[gp],
+ col=scol[gp])
+ }
+ if(chull.arg) {
+ hull = chull(temp[,1],temp[,2])
+ hull = c(hull, hull[1])
+ lines(temp[hull,1], temp[hull,2], type="b", lty=clty[i],
+ col=ccol[i], lwd=clwd[i], pch=" ")
+ }
+ }
+ }
+
+ invisible(nuhat)
+}
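+
+# Usage sketch (illustrative; mydata, y* and x* are hypothetical). For a
+# rank-2 rrvglm() fit the plot overlays the rows of A (one point or
+# label per linear predictor) and of C (one arrow per x2 variable),
+# giving a biplot:
+# fit <- rrvglm(cbind(y1,y2,y3,y4) ~ x2 + x3 + x4, poissonff,
+# data=mydata, Rank=2)
+# lvplot(fit, scores=TRUE, spch=20)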
+
+
+
+
+
+
+Coef.rrvglm <- function(object, ...) {
+ M <- object@misc$M
+ n <- object@misc$n
+ colx1.index = object@control$colx1.index
+ colx2.index = object@control$colx2.index
+ p1 = length(colx1.index)
+ Amat <- object@constraints[[colx2.index[1]]]
+ B1mat <- if(p1) coef(object, mat= TRUE)[colx1.index,,drop=FALSE] else NULL
+ C.try <- coef(object, mat= TRUE)[colx2.index,,drop=FALSE]
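+ # For the x2 rows the fitted coefficient matrix equals C %*% t(A), so
+ # right-multiplying by A %*% solve(t(A) %*% A) recovers C (comment
+ # added for clarity; not in the upstream sources).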
+ Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
+
+
+ Rank = object@control$Rank
+ lv.names = if(Rank>1) paste("lv", 1:Rank, sep="") else "lv"
+ dimnames(Amat) = list(object@misc$predictors.names, lv.names)
+ dimnames(Cmat) = list(dimnames(Cmat)[[1]], lv.names)
+
+ ans = new(Class="Coef.rrvglm",
+ A = Amat,
+ B1 = B1mat,
+ C = Cmat,
+ Rank = Rank,
+ colx1.index = colx1.index,
+ colx2.index = colx2.index)
+ if(object@control$Corner)
+ ans@Atilde = Amat[-c(object@control$Index.corner,
+ object@control$Structural.zero),,drop=FALSE]
+ ans
+}
+
+setMethod("Coef", "rrvglm", function(object, ...) Coef.rrvglm(object, ...))
+
+printCoef.rrvglm = function(x, ...) {
+
+ object = x
+
+ cat("\nA matrix:\n")
+ print(object at A, ...)
+ cat("\n")
+
+ cat("\nC matrix:\n")
+ print(object at C, ...)
+ cat("\n")
+
+ cat("\nB1 matrix:\n")
+ print(object at B1, ...)
+ cat("\n")
+
+ invisible(object)
+}
+
+
+if(is.R()) {
+ if(!isGeneric("biplot"))
+ setGeneric("biplot", function(x, ...) standardGeneric("biplot"))
+}
+
+
+setMethod("Coef", "qrrvglm", function(object, ...) Coef.qrrvglm(object, ...))
+
+
+
+setMethod("biplot", "qrrvglm",
+ function(x, ...) {
+ biplot.qrrvglm(x, ...)})
+
+setMethod("lvplot", "qrrvglm",
+ function(object, ...) {
+ invisible(lvplot.qrrvglm(object, ...))})
+
+setMethod("lvplot", "rrvglm",
+ function(object, ...) {
+ invisible(lvplot.rrvglm(object, ...))})
+
+
+biplot.rrvglm = function(x, ...)
+ lvplot(object=x, ...)
+
+setMethod("biplot", "rrvglm", function(x, ...)
+ invisible(biplot.rrvglm(x, ...)))
+
+
+
+
+summary.qrrvglm = function(object,
+ varlvI = FALSE, reference = NULL, ...) {
+ answer = object
+ answer@post$Coef = Coef(object, varlvI = varlvI, reference = reference,
+ ...) # Store it here; non-elegant
+
+ if(length((answer@post$Coef)@dispersion) &&
+ length(object@misc$estimated.dispersion) &&
+ object@misc$estimated.dispersion)
+ answer@dispersion =
+ answer@misc$dispersion = (answer@post$Coef)@dispersion
+
+ class(answer) = "summary.qrrvglm"
+ answer
+}
+
+printsummary.qrrvglm = function(x, ...) {
+
+
+
+ cat("\nCall:\n")
+ dput(x@call)
+
+ print(x@post$Coef, ...) # non-elegant programming
+
+ if(length(x@dispersion) > 1) {
+ cat("\nDispersion parameters:\n")
+ if(length(x@misc$ynames)) {
+ names(x@dispersion) = x@misc$ynames
+ print(x@dispersion, ...)
+ } else
+ cat(x@dispersion, fill=TRUE)
+ cat("\n")
+ } else if(length(x@dispersion) == 1) {
+ cat("\nDispersion parameter: ", x@dispersion, "\n")
+ }
+
+}
+
+setClass("summary.qrrvglm", representation("qrrvglm"))
+
+setMethod("summary", "qrrvglm",
+ function(object, ...)
+ summary.qrrvglm(object, ...))
+
+setMethod("print", "summary.qrrvglm",
+ function(x, ...)
+ invisible(printsummary.qrrvglm(x, ...)))
+
+setMethod("show", "summary.qrrvglm",
+ function(object)
+ invisible(printsummary.qrrvglm(object)))
+
+setMethod("print", "Coef.rrvglm", function(x, ...)
+ invisible(printCoef.rrvglm(x, ...)))
+
+setMethod("show", "Coef.rrvglm", function(object)
+ invisible(printCoef.rrvglm(object)))
+
+
+
+
+
+grc = function(y, Rank=1, Index.corner=2:(1+Rank), Structural.zero=1,
+ summary.arg= FALSE, h.step=0.0001, ...) {
+
+
+
+ myrrcontrol = rrvglm.control(Rank=Rank, Index.corner=Index.corner,
+ Structural.zero = Structural.zero, ...)
+ object.save = y
+ if(is(y, "rrvglm")) {
+ y = object.save@y
+ } else {
+ y = as.matrix(y)
+ class(y) = "matrix" # Needed in R
+ }
+ if(length(dim(y)) != 2 || nrow(y) < 3 || ncol(y) < 3)
+ stop("y must be a matrix with >= 3 rows & columns, or a rrvglm() object")
+
+ ei = function(i, n) diag(n)[,i,drop=FALSE]
+ .grc.df = data.frame(Row2 = ei(2, nrow(y)))
+
+ yn1 = if(length(dimnames(y)[[1]])) dimnames(y)[[1]] else
+ paste("x2", 1:nrow(y), sep="")
+ warn.save = options()$warn
+ options(warn=-3) # Suppress the warnings (hopefully, temporarily)
+ if(any(!is.na(as.numeric(substring(yn1, 1, 1)))))
+ yn1 = paste("x2", 1:nrow(y), sep="")
+ options(warn=warn.save)
+
+ Row = factor(1:nrow(y))
+ modmat.row = if(is.R()) model.matrix(~ Row) else {
+ tmp3 = contrasts(Row)
+ dimnames(tmp3) = list(dimnames(tmp3)[[1]], paste("Row", 2:nrow(y), sep=""))
+ cbind("(Intercept)"=1, tmp3)
+ }
+ Col = factor(1:ncol(y))
+ modmat.col = if(is.R()) model.matrix(~ Col) else {
+ tmp3 = contrasts(Col)
+ dimnames(tmp3) = list(dimnames(tmp3)[[1]], paste("Col", 2:ncol(y), sep=""))
+ cbind("(Intercept)"=1, tmp3)
+ }
+
+ cms = list("(Intercept)" = matrix(1, ncol(y), 1))
+ for(i in 2:nrow(y)) {
+ cms[[paste("Row", i, sep="")]] = matrix(1, ncol(y), 1)
+ .grc.df[[paste("Row", i, sep="")]] = modmat.row[,i]
+ }
+ for(i in 2:ncol(y)) {
+ cms[[paste("Col", i, sep="")]] = modmat.col[,i,drop=FALSE]
+ .grc.df[[paste("Col", i, sep="")]] = rep(1, nrow(y))
+ }
+ for(i in 2:nrow(y)) {
+ cms[[yn1[i]]] = diag(ncol(y))
+ .grc.df[[yn1[i]]] = ei(i, nrow(y))
+ }
+
+ dimnames(.grc.df) = list(if(length(dimnames(y)[[1]])) dimnames(y)[[1]] else
+ as.character(1:nrow(y)),
+ dimnames(.grc.df)[[2]])
+
+ str1 = "~ Row2"
+ if(nrow(y)>2)
+ for(i in 3:nrow(y))
+ str1 = paste(str1, paste("Row", i, sep=""), sep=" + ")
+ for(i in 2:ncol(y))
+ str1 = paste(str1, paste("Col", i, sep=""), sep=" + ")
+ str2 = paste("y ", str1)
+ for(i in 2:nrow(y))
+ str2 = paste(str2, yn1[i], sep=" + ")
+ myrrcontrol$Norrr = as.formula(str1) # Overwrite this
+
+ if(is.R()) assign(".grc.df", .grc.df, envir = VGAMenv) else
+ .grc.df <<- .grc.df
+
+ warn.save = options()$warn
+ options(warn=-3) # Suppress the warnings (hopefully, temporarily)
+ answer = if(is(object.save, "rrvglm")) object.save else
+ rrvglm(as.formula(str2), fam=poissonff,
+ constraints=cms, control=myrrcontrol, data=.grc.df)
+ options(warn=warn.save)
+
+ if(summary.arg) {
+ class(answer) = "rrvglm"
+ answer = summary.rrvglm(answer, h.step=h.step)
+ } else {
+ class(answer) = "grc"
+ }
+
+ if(is.R()) {
+ if(exists(".grc.df", envir = VGAMenv))
+ rm(".grc.df", envir = VGAMenv)
+ } else {
+ remove(".grc.df")
+ }
+
+ answer
+}
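+
+# Usage sketch (hedged): grc() fits a Goodman RC-type association model
+# to a matrix of counts, e.g.
+# set.seed(1)
+# tab <- matrix(rpois(30, lambda=5), 5, 6) # a hypothetical 5 x 6 table
+# fit <- grc(tab, Rank=1)
+# summary.grc(fit) # refits internally with summary.arg=TRUE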
+
+summary.grc = function(object, ...) {
+ grc(object, summary.arg= TRUE, ...)
+}
+
+
+
+
+
+trplot.qrrvglm = function(object,
+ whichSpecies=NULL,
+ add=FALSE, plot.it=TRUE,
+ label.sites=FALSE,
+ sitenames = dimnames(object@y)[[1]],
+ axes.equal = TRUE,
+ cex=par()$cex,
+ col=1:(nos*(nos-1)/2),
+ log="",
+ lty = rep(par()$lty, len=nos*(nos-1)/2),
+ lwd = rep(par()$lwd, len=nos*(nos-1)/2),
+ tcol= rep(par()$col, len=nos*(nos-1)/2),
+ xlab = NULL, ylab = NULL,
+ main="", # "Trajectory plot",
+ type="b", check.ok=TRUE, ...) {
+ coef.obj = Coef(object) # use defaults for those two arguments
+ if(coef.obj@Rank != 1) stop("object must be a rank-1 model")
+ fv = fitted(object)
+ modelno = object@control$modelno # 1,2,3, or 0
+ NOS = ncol(fv) # Number of species
+ M = object@misc$M
+ nn = nrow(fv) # Number of sites
+ if(length(sitenames))
+ sitenames = rep(sitenames, len=nn)
+ sppNames = dimnames(object@y)[[2]]
+ if(!length(whichSpecies)) {
+ whichSpecies = sppNames[1:NOS]
+ whichSpecies.numer = 1:NOS
+ } else
+ if(is.numeric(whichSpecies)) {
+ whichSpecies.numer = whichSpecies
+ whichSpecies = sppNames[whichSpecies.numer] # Convert to character
+ } else
+ whichSpecies.numer = match(whichSpecies, sppNames)
+ nos = length(whichSpecies) # nos = number of species to be plotted
+
+ if(length(whichSpecies.numer) <= 1)
+ stop("must have at least 2 species to be plotted")
+ cx1i = object@control$colx1.index
+ if(check.ok)
+ if(!(length(cx1i)==1 && names(cx1i)=="(Intercept)"))
+ stop("trajectory plots are available only for Norrr = ~ 1 models")
+
+ first.spp = iam(1,1,M=M,both=TRUE,diag=FALSE)$row.index
+ second.spp = iam(1,1,M=M,both=TRUE,diag=FALSE)$col.index
+ myxlab = if(length(whichSpecies.numer)==2) {
+ paste("Fitted value for",
+ if(is.character(whichSpecies.numer)) whichSpecies.numer[1] else
+ sppNames[whichSpecies.numer[1]])
+ } else "Fitted value for 'first' species"
+ myxlab = if(length(xlab)) xlab else myxlab
+ myylab = if(length(whichSpecies.numer)==2) {
+ paste("Fitted value for",
+ if(is.character(whichSpecies.numer)) whichSpecies.numer[2] else
+ sppNames[whichSpecies.numer[2]])
+ } else "Fitted value for 'second' species"
+ myylab = if(length(ylab)) ylab else myylab
+ if(!add) {
+ xxx = if(axes.equal) fv[,whichSpecies.numer] else
+ fv[,whichSpecies.numer[first.spp]]
+ yyy = if(axes.equal) fv[,whichSpecies.numer] else
+ fv[,whichSpecies.numer[second.spp]]
+ matplot(xxx, yyy, type="n", log=log, xlab=myxlab,
+ ylab=myylab, main=main, ...)
+ }
+
+ lwd = rep(lwd, len=nos*(nos-1)/2)
+ col = rep(col, len=nos*(nos-1)/2)
+ lty = rep(lty, len=nos*(nos-1)/2)
+ tcol = rep(tcol, len=nos*(nos-1)/2)
+
+ oo = order(coef.obj@lv) # Sort by the latent variable
+ ii = 0
+ col = rep(col, length=nos*(nos-1)/2)
+ species.names = NULL
+ if(plot.it)
+ for(i1 in seq(whichSpecies.numer)) {
+ for(i2 in seq(whichSpecies.numer))
+ if(i1 < i2) {
+ ii = ii + 1
+ species.names = rbind(species.names,
+ cbind(sppNames[i1], sppNames[i2]))
+ matplot(fv[oo,whichSpecies.numer[i1]],
+ fv[oo,whichSpecies.numer[i2]],
+ type=type, add=TRUE,
+ lty=lty[ii], lwd=lwd[ii], col=col[ii],
+ pch = if(label.sites) " " else "*" )
+ if(label.sites && length(sitenames))
+ text(fv[oo,whichSpecies.numer[i1]],
+ fv[oo,whichSpecies.numer[i2]],
+ labels=sitenames[oo], cex=cex, col=tcol[ii])
+ }
+ }
+ invisible(list(species.names=species.names,
+ sitenames=sitenames[oo]))
+}
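+
+# Usage sketch (hedged; assumes a rank-1 CQO fit such as):
+# p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Auloalbi) ~ WaterCon +
+# BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+# fam=poissonff, data=hspider)
+# trplot(p1, whichSpecies=1:3, log="xy", lwd=2)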
+
+if(!isGeneric("trplot"))
+ setGeneric("trplot", function(object, ...) standardGeneric("trplot"))
+setMethod("trplot", "qrrvglm", function(object, ...) trplot.qrrvglm(object, ...))
+
+
+
+
+vcovrrvglm = function(object, ...) {
+ summary.rrvglm(object, ...)@cov.unscaled
+}
+
+
+
+vcovqrrvglm = function(object,
+ ITolerances = object@control$EqualTolerances,
+ MaxScale = c("predictors", "response"),
+ dispersion = rep(if(length(sobj@dispersion)) sobj@dispersion else 1,
+ len=M), ...) {
+ stop("this function is not yet completed")
+
+ if(mode(MaxScale) != "character" && mode(MaxScale) != "name")
+ MaxScale <- as.character(substitute(MaxScale))
+ MaxScale <- match.arg(MaxScale, c("predictors", "response"))[1]
+ if(MaxScale != "predictors")
+ stop("can currently only handle MaxScale=\"predictors\"")
+
+ sobj = summary(object)
+ cobj = Coef(object, ITolerances = ITolerances, ...)
+ M = nrow(cobj@A)
+ dispersion = rep(dispersion, len=M)
+ if(cobj@Rank != 1)
+ stop("object must be a rank-1 model")
+
+ dvecMax = cbind(1, -0.5 * cobj@A / c(cobj@D), (cobj@A / c(2*cobj@D))^2)
+ dvecTol = cbind(0, 0, 1 / c(-2 * cobj@D)^1.5)
+ dvecOpt = cbind(0, -0.5 / c(cobj@D), 0.5 * cobj@A / c(cobj@D^2))
+
+ if((length(object@control$colx1.index) != 1) ||
+ (names(object@control$colx1.index) != "(Intercept)"))
+ stop("Can only handle Norrr=~1 models")
+ okvals=c(3*M,2*M+1) # Tries to correspond to EqualTol==c(FALSE,TRUE) resp.
+ if(all(length(coef(object)) != okvals))
+ stop("Can only handle intercepts-only models")
+
+ answer = NULL
+ Cov.unscaled = array(NA, c(3,3,M), dimnames=list(
+ c("(Intercept)", "lv", "lv^2"),
+ c("(Intercept)", "lv", "lv^2"), dimnames(cobj at D)[[3]]))
+ for(spp in 1:M) {
+ index = c(M+ifelse(object@control$EqualTolerances, 1, M) + spp,
+ spp,
+ M+ifelse(object@control$EqualTolerances, 1, spp))
+ vcov = Cov.unscaled[,,spp] =
+ sobj@cov.unscaled[index,index] # Order is A, D, B1
+ se2Max = dvecMax[spp,,drop=FALSE] %*% vcov %*% cbind(dvecMax[spp,])
+ se2Tol = dvecTol[spp,,drop=FALSE] %*% vcov %*% cbind(dvecTol[spp,])
+ se2Opt = dvecOpt[spp,,drop=FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
+ answer = rbind(answer, dispersion[spp]^0.5 *
+ c(se2Opt=se2Opt, se2Tol=se2Tol, se2Max=se2Max))
+ }
+
+ link.function = if(MaxScale=="predictors")
+ remove.arg(object@misc$predictors.names[1]) else ""
+ dimnames(answer) = list(dimnames(cobj@D)[[3]], c("Optimum", "Tolerance",
+ if(nchar(link.function)) paste(link.function,"(Maximum)",sep="") else
+ "Maximum"))
+ NAthere = is.na(answer %*% rep(1, len=3))
+ answer[NAthere,] = NA # NA in tolerance means NA everywhere else
+ new("vcov.qrrvglm",
+ Cov.unscaled=Cov.unscaled,
+ dispersion=dispersion,
+ se=sqrt(answer))
+}
+
+
+setMethod("vcov", "rrvglm", function(object, ...)
+ vcovrrvglm(object, ...))
+
+setMethod("vcov", "qrrvglm", function(object, ...)
+ vcovqrrvglm(object, ...))
+
+setClass("vcov.qrrvglm", representation(
+ Cov.unscaled="array", # permuted cov.unscaled
+ dispersion="numeric",
+ se="matrix"))
+
+
+
+model.matrix.qrrvglm <- function(object, type=c("lv", "vlm"), ...) {
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type = as.character(substitute(type))
+ type = match.arg(type, c("lv","vlm"))[1]
+
+ switch(type, lv=Coef(object, ...)@lv, vlm=object@x)
+}
+
+setMethod("model.matrix", "qrrvglm", function(object, ...)
+ model.matrix.qrrvglm(object, ...))
+
+
+
+
+
+
+
+persp.qrrvglm = function(x, varlvI = FALSE, reference = NULL,
+ plot.it=TRUE,
+ xlim=NULL, ylim=NULL, zlim=NULL, # zlim ignored if Rank==1
+ gridlength=if(Rank==1) 301 else c(51,51),
+ whichSpecies = NULL,
+ xlab = if(Rank==1) "Latent Variable" else "Latent Variable 1",
+ ylab = if(Rank==1) "Expected Value" else "Latent Variable 2",
+ zlab="Expected value",
+ labelSpecies = FALSE, # For Rank==1 only
+ stretch = 1.05, # quick and dirty, Rank==1 only
+ main="",
+ ticktype = "detailed",
+ col = if(Rank==1) par()$col else "white",
+ add1 = FALSE,
+ ...) {
+ oylim = ylim
+ object = x # don't like x as the primary argument
+ coef.obj = Coef(object, varlvI = varlvI, reference = reference)
+ if((Rank <- coef.obj@Rank) > 2)
+ stop("object must be a rank-1 or rank-2 model")
+ fv = fitted(object)
+ NOS = ncol(fv) # Number of species
+ M = object@misc$M
+
+ xlim = rep(if(length(xlim)) xlim else range(coef.obj@lv[,1]), length=2)
+ if(!length(oylim)) {
+ ylim = if(Rank==1) c(0, max(fv)*stretch) else
+ rep(range(coef.obj@lv[,2]), length=2)
+ }
+ gridlength = rep(gridlength, length=Rank)
+ lv1 = seq(xlim[1], xlim[2], length=gridlength[1])
+ if(Rank==1) {
+ m = cbind(lv1)
+ } else {
+ lv2 = seq(ylim[1], ylim[2], length=gridlength[2])
+ m = expand.grid(lv1,lv2)
+ }
+
+ if(dim(coef.obj@B1)[1] != 1 || dimnames(coef.obj@B1)[[1]] != "(Intercept)")
+ stop("Norrr = ~ 1 is needed")
+ LP = coef.obj@A %*% t(cbind(m)) # M by n
+ LP = LP + c(coef.obj@B1) # Assumes \bix_1 = 1 (intercept only)
+
+ mm = as.matrix(m)
+ N = ncol(LP)
+ for(j in 1:M) {
+ for(i in 1:N) {
+ LP[j,i] = LP[j,i] + mm[i,,drop=FALSE] %*% coef.obj@D[,,j] %*%
+ t(mm[i,,drop=FALSE])
+ }
+ }
+ LP = t(LP) # n by M
+
+
+ fitvals = object@family@inverse(LP) # n by NOS
+ dimnames(fitvals) = list(NULL, dimnames(fv)[[2]])
+ sppNames = dimnames(object@y)[[2]]
+ if(!length(whichSpecies)) {
+ whichSpecies = sppNames[1:NOS]
+ whichSpecies.numer = 1:NOS
+ } else
+ if(is.numeric(whichSpecies)) {
+ whichSpecies.numer = whichSpecies
+ whichSpecies = sppNames[whichSpecies.numer] # Convert to character
+ } else
+ whichSpecies.numer = match(whichSpecies, sppNames)
+ if(Rank==1) {
+ if(plot.it) {
+ if(!length(oylim))
+ ylim = c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
+ col = rep(col, len=length(whichSpecies.numer))
+ if(!add1)
+ matplot(lv1, fitvals, xlab=xlab, ylab=ylab, type="n",
+ main=main, xlim=xlim, ylim=ylim, ...)
+ for(j in 1:length(whichSpecies.numer)) {
+ ptr2 = whichSpecies.numer[j] # points to species column
+ lines(lv1, fitvals[,ptr2], col=col[j], ...)
+ if(labelSpecies) {
+ ptr1=(1:nrow(fitvals))[max(fitvals[,ptr2])==fitvals[,ptr2]]
+ ptr1 = ptr1[1]
+ text(lv1[ptr1], fitvals[ptr1,ptr2]+
+ (stretch-1)*diff(range(ylim)),
+ label=sppNames[ptr2], col=col[j], ...)
+ }
+ }
+ }
+ } else {
+ maxfitted = matrix(fitvals[,whichSpecies[1]], length(lv1), length(lv2))
+ if(length(whichSpecies) > 1)
+ for(j in whichSpecies[-1]) {
+ maxfitted = pmax(maxfitted, matrix(fitvals[,j],
+ length(lv1), length(lv2)))
+ }
+ if(!length(zlim))
+ zlim = range(maxfitted, na.rm = TRUE)
+
+ if(plot.it)
+ graphics:::persp.default(lv1, lv2, maxfitted,
+ zlim=zlim,
+ xlab=xlab, ylab=ylab, zlab=zlab,
+ ticktype = ticktype, col = col, main=main, ...)
+ }
+
+ invisible(list(fitted=fitvals,
+ lv1grid=lv1,
+ lv2grid=if(Rank==2) lv2 else NULL,
+ maxfitted=if(Rank==2) maxfitted else NULL))
+}
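+
+# Usage sketch (hedged; p1 is the rank-1 CQO fit from the trplot
+# example above):
+# persp(p1, col=1:4, labelSpecies=TRUE)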
+
+if(!isGeneric("persp"))
+ setGeneric("persp", function(x, ...) standardGeneric("persp"))
+setMethod("persp", "qrrvglm", function(x, ...) persp.qrrvglm(x=x, ...))
+
+
+
+
+ccoef.qrrvglm = function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@C
+}
+
+ccoef.Coef.qrrvglm = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ object@C
+}
+
+lv.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@lv
+}
+
+lv.rrvglm = function(object, ...) {
+ ans = lvplot(object, plot.it=FALSE)
+ if(ncol(ans) == 1) dimnames(ans) = list(dimnames(ans)[[1]], "lv")
+ ans
+}
+
+lv.Coef.qrrvglm = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ object@lv
+}
+
+Max.qrrvglm = function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@Maximum
+}
+
+Max.Coef.qrrvglm = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ if(any(slotNames(object) == "Maximum")) object at Maximum else
+ Max(object, ...)
+}
+
+Opt.qrrvglm = function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@Optimum
+}
+
+Opt.Coef.qrrvglm = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ Coef(object, ...)@Optimum
+}
+
+Tol.qrrvglm = function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@Tolerance
+}
+
+Tol.Coef.qrrvglm = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ if(any(slotNames(object) == "Tolerance")) object at Tolerance else
+ Tol(object, ...)
+}
+
+
+if(!isGeneric("ccoef"))
+ setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
+setMethod("ccoef", "rrvglm", function(object, ...) ccoef.qrrvglm(object, ...))
+setMethod("ccoef", "qrrvglm", function(object, ...) ccoef.qrrvglm(object, ...))
+setMethod("ccoef", "Coef.rrvglm", function(object, ...) ccoef.Coef.qrrvglm(object, ...))
+setMethod("ccoef", "Coef.qrrvglm", function(object, ...) ccoef.Coef.qrrvglm(object, ...))
+
+setMethod("coef", "qrrvglm", function(object, ...) Coef.qrrvglm(object, ...))
+setMethod("coefficients", "qrrvglm", function(object, ...) Coef.qrrvglm(object, ...))
+
+if(!isGeneric("lv"))
+ setGeneric("lv", function(object, ...) standardGeneric("lv"))
+setMethod("lv", "rrvglm", function(object, ...) lv.rrvglm(object, ...))
+setMethod("lv", "qrrvglm", function(object, ...) lv.qrrvglm(object, ...))
+setMethod("lv", "Coef.rrvglm", function(object, ...) lv.Coef.qrrvglm(object, ...))
+setMethod("lv", "Coef.qrrvglm", function(object, ...) lv.Coef.qrrvglm(object, ...))
+
+if(!isGeneric("Max"))
+ setGeneric("Max", function(object, ...) standardGeneric("Max"))
+setMethod("Max", "qrrvglm", function(object, ...) Max.qrrvglm(object, ...))
+setMethod("Max", "Coef.qrrvglm", function(object, ...) Max.Coef.qrrvglm(object, ...))
+
+if(!isGeneric("Opt"))
+ setGeneric("Opt", function(object, ...) standardGeneric("Opt"))
+setMethod("Opt", "qrrvglm", function(object, ...) Opt.qrrvglm(object, ...))
+setMethod("Opt", "Coef.qrrvglm", function(object, ...) Opt.Coef.qrrvglm(object, ...))
+
+if(!isGeneric("Tol"))
+ setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
+setMethod("Tol", "qrrvglm", function(object, ...) Tol.qrrvglm(object, ...))
+setMethod("Tol", "Coef.qrrvglm", function(object, ...) Tol.Coef.qrrvglm(object, ...))
+
+
+
+cgo <- function(...) {
+ stop("The function \"cgo\" has been renamed \"cqo\". Ouch! Sorry!")
+}
+
+clo <- function(...) {
+ stop("Constrained linear ordination is fitted with the function \"rrvglm\"")
+}
+
+
+
+
+is.bell.vlm <-
+is.bell.rrvglm <- function(object, ...) {
+ M = object at misc$M
+ ynames = object at misc$ynames
+ ans = rep(FALSE, len=M)
+ if(length(ynames)) names(ans) = ynames
+ ans
+}
+
+is.bell.uqo <-
+is.bell.qrrvglm <- function(object, ...) {
+ is.finite(Max(object, ...))
+}
+
+is.bell.cao <- function(object, ...) {
+ NA * Max(object, ...)
+}
+
+if(!isGeneric("is.bell"))
+ setGeneric("is.bell", function(object, ...) standardGeneric("is.bell"))
+setMethod("is.bell","uqo", function(object, ...) is.bell.uqo(object, ...))
+setMethod("is.bell","qrrvglm", function(object,...) is.bell.qrrvglm(object,...))
+setMethod("is.bell","rrvglm", function(object, ...) is.bell.rrvglm(object, ...))
+setMethod("is.bell","vlm", function(object, ...) is.bell.vlm(object, ...))
+setMethod("is.bell","cao", function(object, ...) is.bell.cao(object, ...))
+setMethod("is.bell","Coef.qrrvglm", function(object,...) is.bell.qrrvglm(object,...))
+
+
+
+
+
diff --git a/R/family.survival.q b/R/family.survival.q
new file mode 100644
index 0000000..8e8c3c3
--- /dev/null
+++ b/R/family.survival.q
@@ -0,0 +1,284 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+dcnormal1 = function(r1=0, r2=0, link.sd="loge",
+ isd=NULL, zero=NULL)
+{
+ if(!is.Numeric(r1, allow=1, integ=TRUE) || r1<0) stop("bad input for r1")
+ if(!is.Numeric(r2, allow=1, integ=TRUE) || r2<0) stop("bad input for r2")
+ if(mode(link.sd) != "character" && mode(link.sd) != "name")
+ link.sd = as.character(substitute(link.sd))
+
+ new("vglmff",
+ blurb=c("Univariate Normal distribution with double censoring\n\n",
+ "Links: ",
+ "mean; ", namesof("sd", link.sd, tag= TRUE),
+ "\n",
+ "Variance: sd^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }) , list( .zero=zero))),
+ initialize=eval(substitute(expression({
+ predictors.names = c("mean", namesof("sd", .link.sd, tag= FALSE))
+ if(ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or a one-column matrix")
+ if(length(w) != n || !is.Numeric(w, integ=TRUE, posit=TRUE))
+ stop(paste("the argument \"weights\" must be a vector",
+ "of positive integers"))
+ sumw = sum(w)
+ extra$bign = sumw + .r1 + .r2 # Total number of censored & uncensored observations
+ if(!length(etastart)) {
+ sd.y.est = if(length(.isd)) rep(.isd, len=n) else {
+ junk = if(is.R()) lm.wfit(x=x, y=y, w=w) else
+ lm.wfit(x=x, y=y, w=w, method="qr")
+ 1.25 * sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ }
+ etastart = cbind(mu=y, theta2eta(sd.y.est, .link.sd))
+ }
+ }) , list( .link.sd=link.sd, .r1=r1, .r2=r2, .isd=isd))),
+ inverse=function(eta, extra=NULL) eta[,1],
+ last=eval(substitute(expression({
+ misc$link = c(mu="identity", sd= .link.sd)
+ misc$expected = TRUE
+ misc$r1 = .r1
+ misc$r2 = .r2
+ }) , list( .link.sd=link.sd, .r1=r1, .r2=r2))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ sd = eta2theta(eta[,2], .link.sd)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(sd) - 0.5 * ((y - mu)/sd)^2)) +
+ (if(.r1==0) 0 else {z1=min((y-mu)/sd); Fz1=pnorm(z1); .r1*log(Fz1)}) +
+ (if(.r2==0) 0 else {z2=max((y-mu)/sd); Fz2=pnorm(z2); .r2*log(1-Fz2)})
+ } , list( .link.sd=link.sd, .r1=r1, .r2=r2))),
+ vfamily=c("dcnormal1"),
+ deriv=eval(substitute(expression({
+ sd = eta2theta(eta[,2], .link.sd)
+ q1 = .r1 / extra$bign
+ q2 = .r2 / extra$bign
+ pee = 1 - q1 - q2 # 1 if r1==r2==0
+ z1 = if(.r1 == 0) -100 else min((y - mu) / sd) # -100 stands in for -Inf
+ z2 = if(.r2 == 0) +100 else max((y - mu) / sd) # +100 stands in for +Inf
+ fz1 = if(.r1 == 0) 0 else dnorm(z1)
+ fz2 = if(.r2 == 0) 0 else dnorm(z2)
+ Fz1 = if(.r1 == 0) 0.02 else pnorm(z1) # dummy value, avoids 0/0 below
+ Fz2 = if(.r2 == 0) 0.99 else pnorm(z2) # dummy value, avoids 0/0 below
+ dl.dmu = (y-mu) / sd^2 +
+ ((- .r1 * fz1/Fz1 + .r2 * fz2/(1-Fz2)) / sd) / (n*w)
+ dl.dsd = -1/sd + (y-mu)^2 / sd^3 +
+ ((- .r1 * z1*fz1/Fz1 + .r2 * z2*fz2/(1-Fz2)) / sd) / (n*w)
+ dmu.deta = dtheta.deta(mu, "identity")
+ dsd.deta = dtheta.deta(sd, .link.sd)
+ cbind(w * dl.dmu * dmu.deta, w * dl.dsd * dsd.deta)
+ }) , list( .link.sd=link.sd, .r1=r1, .r2=r2))),
+ weight=expression({
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ Q1 = ifelse(q1==0, 1, q1) # Avoids division by zero below; not elegant
+ Q2 = ifelse(q2==0, 1, q2) # Avoids division by zero below; not elegant
+ ed2l.dmu2 = 1 / (sd^2) +
+ ((fz1*(z1+fz1/Q1) - fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ ed2l.dmusd = ((fz1-fz2 + z1*fz1*(z1+fz1/Q1) -
+ z2*fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ ed2l.dsd2 = 2 / (sd^2) +
+ ((z1*fz1-z2*fz2 + z1^2 *fz1 *(z1+fz1/Q1) -
+ z2^2 *fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ wz[,iam(1,1,M)] = w * ed2l.dmu2 * dmu.deta^2
+ wz[,iam(2,2,M)] = w * ed2l.dsd2 * dsd.deta^2
+ wz[,iam(1,2,M)] = w * ed2l.dmusd * dsd.deta * dmu.deta
+ wz
+ }))
+}
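+
+# Usage sketch (illustrative only): with the r1 smallest and r2 largest
+# observations censored and discarded, fit the remaining sample via
+# yy <- sort(rnorm(100, mean=10, sd=2)) # hypothetical complete sample
+# fit <- vglm(yy[6:95] ~ 1, dcnormal1(r1=5, r2=5))
+# Coef(fit) # back-transformed estimates of the mean and sd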
+
+
+
+
+
+dbisa = function(x, shape, scale=1, log = FALSE) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
+ xifun = function(x) {temp <- sqrt(x); temp - 1/temp}
+ ans = if(log)
+ dnorm(xifun(x/scale) / shape, log=TRUE) + log(1 + scale/x) - log(2) -
+ 0.5 * log(x) - 0.5 * log(scale) - log(shape) else
+ dnorm(xifun(x/scale) / shape) * (1 + scale/x) / (2 * sqrt(x) *
+ sqrt(scale) * shape)
+ ans[scale < 0 | shape < 0] = NA
+ ans[x <= 0] = if(log) log(0) else 0
+ ans
+}
+
+pbisa = function(q, shape, scale=1) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
+ ans = pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape)
+ ans[scale < 0 | shape < 0] = NA
+ ans[q <= 0] = 0
+ ans
+}
+
+qbisa = function(p, shape, scale=1) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("argument \"p\" must have values inside the interval (0,1)")
+ if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
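+ # With A = qnorm(p), t = sqrt(q/scale) solves t - 1/t = shape*A, a
+ # quadratic whose positive root gives
+ # q = scale*(2 + A^2*shape^2 + A*shape*sqrt(4 + A^2*shape^2))/2;
+ # the pmin/pmax below always selects that root (comment added for
+ # clarity).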
+ A = qnorm(p)
+ temp1 = A * shape * sqrt(4 + A^2 * shape^2)
+ ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
+ ans2 = (2 + A^2 * shape^2 - temp1) * scale / 2
+ ifelse(p < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+}
+
+rbisa = function(n, shape, scale=1) {
+ if(!is.Numeric(n,integ=TRUE, allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(shape, pos=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(scale, pos=TRUE)) stop("bad input for argument \"scale\"")
+ A = rnorm(n)
+ temp1 = A * shape
+ temp1 = temp1 * sqrt(4 + temp1^2)
+ ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
+ ans2 = (2 + A^2 * shape^2 - temp1) * scale / 2
+
+
+ ifelse(A < 0, pmin(ans1, ans2), pmax(ans1, ans2))
+}
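+
+# Quick sanity check (a sketch, not upstream code): simulated values
+# should agree with the distribution function:
+# set.seed(123)
+# x <- rbisa(5000, shape=0.8, scale=2)
+# plot(ecdf(x))
+# curve(pbisa(x, shape=0.8, scale=2), add=TRUE, col="red")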
+
+
+
+bisa.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+bisa = function(lshape="loge",
+ lscale="loge",
+ ishape=NULL, iscale=1,
+ method.init=1, fsmax=9001, zero=NULL)
+{
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(length(ishape) && !is.Numeric(ishape, posit=TRUE))
+ stop("bad input for argument \"ishape\"")
+ if(!is.Numeric(iscale, posit=TRUE))
+ stop("bad input for argument \"iscale\"")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2)
+ stop("method.init must be 1 or 2")
+ if(!is.Numeric(fsmax, allow=1, integ=TRUE))
+ stop("bad input for \"fsmax\"")
+
+ new("vglmff",
+ blurb=c("Birnbaum-Saunders distribution\n\n",
+ "Links: ",
+ namesof("shape", lshape, tag= TRUE), "; ",
+ namesof("scale", lscale, tag= TRUE)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }) , list( .zero=zero))),
+ initialize=eval(substitute(expression({
+ useFS <- intercept.only || n < .fsmax
+ save.weight <- control$save.weight <- !useFS
+ predictors.names = c(namesof("shape", .lshape, tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE))
+ if(ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or a one-column matrix")
+ if(!length(etastart)) {
+ scale.init = rep( .iscale, len=n)
+ shape.init = if( .method.init==2) sqrt(2*( pmax(y, scale.init+0.1) /
+ scale.init - 1)) else {
+ ybar = rep(weighted.mean(y, w), len=n)
+ sqrt(2*( pmax(ybar, scale.init+0.1) / scale.init - 1))
+ }
+ etastart = cbind(theta2eta(shape.init, .lshape),
+ theta2eta(scale.init, .lscale))
+ }
+ }) , list( .lshape=lshape, .lscale=lscale, .ishape=ishape, .iscale=iscale,
+ .fsmax=fsmax, .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ sh = eta2theta(eta[,1], .lshape)
+ sc = eta2theta(eta[,2], .lscale)
+ sc * (1 + sh^2 / 2)
+ }, list( .lshape=lshape, .lscale=lscale ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .lshape, scale= .lscale)
+ misc$expected = useFS
+ misc$BFGS = !useFS
+ }) , list( .lshape=lshape, .lscale=lscale ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ sh = eta2theta(eta[,1], .lshape)
+ sc = eta2theta(eta[,2], .lscale)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(sh) - 0.5 * log(sc) + log(1 + sc/y) -
+ (y/sc - 2 + sc/y) / (2*sh^2)))
+ } , list( .lshape=lshape, .lscale=lscale ))),
+ vfamily=c("bisa"),
+ deriv=eval(substitute(expression({
+ useFS <- intercept.only || n < .fsmax # must be same as above
+ sh = eta2theta(eta[,1], .lshape)
+ sc = eta2theta(eta[,2], .lscale)
+ dl.dsh = ((y/sc - 2 + sc/y) / sh^2 - 1) / sh
+ dl.dsc = -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
+ (sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
+ dsh.deta = dtheta.deta(sh, .lshape)
+ dsc.deta = dtheta.deta(sc, .lscale)
+ if(useFS) {
+ w * cbind(dl.dsh * dsh.deta, dl.dsc * dsc.deta)
+ } else {
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dsh * dsh.deta, dl.dsc * dsc.deta)
+ derivnew
+ }
+ }) , list( .lshape=lshape, .lscale=lscale, .fsmax=fsmax ))),
+ weight=eval(substitute(expression({
+ if(useFS) {
+ wz = matrix(as.numeric(NA), n, M) # Diagonal!!
+ wz[,iam(1,1,M)] = 2 * dsh.deta^2 / sh^2
+ invxi = function(y) {
+ 1 + 0.5 * y^2 + y * sqrt(4 + y^2) / 2
+ }
+ myfun = function(x, alpha)
+ 2 * (1 / (1 + invxi(alpha*x)) - 0.5)^2 * dnorm(x)
+ if(intercept.only) {
+ temp9 = integrate(f=myfun, lower=0, upper=Inf, alpha=sh[1])
+ if(temp9$message != "OK") stop("integration was unsuccessful")
+ wz[,iam(2,2,M)] = dsc.deta^2 * (0.25+ 1/sh^2 +temp9$value)/sc^2
+ } else {
+ for(iii in 1:n) {
+ temp9= integrate(f=myfun, lower=0, upper=Inf, alpha=sh[iii])
+ if(temp9$message!="OK") stop("integration was unsuccessful")
+ wz[iii,iam(2,2,M)] = dsc.deta[iii]^2 *
+ (0.25 + 1 / sh[iii]^2 + temp9$value) / sc[iii]^2
+ }
+ }
+ w * wz
+ } else {
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }
+ }), list( .fsmax=fsmax ))))
+}
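+
+# Usage sketch (hedged):
+# y <- rbisa(500, shape=1, scale=3)
+# fit <- vglm(y ~ 1, bisa(iscale=2))
+# Coef(fit) # back-transformed shape and scale estimates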
+
+
diff --git a/R/family.ts.q b/R/family.ts.q
new file mode 100644
index 0000000..2720eb2
--- /dev/null
+++ b/R/family.ts.q
@@ -0,0 +1,440 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+ rrar.Ci <- function(i, coeffs, aa, Ranks., MM) {
+ index <- cumsum(c(aa, MM*Ranks.))
+ ans<-matrix(coeffs[(index[i]+1):index[i+1]], Ranks.[i], MM, byrow=TRUE)
+ t(ans)
+ }
+ rrar.Ak1 <- function(MM, coeffs, Ranks., aa) {
+ ptr <- 0
+ Ak1 <- diag(MM)
+ for(j in 1:MM) {
+ for(i in 1:MM) {
+ if(i>j && (MM+1)-(Ranks.[j]-1) <= i) {
+ ptr <- ptr + 1
+ Ak1[i,j] <- coeffs[ptr]
+ }
+ }
+ }
+ if(aa>0 && ptr != aa) stop("something wrong")
+ Ak1
+ }
+ rrar.Di <- function(i, Ranks.) {
+ if(Ranks.[1]==Ranks.[i]) diag(Ranks.[i]) else
+ rbind(diag(Ranks.[i]), matrix(0, Ranks.[1]-Ranks.[i], Ranks.[i]))
+ }
+ rrar.Mi <- function(i, MM, Ranks., ki) {
+ if(Ranks.[ki[i]]==MM)
+ return(NULL)
+ hi <- Ranks.[ki[i]] - Ranks.[ki[i+1]]
+ Ji <- matrix(0, hi, Ranks.[1])
+ for(j in 1:hi) {
+ Ji[j,j+Ranks.[ki[i+1]]] <- 1
+ }
+ Mi <- matrix(0, MM-Ranks.[ki[i]], MM) # dim(Oi) == dim(Ji)
+ for(j in 1:(MM-Ranks.[ki[i]])) {
+ Mi[j,j+Ranks.[ki[i ]]] <- 1
+ }
+ kronecker(Mi, Ji)
+ }
+ rrar.Mmat <- function(MM, uu, Ranks., ki) {
+ Mmat <- NULL
+ for(i in uu:1) {
+ Mmat <- rbind(Mmat, rrar.Mi(i, MM, Ranks., ki))
+ }
+ Mmat
+ }
+ block.diag <- function(A, B) {
+ if(is.null(A) && is.null(B))
+ return(NULL)
+ if(!is.null(A) && is.null(B))
+ return(A)
+ if(is.null(A) && !is.null(B))
+ return(B)
+
+ A <- as.matrix(A)
+ B <- as.matrix(B)
+ temp <- cbind(A, matrix(0, nrow(A), ncol(B)))
+ rbind(temp, cbind(matrix(0, nrow(B), ncol(A)), B))
+ }
+ rrar.Ht <- function(plag, MM, Ranks., coeffs, aa, uu, ki) {
+ Htop <- Hbot <- NULL
+ Mmat <- rrar.Mmat(MM, uu, Ranks., ki) # NULL if full rank
+ Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
+
+ if(!is.null(Mmat))
+ for(i in 1:plag) {
+ Di <- rrar.Di(i, Ranks.)
+ Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
+ temp <- Di %*% t(Ci)
+ Htop <- cbind(Htop, Mmat %*% kronecker(diag(MM), temp))
+ }
+
+ for(i in 1:plag) {
+ Di <- rrar.Di(i, Ranks.)
+ temp <- kronecker(t(Di) %*% t(Ak1), diag(MM))
+ Hbot <- block.diag(Hbot, temp)
+ }
+ rbind(Htop, Hbot)
+ }
+ rrar.Ut <- function(y, tt, plag, MM) {
+ Ut <- NULL
+ if(plag>1)
+ for(i in 1:plag) {
+ Ut <- rbind(Ut, kronecker(diag(MM), cbind(y[tt-i,])))
+ }
+ Ut
+ }
+ rrar.UU <- function(y, plag, MM, n) {
+ UU <- NULL
+ for(i in (plag+1):n) {
+ UU <- rbind(UU, t(rrar.Ut(y, i, plag, MM)))
+ }
+ UU
+ }
+ rrar.Wmat <- function(y, Ranks., MM, ki, plag, aa, uu, n, coeffs) {
+ temp1 <- rrar.UU(y, plag, MM, n)
+ temp2 <- t(rrar.Ht(plag, MM, Ranks., coeffs, aa, uu, ki))
+ list(UU=temp1, Ht=temp2)
+ }
+
+
+
+rrar.control <- function(stepsize=0.5, save.weight=TRUE, ...)
+{
+
+ if(stepsize <= 0 || stepsize > 1) {
+ warning("bad value of stepsize; using 0.5 instead")
+ stepsize <- 0.5
+ }
+ list(stepsize=stepsize, save.weight = as.logical(save.weight)[1])
+}
+
+
+rrar <- function(Ranks=1, coefstart=NULL)
+{
+ lag.p <- length(Ranks)
+ new("vglmff",
+ blurb=c("Nested reduced-rank vector autoregressive model AR(", lag.p,
+ ")\n\n",
+ "Link: ",
+ namesof("mu_t", "identity"),
+ ", t = ", paste(paste(1:lag.p, coll=",", sep="")) ,
+ ""),
+ initialize=eval(substitute(expression({
+ Ranks. <- .Ranks
+ plag <- length(Ranks.)
+ nn <- nrow(x) # original n
+ pp <- ncol(x)
+ indices <- 1:plag
+
+ copyxbig <- TRUE # xbig.save matrix changes at each iteration
+
+ dsrank <- -sort(-Ranks.) # ==rev(sort(Ranks.))
+ if(any(dsrank != Ranks.))
+ stop("Ranks must be a non-increasing sequence")
+ if(!is.matrix(y) || ncol(y) ==1) {
+ stop("response must be a matrix with more than one column")
+ } else {
+ MM <- ncol(y)
+ ki <- udsrank <- unique(dsrank)
+ uu <- length(udsrank)
+ for(i in 1:uu)
+ ki[i] <- max((1:plag)[dsrank==udsrank[i]])
+ ki <- c(ki, plag+1) # For computing a
+ Ranks. <- c(Ranks., 0) # For computing a
+ aa <- sum( (MM-Ranks.[ki[1:uu]]) * (Ranks.[ki[1:uu]]-Ranks.[ki[-1]]) )
+ }
+ if(!intercept.only)
+ warning("ignoring explanatory variables")
+
+ if(any(MM < Ranks.))
+ stop(paste("max(Ranks) can only be", MM, "or less"))
+ y.save <- y # Save the original
+ if(any(w != 1))
+ stop("all weights should be 1")
+
+ new.coeffs <- .coefstart # Needed for iter=1 of $weight
+ new.coeffs <- if(length(new.coeffs))
+ rep(new.coeffs, len=aa+sum(Ranks.)*MM) else
+ runif(aa+sum(Ranks.)*MM)
+ temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
+ xbig.save <- temp8$UU %*% temp8$Ht
+
+ if(!length(etastart)) {
+ etastart <- xbig.save %*% new.coeffs
+ etastart <- matrix(etastart, ncol=ncol(y), byrow=TRUE) # So M=ncol(y)
+ }
+
+ extra$Ranks. <- Ranks.; extra$aa <- aa
+ extra$plag <- plag; extra$nn <- nn
+ extra$MM <- MM; extra$coeffs <- new.coeffs;
+ extra$y.save <- y.save
+
+ keep.assign <- attr(x, "assign")
+ x <- x[-indices,,drop=FALSE]
+ if(is.R())
+ attr(x, "assign") <- keep.assign
+ y <- y[-indices,,drop=FALSE]
+ w <- w[-indices]
+ n.save <- n <- nn - plag
+ }), list( .Ranks=Ranks, .coefstart=coefstart ))),
+ inverse=function(eta, extra=NULL) {
+ aa <- extra$aa
+ coeffs <- extra$coeffs
+ MM <- extra$MM
+ nn <- extra$nn
+ plag <- extra$plag
+ Ranks. <- extra$Ranks.
+ y.save <- extra$y.save
+
+ tt <- (1+plag):nn
+ mu <- matrix(0, nn-plag, MM)
+ Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
+ for(i in 1:plag) {
+ Di <- rrar.Di(i, Ranks.)
+ Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
+ mu <- mu + y.save[tt-i,,drop=FALSE] %*% t(Ak1 %*% Di %*% t(Ci))
+ }
+ mu
+ },
+ last=expression({
+ misc$plag <- plag
+ misc$Ranks <- Ranks.
+ misc$Ak1 <- Ak1
+ misc$omegahat <- omegahat
+ misc$Cmatrices <- Cmatrices
+ misc$Dmatrices <- Dmatrices
+ misc$Hmatrix <- temp8$Ht
+ misc$Phimatrices <- vector("list", plag)
+ for(i in 1:plag) {
+ misc$Phimatrices[[i]] = Ak1 %*% Dmatrices[[i]] %*% t(Cmatrices[[i]])
+ }
+ misc$Z <- y.save %*% t(solve(Ak1))
+ }),
+ vfamily="rrar",
+ deriv=expression({
+ temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
+ xbig.save <- temp8$UU %*% temp8$Ht
+
+ extra$coeffs <- new.coeffs
+
+ resmat <- y
+ tt <- (1+plag):nn
+ Ak1 <- rrar.Ak1(MM, new.coeffs, Ranks., aa)
+ Cmatrices <- Dmatrices <- vector("list", plag)
+ for(i in 1:plag) {
+ Dmatrices[[i]] <- Di <- rrar.Di(i, Ranks.)
+ Cmatrices[[i]] <- Ci <- rrar.Ci(i, new.coeffs, aa, Ranks., MM)
+ resmat <- resmat - y.save[tt-i,,drop=FALSE] %*% t(Ak1 %*% Di %*% t(Ci))
+ NULL
+ }
+ omegahat <- (t(resmat) %*% resmat) / n # MM x MM
+ omegainv <- solve(omegahat)
+
+ ind1 <- iam(NA,NA,MM,both=TRUE)
+
+ wz = matrix(omegainv[cbind(ind1$row,ind1$col)],
+ nn-plag, length(ind1$row), byrow=TRUE)
+ mux22(t(wz), y-mu, M=extra$MM, as.mat=TRUE)
+ }),
+ weight=expression({
+ wz
+ }))
+}
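+
+# Usage sketch (illustrative; mylongts holding a multivariate time
+# series is hypothetical):
+# fit <- vglm(cbind(y1,y2,y3,y4) ~ 1, rrar(Ranks=c(4,2)),
+# data=mylongts, trace=TRUE)
+# fit@misc$Phimatrices # estimated AR coefficient matrices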
+
+
+
+
+vglm.garma.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight = as.logical(save.weight)[1])
+}
+
+
+garma <- function(link=c("identity","loge","reciprocal",
+ "logit","probit","cloglog","cauchit"),
+ p.ar.lag=1, q.lag.ma=0,
+ coefstart=NULL,
+ step=1.0,
+ constant=0.1)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ link = match.arg(link, c("identity","loge","reciprocal",
+ "logit","probit","cloglog","cauchit"))[1]
+ if(!is.Numeric(p.ar.lag, integer=TRUE))
+ stop("bad input for argument \"p.ar.lag\"")
+ if(!is.Numeric(q.lag.ma, integer=TRUE))
+ stop("bad input for argument \"q.lag.ma\"")
+ if(q.lag.ma != 0)
+ stop("sorry, only q.lag.ma=0 is currently implemented")
+
+ new("vglmff",
+ blurb=c("GARMA(", p.ar.lag, ",", q.lag.ma, ")\n\n",
+ "Link: ",
+ namesof("mu_t", link),
+ ", t = ", paste(paste(1:p.ar.lag, coll=",", sep=""))),
+ initialize=eval(substitute(expression({
+ plag <- .p.ar.lag
+ predictors.names = namesof("mu", .link, tag=FALSE)
+ indices <- 1:plag
+ tt <- (1+plag):nrow(x)
+ pp <- ncol(x)
+
+ copyxbig <- TRUE # x matrix changes at each iteration
+
+ if( .link == "logit" || .link == "probit" || .link == "cloglog" ||
+ .link == "cauchit") {
+ delete.zero.colns <- TRUE
+ eval(process.categorical.data.vgam)
+ mustart <- mustart[tt,2]
+ y <- y[,2]
+ }
+
+ x.save <- x # Save the original
+ y.save <- y # Save the original
+ w.save <- w # Save the original
+
+ new.coeffs <- .coefstart # Needed for iter=1 of @weight
+ new.coeffs <- if(length(new.coeffs)) rep(new.coeffs, len=pp+plag) else
+ c(runif(pp), rep(0, plag))
+ if(!length(etastart)) {
+ etastart <- x[-indices,,drop=FALSE] %*% new.coeffs[1:pp]
+ }
+ x <- cbind(x, matrix(as.numeric(NA), n, plag)) # Right size now
+ dx <- dimnames(x.save)
+ morenames <- paste("(lag", 1:plag, ")", sep="")
+ dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames))
+
+ x <- x[-indices,,drop=FALSE]
+ class(x) = if(is.R()) "matrix" else "model.matrix" # Added 27/2/02; 26/2/04
+ y <- y[-indices]
+ w <- w[-indices]
+ n.save <- n <- n - plag
+ more <- vector("list", plag)
+ names(more) <- morenames
+ for(i in 1:plag)
+ more[[i]] <- i + max(unlist(attr(x.save, "assign")))
+ attr(x, "assign") <- c(attr(x.save, "assign"), more)
+ }), list( .link=link, .p.ar.lag=p.ar.lag, .coefstart=coefstart ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, link= .link)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link <- c(mu = .link)
+ misc$plag <- plag
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ if(residuals) switch( .link,
+ identity=y-mu,
+ loge=w*(y/mu - 1),
+ reciprocal=w*(y/mu - 1),
+ w*(y/mu - (1-y)/(1-mu))) else
+ switch( .link,
+ identity=sum(w*(y-mu)^2),
+ loge=sum(w*(-mu + y*log(mu))),
+ reciprocal=sum(w*(-mu + y*log(mu))),
+ sum(w*(y*log(mu) + (1-y)*log(1-mu))))
+ }, list( .link=link ))),
+ middle2=eval(substitute(expression({
+ realfv <- fv
+ for(i in 1:plag) {
+ realfv <- realfv + old.coeffs[i+pp] *
+ (x.save[tt-i,1:pp,drop=FALSE] %*% new.coeffs[1:pp]) # +
+ }
+
+ true.eta <- realfv + offset
+ mu <- family@inverse(true.eta, extra) # overwrite mu with correct one
+ }), list( .link=link ))),
+ vfamily=c("garma", "vglmgam"),
+ deriv=eval(substitute(expression({
+ dl.dmu <- switch( .link,
+ identity=y-mu,
+ loge=(y-mu)/mu,
+ reciprocal=(y-mu)/mu,
+ (y-mu) / (mu*(1-mu)))
+ dmu.deta <- dtheta.deta(mu, .link)
+ step <- .step # This is another method of adjusting step lengths
+ step * w * dl.dmu * dmu.deta
+ }), list( .link=link, .step=step ))),
+ weight=eval(substitute(expression({
+ x[,1:pp] <- x.save[tt,1:pp] # Reinstate
+
+ for(i in 1:plag) {
+ temp = theta2eta(y.save[tt-i], .link,
+ earg=if( any( .link == c("loge","logit")) )
+ .constant else NULL)
+ x[,1:pp] <- x[,1:pp] - x.save[tt-i,1:pp] * new.coeffs[i+pp]
+ x[,pp+i] <- temp - x.save[tt-i,1:pp,drop=FALSE] %*% new.coeffs[1:pp]
+ }
+ class(x)=if(is.R()) "matrix" else "model.matrix" # Added 27/2/02; 26/2/04
+
+ if(iter==1)
+ old.coeffs <- new.coeffs
+
+ xbig.save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+
+ vary = switch( .link,
+ identity=1,
+ loge=mu,
+ reciprocal=mu^2,
+ mu*(1-mu))
+ w * dtheta.deta(mu, link= .link)^2 / vary
+ }), list( .link=link, .constant=constant ))))
+}
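+
+# Usage sketch (hedged): a GARMA(1,0) model for a univariate count
+# series (mycounts is hypothetical):
+# fit <- vglm(y ~ 1, garma("loge", p.ar.lag=1), data=mycounts, trace=TRUE)
+# coef(fit, matrix=TRUE)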
+
+
+
+
+
+
+if(FALSE) {
+setClass("Coef.rrar", representation(
+ "plag" = "integer",
+ "Ranks" = "integer",
+ "omega" = "integer",
+ "C" = "matrix",
+ "D" = "matrix",
+ "H" = "matrix",
+ "Z" = "matrix",
+ "Phi" = "list", # list of matrices
+ "Ak1" = "matrix"))
+
+
+
+Coef.rrar = function(object, ...) {
+ result = new("Coef.rrar",
+ "plag" = object at misc$plag,
+ "Ranks" = object at misc$Ranks,
+ "omega" = object at misc$omega,
+ "C" = object at misc$C,
+ "D" = object at misc$D,
+ "H" = object at misc$H,
+ "Z" = object at misc$Z,
+ "Phi" = object at misc$Phi,
+ "Ak1" = object at misc$Ak1)
+}
+
+print.Coef.rrar = function(x, ...) {
+ cat(x@plag)
+}
+
+
+setMethod("Coef", "rrar",
+ function(object, ...)
+ Coef.rrar(object, ...))
+
+setMethod("print", "Coef.rrar",
+ function(x, ...)
+ invisible(print.Coef.rrar(x, ...)))
+}
+
+
diff --git a/R/family.univariate.q b/R/family.univariate.q
new file mode 100644
index 0000000..39b2884
--- /dev/null
+++ b/R/family.univariate.q
@@ -0,0 +1,7967 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+mccullagh89 = function(ltheta="rhobit", lnu="logoff",
+ itheta=NULL, inu=NULL,
+ etheta=list(),
+ enu=if(lnu == "logoff") list(offset=0.5) else list(),
+ zero=NULL)
+{
+ if(mode(ltheta) != "character" && mode(ltheta) != "name")
+ ltheta = as.character(substitute(ltheta))
+ if(mode(lnu) != "character" && mode(lnu) != "name")
+ lnu = as.character(substitute(lnu))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.list(etheta)) etheta = list()
+ if(!is.list(enu)) enu = list()
+
+ new("vglmff",
+ blurb=c("McCullagh (1989) distribution\n",
+ "f(y) = (1-2*theta*y+theta^2)^(-nu) * [1 - y^2]^(nu-1/2) /\n",
+ " Beta[nu+1/2, 1/2], ",
+ " -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
+ "Links: ",
+ namesof("theta", ltheta, earg=etheta), ", ",
+ namesof("nu", lnu, earg=enu),
+ "\n",
+ "\n",
+ "Mean: nu*theta/(1+nu)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y <= -1 | y >= 1))
+ stop("all y values must be in (-1,1)")
+ predictors.names= c(namesof("theta", .ltheta, earg= .etheta,tag=FALSE),
+ namesof("nu", .lnu, earg= .enu, tag= FALSE))
+ if(!length(etastart)) {
+ theta.init = if(length(.itheta)) rep(.itheta, length=n) else {
+ theta.grid = rvar = seq(-0.9, 0.9, by=0.1)
+ for(ii in 1:length(theta.grid))
+ rvar[ii] = mean((y-theta.grid[ii])*(theta.grid[ii]^2-1)/
+ (1-2*theta.grid[ii]*y+theta.grid[ii]^2))
+ try.this = theta.grid[abs(rvar) == min(abs(rvar))]
+ try.this = rep(try.this, len=n)
+ try.this
+ }
+ tmp = y/(theta.init-y)
+ tmp[tmp < -0.4] = -0.4
+ tmp[tmp > 10.] = 10.
+ nu.init = rep(if(length(.inu)) .inu else tmp, length=n)
+ nu.init[!is.finite(nu.init)] = 0.4
+ etastart = cbind(theta2eta(theta.init, .ltheta, earg=.etheta ),
+ theta2eta(nu.init, .lnu, earg= .enu ))
+ }
+ }),list( .ltheta=ltheta, .lnu=lnu, .inu=inu, .itheta=itheta,
+ .etheta = etheta, .enu=enu ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
+ nu = eta2theta(eta[,2], .lnu, earg= .enu )
+ nu*Theta/(1+nu)
+ }, list( .ltheta=ltheta, .lnu=lnu,
+ .etheta = etheta, .enu=enu ))),
+ last=eval(substitute(expression({
+ misc$link = c("theta"= .ltheta, "nu"= .lnu)
+ misc$earg = list(theta = .etheta, nu= .enu )
+ }), list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
+ nu = eta2theta(eta[,2], .lnu, earg= .enu )
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((nu-0.5)*log(1-y^2) - nu * log(1 - 2*Theta*y + Theta^2) -
+ lbeta(nu+0.5,0.5 )))
+ }, list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
+ vfamily=c("mccullagh89"),
+ deriv=eval(substitute(expression({
+ Theta = eta2theta(eta[,1], .ltheta, earg= .etheta )
+ nu = eta2theta(eta[,2], .lnu, earg= .enu )
+ dTheta.deta = dtheta.deta(Theta, .ltheta, earg= .etheta )
+ dnu.deta = dtheta.deta(nu, .lnu, earg= .enu )
+ dl.dTheta = 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
+ dl.dnu = log(1-y^2) - log(1 -2*Theta*y + Theta^2) -
+ digamma(nu+0.5) + digamma(nu+1)
+ w * cbind(dl.dTheta * dTheta.deta, dl.dnu * dnu.deta)
+ }), list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
+ weight=eval(substitute(expression({
+ d2l.dTheta2 = (2 * nu^2 / (1+nu)) / (1-Theta^2)
+ d2l.dnu2 = trigamma(nu+0.5) - trigamma(nu+1)
+ wz = matrix(as.numeric(NA), n, M) #diagonal matrix
+ wz[,iam(1,1,M)] = d2l.dTheta2 * dTheta.deta^2
+ wz[,iam(2,2,M)] = d2l.dnu2 * dnu.deta^2
+ w * wz
+ }), list( .ltheta=ltheta, .lnu=lnu ))))
+}
+
+
+
+
+
+hzeta = function(link="loglog", init.alpha=NULL)
+{
+ if(length(init.alpha) && !is.Numeric(init.alpha, positive=TRUE))
+ stop("'init.alpha' must be > 0")
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c(
+ "Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),\n",
+ " alpha>0, y=1,2,...\n\n",
+ "Link: ",
+ namesof("alpha", link), "\n\n",
+ "Mean: (1-2^(-alpha)) * zeta(alpha) if alpha>1",
+ "\n",
+ "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y < 1))
+ stop("all y values must be in 1,2,3,...")
+
+ predictors.names = namesof("alpha", .link, tag= FALSE)
+
+ if(!length(etastart)) {
+ ainit = if(length( .init.alpha)) .init.alpha else {
+ if((meany <- mean(y)) < 1.5) 3.0 else
+ if(meany < 2.5) 1.4 else 1.1
+ }
+ ainit = rep(ainit, length=n)
+ etastart = theta2eta(ainit, .link)
+ }
+ }), list( .link=link, .init.alpha=init.alpha ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ alpha = eta2theta(eta, .link)
+ mu = (1-2^(-alpha)) * zeta(alpha)
+ mu[alpha <= 1] = Inf
+ mu
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$d3 = d3 # because save.weights=F
+ misc$link = c(alpha= .link)
+ misc$pooled.weight = pooled.weight
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ alpha = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * log((2*y-1)^(-alpha) - (2*y+1)^(-alpha )))
+ }, list( .link=link ))),
+ vfamily=c("hzeta"),
+ deriv=eval(substitute(expression({
+ if(iter==1) {
+ d3 = deriv3(~ w * log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
+ "alpha", hessian= TRUE)
+ }
+
+ alpha = eta2theta(eta, .link)
+ eval.d3 = eval(d3)
+ dl.dalpha = attr(eval.d3, "gradient")
+ dalpha.deta = dtheta.deta(alpha, .link)
+ dl.dalpha * dalpha.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ d2l.dalpha2 = as.vector(attr(eval.d3, "hessian"))
+ wz = -dalpha.deta^2 * d2l.dalpha2 -
+ dl.dalpha * d2theta.deta2(alpha, .link)
+
+ if(FALSE && intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+ c(wz)
+ }), list( .link=link ))))
+}
+
+
+
+dhzeta = function(x, alpha)
+{
+ if(!is.Numeric(alpha, posit=TRUE))
+ stop("'alpha' must be numeric and have positive values")
+ nn = max(length(x), length(alpha))
+ x = rep(x, len=nn)
+ alpha = rep(alpha, len=nn)
+ ox = !is.finite(x)
+ zero = ox | round(x) != x | x < 1
+ ans = 0 * x
+ ans[!zero] = (2*x[!zero]-1)^(-alpha[!zero]) - (2*x[!zero]+1)^(-alpha[!zero])
+ ans
+}
+
+
+phzeta = function(q, alpha)
+{
+ if(!is.Numeric(alpha, posit=TRUE))
+ stop("'alpha' must be numeric and have positive values")
+ nn = max(length(q), length(alpha))
+ q = rep(q, len=nn)
+ alpha = rep(alpha, len=nn)
+ oq = !is.finite(q)
+ zero = oq | q < 1
+ q = floor(q)
+ ans = 0 * q
+ ans[!zero] = 1 - (2*q[!zero]+1)^(-alpha[!zero])
+ ans
+}
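+
+
+# A minimal consistency sketch (assumes dhzeta() and phzeta() above are
+# loaded; wrapped in if(FALSE) so it never runs at source time): the pmf
+# telescopes, so its partial sum equals the closed-form cdf exactly.
+if(FALSE) {
+    alpha = 1.5
+    sum(dhzeta(1:20, alpha))        # equals 1 - 41^(-alpha)
+    phzeta(20, alpha)               # same value, via the closed form
+}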
+
+
+qhzeta = function(p, alpha)
+{
+ if(!is.Numeric(alpha, posit=TRUE))
+ stop("'alpha' must be numeric and have positive values")
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("argument \"p\" must have values inside the interval (0,1)")
+ nn = max(length(p), length(alpha))
+ p = rep(p, len=nn)
+ alpha = rep(alpha, len=nn)
+ ans = (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
+ floor(ans+1)
+}
+
+rhzeta = function(n, alpha)
+{
+ if(!is.Numeric(alpha, posit=TRUE))
+ stop("'alpha' must be numeric and have positive values")
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("argument \"n\" must be a positive integer")
+ ans = ((runif(n)^(-1/alpha) - 1) / 2)
+ floor(ans+1)
+}
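+
+
+# An illustrative fitting sketch (assumes the fitting machinery elsewhere in
+# this package, i.e. vglm() and Coef(), is attached; kept inside if(FALSE)):
+# simulate from rhzeta() above and recover alpha with the hzeta() family.
+if(FALSE) {
+    set.seed(123)
+    yy = rhzeta(n=1000, alpha=1.1)
+    fit = vglm(yy ~ 1, hzeta())
+    Coef(fit)                       # should be near 1.1
+}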
+
+
+dirmultinomial = function(lphi="logit",
+ iphi = 0.10, parallel= FALSE, zero="M")
+{
+
+ if(mode(lphi) != "character" && mode(lphi) != "name")
+ lphi = as.character(substitute(lphi))
+ if(length(zero) &&
+ !(is.Numeric(zero, integer=TRUE, posit=TRUE) || is.character(zero )))
+ stop("bad input for argument \"zero\"")
+ if(!is.Numeric(iphi, positive=TRUE) || max(iphi) >= 1.0)
+ stop("bad input for argument \"iphi\"")
+
+ new("vglmff",
+ blurb=c("Dirichlet-multinomial distribution\n\n",
+ "Links: ",
+ "log(prob[1]/prob[M]), ..., log(prob[M-1]/prob[M]), ",
+ namesof("phi", lphi), "\n", "\n",
+ "Mean: shape_j / sum_j(shape_j)"),
+ constraints=eval(substitute(expression({
+ .ZERO = .zero
+ if(is.character(.ZERO)) .ZERO = eval(parse(text = .ZERO))
+ .PARALLEL = .parallel
+ if(is.logical(.PARALLEL) && .PARALLEL) {
+ mycmatrix = if(length(.ZERO))
+ stop("can only handle parallel=TRUE when zero=NULL") else
+ cbind(rbind(matrix(1,M-1,1), 0), rbind(matrix(0,M-1,1), 1))
+ } else
+ mycmatrix = if(M==1) diag(1) else diag(M)
+ constraints=cm.vgam(mycmatrix, x, .PARALLEL, constraints, int=TRUE)
+ constraints = cm.zero.vgam(constraints, x, .ZERO, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ initialize=eval(substitute(expression({
+ delete.zero.colns <- TRUE
+ eval(process.categorical.data.vgam)
+
+ y = as.matrix(y)
+ ycount = as.matrix(y * w)
+ M = ncol(y)
+ if(max(abs(ycount - round(ycount ))) > 1.0e-6)
+ warning("there appear to be non-integer responses")
+ if(min(ycount) < 0)
+ stop("all values of the response (matrix) must be non-negative")
+ predictors.names =
+ c(paste("log(prob[,",1:(M-1),"]/prob[,",M,"])", sep=""),
+ namesof("phi", .lphi, short= TRUE))
+ extra$n2 = w # aka omega, must be integer # as.vector(apply(y, 1, sum))
+ if(!length(etastart)) {
+ prob.init = apply(ycount, 2, sum)
+ prob.init = prob.init / sum(prob.init)
+ prob.init = matrix(prob.init, n, M, byrow=TRUE)
+ phi.init = rep( .iphi, len=n)
+ etastart = cbind(log(prob.init[,-M]/prob.init[,M]),
+ theta2eta(phi.init, .lphi))
+ }
+ }), list( .lphi=lphi, .iphi=iphi ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ temp = cbind(exp(eta[,-M]), 1)
+ temp / as.vector(temp %*% rep(1, M))
+ }, list( .lphi=lphi ))),
+ last=eval(substitute(expression({
+ misc$link = c(rep("noLinkFunction", length=M-1), .lphi)
+ names(misc$link) = c(paste("prob", 1:(M-1), sep=""), "phi")
+ misc$expected = TRUE
+ if(intercept.only) {
+ misc$shape=probs[1,]*(1/phi[1]-1) # phi & probs computed in @deriv
+ }
+ }), list( .lphi=lphi ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ probs = cbind(exp(eta[,-M]), 1)
+ probs = probs / as.vector(probs %*% rep(1, M))
+ phi = eta2theta(eta[,M], .lphi)
+ n = length(phi)
+ ycount = as.matrix(y * w)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ ans = rep(0.0, len=n)
+ omega = extra$n2
+ for(jay in 1:M) {
+ maxyj = max(ycount[,jay])
+ loopOveri = n < maxyj
+ if(loopOveri) {
+ for(iii in 1:n) {
+ rrr = 1:ycount[iii,jay] # a vector
+ if(ycount[iii,jay] > 0)
+ ans[iii] = ans[iii] + sum(log((1-phi[iii]) *
+ probs[iii,jay] + (rrr-1)*phi[iii]))
+
+ }
+ } else {
+ for(rrr in 1:maxyj) {
+ index = (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
+ if(any(index))
+ ans[index] = ans[index] + log((1-phi[index]) *
+ probs[index,jay] + (rrr-1)*phi[index])
+ }
+ }
+ } # end of jay loop
+
+ maxomega = max(omega)
+ loopOveri = n < maxomega
+ if(loopOveri) {
+ for(iii in 1:n) {
+ rrr = 1:omega[iii]
+ ans[iii]= ans[iii] - sum(log(1-phi[iii] + (rrr-1)*phi[iii]))
+ }
+ } else {
+ for(rrr in 1:maxomega) {
+ ind8 = rrr <= omega
+ ans[ind8] = ans[ind8] - log(1-phi[ind8] + (rrr-1)*phi[ind8])
+ }
+ }
+ sum(ans)
+ }
+ }, list( .lphi=lphi ))),
+ vfamily=c("dirmultinomial"),
+ deriv=eval(substitute(expression({
+ probs = cbind(exp(eta[,-M]), 1)
+ probs = probs / as.vector(probs %*% rep(1, M))
+ phi = eta2theta(eta[,M], .lphi)
+ dl.dprobs = matrix(0.0, n, M-1)
+ dl.dphi = rep(0.0, len=n)
+ omega = extra$n2
+ ycount = as.matrix(y * w)
+ for(jay in 1:M) {
+ maxyj = max(ycount[,jay])
+ loopOveri = n < maxyj
+ if(loopOveri) {
+ for(iii in 1:n) {
+ rrr = 1:ycount[iii,jay]
+ if(ycount[iii,jay] > 0) {
+ PHI = phi[iii]
+ dl.dphi[iii]=dl.dphi[iii] + sum((rrr-1-probs[iii,jay]) /
+ ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI))
+
+ tmp9 = (1-PHI) / ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)
+ if(jay < M) {
+ dl.dprobs[iii,jay] = dl.dprobs[iii,jay] + sum(tmp9)
+ } else {
+ for(jay2 in 1:(M-1))
+ dl.dprobs[iii,jay2]=dl.dprobs[iii,jay2]-sum(tmp9)
+ }
+ }
+ }
+ } else {
+ for(rrr in 1:maxyj) {
+ index = (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
+ PHI = phi[index]
+ dl.dphi[index] = dl.dphi[index] + (rrr-1-probs[index,jay]) /
+ ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
+ tmp9 = (1-PHI) / ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
+ if(jay < M) {
+ dl.dprobs[index,jay] = dl.dprobs[index,jay] + tmp9
+ } else {
+ for(jay2 in 1:(M-1))
+ dl.dprobs[index,jay2] = dl.dprobs[index,jay2] - tmp9
+ }
+ }
+ }
+ } # end of jay loop
+ maxomega = max(omega)
+ loopOveri = n < maxomega
+ if(loopOveri) {
+ for(iii in 1:n) {
+ rrr = 1:omega[iii]
+ dl.dphi[iii]=dl.dphi[iii] - sum((rrr-2)/(1 + (rrr-2)*phi[iii]))
+ }
+ } else {
+ for(rrr in 1:maxomega) {
+ index = rrr <= omega
+ dl.dphi[index]=dl.dphi[index] - (rrr-2)/(1 + (rrr-2)*phi[index])
+ }
+ }
+ dprobs.deta = probs[,-M] * (1 - probs[,-M]) # n x (M-1)
+ dphi.deta = dtheta.deta(phi, .lphi)
+ ans = cbind(dl.dprobs * dprobs.deta, dl.dphi * dphi.deta)
+ ans
+ }), list( .lphi=lphi ))),
+ weight=eval(substitute(expression({
+ wz = matrix(0, n, dimm(M))
+ loopOveri = n < maxomega
+ if(loopOveri) {
+ for(iii in 1:n) {
+ rrr = 1:omega[iii] # A vector
+ PHI = phi[iii]
+ pYiM.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[iii],
+ shape1=probs[iii,M]*(1/PHI-1),
+ shape2=(1-probs[iii,M])*(1/PHI-1)) # A vector
+ denomM = ((1-PHI)*probs[iii,M] + (rrr-1)*PHI)^2 # A vector
+ wz[iii,iam(M,M,M)] = wz[iii,iam(M,M,M)] +
+ sum(probs[iii,M]^2 * pYiM.ge.rrr / denomM) -
+ sum(1 / (1 + (rrr-2)*PHI)^2)
+ for(jay in 1:(M-1)) {
+ denomj = ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)^2
+ pYij.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[iii],
+ shape1=probs[iii,jay]*(1/PHI-1),
+ shape2=(1-probs[iii,jay])*(1/PHI-1))
+ wz[iii,iam(jay,jay,M)] = wz[iii,iam(jay,jay,M)] +
+ sum(pYij.ge.rrr / denomj) +
+ sum(pYiM.ge.rrr / denomM)
+ for(kay in jay:(M-1)) if(kay > jay) {
+ wz[iii,iam(jay,kay,M)] = wz[iii,iam(jay,kay,M)] +
+ sum(pYiM.ge.rrr / denomM)
+ }
+ wz[iii,iam(jay,M,M)] = wz[iii,iam(jay,M,M)] +
+ sum(probs[iii,jay] * pYij.ge.rrr / denomj) -
+ sum(probs[iii,M] * pYiM.ge.rrr / denomM)
+ wz[iii,iam(M,M,M)] = wz[iii,iam(M,M,M)] +
+ sum(probs[iii,jay]^2 * pYij.ge.rrr / denomj)
+ } # end of jay loop
+ } # end of iii loop
+ } else {
+ for(rrr in 1:maxomega) {
+ ind5 = rrr <= omega
+ PHI = phi[ind5]
+ pYiM.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[ind5],
+ shape1=probs[ind5,M]*(1/PHI-1),
+ shape2=(1-probs[ind5,M])*(1/PHI-1))
+ denomM = ((1-PHI)*probs[ind5,M] + (rrr-1)*PHI)^2
+ wz[ind5,iam(M,M,M)] = wz[ind5,iam(M,M,M)] +
+ probs[ind5,M]^2 * pYiM.ge.rrr / denomM -
+ 1 / (1 + (rrr-2)*PHI)^2
+ for(jay in 1:(M-1)) {
+ denomj = ((1-PHI)*probs[ind5,jay] + (rrr-1)*PHI)^2
+ pYij.ge.rrr = 1 - pbetabin.ab(q=rrr-1, size=omega[ind5],
+ shape1=probs[ind5,jay]*(1/PHI-1),
+ shape2=(1-probs[ind5,jay])*(1/PHI-1))
+ wz[ind5,iam(jay,jay,M)] = wz[ind5,iam(jay,jay,M)] +
+ pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM
+ for(kay in jay:(M-1)) if(kay > jay) {
+ wz[ind5,iam(jay,kay,M)] = wz[ind5,iam(jay,kay,M)] +
+ pYiM.ge.rrr / denomM
+ }
+ wz[ind5,iam(jay,M,M)] = wz[ind5,iam(jay,M,M)] +
+ probs[ind5,jay] * pYij.ge.rrr / denomj -
+ probs[ind5,M] * pYiM.ge.rrr / denomM
+ wz[ind5,iam(M,M,M)] = wz[ind5,iam(M,M,M)] +
+ probs[ind5,jay]^2 * pYij.ge.rrr / denomj
+ } # end of jay loop
+ } # end of rrr loop
+ }
+
+ for(jay in 1:(M-1))
+ for(kay in jay:(M-1))
+ wz[,iam(jay,kay,M)] = wz[,iam(jay,kay,M)] * (1-phi)^2
+ for(jay in 1:(M-1))
+ wz[,iam(jay,M,M)] = wz[,iam(jay,M,M)] * (phi-1) / phi
+ wz[,iam(M,M,M)] = wz[,iam(M,M,M)] / phi^2
+
+ d1Thetas.deta = cbind(dprobs.deta, dphi.deta)
+ index = iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz = wz * d1Thetas.deta[,index$row] * d1Thetas.deta[,index$col]
+ wz
+ }), list( .lphi=lphi ))))
+}
+
+
+dirmul.old = function(link="loge", init.alpha = 0.01,
+ parallel= FALSE, zero=NULL)
+{
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.Numeric(init.alpha, posit=TRUE))
+ stop("'init.alpha' must contain positive values only")
+
+ new("vglmff",
+ blurb=c("Dirichlet-Multinomial distribution\n\n",
+ "Links: ",
+ namesof("shape1", link), ", ..., ",
+ namesof("shapeM", link), "\n\n",
+ "Posterior mean: (n_j + shape_j)/(sum(n_j) + sum(shape_j))\n"),
+ constraints=eval(substitute(expression({
+ constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints, int= TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel=parallel, .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = as.matrix(y)
+ M = ncol(y)
+ if(any(y != round(y )))
+ stop("all y values must be integer-valued")
+
+ predictors.names = namesof(paste("shape", 1:M, sep=""), .link, short= TRUE)
+ extra$n2 = as.vector(apply(y, 1, sum)) # Nb. don't multiply by 2
+ extra$y = y
+ if(!length(etastart)) {
+ yy = if(is.numeric(.init.alpha))
+ matrix(.init.alpha, n, M, byrow= TRUE) else
+ matrix(runif(n*M), n, M)
+ etastart = theta2eta(yy, .link)
+ }
+ }), list( .link=link, .init.alpha=init.alpha ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta, .link)
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ sumshape = as.vector(shape %*% rep(1, len=M))
+ (extra$y + shape) / (extra$n2 + sumshape)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = rep(.link, length=M)
+ names(misc$link) = paste("shape", 1:M, sep="")
+ misc$pooled.weight = pooled.weight
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ shape = eta2theta(eta, .link)
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ sumshape = as.vector(shape %*% rep(1, len=M))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
+ sum(w * (lgamma(y + shape) - lgamma(shape )))
+ }, list( .link=link ))),
+ vfamily=c("dirmul.old"),
+ deriv=eval(substitute(expression({
+ shape = eta2theta(eta, .link)
+ sumshape = as.vector(shape %*% rep(1, len=M))
+ dl.dsh = digamma(sumshape) - digamma(extra$n2 + sumshape) +
+ digamma(y + shape) - digamma(shape)
+ dsh.deta = dtheta.deta(shape, .link)
+ w * dl.dsh * dsh.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ index = iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz = matrix(trigamma(sumshape)-trigamma(extra$n2 + sumshape),
+ nrow=n, ncol=dimm(M))
+ wz[,1:M] = wz[,1:M] + trigamma(y + shape) - trigamma(shape)
+ wz = -wz * dsh.deta[, index$row] * dsh.deta[, index$col]
+
+
+ if(TRUE && intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .link=link ))))
+}
+
+
+
+
+rdiric = function(n, shape, dimension=NULL) {
+ if(!is.numeric(dimension))
+ dimension = length(shape)
+ shape = rep(shape, len=dimension)
+
+ ans = if(is.R()) rgamma(n*dimension, rep(shape, rep(n, dimension ))) else
+ rgamma(n*dimension, rep(shape, each=n))
+ dim(ans) = c(n, dimension)
+
+
+ ans = ans / apply(ans, 1, sum)
+ ans
+}
+
+
+dirichlet = function(link="loge", zero=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Dirichlet distribution\n\n",
+ "Links: ",
+ namesof("shapej", link), "\n\n",
+ "Mean: shape_j/sum(shape_j), j=1,...,ncol(y)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = as.matrix(y)
+ M = ncol(y)
+ if(any(y <= 0) || any(y>=1))
+ stop("all y values must be > 0 and < 1")
+ predictors.names = namesof(paste("shape", 1:M, sep=""), .link, short= TRUE)
+ if(!length(etastart)) {
+ yy = matrix(t(y) %*% rep(1/nrow(y), nrow(y)), nrow(y), M, byrow= TRUE)
+ etastart = theta2eta(yy, .link)
+ }
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta, .link)
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ sumshape = as.vector(shape %*% rep(1, len=M)) # apply(shape, 1, sum)
+ shape / sumshape
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ shape = eta2theta(eta, .link)
+ M = if(is.matrix(eta)) ncol(eta) else 1
+ sumshape = as.vector(shape %*% rep(1, len=M))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (lgamma(sumshape) - lgamma(shape) + (shape-1)*log(y )))
+ }, list( .link=link ))),
+ vfamily=c("dirichlet"),
+ deriv=eval(substitute(expression({
+ shape = eta2theta(eta, .link)
+ sumshape = as.vector(shape %*% rep(1, len=M))
+ dl.dsh = digamma(sumshape) - digamma(shape) + log(y)
+ dsh.deta = dtheta.deta(shape, .link)
+ w * dl.dsh * dsh.deta
+ }), list( .link=link ))),
+ weight=expression({
+ index = iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz = matrix(trigamma(sumshape), nrow=n, ncol=dimm(M))
+ wz[,1:M] = wz[,1:M] - trigamma(shape)
+ wz = -w * wz * dsh.deta[, index$row] * dsh.deta[, index$col]
+ wz
+ }))
+}
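+
+
+# An illustrative sketch (assumes vglm() and Coef() from elsewhere in this
+# package are attached; inside if(FALSE)): generate Dirichlet data with
+# rdiric() above and recover the shape parameters with dirichlet().
+if(FALSE) {
+    set.seed(123)
+    yy = rdiric(n=1000, shape=c(3, 1, 4))
+    fit = vglm(yy ~ 1, dirichlet())
+    Coef(fit)                       # should be near (3, 1, 4)
+}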
+
+
+
+zeta = function(x, deriv=0) {
+
+
+ deriv.arg = deriv
+ if(!is.Numeric(deriv.arg, allow=1, integer=TRUE, positi=TRUE) && deriv.arg!=0)
+ stop("'deriv' must be a single non-negative integer")
+ if(!(deriv.arg==0 || deriv.arg==1 || deriv.arg==2))
+ stop("'deriv' must be 0, 1, or 2")
+
+
+
+ if(deriv.arg > 0)
+ return(zeta.derivative(x, deriv=deriv))
+
+
+
+ if(any(special <- Re(x) <= 1)) {
+ ans <- x
+ ans[special] <- Inf # For Re(x)==1
+
+ special3 <- Re(x) < 1
+ ans[special3] <- NA # For 0 < Re(x) < 1
+
+ special4 <- (0 < Re(x)) & (Re(x) < 1) & (Im(x) == 0)
+ ans[special4] <- zeta.derivative(x[special4], deriv=deriv)
+
+
+ special2 <- Re(x) < 0
+ if(any(special2)) {
+ x2 = x[special2]
+ cx = 1-x2
+ ans[special2] = 2^(x2) * pi^(x2-1) * sin(pi*x2/2) * gamma(cx) * Recall(cx)
+ }
+
+ if(any(!special)) {
+ ans[!special] <- Recall(x[!special])
+ }
+ return(ans)
+ }
+
+ a=12; k=8 # Markman paper
+ B = c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
+ ans = 0
+ for(i in 1:(a-1))
+ ans = ans + 1.0 / i^x
+ ans = ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
+
+ term = (x/2) / a^(x+1)
+ ans = ans + term * B[1]
+
+ for(m in 2:k) {
+ term = term * (x+2*m-2) * (x+2*m-3) / (a*a* 2*m * (2*m-1))
+ ans = ans + term * B[m]
+ }
+ ans
+}
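+
+
+# A quick numerical check (no assumptions beyond zeta() above; inside
+# if(FALSE)): compare against the classical closed-form values.
+if(FALSE) {
+    zeta(2) - pi^2/6                # approximately 0 (machine precision)
+    zeta(4) - pi^4/90               # approximately 0
+}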
+
+
+zeta.derivative = function(x, deriv=0)
+{
+
+
+ deriv.arg = deriv
+ if(!is.Numeric(deriv.arg, allow=1, integer=TRUE, positi=TRUE) && deriv.arg!=0)
+ stop("'deriv' must be a single non-negative integer")
+ if(!(deriv.arg==0 || deriv.arg==1 || deriv.arg==2))
+ stop("'deriv' must be 0, 1, or 2")
+
+ if(any(Im(x) != 0))
+ stop("Sorry, currently can only handle x real, not complex")
+ if(any(x < 0))
+ stop("Sorry, currently cannot handle x < 0")
+
+ ok = is.finite(x) & x > 0 & x != 1 # Handles NAs
+ ans = rep(as.numeric(NA), length(x))
+ nn = sum(ok) # Effective length (excludes x < 0 and x = 1 values)
+ if(nn)
+ ans[ok] = dotFortran(name="vzetawr", as.double(x[ok]), ans=double(nn),
+ as.integer(deriv.arg), as.integer(nn))$ans
+
+
+
+ if(deriv==0)
+ ans[is.finite(x) & abs(x) < 1.0e-12] = -0.5
+
+ ans
+}
+
+
+dzeta = function(x, p)
+{
+ if(!is.Numeric(p, allow=1, posit=TRUE) || p <= 1)
+ stop("'p' must be numeric and > 1")
+ p = rep(p, len=length(x))
+
+ ox = !is.finite(x)
+ zero = ox | round(x) != x | x < 1
+ if(any(zero)) warning("non-integer x and/or x < 1 or NAs")
+ ans = rep(0.0, len=length(x))
+ if(any(!zero))
+ ans[!zero] = x[!zero]^(-p[!zero]) / zeta(p[!zero])
+ if(any(ox)) ans[ox] = NA
+ ans
+}
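+
+
+# A normalization sketch (assumes dzeta() and zeta() above; inside
+# if(FALSE)): the probabilities x^(-p)/zeta(p) sum to 1 over x=1,2,...
+if(FALSE) {
+    sum(dzeta(1:100000, p=2))       # close to 1 (truncation error ~1e-5)
+}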
+
+zetaff = function(link="loge", init.p=NULL)
+{
+
+ if(length(init.p) && !is.Numeric(init.p, positi=TRUE))
+ stop("argument \"init.p\" must be > 0")
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Zeta distribution f(y) = 1/(y^(p+1) zeta(p+1)), p>0, y=1,2,...\n\n",
+ "Link: ",
+ namesof("p", link), "\n\n",
+ "Mean: zeta(p) / zeta(p+1), provided p>1\n",
+ "Variance: zeta(p-1) / zeta(p+1) - mean^2, provided p>2"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y < 1))
+ stop("all y values must be in 1,2,3,...")
+ if(any(y != round(y )))
+ warning("y should be integer-valued")
+
+ predictors.names = namesof("p", .link, tag= FALSE)
+
+ if(!length(etastart)) {
+ llfun = function(pp, y, w) {
+ sum(w * (-(pp+1) * log(y) - log(zeta(pp+1 ))))
+ }
+ pp.init = if(length( .init.p )) .init.p else
+ getInitVals(gvals=seq(0.1, 3.0, len=19), llfun=llfun, y=y, w=w)
+ pp.init = rep(pp.init, length=length(y))
+ if( .link == "loglog") pp.init[pp.init <= 1] = 1.2
+ etastart = theta2eta(pp.init, .link)
+ }
+ }), list( .link=link, .init.p=init.p ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ ans = pp = eta2theta(eta, .link)
+ ans[pp>1] = zeta(pp[pp>1]) / zeta(pp[pp>1]+1)
+ ans[pp<=1] = NA
+ ans
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(pp= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ pp = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-(pp+1) * log(y) - log(zeta(pp+1 ))))
+ }, list( .link=link ))),
+ vfamily=c("zetaff"),
+ deriv=eval(substitute(expression({
+ pp = eta2theta(eta, .link)
+ fred1 = zeta(pp+1)
+ fred2 = zeta(pp+1, deriv=1)
+ dl.dpp = -log(y) - fred2 / fred1
+ dpp.deta = dtheta.deta(pp, .link)
+ w * dl.dpp * dpp.deta
+ }), list( .link=link ))),
+ weight=expression({
+ ed2l.dpp2 = zeta(pp+1, deriv=2) / fred1 - (fred2/fred1)^2
+ wz = w * dpp.deta^2 * ed2l.dpp2
+ wz
+ }))
+}
+
+
+
+gharmonic = function(n, s=1, lognexponent=0) {
+ if(!is.Numeric(n, integ=TRUE, posit=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(s, posit=TRUE))
+ stop("bad input for argument \"s\"")
+ if(!is.Numeric(lognexponent, allow=1))
+ stop("bad input for argument \"lognexponent\"")
+ if(length(n) == 1 && length(s) == 1) {
+ if(lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
+ sum((1:n)^(-s))
+ } else {
+ LEN = max(length(n), length(s))
+ n = rep(n, len=LEN)
+ ans = s = rep(s, len=LEN)
+ if(lognexponent != 0) {
+ for(i in 1:LEN)
+ ans[i] = sum(log(1:n[i])^lognexponent * (1:n[i])^(-s[i]))
+ } else
+ for(i in 1:LEN)
+ ans[i] = sum((1:n[i])^(-s[i]))
+ ans
+ }
+}
+
+dzipf = function(x, N, s)
+{
+ if(!is.Numeric(x))
+ stop("bad input for argument \"x\"")
+ if(!is.Numeric(N, integ=TRUE, posit=TRUE))
+ stop("bad input for argument \"N\"")
+ if(!is.Numeric(s, posit=TRUE))
+ stop("bad input for argument \"s\"")
+ nn = max(length(x), length(N), length(s))
+ x = rep(x, len=nn); N = rep(N, len=nn); s = rep(s, len=nn);
+ ox = !is.finite(x)
+ zero = ox | round(x) != x | x < 1 | x > N
+ ans = 0 * x
+ if(any(!zero))
+ ans[!zero] = x[!zero]^(-s[!zero]) / gharmonic(N[!zero], s[!zero])
+ ans
+}
+
+
+
+pzipf = function(q, N, s) {
+ if(!is.Numeric(q))
+ stop("bad input for argument \"q\"")
+ if(!is.Numeric(N, integ=TRUE, posit=TRUE))
+ stop("bad input for argument \"N\"")
+ if(!is.Numeric(s, posit=TRUE))
+ stop("bad input for argument \"s\"")
+
+ nn = max(length(q), length(N), length(s))
+ q = rep(q, len=nn); N = rep(N, len=nn); s = rep(s, len=nn);
+ oq = !is.finite(q)
+ zeroOR1 = oq | q < 1 | q >= N
+ floorq = floor(q)
+ ans = 0 * floorq
+ ans[oq | q >= N] = 1
+ if(any(!zeroOR1))
+ ans[!zeroOR1] = gharmonic(floorq[!zeroOR1], s[!zeroOR1]) /
+ gharmonic(N[!zeroOR1], s[!zeroOR1])
+ ans
+}
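+
+
+# A consistency sketch (assumes dzipf(), pzipf() and gharmonic() above;
+# inside if(FALSE)): the pmf sums to the cdf, and to 1 over the full support.
+if(FALSE) {
+    sum(dzipf(1:3, N=5, s=2))       # equals pzipf(3, N=5, s=2)
+    pzipf(3, N=5, s=2)
+    sum(dzipf(1:5, N=5, s=2))       # equals 1
+}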
+
+
+zipf = function(N=NULL, link="loge", init.s=NULL)
+{
+
+ if(length(N) &&
+ (!is.Numeric(N, positi=TRUE, integ=TRUE, allow=1) || N <= 1))
+ stop("bad input for argument \"N\"")
+ enteredN = length(N)
+ if(length(init.s) && !is.Numeric(init.s, positi=TRUE))
+ stop("argument \"init.s\" must be > 0")
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Zipf distribution f(y;s) = y^(-s) / sum((1:N)^(-s)),",
+ " s>0, y=1,2,...,N", ifelse(enteredN, paste("=",N,sep=""), ""),
+ "\n\n",
+ "Link: ",
+ namesof("s", link),
+ "\n\n",
+ "Mean: gharmonic(N,s-1) / gharmonic(N,s)"),
+ initialize=eval(substitute(expression({
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ y = as.numeric(y)
+ if(any(y != round(y )))
+ stop("y must be integer-valued")
+ predictors.names = namesof("s", .link, tag= FALSE)
+ NN = .N
+ if(!is.Numeric(NN, allow=1, posit=TRUE, integ=TRUE))
+ NN = max(y)
+ if(max(y) > NN)
+ stop("maximum of the response is greater than argument \"N\"")
+ if(any(y < 1))
+ stop(paste("all response values must be in 1,2,3,...,N=",NN,sep=""))
+ extra$N = NN
+ if(!length(etastart)) {
+ llfun = function(ss, y, N, w) {
+ sum(w * ((-ss) * log(y) - log(gharmonic(N, ss))))
+ }
+ ss.init = if(length( .init.s )) .init.s else
+ getInitVals(gvals=seq(0.1, 3.0, len=19), llfun=llfun,
+ y=y, N=extra$N, w=w)
+ ss.init = rep(ss.init, length=length(y))
+ if( .link == "loglog") ss.init[ss.init <= 1] = 1.2
+ etastart = theta2eta(ss.init, .link)
+ }
+ }), list( .link=link, .init.s=init.s, .N=N ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ ss = eta2theta(eta, .link)
+ gharmonic(extra$N, s=ss-1) / gharmonic(extra$N, s=ss)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$expected = FALSE
+ misc$link = c(s= .link)
+ misc$N = extra$N
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ ss = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((-ss) * log(y) - log(gharmonic(extra$N, ss ))))
+ }, list( .link=link ))),
+ vfamily=c("zipf"),
+ deriv=eval(substitute(expression({
+ ss = eta2theta(eta, .link)
+ fred1 = gharmonic(extra$N, ss)
+ fred2 = gharmonic(extra$N, ss, lognexp=1)
+ dl.dss = -log(y) + fred2 / fred1
+ dss.deta = dtheta.deta(ss, .link)
+ d2ss.deta2 = d2theta.deta2(ss, .link)
+ w * dl.dss * dss.deta
+ }), list( .link=link ))),
+ weight=expression({
+ d2l.dss = gharmonic(extra$N, ss, lognexp=2) / fred1 - (fred2/fred1)^2
+ wz = w * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
+ wz
+ }))
+}
+
+
+cauchy1 = function(scale.arg=1, llocation="identity",
+ ilocation=NULL, method.init=1)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(!is.Numeric(scale.arg, posit=TRUE)) stop("bad input for \"scale.arg\"")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 3)
+ stop("'method.init' must be 1 or 2 or 3")
+
+ new("vglmff",
+ blurb=c("One-parameter Cauchy distribution (location unknown, scale known)\n\n",
+ "Link: ",
+ namesof("location", llocation), "\n\n",
+ "Mean: doesn't exist\n",
+ "Variance: doesn't exist"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("location", .llocation, tag= FALSE)
+
+ if(!length(etastart)) {
+ loc.init = if(length(.ilocation)) .ilocation else {
+ if( .method.init == 2) median(rep(y,w)) else
+ if( .method.init == 3) y else {
+ Loglikfun = function(y, loc, scal, w)
+ sum(w * (-log(1+((y-loc)/scal)^2) - log(scal )))
+ loc.grid = rvar = quantile(y, probs=seq(0.1,0.9,by=0.1))
+ for(ii in 1:length(loc.grid))
+ rvar[ii] = Loglikfun(y=y, loc=loc.grid[ii],
+ scal= .scale.arg, w=w)
+ try.this = loc.grid[rvar == max(rvar)]
+ try.this = rep(try.this, len=n)
+ try.this
+ }
+ }
+ loc.init = rep(loc.init, len=n)
+ if(.llocation == "loge") loc.init = abs(loc.init)+0.01
+ etastart = theta2eta(loc.init, .llocation)
+ }
+ }), list( .scale.arg=scale.arg, .ilocation=ilocation,
+ .llocation=llocation, .method.init=method.init ))),
+ inverse=function(eta, extra=NULL) {
+ rep(as.numeric(NA), length(eta))
+ },
+ last=eval(substitute(expression({
+ misc$link = c("location"= .llocation)
+ misc$scale.arg = .scale.arg
+ }), list( .scale.arg=scale.arg, .llocation=llocation ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ location = eta2theta(eta, .llocation)
+ temp = (y-location)/ .scale.arg
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(1+ temp^2) - log(pi) - log(.scale.arg )))
+ }, list( .scale.arg=scale.arg, .llocation=llocation ))),
+ vfamily=c("cauchy1"),
+ deriv=eval(substitute(expression({
+ location = eta2theta(eta, .llocation)
+ temp = (y-location)/.scale.arg
+ dl.dlocation = 2 * temp / ((1 + temp^2) * .scale.arg)
+ dlocation.deta = dtheta.deta(location, .llocation)
+ w * dl.dlocation * dlocation.deta
+ }), list( .scale.arg=scale.arg, .llocation=llocation ))),
+ weight=eval(substitute(expression({
+ wz = w * dlocation.deta^2 / (.scale.arg^2 * 2)
+ wz
+ }), list( .scale.arg=scale.arg, .llocation=llocation ))))
+}
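+
+
+# An illustrative fitting sketch (assumes vglm() is attached; inside
+# if(FALSE)): with the scale known, cauchy1() estimates the location, and
+# with the default identity link the intercept is the location itself.
+if(FALSE) {
+    set.seed(123)
+    yy = rcauchy(n=500, location=7, scale=2)
+    fit = vglm(yy ~ 1, cauchy1(scale.arg=2))
+    coef(fit)                       # intercept = location, near 7
+}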
+
+
+
+
+logistic1 = function(llocation="identity",
+ scale.arg=1, method.init=1)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(!is.Numeric(scale.arg, allow=1, posit=TRUE))
+ stop("'scale.arg' must be a single positive number")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2)
+ stop("'method.init' must be 1 or 2")
+
+ new("vglmff",
+ blurb=c("One-parameter logistic distribution (location unknown, scale known)\n\n",
+ "Link: ",
+ namesof("location", llocation), "\n", "\n",
+ "Mean: location", "\n",
+ "Variance: (pi*scale)^2 / 3"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("location", .llocation, tag= FALSE)
+ if(!length(etastart)) {
+ location.init = if( .method.init == 1) y else median(rep(y, w))
+ location.init = rep(location.init, len=n)
+ if(.llocation == "loge") location.init = abs(location.init) + 0.001
+ etastart = theta2eta(location.init, .llocation)
+ }
+ }), list( .method.init=method.init, .llocation=llocation ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, .llocation)
+ }, list( .llocation=llocation ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation)
+ misc$scale.arg = .scale.arg
+ }), list( .llocation=llocation, .scale.arg=scale.arg ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ location = eta2theta(eta, .llocation)
+ zedd = (y-location)/.scale.arg
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-zedd - 2 * log(1+exp(-zedd)) - log(.scale.arg )))
+ }, list( .llocation=llocation, .scale.arg=scale.arg ))),
+ vfamily=c("logistic1"),
+ deriv=eval(substitute(expression({
+ location = eta2theta(eta, .llocation)
+ ezedd = exp(-(y-location)/.scale.arg)
+ dl.dlocation = (1 - ezedd) / ((1 + ezedd) * .scale.arg)
+ dlocation.deta = dtheta.deta(location, .llocation)
+ w * dl.dlocation * dlocation.deta
+ }), list( .llocation=llocation, .scale.arg=scale.arg ))),
+ weight=eval(substitute(expression({
+ wz = w * dlocation.deta^2 / (.scale.arg^2 * 3)
+ wz
+ }), list( .scale.arg=scale.arg ))))
+}
+
+
+
+
+erlang = function(shape.arg, link="loge", method.init=1)
+{
+
+ if(!is.Numeric(shape.arg, allow=1, integer=TRUE, positi=TRUE))
+ stop("\"shape\" must be a positive integer")
+ if(!is.Numeric(method.init, allow=1, integer=TRUE, positi=TRUE) ||
+ method.init > 2)
+ stop("\"method.init\" must be 1 or 2")
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Erlang distribution\n\n",
+ "Link: ", namesof("scale", link), "\n", "\n",
+ "Mean: shape * scale", "\n",
+ "Variance: shape * scale^2"),
+ initialize=eval(substitute(expression({
+ if(ncol(y <- as.matrix(y)) > 1)
+ stop("erlang cannot handle matrix responses yet")
+ if(any(y < 0))
+ stop("all y values must be >= 0")
+
+ predictors.names = namesof("scale", .link, tag= FALSE)
+
+ if(!length(etastart)) {
+ if(.method.init==1)
+ sc.init = y / .shape.arg
+ if(.method.init==2) {
+ sc.init = median(y) / .shape.arg
+ sc.init = rep(sc.init, length=n)
+ }
+ etastart = theta2eta(sc.init, .link)
+ }
+ }), list( .link=link, .shape.arg=shape.arg, .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ sc = eta2theta(eta, .link)
+ .shape.arg * sc
+ }, list( .link=link, .shape.arg=shape.arg ))),
+ last=eval(substitute(expression({
+ misc$link = c(scale= .link)
+ misc$shape.arg = .shape.arg
+ }), list( .link=link, .shape.arg=shape.arg ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ sc = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((.shape.arg - 1) * log(y) - y / sc - .shape.arg * log(sc) -
+ lgamma( .shape.arg )))
+ }, list( .link=link, .shape.arg=shape.arg ))),
+ vfamily=c("erlang"),
+ deriv=eval(substitute(expression({
+ sc = eta2theta(eta, .link)
+ dl.dsc = (y / sc - .shape.arg) / sc
+ dsc.deta = dtheta.deta(sc, .link)
+ w * dl.dsc * dsc.deta
+ }), list( .link=link, .shape.arg=shape.arg ))),
+ weight=eval(substitute(expression({
+ ed2l.dsc2 = .shape.arg / sc^2 # Use the expected info matrix
+ wz = w * dsc.deta^2 * ed2l.dsc2
+ wz
+ }), list( .shape.arg=shape.arg ))))
+}
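+
+
+# An illustrative sketch (assumes vglm() and Coef() are attached; inside
+# if(FALSE)): Erlang data are gamma with known integer shape, so rgamma()
+# can simulate them while the shape is held fixed.
+if(FALSE) {
+    set.seed(123)
+    yy = rgamma(n=500, shape=3, scale=2)
+    fit = vglm(yy ~ 1, erlang(shape.arg=3))
+    Coef(fit)                       # scale estimate, should be near 2
+}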
+
+
+
+borel.tanner = function(shape.arg, link="logit")
+{
+
+ if(!is.Numeric(shape.arg, allow=1, integ=TRUE))
+ stop("bad input for argument \"shape.arg\"")
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Borel-Tanner distribution\n\n",
+ "Link: ",
+ namesof("a", link), "\n\n",
+ "Mean: n/(1-a)",
+ "\n",
+ "Variance: n*a / (1-a)^3"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y < .shape.arg))
+ stop("all y values must be >= shape.arg")
+ if(max(abs(y - round(y )))>0.00001)
+ stop("response must be integer-valued")
+
+ predictors.names = namesof("a", .link, tag= FALSE)
+
+
+ if(!length(etastart)) {
+ a.init = .shape.arg / y
+ etastart = theta2eta(a.init, .link)
+ }
+ }), list( .link=link, .shape.arg=shape.arg ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ .shape.arg / (1 - a)
+ }, list( .link=link, .shape.arg=shape.arg ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link)
+ misc$shape.arg = .shape.arg
+ }), list( .link=link, .shape.arg=shape.arg ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((y-.shape.arg) * log(a) - a * y))
+ }, list( .link=link, .shape.arg=shape.arg ))),
+ vfamily=c("borel.tanner"),
+ deriv=eval(substitute(expression({
+ a = eta2theta(eta, .link)
+ dl.da = (y- .shape.arg)/a - y
+ da.deta = dtheta.deta(a, .link)
+ w * dl.da * da.deta
+ }), list( .link=link, .shape.arg=shape.arg ))),
+ weight=eval(substitute(expression({
+ ed2l.da2 = .shape.arg/(a*(1-a)) # Use the expected info matrix
+ wz = w * da.deta^2 * ed2l.da2
+ wz
+ }), list( .shape.arg=shape.arg ))))
+}
+
+
+dsnorm = function(x, location=0, scale=1, shape=0) {
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("bad input for argument \"scale\"")
+ zedd = (x - location) / scale
+ 2 * dnorm(zedd) * pnorm(shape * zedd) / scale
+}
+
+
+
+rsnorm = function(n, location=0, scale=1, shape=0) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("bad input for argument \"scale\"")
+ if(!is.Numeric(shape)) stop("bad input for argument \"shape\"")
+ rho = shape / sqrt(1 + shape^2)
+ u0 = rnorm(n)
+ v = rnorm(n)
+ u1 = rho*u0 + sqrt(1 - rho^2) * v
+ location + scale * ifelse(u0 >= 0, u1, -u1)
+}
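+
+
+# Two quick checks (assume dsnorm()/rsnorm() above; inside if(FALSE)): the
+# density integrates to 1, and the sample mean is close to the theoretical
+# mean shape/sqrt(1+shape^2) * sqrt(2/pi).
+if(FALSE) {
+    integrate(dsnorm, lower=-Inf, upper=Inf, shape=3)$value  # close to 1
+    set.seed(123)
+    mean(rsnorm(n=10000, shape=3))  # near 3/sqrt(10) * sqrt(2/pi) = 0.757
+}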
+
+
+skewnormal1 = function(lshape="identity", ishape=NULL)
+{
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+
+ new("vglmff",
+ blurb=c("1-parameter Skew-normal distribution\n\n",
+ "Link: ",
+ namesof("shape", lshape), "\n",
+ "Mean: shape * sqrt(2 / (pi * (1+shape^2 )))\n",
+ "Variance: 1-mu^2"),
+ initialize=eval(substitute(expression({
+ y = cbind(y)
+ if(ncol(y) != 1)
+ stop("response must be a vector or a 1-column matrix")
+ predictors.names = namesof("shape", .lshape, tag= FALSE)
+ if(!length(etastart)) {
+ init.shape = if(length( .ishape)) rep( .ishape, len=n) else {
+ temp = y
+ index = abs(y) < sqrt(2/pi)-0.01
+ temp[!index] = y[!index]
+ temp[index] = sign(y[index])/sqrt(2/(pi*y[index]*y[index])-1)
+ temp
+ }
+ etastart = matrix(init.shape, n, ncol(y))
+ }
+ }), list( .lshape=lshape, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ alpha = eta2theta(eta, .lshape)
+ alpha * sqrt(2/(pi * (1+alpha^2 )))
+ }, list( .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .lshape)
+ }), list( .lshape=lshape ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ alpha = mu / sqrt(2/pi - mu^2)
+ theta2eta(alpha, .lshape)
+ }, list( .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ alpha = eta2theta(eta, .lshape)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (pnorm(y*alpha, log=TRUE )))
+ }, list( .lshape=lshape ))),
+ vfamily=c("skewnormal1"),
+ deriv=eval(substitute(expression({
+ alpha = eta2theta(eta, .lshape)
+ zedd = y*alpha
+ tmp76 = pnorm(zedd)
+ tmp86 = dnorm(zedd)
+ dl.dshape = tmp86 * y / tmp76
+ dshape.deta = dtheta.deta(alpha, .lshape)
+ w * dl.dshape * dshape.deta
+ }), list( .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ d2shape.deta2 = d2theta.deta2(alpha, .lshape) # 0 with identity link
+ d2l.dshape = -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
+ wz = -(dshape.deta^2) * d2l.dshape - d2shape.deta2 * dl.dshape
+ wz = w * wz
+ wz[wz < .Machine$double.eps] = .Machine$double.eps
+ wz
+ }), list( .lshape=lshape ))))
+}
+
+
+betaff = function(link="loge", i1=NULL, i2=NULL, trim=0.05,
+ A=0, B=1, earg=list(), zero=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if(!is.list(earg)) earg = list()
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ if(!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
+ stop("A must be < B, and both must be of length one")
+ stdbeta = (A==0 && B==1) # stdbeta==T iff standard beta distribution
+
+ new("vglmff",
+ blurb=c("Two-parameter Beta distribution\n",
+ if(stdbeta)
+ paste("y^(shape1-1) * (1-y)^(shape2-1) / B(shape1,shape2),",
+ "0<=y<=1, shape1>0, shape2>0\n\n")
+ else
+ paste("(y-",A,")^(shape1-1) * (",B,
+ "-y)^(shape2-1) / [B(shape1,shape2) * (",
+ B, "-", A, ")^(shape1+shape2-1)], ",
+ A,"<=y<=",B," shape1>0, shape2>0\n\n", sep=""),
+ "Links: ",
+ namesof("shape1", link, earg=earg), ", ",
+ namesof("shape2", link, earg=earg)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(min(y) <= .A || max(y) >= .B)
+ stop("data not within (A, B)")
+ predictors.names = c(namesof("shape1", .link, earg= .earg, short= TRUE),
+ namesof("shape2", .link, earg= .earg, short= TRUE))
+ if(is.numeric( .i1 ) && is.numeric( .i2 )) {
+ vec = c(.i1, .i2)
+ vec = c(theta2eta(vec[1], .link, earg= .earg ),
+ theta2eta(vec[2], .link, earg= .earg ))
+ etastart = matrix(vec, n, 2, byrow= TRUE)
+ }
+ if(!length(etastart)) {
+ mu1d = mean(y, trim=.trim)
+ uu = (mu1d-.A) / (.B - .A)
+ DD = (.B - .A)^2
+ pinit = uu^2 * (1-uu)*DD/var(y) - uu # But var(y) is not robust
+ qinit = pinit * (1-uu) / uu
+ etastart = matrix(theta2eta(c(pinit,qinit), .link, earg= .earg ),
+ n, 2, byrow=TRUE)
+ }
+ }), list( .link=link, .i1=i1, .i2=i2, .trim=trim, .A=A, .B=B,
+ .earg=earg ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shapes = eta2theta(eta, .link, earg= .earg )
+ .A + (.B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ }, list( .link=link, .A=A, .B=B, .earg=earg ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape1 = .link, shape2 = .link)
+ misc$limits = c(.A, .B)
+ misc$earg = list(shape1= .earg, shape2= .earg)
+ }), list( .link=link, .A=A, .B=B, .earg=earg ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE, eta, extra=NULL){
+ shapes = eta2theta(eta, .link, earg= .earg )
+ temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
+ lgamma(shapes[,1]) + lgamma(shapes[,2]) -
+ lgamma(shapes[,1]+shapes[,2])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((shapes[,1]-1) * log(y- .A) + (shapes[,2]-1) * log( .B -y) -
+ temp - (shapes[,1]+shapes[,2]-1) * log( .B - .A )))
+ }, list( .link=link, .A=A, .B=B, .earg=earg ))),
+ vfamily="betaff",
+ deriv=eval(substitute(expression({
+ shapes = eta2theta(eta, .link, earg= .earg )
+ dshapes.deta = dtheta.deta(shapes, .link, earg= .earg )
+ dl.dshapes = cbind(log(y-.A), log(.B-y)) - digamma(shapes) +
+ digamma(shapes[,1] + shapes[,2]) - log(.B - .A)
+ w * dl.dshapes * dshapes.deta
+ }), list( .link=link, .A=A, .B=B, .earg=earg ))),
+ weight=expression({
+ temp2 = trigamma(shapes[,1]+shapes[,2])
+ d2l.dshape12 = temp2 - trigamma(shapes[,1])
+ d2l.dshape22 = temp2 - trigamma(shapes[,2])
+ d2l.dshape1shape2 = temp2
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[,1]^2
+ wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[,2]^2
+ wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[,1] * dshapes.deta[,2]
+
+ -w * wz
+ }))
+}
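+
+
+# An illustrative fitting sketch (assumes vglm() and Coef() are attached;
+# inside if(FALSE)): recover both shape parameters of a standard beta.
+if(FALSE) {
+    set.seed(123)
+    yy = rbeta(n=1000, shape1=2, shape2=5)
+    fit = vglm(yy ~ 1, betaff())
+    Coef(fit)                       # should be near (2, 5)
+}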
+
+
+
+beta4 = function(link="loge", i1=2.3, i2=2.4, iA=NULL, iB=NULL)
+{
+
+
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Four-parameter Beta distribution\n",
+ "(y-A)^(shape1-1) * (B-y)^(shape2-1), A < y < B \n\n",
+ "Links: ",
+ namesof("shape1", link), ", ",
+ namesof("shape2", link), ", ",
+ " A, B"),
+ initialize=eval(substitute(expression({
+ if(!(is.vector(y) || (is.matrix(y) && ncol(y) == 1)))
+ stop("y must be a vector or a 1-column matrix")
+
+ if(length(.iA) && any(y < .iA))
+ stop("initial A value out of range")
+ if(length(.iB) && any(y > .iB))
+ stop("initial B value out of range")
+
+ predictors.names = c(
+ namesof("shape1", .link, short= TRUE),
+ namesof("shape2", .link, short= TRUE), "A", "B")
+ my.range = diff(range(y))
+ if(!length(etastart)) {
+ etastart = cbind(shape1= rep(.i1, len=length(y)),
+ shape2= .i2,
+ A = if(length(.iA)) .iA else min(y)-my.range/70,
+ B = if(length(.iB)) .iB else max(y)+my.range/70)
+ }
+ }), list( .i1=i1, .i2=i2, .iA=iA, .iB=iB, .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shapes = eta2theta(eta[,1:2], .link)
+ .A = eta[,3]
+ .B = eta[,4]
+ .A + (.B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape1 = .link, shape2 = .link,
+ A="identity", B="identity")
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ shapes = eta2theta(eta[,1:2], .link)
+ .A = eta[,3]
+ .B = eta[,4]
+ temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
+ lgamma(shapes[,1]) + lgamma(shapes[,2]) -
+ lgamma(shapes[,1]+shapes[,2])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log(.B-y) - temp -
+ (shapes[,1]+shapes[,2]-1)*log(.B-.A )))
+ }, list( .link=link ))),
+ vfamily="beta4",
+ deriv=eval(substitute(expression({
+ shapes = eta2theta(eta[,1:2], .link)
+ .A = eta[,3]
+ .B = eta[,4]
+ dshapes.deta = dtheta.deta(shapes, .link)
+ rr1 = (.B - .A)
+ temp3 = (shapes[,1] + shapes[,2] - 1)
+ temp1 = temp3 / rr1
+ dl.dshapes = cbind(log(y-.A), log(.B-y)) - digamma(shapes) +
+ digamma(shapes[,1] + shapes[,2]) - log(.B - .A)
+ dl.dA = -(shapes[,1]-1) / (y- .A) + temp1
+ dl.dB = (shapes[,2]-1) / (.B - y) - temp1
+ w * cbind(dl.dshapes * dshapes.deta, dl.dA, dl.dB)
+ }), list( .link=link ))),
+ weight=expression({
+
+ temp2 = trigamma(shapes[,1]+shapes[,2])
+ d2l.dshape12 = temp2 - trigamma(shapes[,1])
+ d2l.dshape22 = temp2 - trigamma(shapes[,2])
+ d2l.dshape1shape2 = temp2
+
+ ed2l.dAA = -temp3 * shapes[,2] / ((shapes[,1]-2) * rr1^2)
+ ed2l.dBB = -temp3 * shapes[,1] / ((shapes[,2]-2) * rr1^2)
+ ed2l.dAB = -temp3 / (rr1^2)
+ ed2l.dAshape1 = -shapes[,2] / ((shapes[,1]-1) * rr1)
+ ed2l.dAshape2 = 1/rr1
+ ed2l.dBshape1 = -1/rr1
+ ed2l.dBshape2 = shapes[,1] / ((shapes[,2]-1) * rr1)
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #10=dimm(M)
+ wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[,1]^2
+ wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[,2]^2
+ wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[,1] * dshapes.deta[,2]
+
+ wz[,iam(3,3,M)] = ed2l.dAA
+ wz[,iam(4,4,M)] = ed2l.dBB
+ wz[,iam(4,3,M)] = ed2l.dAB
+
+ wz[,iam(3,1,M)] = ed2l.dAshape1 * dshapes.deta[,1]
+ wz[,iam(3,2,M)] = ed2l.dAshape2 * dshapes.deta[,2]
+ wz[,iam(4,1,M)] = ed2l.dBshape1 * dshapes.deta[,1]
+ wz[,iam(4,2,M)] = ed2l.dBshape2 * dshapes.deta[,2]
+
+
+ -w * wz
+ }))
+}
+
+
+
+simple.exponential = function()
+{
+ new("vglmff",
+ blurb=c("Simple Exponential distribution\n",
+ "Link: log(rate)\n"),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ devy = -log(y) - 1
+ devmu = -log(mu) - y/mu
+ devi = 2 * (devy - devmu)
+ if(residuals) sign(y - mu) * sqrt(abs(devi) * w) else sum(w * devi)
+ },
+ initialize=expression({
+ predictors.names = "log(rate)"
+ mustart = y + (y == 0) / 8
+ }),
+ inverse=function(eta, extra=NULL)
+ exp(-eta),
+ link=function(mu, extra=NULL)
+ -log(mu),
+ vfamily="simple.exponential",
+ deriv=expression({
+ rate = 1 / mu
+ dl.drate = mu - y
+ drate.deta = dtheta.deta(rate, "loge")
+ w * dl.drate * drate.deta
+ }),
+ weight=expression({
+ ed2l.drate2 = -1 / rate^2
+ wz = -w * drate.deta^2 * ed2l.drate2
+ wz
+ }))
+}
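+
+
+# An illustrative sketch (assumes vglm() is attached; inside if(FALSE)):
+# the fitted values recover the exponential mean 1/rate.
+if(FALSE) {
+    set.seed(123)
+    yy = rexp(n=500, rate=0.5)
+    fit = vglm(yy ~ 1, simple.exponential())
+    head(fitted(fit))               # should be near 1/0.5 = 2
+}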
+
+
+exponential = function(link="loge", location=0, expected=TRUE, earg=NULL)
+{
+ if(!is.Numeric(location, allow=1))
+ stop("bad input for argument \"location\"")
+
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Exponential distribution\n\n",
+ "Link: ", namesof("rate", link, tag= TRUE), "\n",
+ "Mean: ", "mu =", location, "+ 1 / ",
+ namesof("rate", link, tag= TRUE, earg=earg), "\n",
+ "Variance: ",
+ if(location==0) "Exponential: mu^2" else
+ paste("(mu-", location, ")^2", sep="")),
+ initialize=eval(substitute(expression({
+ extra$loc = .location # This is passed into, e.g., link, deriv etc.
+ if(any(y <= extra$loc))
+ stop(paste("all responses must be greater than",extra$loc))
+ predictors.names = namesof("rate", .link, tag= FALSE)
+ mu = y + (y == extra$loc) / 8
+ if(!length(etastart))
+ etastart = theta2eta(1/(mu-extra$loc), .link, earg=.earg)
+ }), list( .location=location, .link=link, .earg=earg ))),
+ inverse=eval(substitute(function(eta, extra=NULL)
+ extra$loc + 1 / eta2theta(eta, .link, earg=.earg),
+ list( .link=link, .earg=earg ))),
+ last=eval(substitute(expression({
+ misc$location = extra$loc
+ misc$link = c(rate = .link)
+ misc$earg = .earg
+ }), list( .link=link, .earg=earg ))),
+ link=eval(substitute(function(mu, extra=NULL)
+ theta2eta(1/(mu-extra$loc), .link, earg=.earg),
+ list( .link=link, .earg=earg ))),
+ deviance=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ devy = -log(y - .location) - 1
+ devmu = -log(mu - .location) - (y - .location)/(mu - .location)
+ devi = 2 * (devy - devmu)
+ if(residuals)
+ sign(y - mu) * sqrt(abs(devi) * w) else
+ sum(w * devi)
+ }, list( .location=location, .earg=earg ))),
+ vfamily=c("exponential"),
+ deriv=eval(substitute(expression({
+ rate = 1 / (mu - extra$loc)
+ dl.drate = mu - y
+ drate.deta = dtheta.deta(rate, .link, earg=.earg)
+ w * dl.drate * drate.deta
+ }), list( .link=link, .earg=earg ))),
+ weight=eval(substitute(expression({
+ d2l.drate2 = - ((mu-extra$loc)^2)
+ wz = -(drate.deta^2) * d2l.drate2
+ if(! .expected) {
+ # Use the observed info matrix rather than the expected
+ d2rate.deta2 = d2theta.deta2(rate, .link, earg=.earg)
+ wz = wz - dl.drate * d2rate.deta2
+ }
+ w * wz
+ }), list( .link=link, .expected=expected, .earg=earg ))))
+}
+
+
+
+
+gamma1 = function(link="loge")
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("1-parameter Gamma distribution\n",
+ "Link: ",
+ namesof("shape", link, tag= TRUE), "\n",
+ "Mean: mu (=shape)\n",
+ "Variance: mu (=shape)"),
+ initialize=eval(substitute(expression({
+ if(any(y <= 0))
+ stop("all responses must be positive")
+ M = if(is.matrix(y)) ncol(y) else 1
+ predictors.names = if(M == 1) namesof("shape", .link, short=TRUE) else
+ namesof(paste("shape", 1:M, sep=""), .link, short=TRUE)
+ if(!length(etastart))
+ etastart = cbind(theta2eta(y+1/8, .link ))
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL)
+ eta2theta(eta, .link),
+ list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$expected = TRUE
+ misc$link = rep( .link, length=M)
+ names(misc$link) = if(M == 1) "shape" else paste("shape", 1:M, sep="")
+ }), list( .link=link ))),
+ link=eval(substitute(function(mu, extra=NULL)
+ theta2eta(mu, .link),
+ list( .link=link ))),
+ loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra=NULL)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((mu-1)*log(y) - y - lgamma(mu))),
+ vfamily=c("gamma1"),
+ deriv=eval(substitute(expression({
+ shape = mu
+ dl.dshape = log(y) - digamma(shape)
+ dshape.deta = dtheta.deta(shape, .link)
+ w * dl.dshape * dshape.deta
+ }), list( .link=link ))),
+ weight=expression({
+ d2l.dshape = -trigamma(shape)
+ wz = -(dshape.deta^2) * d2l.dshape
+ w * wz
+ }))
+}
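+
+
+# An illustrative sketch (assumes vglm() and Coef() are attached; inside
+# if(FALSE)): for gamma1() the single parameter is the shape, which is
+# also the mean.
+if(FALSE) {
+    set.seed(123)
+    yy = rgamma(n=500, shape=4)
+    fit = vglm(yy ~ 1, gamma1())
+    Coef(fit)                       # should be near 4
+}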
+
+
+gamma2.ab = function(lrate="loge", lshape="loge",
+ irate=NULL, ishape=NULL, expected=TRUE, zero=2)
+{
+ if(mode(lrate) != "character" && mode(lrate) != "name")
+ lrate = as.character(substitute(lrate))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(length( irate) && !is.Numeric(irate, posit=TRUE))
+ stop("bad input for argument \"irate\"")
+ if(length( ishape) && !is.Numeric(ishape, posit=TRUE))
+ stop("bad input for argument \"ishape\"")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument \"expected\"")
+
+ new("vglmff",
+ blurb=c("2-parameter Gamma distribution\n",
+ "Links: ",
+ namesof("rate", lrate), ", ",
+ namesof("shape", lshape), "\n",
+ "Mean: mu = shape/rate\n",
+ "Variance: (mu^2)/shape = shape/rate^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ # Error check
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a 1-column matrix")
+ if(any(y <= 0))
+ stop("all responses must be positive")
+ predictors.names = c(namesof("rate", .lrate, tag= FALSE),
+ namesof("shape", .lshape, tag= FALSE))
+ if(!length(etastart)) {
+ mymu = y + 0.167 * (y == 0)
+ junk = lsfit(x, y, wt = w, intercept = FALSE)
+ var.y.est = sum(w * junk$resid^2) / (nrow(x) - length(junk$coef))
+ init.shape = if(length( .ishape)) .ishape else mymu^2 / var.y.est
+ init.rate = if(length( .irate)) .irate else init.shape / mymu
+ init.rate = rep(init.rate, len=n)
+ init.shape = rep(init.shape, len=n)
+ if( .lshape == "loglog")
+ init.shape[init.shape <= 1] = 3.1 #Hopefully value is big enough
+ etastart = cbind(theta2eta(init.rate, .lrate),
+ theta2eta(init.shape, .lshape))
+ }
+ }), list( .lrate=lrate, .lshape=lshape, .irate=irate, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,2], .lshape) / eta2theta(eta[,1], .lrate)
+ }, list( .lrate=lrate, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(rate= .lrate, shape= .lshape)
+ }), list( .lrate=lrate, .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ rate = eta2theta(eta[,1], .lrate)
+ shape = eta2theta(eta[,2], .lshape)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(-rate * y + (shape-1)*log(y) + shape*log(rate) - lgamma(shape )))
+ }, list( .lrate=lrate, .lshape=lshape ))),
+ vfamily=c("gamma2.ab"),
+ deriv=eval(substitute(expression({
+ rate = eta2theta(eta[,1], .lrate)
+ shape = eta2theta(eta[,2], .lshape)
+ dl.drate = mu - y
+ dl.dshape = log(y*rate) - digamma(shape)
+ dratedeta = dtheta.deta(rate, .lrate)
+ dshape.deta = dtheta.deta(shape, .lshape)
+ w * cbind(dl.drate * dratedeta, dl.dshape * dshape.deta)
+ }), list( .lrate=lrate, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ d2l.dshape2 = -trigamma(shape)
+ d2l.drate2 = -shape/(rate^2)
+ d2l.drateshape = 1/rate
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = -d2l.drate2 * dratedeta^2
+ wz[,iam(2,2,M)] = -d2l.dshape2 * dshape.deta^2
+ wz[,iam(1,2,M)] = -d2l.drateshape * dratedeta * dshape.deta
+ if(! .expected) {
+ d2ratedeta2 = d2theta.deta2(rate, .lrate)
+ d2shapedeta2 = d2theta.deta2(shape, .lshape)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.drate * d2ratedeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dshape * d2shapedeta2
+ }
+ w * wz
+ }), list( .lrate=lrate, .lshape=lshape, .expected=expected ))))
+}
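+
+# An illustrative fit of the (rate, shape) parameterization on simulated data;
+# vglm() and coef(..., matrix=TRUE) are assumed from elsewhere in the package.
+if(FALSE) {
+ set.seed(1)
+ gdata = data.frame(y = rgamma(n=500, shape=4, rate=2))
+ fit = vglm(y ~ 1, gamma2.ab, data=gdata)
+ coef(fit, matrix=TRUE) # one column per eta: loge(rate), loge(shape)
+}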
+
+
+
+gamma2 = function(lmu="loge", lshape="loge", method.init=1,
+ deviance.arg=FALSE, ishape=NULL, zero=-2)
+{
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(length( ishape) && !is.Numeric(ishape, posit=TRUE))
+ stop("bad input for argument \"ishape\"")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2)
+ stop("'method.init' must be 1 or 2")
+
+ ans =
+ new("vglmff",
+ blurb=c("2-parameter Gamma distribution",
+ " (McCullagh \& Nelder 1989 parameterization)\n",
+ "Links: ",
+ namesof("mu", lmu), ", ",
+ namesof("shape", lshape), "\n",
+ "Mean: mu\n",
+ "Variance: (mu^2)/shape"),
+ constraints=eval(substitute(expression({
+ temp752 = .zero
+ if(length(temp752) && all(temp752 == -2))
+ temp752 = 2*(1:ncol(y))
+ constraints = cm.zero.vgam(constraints, x, temp752, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(is.R()) assign("CQO.FastAlgorithm",
+ ( .lmu == "loge" && .lshape == "loge"), envir = VGAMenv) else
+ CQO.FastAlgorithm <<- ( .lmu == "loge" && .lshape == "loge")
+ if(any(function.name == c("cqo","cao")) &&
+ is.Numeric( .zero, allow=1) && .zero != -2)
+ stop("argument zero=-2 is required")
+
+ y = as.matrix(y)
+ M = 2 * ncol(y)
+ NOS = ncoly = ncol(y) # Number of species
+ predictors.names = c(namesof(if(NOS==1) "mu" else
+ paste("mu", 1:NOS, sep=""), .lmu, tag= FALSE),
+ namesof(if(NOS==1) "shape" else paste("shape", 1:NOS, sep=""),
+ .lshape, tag= FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M=2)]
+
+
+ # Error check
+ if(any(y <= 0))
+ stop("all responses must be positive") # see @loglikelihood
+ if(!length(etastart)) {
+ init.shape = matrix(1.0, n, NOS)
+ mymu = y # + 0.167 * (y == 0) # method.init == 1 (the default)
+ if( .method.init == 2) {
+ for(ii in 1:ncol(y)) {
+ mymu[,ii] = weighted.mean(y[,ii], w=w)
+ }
+ }
+ for(spp in 1:NOS) {
+ junk = lsfit(x, y[,spp], wt = w, intercept = FALSE)
+ var.y.est = sum(w * junk$resid^2) / (n - length(junk$coef))
+ init.shape[,spp] = if(length( .ishape)) .ishape else
+ mymu[,spp]^2 / var.y.est
+ if( .lshape == "loglog") init.shape[init.shape[,spp] <=
+ 1,spp] = 3.1 # Hopefully value is big enough
+ }
+ etastart = cbind(theta2eta(mymu, .lmu),
+ theta2eta(init.shape, .lshape))
+ etastart = etastart[,interleave.VGAM(M, M=2),drop=FALSE]
+ }
+ }), list( .lmu=lmu, .lshape=lshape, .ishape=ishape, .zero=zero,
+ .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ NOS = ncol(eta) / 2
+ eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ if(is.R()) {
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
+ } else {
+ while(exists("CQO.FastAlgorithm"))
+ remove("CQO.FastAlgorithm")
+ }
+ tmp34 = c(rep( .lmu, length=NOS), rep( .lshape, length=NOS))
+ names(tmp34) = c(if(NOS==1) "mu" else paste("mu", 1:NOS, sep=""),
+ if(NOS==1) "shape" else paste("shape", 1:NOS, sep=""))
+ tmp34 = tmp34[interleave.VGAM(M, M=2)]
+ misc$link = tmp34 # Already named
+ misc$expected = TRUE
+ }), list( .lmu=lmu, .lshape=lshape ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ temp = theta2eta(mu, .lmu)
+ temp = cbind(temp, NA * temp)
+ temp[,interleave.VGAM(ncol(temp), M=2),drop=FALSE]
+ }, list( .lmu=lmu ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ NOS = ncol(eta) / 2
+ mymu = mu # eta2theta(eta[,2*(1:NOS)-1], .lmu)
+ shapemat = eta2theta(eta[,2*(1:NOS),drop=FALSE], .lshape)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*((shapemat - 1) * log(y) + shapemat *
+ (log(shapemat) - y / mymu - log(mymu)) - lgamma(shapemat )))
+ }, list( .lmu=lmu, .lshape=lshape ))),
+ vfamily=c("gamma2"),
+ deriv=eval(substitute(expression({
+ NOS = ncol(eta) / 2
+ mymu = eta2theta(eta[,2*(1:NOS)-1], .lmu)
+ shape = eta2theta(eta[,2*(1:NOS)], .lshape)
+ dl.dmu = shape * (y / mymu - 1) / mymu
+ dl.dshape = log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
+ y / mymu
+ dmu.deta = dtheta.deta(mymu, .lmu)
+ dshape.deta = dtheta.deta(shape, .lshape)
+ myderiv = w * cbind(dl.dmu * dmu.deta, dl.dshape * dshape.deta)
+ myderiv[,interleave.VGAM(M, M=2)]
+ }), list( .lmu=lmu, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ ed2l.dmu2 = shape / (mymu^2)
+ ed2l.dshape2 = trigamma(shape) - 1 / shape
+ wz = matrix(as.numeric(NA), n, M) #2=M; diagonal!
+ NOS = M / 2
+ wz[,2*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
+ wz[,2*(1:NOS)] = ed2l.dshape2 * dshape.deta^2
+ w * wz
+ }), list( .lmu=lmu ))))
+
+ if(deviance.arg) ans@deviance=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ NOS = ncol(eta) / 2
+ temp300 = eta[,2*(1:NOS),drop=FALSE]
+ if( .lshape == "loge") {
+ bigval = 28
+ temp300[temp300 > bigval] = bigval
+ temp300[temp300 < -bigval] = -bigval
+ } else stop("can only handle the 'loge' link")
+ shape = eta2theta(temp300, .lshape)
+ devi = -2 * (log(y/mu) - y/mu + 1)
+ if(residuals) {
+ warning("not 100% sure about these deviance residuals!")
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else
+ sum(w * devi)
+ }, list( .lshape=lshape )))
+ ans
+}
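+
+# A sketch of the mean-parameterized family (simulated data; vglm() assumed):
+# fitted values should be close to mean(y), since mu is the first parameter.
+if(FALSE) {
+ set.seed(2)
+ gdata = data.frame(y = rgamma(n=500, shape=5, rate=5/3)) # mean 3
+ fit = vglm(y ~ 1, gamma2, data=gdata)
+ coef(fit, matrix=TRUE) # eta1 = loge(mu), eta2 = loge(shape)
+ head(fitted(fit)) # approximately mean(gdata$y)
+}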
+
+
+geometric = function(link="logit", expected=TRUE)
+{
+ if(!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument \"expected\"")
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Geometric distribution (P[Y=y] = prob*(1-prob)^y, y=0,1,2,...)\n",
+ "Link: ",
+ namesof("prob", link), "\n",
+ "Mean: mu = (1-prob)/prob\n",
+ "Variance: mu*(1+mu) = (1-prob)/prob^2"),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a 1-column matrix")
+ if(any(y < 0)) stop("all responses must be >= 0")
+ if(any(y!=round(y ))) stop("response should be integer-valued")
+ predictors.names = c(namesof("prob", .link, tag= FALSE))
+ if(!length(etastart)) {
+ prob.init = 1 / (1 + y + 1/16)
+ etastart = theta2eta(prob.init, .link)
+ }
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ prob = eta2theta(eta, .link)
+ (1-prob)/prob
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(prob= .link)
+ misc$expected = .expected
+ }), list( .link=link, .expected=expected ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ prob = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R()) sum(w * dgeom(x=y, prob=prob, log=TRUE)) else
+ sum(w*(y * log(1.0-prob) + log(prob )))
+ }
+ }, list( .link=link ))),
+ vfamily=c("geometric"),
+ deriv=eval(substitute(expression({
+ prob = eta2theta(eta, .link)
+ dl.dprob = -y/(1-prob) + 1/prob
+ dprobdeta = dtheta.deta(prob, .link)
+ w * cbind(dl.dprob * dprobdeta)
+ }), list( .link=link, .expected=expected ))),
+ weight=eval(substitute(expression({
+ ed2l.dprob2 = if( .expected ) 1 / (prob^2 * (1-prob)) else
+ y / (1-prob)^2 + 1 / prob^2
+ wz = ed2l.dprob2 * dprobdeta^2
+ if( !( .expected )) wz = wz - dl.dprob * d2theta.deta2(prob, .link)
+ w * wz
+ }), list( .link=link, .expected=expected ))))
+}
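+
+# A quick illustrative check (simulated data; vglm() assumed): the fitted
+# mean (1-prob)/prob should track the sample mean of y.
+if(FALSE) {
+ set.seed(3)
+ gdata = data.frame(y = rgeom(n=1000, prob=0.25)) # mean (1-p)/p = 3
+ fit = vglm(y ~ 1, geometric, data=gdata)
+ coef(fit) # logit(prob); expect about logit(0.25)
+ head(fitted(fit))
+}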
+
+
+dbetageom = function(x, shape1, shape2, log=FALSE) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ N = max(length(x), length(shape1), length(shape2))
+ x = rep(x, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
+ if(!is.R()) {
+ beta = function(x,y) gamma(x) * gamma(y) / gamma(x+y)
+ lbeta = function(x,y) lgamma(x) + lgamma(y) - lgamma(x+y)
+ }
+ ans = if(log) lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2) else
+ beta(1+shape1, shape2+abs(x)) / beta(shape1, shape2)
+ ifelse(x == round(x) & x >= 0, ans, if(log) -Inf else 0)
+}
+
+
+pbetageom = function(q, shape1, shape2, log.p=FALSE) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ N = max(length(q), length(shape1), length(shape2))
+ q = rep(q, len=N); shape1 = rep(shape1, len=N); shape2 = rep(shape2, len=N)
+ ans = q * 0 # Retains names(q)
+ if(max(abs(shape1-shape1[1])) < 1.0e-08 &&
+ max(abs(shape2-shape2[1])) < 1.0e-08) {
+ qstar = floor(q)
+ temp = if(max(qstar) >= 0) dbetageom(x=0:max(qstar),
+ shape1=shape1[1], shape2=shape2[1]) else 0*qstar
+ unq = unique(qstar)
+ for(i in unq) {
+ index = qstar == i
+ ans[index] = if(i >= 0) sum(temp[1:(1+i)]) else 0
+ }
+ } else
+ for(i in 1:N) {
+ qstar = floor(q[i])
+ ans[i] = if(qstar >= 0) sum(dbetageom(x=0:qstar,
+ shape1=shape1[i], shape2=shape2[i])) else 0
+ }
+ if(log.p) log(ans) else ans
+}
+
+rbetageom = function(n, shape1, shape2) {
+ if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(shape1, pos=TRUE)) stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, pos=TRUE)) stop("bad input for argument \"shape2\"")
+ rgeom(n=n, prob = rbeta(n=n, shape1=shape1, shape2=shape2))
+}
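+
+# A small consistency check (illustrative only): pbetageom() accumulates
+# dbetageom() over 0:floor(q), and rbetageom() draws rgeom() variates whose
+# prob parameter is itself beta-distributed.
+if(FALSE) {
+ probs = dbetageom(x=0:5, shape1=2, shape2=3)
+ cumsum(probs) # should match the next line, element by element
+ pbetageom(q=0:5, shape1=2, shape2=3)
+ mean(rbetageom(n=10000, shape1=2, shape2=3)) # Monte Carlo check of the mean
+}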
+
+
+
+
+tobit = function(Lower = 0, Upper = Inf, lmu="identity",
+ lsd="loge", imethod=1, zero=2)
+{
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lsd) != "character" && mode(lsd) != "name")
+ lsd = as.character(substitute(lsd))
+ if(!is.Numeric(imethod, allow=1, integer=TRUE, positi=TRUE) || imethod > 2)
+ stop("imethod must be 1 or 2")
+ if(length(Lower) != 1 || length(Upper) != 1 ||
+ !is.numeric(Lower) || !is.numeric(Upper) || Lower >= Upper)
+ stop("Lower and Upper must have length 1 and be numeric with Lower < Upper")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Tobit model\n\n",
+ "Links: ", namesof("mu", lmu, tag= TRUE), "; ",
+ namesof("sd", lsd, tag= TRUE), "\n",
+ "Conditional variance: sd^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = cbind(y)
+ if(ncol(y)>1) stop("the response must be a vector or a 1-column matrix")
+ extra$censoredL = (y <= .Lower)
+ extra$censoredU = (y >= .Upper)
+ if(min(y) < .Lower) {
+ warning(paste("replacing response values less than the value ",
+ .Lower, "by", .Lower))
+ y[y < .Lower] = .Lower
+ }
+ if(max(y) > .Upper) {
+ warning(paste("replacing response values greater than the value",
+ .Upper, "by", .Upper))
+ y[y > .Upper] = .Upper
+ }
+ predictors.names = c(namesof("mu", .lmu, tag= FALSE),
+ namesof("sd", .lsd, tag= FALSE))
+ if(!length(etastart)) {
+ anyc = extra$censoredL | extra$censoredU
+ i11 = if( .imethod == 1) anyc else FALSE # can be all data
+ junk=if(is.R()) lm.wfit(x=cbind(x[!i11,]),y=y[!i11],w=w[!i11]) else
+ lm.wfit(x=cbind(x[!i11,]), y=y[!i11], w=w[!i11],method="qr")
+ sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
+ etastart = cbind(mu=y, rep(theta2eta(sd.y.est, .lsd), length=n))
+ if(any(anyc)) etastart[anyc,1] = x[anyc,,drop=FALSE] %*% junk$coeff
+ }
+ }), list( .Lower=Lower, .Upper=Upper, .lmu=lmu, .lsd=lsd, .imethod=imethod ))),
+ inverse=eval(substitute( function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ misc$link = c("mu"= .lmu, "sd"= .lsd)
+ misc$expected = TRUE
+ misc$Lower = .Lower
+ misc$Upper = .Upper
+ }), list( .lmu=lmu, .lsd=lsd, .Lower=Lower, .Upper=Upper ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ cenL = extra$censoredL
+ cenU = extra$censoredU
+ cen0 = !cenL & !cenU # uncensored obsns
+ mum = eta2theta(eta[,1], .lmu)
+ sd = eta2theta(eta[,2], .lsd)
+ ell1 = -log(sd[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sd[cen0])^2
+ ell2 = log(1 - pnorm((mum[cenL] - .Lower)/sd[cenL]))
+ ell3 = log(1 - pnorm(( .Upper - mum[cenU])/sd[cenU]))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+ }, list( .lmu=lmu, .lsd=lsd, .Lower=Lower, .Upper=Upper ))),
+ vfamily=c("tobit"),
+ deriv=eval(substitute(expression({
+ cenL = extra$censoredL
+ cenU = extra$censoredU
+ cen0 = !cenL & !cenU # uncensored obsns
+ mum = eta2theta(eta[,1], .lmu)
+ sd = eta2theta(eta[,2], .lsd)
+ dl.dmu = (y-mum) / sd^2
+ dl.dsd = (((y-mum)/sd)^2 - 1) / sd
+ dmu.deta = dtheta.deta(mum, .lmu)
+ dsd.deta = dtheta.deta(sd, .lsd)
+ if(any(cenL)) {
+ mumL = mum - .Lower
+ temp21L = mumL[cenL] / sd[cenL]
+ PhiL = pnorm(temp21L)
+ phiL = dnorm(temp21L)
+ fred21 = phiL / (1 - PhiL)
+ dl.dmu[cenL] = -fred21 / sd[cenL]
+ dl.dsd[cenL] = mumL[cenL] * fred21 / sd[cenL]^2
+ rm(fred21)
+ }
+ if(any(cenU)) {
+ mumU = .Upper - mum
+ temp21U = mumU[cenU] / sd[cenU]
+ PhiU = pnorm(temp21U)
+ phiU = dnorm(temp21U)
+ fred21 = phiU / (1 - PhiU)
+ dl.dmu[cenU] = fred21 / sd[cenU] # Negated
+ dl.dsd[cenU] = mumU[cenU] * fred21 / sd[cenU]^2
+ rm(fred21)
+ }
+ w * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
+ }), list( .lmu=lmu, .lsd=lsd, .Lower=Lower, .Upper=Upper ))),
+ weight=eval(substitute(expression({
+ A1 = 1 - pnorm((mum - .Lower) / sd) # Lower
+ A3 = 1 - pnorm(( .Upper - mum) / sd) # Upper
+ A2 = 1 - A1 - A3 # Middle; uncensored
+ wz = matrix(0, n, 3)
+ wz[,iam(1,1,M)] = A2 * 1 / sd^2 # ed2l.dmu2
+ wz[,iam(2,2,M)] = A2 * 2 / sd^2 # ed2l.dsd2
+ mumL = mum - .Lower
+ temp21L = mumL / sd
+ PhiL = pnorm(temp21L)
+ phiL = dnorm(temp21L)
+ temp31L = ((1-PhiL) * sd)^2
+ wz.cenL11 = phiL * (phiL - (1-PhiL)*temp21L) / temp31L
+ wz.cenL22 = mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
+ mumL * phiL / sd) / (sd * temp31L)
+ wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) - temp21L*phiL) / temp31L
+ wz.cenL11[!is.finite(wz.cenL11)] = 0
+ wz.cenL22[!is.finite(wz.cenL22)] = 0
+ wz.cenL12[!is.finite(wz.cenL12)] = 0
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A1 * wz.cenL11
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A1 * wz.cenL22
+ wz[,iam(1,2,M)] = A1 * wz.cenL12
+ mumU = .Upper - mum # often Inf
+ temp21U = mumU / sd # often Inf
+ PhiU = pnorm(temp21U) # often 1
+ phiU = dnorm(temp21U) # often 0
+ temp31U = ((1-PhiU) * sd)^2 # often 0
+ tmp8 = (1-PhiU)*temp21U
+ wzcenU11 = phiU * (phiU - tmp8) / temp31U
+ tmp9 = (1-PhiU) * (2 - temp21U^2)
+ wzcenU22 = mumU * phiU * (tmp9 + mumU * phiU / sd) / (sd * temp31U)
+ wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) - temp21U*phiU) / temp31U
+ wzcenU11[!is.finite(wzcenU11)] = 0 # Needed when .Upper==Inf
+ wzcenU22[!is.finite(wzcenU22)] = 0 # Needed when .Upper==Inf
+ wzcenU12[!is.finite(wzcenU12)] = 0 # Needed when .Upper==Inf
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A3 * wzcenU11
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A3 * wzcenU22
+ wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + A3 * wzcenU12
+ wz[,iam(1,1,M)] = w * wz[,iam(1,1,M)] * dmu.deta^2
+ wz[,iam(2,2,M)] = w * wz[,iam(2,2,M)] * dsd.deta^2
+ wz[,iam(1,2,M)] = w * wz[,iam(1,2,M)] * dmu.deta * dsd.deta
+ wz
+ }), list( .lmu=lmu, .Lower=Lower, .Upper=Upper, .lsd=lsd ))))
+}
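+
+# A sketch on simulated left-censored data (vglm() assumed): the latent
+# regression coefficients should be roughly recovered despite censoring at 0.
+if(FALSE) {
+ set.seed(4)
+ tdata = data.frame(x2 = runif(n=500))
+ tdata$ystar = 1 + 2 * tdata$x2 + rnorm(n=500, sd=0.5) # latent response
+ tdata$y = pmax(tdata$ystar, 0) # observed response, censored below at Lower=0
+ fit = vglm(y ~ x2, tobit(Lower=0), data=tdata)
+ coef(fit, matrix=TRUE)
+}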
+
+
+
+normal1 = function(lmean="identity", lsd="loge", zero=NULL)
+{
+
+ if(mode(lmean) != "character" && mode(lmean) != "name")
+ lmean = as.character(substitute(lmean))
+ if(mode(lsd) != "character" && mode(lsd) != "name")
+ lsd = as.character(substitute(lsd))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Univariate Normal distribution\n\n",
+ "Links: ",
+ namesof("mean", lmean, tag= TRUE), "; ",
+ namesof("sd", lsd, tag= TRUE),
+ "\n",
+ "Variance: sd^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("mean", .lmean, tag= FALSE),
+ namesof("sd", .lsd, tag= FALSE))
+ if(ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(!length(etastart)) {
+ junk = if(is.R()) lm.wfit(x=x, y=y, w=w) else
+ lm.wfit(x=x, y=y, w=w, method="qr")
+ sd.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ mean.init = if( .lmean == "loge") pmax(1/1024, y) else y
+ etastart = cbind(theta2eta(mean.init, .lmean),
+ theta2eta(sd.y.est, .lsd))
+ }
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmean)
+ }, list( .lmean=lmean ))),
+ last=eval(substitute(expression({
+ misc$link = c("mu"= .lmean, "sd"= .lsd)
+ misc$expected = TRUE
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ sd = eta2theta(eta[,2], .lsd)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R())
+ sum(w*dnorm(y, m=mu, sd=sd, log=TRUE)) else
+ sum(w * (-log(sd*sqrt(2*pi)) - 0.5 * ((y - mu)/sd)^2))
+ }
+ }, list( .lsd=lsd ))),
+ vfamily=c("normal1"),
+ deriv=eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmean)
+ sd = eta2theta(eta[,2], .lsd)
+ dl.dmu = (y-mymu) / sd^2
+ dl.dsd = -1/sd + (y-mymu)^2 / sd^3
+ dmu.deta = dtheta.deta(mymu, .lmean)
+ dsd.deta = dtheta.deta(sd, .lsd)
+ cbind(w * dl.dmu * dmu.deta, w * dl.dsd * dsd.deta)
+ }), list( .lmean=lmean, .lsd=lsd ))),
+ weight=expression({
+ wz = matrix(as.numeric(NA), n, 2) # diagonal matrix; y is one-column too
+ ed2l.dmu2 = -1 / sd^2
+ ed2l.dsd2 = -2 / sd^2 # expected d2l/dsd2; the Fisher information for sd is 2/sd^2
+ wz[,iam(1,1,M)] = -w * ed2l.dmu2 * dmu.deta^2
+ wz[,iam(2,2,M)] = -w * ed2l.dsd2 * dsd.deta^2
+ wz
+ }))
+}
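+
+# An intercept-only illustration (simulated data; vglm() assumed) estimating
+# the mean on the identity scale and the standard deviation on the log scale.
+if(FALSE) {
+ set.seed(5)
+ ndata = data.frame(y = rnorm(n=500, mean=10, sd=2))
+ fit = vglm(y ~ 1, normal1, data=ndata)
+ coef(fit, matrix=TRUE) # about (10, log(2))
+}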
+
+
+
+
+
+lognormal = function(lmeanlog="identity", lsdlog="loge", zero=NULL)
+{
+ if(mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
+ lmeanlog = as.character(substitute(lmeanlog))
+ if(mode(lsdlog) != "character" && mode(lsdlog) != "name")
+ lsdlog = as.character(substitute(lsdlog))
+ if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
+ zero > 2)) stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Two-parameter (univariate) lognormal distribution\n\n",
+ "Links: ", namesof("meanlog", lmeanlog, tag= TRUE), ", ",
+ namesof("sdlog", lsdlog, tag= TRUE)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(min(y) <= 0) stop("response must be positive")
+ predictors.names = c(namesof("meanlog", .lmeanlog, tag= FALSE),
+ namesof("sdlog", .lsdlog, tag= FALSE))
+ if(!length(etastart)) {
+ junk = if(is.R()) lm.wfit(x=x, y=log(y), w=w) else
+ lm.wfit(x=x, y=log(y), w=w, method="qr")
+ sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ etastart = cbind(meanlog= rep(theta2eta(log(median(y)), .lmeanlog), length=n),
+ sdlog= rep(theta2eta(sdlog.y.est, .lsdlog), length=n))
+ }
+ }), list( .lmeanlog = lmeanlog, .lsdlog=lsdlog ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mulog = eta2theta(eta[,1], .lmeanlog)
+ sdlog = eta2theta(eta[,2], .lsdlog)
+ exp(mulog + 0.5 * sdlog^2)
+ }, list( .lmeanlog = lmeanlog, .lsdlog=lsdlog ))),
+ last=eval(substitute(expression({
+ misc$link = c("meanlog"= .lmeanlog, "sdlog"= .lsdlog)
+ misc$expected = TRUE
+ }), list( .lmeanlog = lmeanlog, .lsdlog=lsdlog ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ mulog = eta2theta(eta[,1], .lmeanlog)
+ sdlog = eta2theta(eta[,2], .lsdlog)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R())
+ sum(w*dlnorm(y, meanlog=mulog, sdlog=sdlog, log=TRUE)) else
+ sum(w * (-log(y*sdlog*sqrt(2*pi)) - 0.5 * ((log(y) - mulog)/sdlog)^2))
+ }
+ }, list( .lmeanlog = lmeanlog, .lsdlog=lsdlog ))),
+ vfamily=c("lognormal"),
+ deriv=eval(substitute(expression({
+ mulog = eta2theta(eta[,1], .lmeanlog)
+ sdlog = eta2theta(eta[,2], .lsdlog)
+ dl.dmulog = (log(y)-mulog) / sdlog^2
+ dl.dsdlog = -1/sdlog + (log(y)-mulog)^2 / sdlog^3
+ dmulog.deta = dtheta.deta(mulog, .lmeanlog)
+ dsdlog.deta = dtheta.deta(sdlog, .lsdlog)
+ w * cbind(dl.dmulog * dmulog.deta,
+ dl.dsdlog * dsdlog.deta)
+ }), list( .lmeanlog = lmeanlog, .lsdlog=lsdlog ))),
+ weight=expression({
+ wz = matrix(as.numeric(NA), n, 2) # Diagonal!
+ ed2l.dmulog2 = 1 / sdlog^2
+ ed2l.dsdlog2 = 2 * ed2l.dmulog2
+ wz[,iam(1,1,M)] = ed2l.dmulog2 * dmulog.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsdlog2 * dsdlog.deta^2
+ wz = w * wz
+ wz
+ }))
+}
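+
+# An illustrative fit (simulated data; vglm() assumed). Note that fitted()
+# returns the lognormal mean exp(meanlog + 0.5*sdlog^2), matching the
+# inverse slot above.
+if(FALSE) {
+ set.seed(6)
+ ldata = data.frame(y = rlnorm(n=500, meanlog=1, sdlog=0.5))
+ fit = vglm(y ~ 1, lognormal, data=ldata)
+ coef(fit, matrix=TRUE) # about (1, log(0.5))
+}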
+
+if(!is.R()) {
+
+ qlognormal = function(p, meanlog=0, sdlog=1, lambda=0)
+ lambda + exp(qnorm(p=p, mean=meanlog, sd=sdlog))
+
+ dlognormal = function(x, meanlog=0, sdlog=1, lambda=0)
+ (x > lambda) *
+ dnorm(x=log(abs(x-lambda)), mean=meanlog, sd=sdlog) / (x-lambda)
+
+ rlognormal = function(n, meanlog=0, sdlog=1, lambda=0)
+ lambda + exp(rnorm(n, mean=meanlog, sd=sdlog))
+
+ plognormal = function(q, meanlog=0, sdlog=1, lambda=0)
+ (q>lambda) * pnorm(q=log(abs(q-lambda)), mean=meanlog, sd=sdlog)
+}
+
+
+
+
+lognormal3 = function(lmeanlog="identity", lsdlog="loge",
+ powers.try = (-3):3,
+ delta=NULL, zero=NULL)
+{
+ if(length(delta) && !is.Numeric(delta, positive=TRUE))
+ stop("bad input for argument argument \"delta\"")
+ if(mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
+ lmeanlog = as.character(substitute(lmeanlog))
+ if(mode(lsdlog) != "character" && mode(lsdlog) != "name")
+ lsdlog = as.character(substitute(lsdlog))
+ if(length(zero) && (!is.Numeric(zero, integer=TRUE, posit=TRUE) ||
+ zero > 3))
+ stop("bad input for argument argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Three-parameter (univariate) lognormal distribution\n\n",
+ "Links: ",
+ namesof("meanlog", lmeanlog, tag= TRUE),
+ "; ", namesof("sdlog", lsdlog, tag= TRUE),
+ "; ", namesof("lambda", "identity", tag= TRUE)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names = c(namesof("meanlog", .lmeanlog, tag= FALSE),
+ namesof("sdlog", .lsdlog, tag= FALSE), "lambda")
+
+ if(!length(etastart)) {
+ miny = min(y)
+ if(length( .delta)) {
+ lambda.init = rep(miny- .delta, length=n)
+ } else {
+ pvalue.vec = NULL
+ powers.try = .powers.try
+ for(delta in 10^powers.try) {
+ pvalue.vec = c(pvalue.vec,
+ shapiro.test(sample(log(y-miny+delta),
+ size=min(5000, length(y ))))$p.value)
+ }
+ index.lambda=(1:length(powers.try))[pvalue.vec==max(pvalue.vec)]
+ lambda.init = miny - 10^powers.try[index.lambda]
+ }
+ junk = if(is.R()) lm.wfit(x=x, y=log(y-lambda.init), w=w) else
+ lm.wfit(x=x, y=log(y-lambda.init), w=w, method="qr")
+ sdlog.y.est = sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ etastart = cbind(mu=log(median(y - lambda.init)),
+ sdlog=rep(theta2eta(sdlog.y.est, .lsdlog), length=n),
+ lambda = lambda.init)
+ }
+ }), list( .lmeanlog=lmeanlog, .lsdlog=lsdlog,
+ .delta = delta, .powers.try=powers.try ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], .lmeanlog)
+ sdlog = eta2theta(eta[,2], .lsdlog)
+ lambda = eta2theta(eta[,3], "identity")
+ lambda + exp(mymu + 0.5 * sdlog^2)
+ }, list( .lmeanlog=lmeanlog, .lsdlog=lsdlog ))),
+ last=eval(substitute(expression({
+ misc$link = c("meanlog"= .lmeanlog,"sdlog"= .lsdlog,"lambda"="identity")
+ misc$expected = TRUE
+ }), list( .lmeanlog=lmeanlog, .lsdlog=lsdlog ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], .lmeanlog)
+ sdlog = eta2theta(eta[,2], .lsdlog)
+ lambda = eta2theta(eta[,3], "identity")
+ if(any(y < lambda))
+ warning("some response values are less than lambda")
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R())
+ sum(w*dlnorm(y-lambda, meanlog=mymu, sdlog=sdlog, log=TRUE)) else
+ sum(w * (-log((y-lambda)*sdlog*sqrt(2*pi)) -
+ 0.5 * ((log(y-lambda) - mymu)/sdlog)^2))
+ }
+ }, list( .lmeanlog=lmeanlog, .lsdlog=lsdlog ))),
+ vfamily=c("lognormal3"),
+ deriv=eval(substitute(expression({
+ mymu = eta2theta(eta[,1], .lmeanlog)
+ sdlog = eta2theta(eta[,2], .lsdlog)
+ lambda = eta2theta(eta[,3], "identity")
+ if(any(y < lambda))
+ warning("some response values are less than lambda")
+ dl.dmymu = (log(y-lambda)-mymu) / sdlog^2
+ dl.dsdlog = -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
+ dl.dlambda = (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
+ dmymu.deta = dtheta.deta(mymu, .lmeanlog)
+ dsdlog.deta = dtheta.deta(sdlog, .lsdlog)
+ dlambda.deta = dtheta.deta(lambda, "identity")
+ w * cbind(dl.dmymu * dmymu.deta,
+ dl.dsdlog * dsdlog.deta,
+ dl.dlambda * dlambda.deta)
+ }), list( .lmeanlog=lmeanlog, .lsdlog=lsdlog ))),
+ weight=expression({
+ wz = matrix(0, n, dimm(M))
+ ed2l.dmymu2 = 1 / sdlog^2
+ ed2l.dsdlog = 2 / sdlog^2
+ temp9 = exp(-mymu+sdlog^2 / 2)
+ ed2l.dlambda2 = exp(2*(-mymu+sdlog^2)) * (1+sdlog^2) / sdlog^2
+ wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsdlog * dsdlog.deta^2
+ wz[,iam(3,3,M)] = ed2l.dlambda2 * dlambda.deta^2
+ wz[,iam(1,3,M)] = temp9 * dmymu.deta * dlambda.deta / sdlog^2
+ wz[,iam(2,3,M)] = -2 * temp9 / sdlog * dsdlog.deta * dlambda.deta
+ wz = w * wz
+ wz
+ }))
+}
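+
+# A sketch of the three-parameter fit on shifted lognormal data (simulated;
+# vglm() assumed). The third linear predictor is the threshold lambda, on
+# the identity scale.
+if(FALSE) {
+ set.seed(7)
+ ldata = data.frame(y = 4 + rlnorm(n=500, meanlog=1, sdlog=0.5))
+ fit = vglm(y ~ 1, lognormal3, data=ldata)
+ coef(fit, matrix=TRUE) # about (1, log(0.5), 4)
+}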
+
+
+
+
+
+
+
+
+interleave.VGAM = function(L, M) c(matrix(1:L, nrow=M, byrow=TRUE))
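+
+# interleave.VGAM(L, M) reorders 1:L so that the M linear predictors of each
+# response alternate, e.g. columns (mu1, mu2, k1, k2) become (mu1, k1, mu2, k2).
+# Illustration only:
+if(FALSE) {
+ interleave.VGAM(4, M=2) # 1 3 2 4
+ interleave.VGAM(6, M=2) # 1 4 2 5 3 6
+}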
+
+negbinomial = function(lmu = "loge", lk = "loge",
+ ik = NULL, cutoff = 0.995, Maxiter=5000,
+ deviance.arg=FALSE, method.init=1,
+ zero = -2)
+{
+
+
+
+
+ if(length(ik) && !is.Numeric(ik, posit=TRUE))
+ stop("bad input for argument \"ik\"")
+ if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+ stop("range error in the argument \"cutoff\"")
+ if(!is.Numeric(Maxiter, integ=TRUE, allow=1) || Maxiter < 100)
+ stop("bad input for argument \"Maxiter\"")
+
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lk) != "character" && mode(lk) != "name")
+ lk = as.character(substitute(lk))
+
+ ans =
+ new("vglmff",
+ blurb=c("Negative-binomial distribution\n\n",
+ "Links: ",
+ namesof("mu", lmu), ", ",
+ namesof("k", lk), "\n",
+ "Mean: mu\n",
+ "Variance: mu * (1 + mu/k)"),
+ constraints=eval(substitute(expression({
+ temp752 = .zero
+ if(length(temp752) && all(temp752 == -2))
+ temp752 = 2*(1:ncol(y))
+ constraints = cm.zero.vgam(constraints, x, temp752, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(is.R())
+ assign("CQO.FastAlgorithm", ( .lmu == "loge") &&
+ ( .lk == "loge"), envir = VGAMenv) else
+ CQO.FastAlgorithm <<- ( .lmu == "loge") && ( .lk == "loge")
+ if(any(function.name == c("cqo","cao")) &&
+ is.Numeric( .zero, allow=1) && .zero != -2)
+ stop("argument zero=-2 is required")
+
+ y = as.matrix(y)
+ M = 2 * ncol(y)
+ NOS = ncoly = ncol(y) # Number of species
+ predictors.names = c(namesof(if(NOS==1) "mu" else
+ paste("mu", 1:NOS, sep=""), .lmu, tag= FALSE),
+ namesof(if(NOS==1) "k" else paste("k", 1:NOS, sep=""), .lk,
+ tag= FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M=2)]
+ if(!length(etastart)) {
+ if( .method.init >= 4) {
+ mu.init = y
+ for(iii in 1:ncol(y)) {
+ mu.init[,iii] = if( .method.init == 4)
+ weighted.mean(y[,iii], w=w) else
+ median(rep(y[,iii], w)) + 1/8
+ }
+ for(iii in 1:ncol(y)) {
+ mu.init[,iii] = pmin(y[,iii] + 1/8, mu.init[,iii])
+ }
+ } else if( .method.init == 3) {
+ mu.init = y + 1/8
+ } else {
+ mu.init = y
+ for(iii in 1:ncol(y))
+ mu.init[,iii] = if( .method.init == 2)
+ weighted.mean(y[,iii], w=w) else
+ median(rep(y[,iii], w)) + 1/8
+ }
+ if( is.Numeric( .k.init )) {
+ kay.init = matrix( .k.init, nr=n, nc=NOS, byrow=TRUE)
+ } else {
+ kay.init = matrix(0, nr=n, nc=NOS)
+ Loglikfun = function(y, mu, kmat, w)
+ sum(w * (y * log(mu/(mu+kmat)) + kmat*log(kmat/(mu+kmat)) +
+ lgamma(y+kmat) - lgamma(kmat) - lgamma(y+1 )))
+ k.grid = rvar = 2^((-3):6)
+ for(spp. in 1:NOS) {
+ for(ii in 1:length(k.grid))
+ rvar[ii] = Loglikfun(y=y[,spp.], mu=mu.init[,spp.],
+ kmat=k.grid[ii], w=w)
+ try.this = k.grid[rvar == max(rvar)]
+ kay.init[,spp.] = try.this
+ }
+ }
+ etastart = cbind(theta2eta(mu.init, .lmu),
+ theta2eta(kay.init, .lk))
+ etastart = etastart[,interleave.VGAM(M, M=2),drop=FALSE]
+ }
+ }), list( .lmu=lmu, .lk=lk, .k.init=ik, .zero=zero,
+ .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ NOS = ncol(eta) / 2
+ eta2theta(eta[,2*(1:NOS)-1,drop=FALSE], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ if(is.R()) {
+ if(exists("CQO.FastAlgorithm", envir = VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
+ } else {
+ while(exists("CQO.FastAlgorithm"))
+ remove("CQO.FastAlgorithm")
+ }
+ temp0303 = c(rep( .lmu, length=NOS), rep( .lk, length=NOS))
+ names(temp0303) = c(if(NOS==1) "mu" else paste("mu", 1:NOS, sep=""),
+ if(NOS==1) "k" else paste("k", 1:NOS, sep=""))
+ temp0303 = temp0303[interleave.VGAM(M, M=2)]
+ misc$link = temp0303 # Already named
+ misc$cutoff = .cutoff
+ misc$method.init = .method.init
+ }), list( .lmu=lmu, .lk=lk, .cutoff=cutoff,
+ .method.init=method.init ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ temp = theta2eta(mu, .lmu)
+ temp = cbind(temp, NA * temp)
+ temp[,interleave.VGAM(ncol(temp), M=2),drop=FALSE]
+ }, list( .lmu=lmu ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ NOS = ncol(eta) / 2
+ temp300 = eta[,2*(1:NOS),drop=FALSE]
+ if( .lk == "loge") {
+ bigval = 28
+ temp300 = ifelse(temp300 > bigval, bigval, temp300)
+ temp300 = ifelse(temp300 < -bigval, -bigval, temp300)
+ }
+ kmat = eta2theta(temp300, .lk)
+ ans =
+ sum(w * (y * log(mu/(mu+kmat)) + kmat*log(kmat/(mu+kmat)) +
+ lgamma(y+kmat) - lgamma(kmat) - lgamma(y+1 )))
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ ans
+ }, list( .lk=lk ))),
+ vfamily=c("negbinomial"),
+ deriv=eval(substitute(expression({
+ NOS = ncol(eta) / 2
+ M = ncol(eta)
+ temp3 = eta[,2*(1:NOS),drop=FALSE]
+ bigval = 28
+ temp3 = ifelse(temp3 > bigval, bigval, temp3)
+ temp3 = ifelse(temp3 < -bigval, -bigval, temp3)
+ kmat = eta2theta(temp3, .lk)
+ dl.dmu = y/mu - (y+kmat)/(kmat+mu)
+ dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(mu+kmat) + 1 +
+ log(kmat/(kmat+mu))
+ dmu.deta = dtheta.deta(mu, .lmu)
+ dk.deta = dtheta.deta(kmat, .lk)
+ myderiv = w * cbind(dl.dmu * dmu.deta, dl.dk * dk.deta)
+ myderiv[,interleave.VGAM(M, M=2)]
+ }), list( .lmu=lmu, .lk=lk ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, M) # wz is 'diagonal'
+ ed2l.dmu2 = 1/mu - 1/(mu+kmat)
+ fred2 = dotFortran(name="enbin9",
+ ans=double(n*NOS), as.double(kmat),
+ as.double(mu), as.double( .cutoff ),
+ as.integer(n), ok=as.integer(1), as.integer(NOS),
+ sumpdf=double(1), macheps=as.double(.Machine$double.eps),
+ as.integer( .Maxiter ))
+ if(fred2$ok != 1)
+ stop("error in Fortran subroutine exnbin9")
+ dim(fred2$ans) = c(n, NOS)
+ ed2l.dk2 = -fred2$ans - 1/kmat + 1/(kmat+mu)
+ wz[,2*(1:NOS)-1] = dmu.deta^2 * ed2l.dmu2
+ wz[,2*(1:NOS)] = dk.deta^2 * ed2l.dk2
+ w * wz
+ }), list( .cutoff=cutoff, .Maxiter=Maxiter ))))
+
+ if(deviance.arg) ans@deviance=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ NOS = ncol(eta) / 2
+ temp300 = eta[,2*(1:NOS),drop=FALSE]
+ if( .lk == "loge") {
+ bigval = 28
+ temp300[temp300 > bigval] = bigval
+ temp300[temp300 < -bigval] = -bigval
+ } else stop("can only handle the 'loge' link")
+ k = eta2theta(temp300, .lk)
+ devi = 2 * (y*log(ifelse(y<1, 1, y)/mu) + (y+k)*log((mu+k)/(k+y )))
+ if(residuals)
+ sign(y - mu) * sqrt(abs(devi) * w) else
+ sum(w * devi)
+ }, list( .lk=lk )))
+ ans
+}
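+
+# A sketch with simulated counts (vglm() assumed); R's rnbinom() in its
+# (mu, size) parameterization corresponds to (mu, k) here.
+if(FALSE) {
+ set.seed(8)
+ ndata = data.frame(y = rnbinom(n=1000, mu=4, size=2))
+ fit = vglm(y ~ 1, negbinomial, data=ndata)
+ coef(fit, matrix=TRUE) # about (log(4), log(2)) under the default loge links
+}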
+
+negbin.ab = function(link.alpha ="loge", link.k ="loge",
+ k.init=1,
+ zero=2,
+ cutoff=0.995)
+{
+
+
+
+ if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+ stop("range error in the argument cutoff")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ if(mode(link.alpha) != "character" && mode(link.alpha) != "name")
+ link.alpha = as.character(substitute(link.alpha))
+ if(mode(link.k) != "character" && mode(link.k) != "name")
+ link.k = as.character(substitute(link.k))
+
+ new("vglmff",
+ blurb=c("Negative-binomial distribution\n\n",
+ "Links: ",
+ namesof("alpha", link.alpha), ", ",
+ namesof("k", link.k),
+ "\n",
+ "Mean: alpha * k",
+ "\n",
+ "Variance: alpha * k * (1 + alpha)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("alpha", .link.alpha, tag= FALSE),
+ namesof("k", .link.k, tag= FALSE))
+
+ if(!length(etastart)) {
+ etastart = cbind(theta2eta((y + 0.16667)/.k.init, .link.alpha),
+ theta2eta( .k.init, .link.k))
+ }
+ }), list( .link.alpha=link.alpha, .link.k=link.k, .k.init=k.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha)
+ k = eta2theta(eta[,2], .link.k)
+ alpha * k
+ }, list( .link.alpha=link.alpha, .link.k=link.k ))),
+ last=eval(substitute(expression({
+ misc$link = c(alpha= .link.alpha, k= .link.k)
+ }), list( .link.alpha=link.alpha, .link.k=link.k ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha)
+ k = eta2theta(eta[,2], .link.k)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (y * log(alpha) - (y+k)*log(alpha+1) + lgamma(y+k) -
+ lgamma(k) - lgamma(y+1 )))
+ }, list( .link.alpha=link.alpha, .link.k=link.k ))),
+ vfamily=c("negbin.ab"),
+ deriv=eval(substitute(expression({
+ alpha = eta2theta(eta[,1], .link.alpha)
+ k = eta2theta(eta[,2], .link.k)
+ dl.dalpha = (y/alpha - k)/(1+alpha)
+ dl.dk = digamma(y+k) - digamma(k) - log(1+alpha)
+ dalpha.deta = dtheta.deta(alpha, .link.alpha)
+ dk.deta = dtheta.deta(k, .link.k)
+ cbind(w * dl.dalpha * dalpha.deta, w * dl.dk * dk.deta)
+ }), list( .link.alpha=link.alpha, .link.k=link.k ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) # 3==dimm(M)
+ ed2l.dalpha2 = k/(alpha*(1+alpha))
+ ed2l.dalphak = 1/(1+alpha) # Not -1/(1+alpha)
+
+ fred = dotFortran(name="enbin8",
+ ans=double(n),
+ as.double(k),
+ as.double(1/(1+alpha)),
+ as.double( .cutoff ),
+ as.integer(n), ok=as.integer(1), as.integer(1),
+ sumpdf=double(1), macheps=as.double(.Machine$double.eps))
+ if(fred$ok != 1)
+ stop("error in Fortran subroutine enbin8")
+ ed2l.dk2 = -fred$ans
+
+ wz[,iam(1,1,M)] = dalpha.deta^2 * ed2l.dalpha2
+ wz[,iam(2,2,M)] = dk.deta^2 * ed2l.dk2
+ wz[,iam(1,2,M)] = dk.deta * dalpha.deta * ed2l.dalphak
+
+ w * wz
+ }), list( .cutoff=cutoff ))))
+}
+
+
+
+if(FALSE)
+nbmud = function(lmu = c("loge","identity","reciprocal"),
+ k.init = 1,
+ zero = -2,
+ cutoff = 0.995,
+ deviance.arg=FALSE)
+{
+ ans = negbinomial(lmu = lmu[1],
+ lk = "reciprocal",
+ ik = k.init,
+ zero = zero,
+ cutoff = cutoff,
+ deviance.arg = deviance.arg)
+ ans@vfamily = "nbmud"
+ ans
+}
+
+
+neg.binomial = function(link.p="logit", link.k="loge",
+ zero=2,
+ ik=NULL,
+ cutoff=0.995)
+{
+
+
+
+ if(!is.Numeric(cutoff, allow=1) || cutoff<0.8 || cutoff>=1)
+ stop("range error in the argument cutoff")
+
+ if(mode(link.p) != "character" && mode(link.p) != "name")
+ link.p = as.character(substitute(link.p))
+ if(link.p=="canonical")
+ link.p = "logc"
+ if(mode(link.k) != "character" && mode(link.k) != "name")
+ link.k = as.character(substitute(link.k))
+
+ new("vglmff",
+ blurb=c("Negative-binomial distribution\n\n",
+ "Links: ",
+ namesof("p", link.p), ", ",
+ namesof("k", link.k), "; mu=k*(1-p)/p",
+ "\n",
+ "Variance: mu(1 + mu/k)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y < 0))
+ stop("response must be non-negative")
+ if(max(abs(y - round(y )))>0.00001)
+ stop("response must be integer-valued")
+
+ predictors.names = c(namesof("p", .link.p, tag= FALSE),
+ namesof("k", .link.k, tag= FALSE))
+
+
+
+ junk = lm(y ~ x - 1, weights=w, smart= FALSE) # singular.ok = FALSE,
+ var.y.est = summary(junk)$sigma^2
+ mu.adj = fitted(junk)
+
+ if(FALSE) {
+ mu = rep(weighted.mean(y, w=w), len=length(y))
+ mu = rep(median(rep(y+0.167, times=w)), len=length(y))
+
+ k = mean(rep(mu^2 / (var.y.est - mu), w), trim=0.05)
+ k = rep(k, length(mu))
+ } else {
+ mu = mu.adj
+ mu[mu <= 0] = min(mu[mu > 0])
+ k = mu.adj^2 / (var.y.est - mu.adj)
+ k[k <= 0] = quantile(k[k>0], prob=0.02)
+ }
+
+ if(length( .ik )) {
+ mu = median(rep(y, times=w))
+ k = rep( .ik , len=length(y))
+ }
+
+ if(!length(etastart)) {
+ prob = k / (k + mu)
+ etastart = cbind(theta2eta(prob, .link.p),
+ theta2eta(k, .link.k))
+ }
+ }), list( .link.p=link.p, .link.k=link.k, .ik=ik ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ prob = eta2theta(eta[,1], .link.p)
+ k = eta2theta(eta[,2], .link.k)
+ k * (1 - prob) / prob
+ }, list( .link.p=link.p, .link.k=link.k ))),
+ last=eval(substitute(expression({
+ misc$link = c(p= .link.p, k= .link.k)
+ }), list( .link.p=link.p, .link.k=link.k ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ prob = eta2theta(eta[,1], .link.p)
+ k = eta2theta(eta[,2], .link.k)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (y * log(1-prob) + k * log(prob) + lgamma(y+k) -
+ lgamma(k) - lgamma(y+1 )))
+ }, list( .link.p=link.p, .link.k=link.k ))),
+ vfamily=c("neg.binomial"),
+ deriv=eval(substitute(expression({
+ prob = eta2theta(eta[,1], .link.p)
+ k = eta2theta(eta[,2], .link.k)
+ dl.dp = k/prob - y/(1-prob)
+ dl.dk = log(prob) + digamma(y+k) - digamma(k)
+ dp.deta = dtheta.deta(prob, .link.p)
+ dk.deta = dtheta.deta(k, .link.k)
+ w * cbind(dl.dp * dp.deta, dl.dk * dk.deta)
+ }), list( .link.p=link.p, .link.k=link.k ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) # 3==dimm(M)
+ d2l.dpk = 1/prob
+
+
+ ed2l.dp2 = -k/(prob^2 * (1-prob)) # "e" for expected value
+ fred = dotFortran(name="exnbin",
+ ans=double(n),
+ as.double(k),
+ as.double(prob),
+ as.double( .cutoff ),
+ as.integer(n), ok=as.integer(1), as.integer(1),
+ sumpdf=double(1))
+ if(fred$ok != 1)
+ stop("error in Fortran subroutine exnbin")
+
+ ed2l.dk2 = fred$ans
+
+ wz[,iam(1,1,M)] = dp.deta^2 * ed2l.dp2
+ wz[,iam(1,2,M)] = d2l.dpk * dp.deta * dk.deta #ed2l.dpk=d2l.dpk
+ wz[,iam(2,2,M)] = dk.deta^2 * ed2l.dk2
+ wz = -w * wz
+ wz
+ }), list( .cutoff=cutoff ))))
+}
+
+
+
+
+
+
+neg.binomial.k = function(k, link="logit", expected=TRUE, ...)
+{
+
+ if(!is.Numeric(k, allow=1, posit=TRUE))
+ stop("bad input for argument argument \"k\"")
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Negative-binomial distribution with k known and p unknown\n",
+ "(k=", k, ") ",
+ if(k==1) "Geometric\n\n" else "\n\n",
+ "Links: ",
+ namesof("p", link), "; p=",k,"/(",k,"+mu)",
+ "\n",
+ "Variance: ",
+ if(k==1) "Geometric: mu(1+mu)" else
+ paste("mu(1 + mu/",k,")", sep="")),
+ deviance=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ prob = .k / ( .k + mu)
+ devy = .k * log( .k / ( .k + y))
+ nz = y != 0
+ devy[nz] = devy[nz] + y[nz] * log(y[nz] / ( .k + y[nz]))
+ devmu = y * log(1 - prob) + .k * log(prob)
+ devi = 2 * (devy - devmu)
+ if(residuals)
+ sign(y - mu) * sqrt(abs(devi) * w) else
+ sum(w * devi)
+ }, list( .link=link, .k=k ))),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("p", .link, tag= FALSE)
+ mu = y + 0.167 * (y == 0)
+
+ if(!length(etastart)) {
+ prob = .k / ( .k + mu)
+ etastart = theta2eta(prob, .link)
+ }
+ }), list( .link=link, .k=k ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ prob = eta2theta(eta, .link)
+ .k * (1 - prob) / prob
+ }, list( .link=link, .k=k ))),
+ last=eval(substitute(expression({
+ misc$link = c(p = .link)
+ misc$k = .k
+ }), list( .link=link, .k=k ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ prob = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (y * log(1-prob) + .k * log(prob) + lgamma(y+ .k) -
+ lgamma( .k ) - lgamma(y+1 )))
+ }, list( .link=link, .k=k ))),
+ vfamily=c("neg.binomial.k"),
+ deriv=eval(substitute(expression({
+ prob = .k / ( .k + mu)
+ dp.deta = dtheta.deta(prob, .link)
+ w * ( .k/prob - y/(1-prob)) * dp.deta
+ }), list( .link=link, .k=k ))),
+ weight=eval(substitute(expression({
+ wz = dp.deta^2 * (y/(1 - prob)^2 + .k/prob^2)
+ if(! .expected) {
+ d2pdeta2 = d2theta.deta2(prob, .link)
+ wz = wz - d2pdeta2 * ( .k/prob - y/(1-prob))
+ }
+ w * wz
+ }), list( .link=link, .k=k, .expected=expected ))))
+}
+
+
+
+
+
+simple.poisson = function()
+{
+ new("vglmff",
+ blurb=c("Poisson distribution\n\n",
+ "Link: log(lambda)",
+ "\n",
+ "Variance: lambda"),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ nz = y > 0
+ devi = - (y - mu)
+ devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ if(residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
+ 2 * sum(w * devi)
+ },
+ initialize=expression({
+ predictors.names = "log(lambda)"
+ mu = y + 0.167 * (y == 0)
+ if(!length(etastart))
+ etastart = log(mu)
+ }),
+ inverse=function(eta, extra=NULL)
+ exp(eta),
+ last=expression({
+ misc$link = c(lambda = "loge")
+ }),
+ link=function(mu, extra=NULL)
+ log(mu),
+ vfamily="simple.poisson",
+ deriv=expression({
+ lambda = mu
+ dl.dlambda = -1 + y/lambda
+ dlambda.deta = dtheta.deta(theta=lambda, link="loge")
+ w * dl.dlambda * dlambda.deta
+ }),
+ weight=expression({
+ d2l.dlambda2 = 1 / lambda
+ w * d2l.dlambda2 * dlambda.deta^2
+ }))
+}
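+
+# An illustrative intercept-only fit (simulated data; vglm() assumed);
+# simple.poisson() hard-wires the log link for lambda.
+if(FALSE) {
+ set.seed(9)
+ pdata = data.frame(y = rpois(n=500, lambda=3))
+ fit = vglm(y ~ 1, simple.poisson, data=pdata)
+ coef(fit) # about log(3)
+}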
+
+
+
+studentt = function(link.df="loglog")
+{
+
+ if(mode(link.df) != "character" && mode(link.df) != "name")
+ link.df = as.character(substitute(link.df))
+
+ new("vglmff",
+ blurb=c("Student t-distribution\n\n",
+ "Link: ",
+ namesof("df", link.df),
+ "\n",
+ "Variance: df/(df-2) if df > 2\n"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("df", .link.df, tag= FALSE)
+ if(!length(etastart)) {
+ init.df = (2*var(y)/(var(y)-1))
+ if(is.na(init.df) || init.df<1)
+ init.df = 4
+ etastart = rep(theta2eta(init.df, .link.df), len=length(y))
+ }
+ }), list( .link.df=link.df ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ df = eta2theta(eta, .link.df)
+ ifelse(df > 1, 0, NA)
+ }, list( .link.df=link.df ))),
+ last=eval(substitute(expression({
+ misc$link = c(df = .plink)
+ }), list( .plink=link.df ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ alpha = mu / sqrt(2/pi - mu^2)
+ theta2eta(alpha, .plink)
+ }, list( .plink=link.df ))),
+ loglikelihood=eval(substitute(function(mu, y, w, residuals = FALSE, eta,
+ extra=NULL) {
+ df = eta2theta(eta, .link.df)
+ temp1 = y^2 / df
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(is.R()) sum(w * dt(x=y, df=df, log=TRUE)) else
+ sum(w * (-log(pi*df)/2 - (df+1)*log(1+temp1)/2 +
+ lgamma((df+1)/2) - lgamma(df/2 )))
+ }
+ }, list( .link.df=link.df ))),
+ vfamily=c("studentt"),
+ deriv=eval(substitute(expression({
+ df = eta2theta(eta, .link.df)
+ temp = 1/df
+ temp1 = y^2 * temp
+ dl.ddf = 0.5*(-temp -log(1+temp1) +(df+1)*y^2/(df^2 * (1+temp1)) +
+ digamma((df+1)/2)-digamma(df/2))
+ ddf.deta = dtheta.deta(theta=df, .link.df)
+ w * dl.ddf * ddf.deta
+ }), list( .link.df=link.df ))),
+ weight=eval(substitute(expression({
+ temp2 = (df+1)/2
+ d2df.deta2 = d2theta.deta2(theta=df, .link.df)
+ negative = -trigamma(df/2)/4 -
+ 0.5*y^2*( (1+temp)/(df+y^2) + temp^2 )/(df+y^2)
+ positive = 0.5*temp^2 +trigamma(temp2)/4 + 0.5*y^2*temp/(df+y^2)
+ d2l.ddf2 = positive + negative
+ wz = -ddf.deta^2 * d2l.ddf2 - dl.ddf * d2df.deta2
+ wz * w
+ }), list( .link.df=link.df ))))
+}
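+
+# A sketch (simulated data; vglm() and the package's Coef() back-transformer
+# assumed): only the degrees of freedom are modelled, with location fixed at
+# zero; the loglog link keeps df > 1.
+if(FALSE) {
+ set.seed(10)
+ tdata = data.frame(y = rt(n=1000, df=4))
+ fit = vglm(y ~ 1, studentt, data=tdata)
+ Coef(fit) # estimated df on the original scale
+}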
+
+
+
+chisq = function(link = "loge")
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Chi-squared distribution\n\n",
+ "Link: ",
+ namesof("df", link)),
+ inverse =eval(substitute(function(eta,extra=NULL) {
+ eta2theta(eta, .link)
+ }, list( .link = link ))),
+ initialize =eval(substitute(expression({
+ predictors.names = namesof("df", .link, tag = FALSE)
+ mu = y + 0.167 * (y == 0)
+ }), list( .link = link ))),
+ last =eval(substitute(expression({
+ misc$link = c(df = .link)
+ }), list( .link = link ))),
+ link=eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, .link)
+ }, list( .link = link ))),
+ loglikelihood =eval(substitute(function(mu,y,w,residuals= FALSE,eta,extra=NULL) {
+ df = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (df*log(0.5)/2 + (df/2 - 1)*log(y) - y/2 -
+ lgamma(df/2 )))
+ }, list( .link = link ))),
+ vfamily="chisq",
+ deriv=eval(substitute(expression({
+ df = eta2theta(eta, .link)
+ dl.dv = (log(y/2) - digamma(df/2)) / 2
+ dv.deta = dtheta.deta(df, .link)
+ w * dl.dv * dv.deta
+ }), list( .link = link ))),
+ weight =eval(substitute(expression({
+ ed2l.dv2 = -trigamma(df/2) / 4
+ wz = -ed2l.dv2 * dv.deta^2
+ wz * w
+ }), list( .link = link ))))
+}
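+
+# An illustrative fit (simulated data; vglm() assumed) estimating the
+# degrees of freedom of a chi-squared sample via the default loge link.
+if(FALSE) {
+ set.seed(11)
+ cdata = data.frame(y = rchisq(n=500, df=5))
+ fit = vglm(y ~ 1, chisq, data=cdata)
+ coef(fit) # about log(5)
+}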
+
+
+
+
+
+
+
+simplex = function(lmu="logit", lsigma="loge", imu=NULL, isigma=NULL)
+{
+
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(lsigma) != "character" && mode(lsigma) != "name")
+ lsigma = as.character(substitute(lsigma))
+
+ new("vglmff",
+ blurb=c("Univariate Simplex distribution \n",
+ "f(y) = [2*pi*sigma^2*(y*(1-y))^3]^(-0.5) * \n",
+ " exp[-0.5*(y-mu)^2 / (y*(1-y)*mu^2*(1-mu)^2)/sigma^2], ",
+ " 0 < y < 1,\n",
+ "Links: ",
+ namesof("mu", lmu), ", ",
+ namesof("sigma", lsigma), "\n\n",
+ "Mean: mu\n",
+ "Variance: sigma^2"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y <= 0 | y >= 1))
+ stop("all y values must be in (0,1)")
+
+ predictors.names = c(namesof("mu", .lmu, tag= FALSE),
+ namesof("sigma", .lsigma, tag= FALSE))
+
+ if(!length(etastart)) {
+ mu.init = rep(if(length( .imu)) .imu else
+ median(y), length=n)
+ sigma.init = rep(if(length( .isigma)) .isigma else
+ sqrt(var(y)), length=n)
+ etastart = cbind(theta2eta(mu.init, .lmu),
+ theta2eta(sigma.init, .lsigma))
+ }
+ }), list( .lmu=lmu, .lsigma=lsigma,
+ .imu=imu, .isigma=isigma ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ misc$d3 = d3 # because save.weights=F
+ misc$link = c(mu= .lmu, sigma= .lsigma)
+ misc$pooled.weight = pooled.weight
+ }), list( .lmu=lmu, .lsigma=lsigma ))),
+ loglikelihood=eval(substitute(function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ sigma = eta2theta(eta[,2], .lsigma)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-0.5*log(2*pi*sigma^2*(y*(1-y))^3) -
+ (0.5/sigma^2)*(y-mu)^2 / (y*(1-y)*mu^2*(1-mu)^2 )))
+ }, list( .lsigma=lsigma ))),
+ vfamily=c("simplex1"),
+ deriv=eval(substitute(expression({
+ if(iter==1) {
+ d3 = deriv3(~ w * (-0.5*log(2*pi*sigma^2*(y*(1-y))^3) -
+ (0.5/sigma^2)*(y-mu)^2 / (y*(1-y)*mu^2*(1-mu)^2)),
+ c("mu", "sigma"), hessian= TRUE)
+ }
+
+ sigma = eta2theta(eta[,2], .lsigma)
+
+ eval.d3 = eval(d3)
+ dl.dthetas = attr(eval.d3, "gradient")
+
+ dmu.deta = dtheta.deta(mu, .lmu)
+ dsigma.deta = dtheta.deta(sigma, .lsigma)
+ dtheta.detas = cbind(dmu.deta, dsigma.deta)
+
+ dl.dthetas * dtheta.detas
+ }), list( .lmu=lmu, .lsigma=lsigma ))),
+ weight=eval(substitute(expression({
+ d2l.dthetas2 = attr(eval.d3, "hessian")
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = -d2l.dthetas2[,1,1] * dtheta.detas[,1]^2
+ wz[,iam(2,2,M)] = -d2l.dthetas2[,2,2] * dtheta.detas[,2]^2
+ wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
+ dtheta.detas[,2]
+ if(!.expected) {
+ d2mudeta2 = d2theta.deta2(mu, .lmu)
+ d2sigmadeta2 = d2theta.deta2(sigma, .lsigma)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2sigmadeta2
+ }
+
+ if(intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .lmu=lmu, .lsigma=lsigma, .expected=FALSE ))))
+}
+
+
+
+rig = function(lmu="identity", llambda="loge", imu=NULL, ilambda=1)
+{
+
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(!is.Numeric(ilambda, posit=TRUE))
+ stop("bad input for \"ilambda\"")
+
+ new("vglmff",
+ blurb=c("Reciprocal inverse Gaussian distribution \n",
+ "f(y) = [lambda/(2*pi*y)]^(0.5) * \n",
+ " exp[-0.5*(lambda/y) * (y-mu)^2], ",
+ " 0 < y,\n",
+ "Links: ",
+ namesof("mu", lmu), ", ",
+ namesof("lambda", llambda), "\n\n",
+ "Mean: mu"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y <= 0))
+ stop("all y values must be > 0")
+ predictors.names = c(namesof("mu", .lmu, tag= FALSE),
+ namesof("lambda", .llambda, tag= FALSE))
+ if(!length(etastart)) {
+ mu.init = rep(if(length( .imu)) .imu else
+ median(y), length=n)
+ lambda.init = rep(if(length( .ilambda )) .ilambda else
+ sqrt(var(y)), length=n)
+ etastart = cbind(theta2eta(mu.init, .lmu),
+ theta2eta(lambda.init, .llambda))
+ }
+ }), list( .lmu=lmu, .llambda=llambda,
+ .imu=imu, .ilambda=ilambda ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ misc$d3 = d3 # because save.weights=FALSE
+ misc$link = c(mu= .lmu, lambda= .llambda)
+ misc$pooled.weight = pooled.weight
+ }), list( .lmu=lmu, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ lambda = eta2theta(eta[,2], .llambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
+ }, list( .llambda=llambda ))),
+ vfamily=c("rig"),
+ deriv=eval(substitute(expression({
+ if(iter==1) {
+ d3 = deriv3(~ w *
+ (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
+ c("mu", "lambda"), hessian= TRUE)
+ }
+
+ lambda = eta2theta(eta[,2], .llambda)
+
+ eval.d3 = eval(d3)
+ dl.dthetas = attr(eval.d3, "gradient")
+
+ dmu.deta = dtheta.deta(mu, .lmu)
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ dtheta.detas = cbind(dmu.deta, dlambda.deta)
+
+ dl.dthetas * dtheta.detas
+ }), list( .lmu=lmu, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ d2l.dthetas2 = attr(eval.d3, "hessian")
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = -d2l.dthetas2[,1,1] * dtheta.detas[,1]^2
+ wz[,iam(2,2,M)] = -d2l.dthetas2[,2,2] * dtheta.detas[,2]^2
+ wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
+ dtheta.detas[,2]
+ if(!.expected) {
+ d2mudeta2 = d2theta.deta2(mu, .lmu)
+ d2lambda = d2theta.deta2(lambda, .llambda)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2lambda
+ }
+
+ if(intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .lmu=lmu, .llambda=llambda, .expected=FALSE ))))
+}
+
+
+
+hyper.secant = function(link.theta="identity", init.theta=NULL)
+{
+
+ if(mode(link.theta) != "character" && mode(link.theta) != "name")
+ link.theta = as.character(substitute(link.theta))
+
+ new("vglmff",
+ blurb=c("Hyperbolic Secant distribution \n",
+ "f(y) = exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2))\n",
+ " for all y,\n",
+ "Link: ",
+ namesof("theta", link.theta), "\n\n",
+ "Mean: tan(theta)",
+ "\n",
+ "Variance: ???"),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("theta", .link.theta, tag= FALSE))
+
+ if(!length(etastart)) {
+ theta.init = rep(if(length( .init.theta)) .init.theta else
+ median(y), length=n)
+
+ etastart = theta2eta(theta.init, .link.theta)
+ }
+ }), list( .link.theta=link.theta,
+ .init.theta=init.theta ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ theta = eta2theta(eta, .link.theta)
+ tan(theta)
+ }, list( .link.theta=link.theta ))),
+ last=eval(substitute(expression({
+ misc$link = c(theta= .link.theta)
+ }), list( .link.theta=link.theta ))),
+ loglikelihood=eval(substitute(function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ theta = eta2theta(eta, .link.theta)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 ))))
+ }, list( .link.theta=link.theta ))),
+ vfamily=c("hyper.secant"),
+ deriv=eval(substitute(expression({
+ theta = eta2theta(eta, .link.theta)
+ dl.dthetas = y - tan(theta)
+ dparam.deta = dtheta.deta(theta, .link.theta)
+ w * dl.dthetas * dparam.deta
+ }), list( .link.theta=link.theta ))),
+ weight=expression({
+ d2l.dthetas2 = 1 / cos(theta)^2
+ wz = w * d2l.dthetas2 * dparam.deta^2
+ wz
+ }))
+}
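+
+# A sketch (vglm() assumed; the response below is merely illustrative):
+# with the identity link, theta should stay inside (-pi/2, pi/2), and the
+# fitted mean is tan(theta).
+if(FALSE) {
+ set.seed(12)
+ hdata = data.frame(y = rnorm(n=500))
+ fit = vglm(y ~ 1, hyper.secant(init.theta=0.1), data=hdata)
+ Coef(fit)
+}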
+
+
+
+hyper.secant.1 = function(link.theta="identity", init.theta=NULL)
+{
+ # This is NOT based on deriv3().
+ # p.101, (3.38), Jorgensen, 1997, The Theory of Dispersion Models.
+ # 19/3/02; Newton-Raphson is the same as Fisher scoring here.
+ # Works well, but needs a link that restricts theta to +/- pi/2.
+ # See also hyper.secant() for another parameterization.
+
+ if(mode(link.theta) != "character" && mode(link.theta) != "name")
+ link.theta = as.character(substitute(link.theta))
+
+ new("vglmff",
+ blurb=c("Hyperbolic Secant distribution \n",
+ "f(y) = (cos(theta)/pi) * y^(-0.5+theta/pi) * \n",
+ " (1-y)^(-0.5-theta/pi)], ",
+ " 0 < y < 1,\n",
+ "Link: ",
+ namesof("theta", link.theta), "\n\n",
+ "Mean: 0.5 + theta/pi",
+ "\n",
+ "Variance: (pi^2 - 4*theta^2) / (8*pi^2)"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y <= 0 | y >= 1))
+ stop("all y values must be in (0,1)")
+
+ predictors.names = c(namesof("theta", .link.theta, tag= FALSE))
+
+ if(!length(etastart)) {
+ theta.init = rep(if(length( .init.theta)) .init.theta else
+ median(y), length=n)
+
+ etastart = theta2eta(theta.init, .link.theta)
+ }
+ }), list( .link.theta=link.theta,
+ .init.theta=init.theta ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ theta = eta2theta(eta, .link.theta)
+ 0.5 + theta/pi
+ }, list( .link.theta=link.theta ))),
+ last=eval(substitute(expression({
+ misc$link = c(theta= .link.theta)
+ }), list( .link.theta=link.theta ))),
+ loglikelihood=eval(substitute(function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ theta = eta2theta(eta, .link.theta)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(cos(theta)) + (-0.5+theta/pi)*log(y) +
+ (-0.5-theta/pi)*log(1-y )))
+ }, list( .link.theta=link.theta ))),
+ vfamily=c("hyper.secant.1"),
+ deriv=eval(substitute(expression({
+ theta = eta2theta(eta, .link.theta)
+ dl.dthetas = -tan(theta) + log(y/(1-y)) / pi
+ dparam.deta = dtheta.deta(theta, .link.theta)
+ w * dl.dthetas * dparam.deta
+ }), list( .link.theta=link.theta ))),
+ weight=expression({
+ d2l.dthetas2 = 1 / cos(theta)^2
+ wz = w * d2l.dthetas2 * dparam.deta^2
+ wz
+ }))
+}
+
+
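+# Usage sketch for this (0,1) parameterization (hypothetical data;
+# assumes VGAM is loaded). As the comments inside hyper.secant.1() warn,
+# the default identity link does not restrict theta to (-pi/2, pi/2),
+# so monitor convergence.
+# set.seed(2)
+# hdata = data.frame(y = rbeta(500, shape1=2, shape2=2))
+# fit = vglm(y ~ 1, hyper.secant.1, data=hdata, trace=TRUE)
+# Coef(fit) # the fitted mean is 0.5 + theta.hat/pi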
+
+leipnik = function(lmu="logit", llambda="loge", imu=NULL, ilambda=NULL)
+{
+
+
+ if(mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(is.Numeric(ilambda) && any(ilambda <= -1))
+ stop("ilambda must be > -1")
+
+ new("vglmff",
+ blurb=c("Leipnik's distribution \n",
+ "f(y) = (y(1-y))^(-1/2) * [1 + (y-mu)^2 / (y*(1-y))]^(-lambda/2) /\n",
+ " Beta[(lambda+1)/2, 1/2], ",
+ " 0 < y < 1, lambda > -1\n",
+ "Links: ",
+ namesof("mu", lmu), ", ",
+ namesof("lambda", llambda), "\n\n",
+ "Mean: mu\n",
+ "Variance: mu*(1-mu)"),
+ initialize=eval(substitute(expression({
+ y = as.numeric(y)
+ if(any(y <= 0 | y >= 1))
+ stop("all y values must be in (0,1)")
+
+ predictors.names = c(namesof("mu", .lmu, tag= FALSE),
+ namesof("lambda", .llambda, tag= FALSE))
+
+ if(!length(etastart)) {
+ mu.init = rep(if(length( .imu)) .imu else
+ y, length=n) # default initial value: the response itself
+ lambda.init = rep(if(length( .ilambda)) .ilambda else
+ 1/var(y), length=n)
+ etastart = cbind(theta2eta(mu.init, .lmu),
+ theta2eta(lambda.init, .llambda))
+ }
+ }), list( .lmu=lmu, .llambda=llambda, .imu=imu, .ilambda=ilambda ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .lmu)
+ }, list( .lmu=lmu ))),
+ last=eval(substitute(expression({
+ if(!is.R())
+ misc$d3 = d3 # because save.weights=FALSE
+ misc$link = c(mu= .lmu, lambda= .llambda)
+ misc$pooled.weight = pooled.weight
+ misc$expected = FALSE
+ }), list( .lmu=lmu, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ lambda = eta2theta(eta[,2], .llambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-0.5*log(y*(1-y)) - 0.5 * lambda * log(1 +
+ (y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
+ lgamma(1+ lambda/2 )))
+ }, list( .llambda=llambda ))),
+ vfamily=c("leipnik"),
+ deriv=eval(substitute(expression({
+ lambda = eta2theta(eta[,2], .llambda)
+ if(is.R()) {
+ dl.dthetas = w * cbind(dl.dmu=lambda*(y-mu)/(y*(1-y)+(y-mu)^2),
+ dl.dlambda=-0.5*log(1+(y-mu)^2 / (y*(1-y ))) -
+ 0.5*digamma((lambda+1)/2) +
+ 0.5*digamma(1+lambda/2))
+ } else {
+ if(iter==1) {
+ d3 = dfun(~ w * (-0.5*log(y*(1-y)) - 0.5 * lambda * log(1 +
+ (y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
+ lgamma(1+ lambda/2)),
+ c("mu", "lambda"), hessian= TRUE)
+ }
+ eval.d3 = eval(d3)
+ dl.dthetas = attr(eval.d3, "gradient")
+ }
+ dmu.deta = dtheta.deta(mu, .lmu)
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ dtheta.detas = cbind(dmu.deta, dlambda.deta)
+ dl.dthetas * dtheta.detas
+ }), list( .lmu=lmu, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ if(is.R()) {
+ denominator = y*(1-y) + (y-mu)^2
+ d2l.dthetas2 = array(NA, c(n,2,2))
+ d2l.dthetas2[,1,1] = w * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
+ d2l.dthetas2[,1,2] =
+ d2l.dthetas2[,2,1] = w * (y-mu) / denominator
+ d2l.dthetas2[,2,2] = w * (-0.25*trigamma((lambda+1)/2) +
+ 0.25*trigamma(1+lambda/2))
+ } else {
+ d2l.dthetas2 = attr(eval.d3, "hessian")
+ }
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = -d2l.dthetas2[,1,1] * dtheta.detas[,1]^2
+ wz[,iam(2,2,M)] = -d2l.dthetas2[,2,2] * dtheta.detas[,2]^2
+ wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
+ dtheta.detas[,2]
+ if(!.expected) {
+ d2mudeta2 = d2theta.deta2(mu, .lmu)
+ d2lambda = d2theta.deta2(lambda, .llambda)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2lambda
+ }
+
+ if(intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .lmu=lmu, .llambda=llambda, .expected=FALSE ))))
+}
+
+
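+# A hedged fit sketch for leipnik() (hypothetical data; assumes VGAM is
+# loaded). Fisher scoring is not used here (misc$expected = FALSE), so
+# convergence can be delicate; supplying ilambda often helps.
+# set.seed(3)
+# ldat = data.frame(y = rbeta(500, shape1=2, shape2=2))
+# fit = vglm(y ~ 1, leipnik(ilambda=1), data=ldat, trace=TRUE)
+# Coef(fit)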
+
+
+
+invbinomial = function(lrho="logit", llambda="loge",
+ irho=0.75,
+ ilambda=NULL,
+ zero=NULL)
+{
+
+ if(mode(lrho) != "character" && mode(lrho) != "name")
+ lrho = as.character(substitute(lrho))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Inverse binomial distribution\n\n",
+ "Links: ",
+ namesof("rho", lrho), ", ",
+ namesof("lambda", llambda), "\n",
+ "Mean: lambda*(1-rho)/(2*rho-1)\n"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("rho", .lrho, tag= FALSE),
+ namesof("lambda", .llambda, tag= FALSE))
+
+ if(!length(etastart)) {
+ rho = rep(if(length( .irho)) .irho else
+ 0.75, length=n)
+ lambda = rep(if(length( .ilambda)) .ilambda else
+ 1, length=n)
+ etastart = cbind(theta2eta(rho, .lrho),
+ theta2eta(lambda, .llambda))
+ }
+ }), list( .llambda=llambda, .lrho=lrho,
+ .ilambda=ilambda, .irho=irho ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ rho = eta2theta(eta[,1], .lrho)
+ lambda = eta2theta(eta[,2], .llambda)
+ lambda*(1-rho)/(2*rho-1)
+ }, list( .llambda=llambda, .lrho=lrho ))),
+ last=eval(substitute(expression({
+ misc$link = c(rho= .lrho, lambda= .llambda)
+ misc$pooled.weight = pooled.weight
+ }), list( .llambda=llambda, .lrho=lrho ))),
+ loglikelihood=eval(substitute(function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ rho = eta2theta(eta[,1], .lrho)
+ lambda = eta2theta(eta[,2], .llambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
+ lgamma(y+lambda+1) + y*log(rho*(1-rho)) + lambda*log(rho )))
+ }, list( .llambda=llambda, .lrho=lrho ))),
+ vfamily=c("invbinomial"),
+ deriv=eval(substitute(expression({
+ rho = eta2theta(eta[,1], .lrho)
+ lambda = eta2theta(eta[,2], .llambda)
+ dl.drho = y * (1-2*rho)/(rho*(1-rho)) + lambda /rho
+ dl.dlambda = 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
+ log(rho)
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ drho.deta = dtheta.deta(rho, .lrho)
+ w * cbind( dl.drho * drho.deta, dl.dlambda * dlambda.deta )
+ }), list( .llambda=llambda, .lrho=lrho ))),
+ weight=eval(substitute(expression({
+ d2l.drho2 = y * (-1+2*rho-2*rho^2) / (rho*(1-rho))^2 - lambda/rho^2
+ d2l.dlambda2 = -1/(lambda^2) - trigamma(2*y+lambda) -
+ trigamma(y+lambda+1)
+ d2l.dlambdarho = 1/rho
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ # eta[,1] is rho and eta[,2] is lambda (matching deriv above), so the
+ # diagonal working-weight entries must be in that order too.
+ wz[,iam(1,1,M)] = -d2l.drho2 * drho.deta^2
+ wz[,iam(2,2,M)] = -d2l.dlambda2 * dlambda.deta^2
+ wz[,iam(1,2,M)] = -d2l.dlambdarho * dlambda.deta * drho.deta
+
+ d2lambda.deta2 = d2theta.deta2(lambda, .llambda)
+ d2rhodeta2 = d2theta.deta2(rho, .lrho)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.drho * d2rhodeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dlambda * d2lambda.deta2
+ wz = w * wz
+
+ if(intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .llambda=llambda, .lrho=lrho ))))
+}
+
+
+
+genpoisson = function(llambda="logit", ltheta="loge",
+ ilambda=0.5, itheta=NULL, zero=NULL)
+{
+
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(mode(ltheta) != "character" && mode(ltheta) != "name")
+ ltheta = as.character(substitute(ltheta))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Generalized Poisson distribution\n\n",
+ "Links: ",
+ namesof("lambda", llambda), ", ",
+ namesof("theta", ltheta), "\n",
+ "Mean: theta / (1-lambda)\n",
+ "Variance: theta / (1-lambda)^3"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("lambda", .llambda, tag= FALSE),
+ namesof("theta", .ltheta, tag= FALSE))
+
+ if(!length(etastart)) {
+ lambda = rep(if(length( .ilambda)) .ilambda else
+ 0.5, length=n)
+ theta = rep(if(length( .itheta)) .itheta else
+ median(y) * (1-lambda), length=n)
+ etastart = cbind(theta2eta(lambda, .llambda),
+ theta2eta(theta, .ltheta))
+ }
+ }), list( .ltheta=ltheta, .llambda=llambda,
+ .itheta=itheta, .ilambda=ilambda )) ),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ lambda = eta2theta(eta[,1], .llambda)
+ theta = eta2theta(eta[,2], .ltheta)
+ theta/(1-lambda)
+ }, list( .ltheta=ltheta, .llambda=llambda ))),
+ last=eval(substitute(expression({
+ misc$link = c(lambda=.llambda, theta=.ltheta)
+ misc$pooled.weight = pooled.weight
+ }), list( .ltheta=ltheta, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ lambda = eta2theta(eta[,1], .llambda)
+ theta = eta2theta(eta[,2], .ltheta)
+ index = y == 0
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w[index]*(-theta[index])) +
+ sum(w[!index]*(-y[!index]*lambda[!index]-theta[!index]+
+ (y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
+ log(theta[!index] )))
+ }, list( .ltheta=ltheta, .llambda=llambda ))),
+ vfamily=c("genpoisson"),
+ deriv=eval(substitute(expression({
+ lambda = eta2theta(eta[,1], .llambda)
+ theta = eta2theta(eta[,2], .ltheta)
+ dl.dlambda = -y + y*(y-1)/(theta+y*lambda)
+ dl.dtheta = -1 + (y-1)/(theta+y*lambda) + 1/theta
+ dTHETA.deta = dtheta.deta(theta, .ltheta)
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ w * cbind( dl.dlambda * dlambda.deta, dl.dtheta * dTHETA.deta )
+ }), list( .ltheta=ltheta, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ d2l.dlambda2 = -y^2 * (y-1) / (theta+y*lambda)^2
+ d2l.dtheta2 = -(y-1)/(theta+y*lambda)^2 - 1 / theta^2
+ d2l.dthetalambda = -y * (y-1) / (theta+y*lambda)^2
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = -d2l.dlambda2 * dlambda.deta^2
+ wz[,iam(2,2,M)] = -d2l.dtheta2 * dTHETA.deta^2
+ wz[,iam(1,2,M)] = -d2l.dthetalambda * dTHETA.deta * dlambda.deta
+
+ d2THETA.deta2 = d2theta.deta2(theta, .ltheta)
+ d2lambdadeta2 = d2theta.deta2(lambda, .llambda)
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dlambda * d2lambdadeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dtheta * d2THETA.deta2
+ wz = w * wz
+
+ if(intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+
+ wz
+ }), list( .ltheta=ltheta, .llambda=llambda ))))
+}
+
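+# Sanity-check sketch (hypothetical; assumes VGAM is loaded): as
+# lambda -> 0 the generalized Poisson approaches the ordinary Poisson,
+# so a Poisson sample should drive lambda.hat towards the boundary of
+# its logit link; convergence may be slow there.
+# set.seed(4)
+# gdat = data.frame(y = rpois(500, lambda=3))
+# fit = vglm(y ~ 1, genpoisson, data=gdat, trace=TRUE)
+# Coef(fit) # lambda.hat near 0, theta.hat near 3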
+
+
+lgammaff = function(link="loge", init.k=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Log-gamma distribution f(y) = exp(ky - e^y)/gamma(k)), k>0\n\n",
+ "Link: ",
+ namesof("k", link), "\n", "\n",
+ "Mean: digamma(k)", "\n"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("k", .link, tag= FALSE)
+ if(!length(etastart)) {
+ k.init = if(length( .init.k)) rep( .init.k, len=length(y)) else {
+ medy = median(y)
+ if(medy < 2) 5 else if(medy < 4) 20 else exp(0.7 * medy)
+ }
+ etastart = theta2eta(k.init, .link)
+ }
+ }), list( .link=link, .init.k=init.k ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ k = eta2theta(eta, .link)
+ digamma(k)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(k= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ k = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (k * y - exp(y) - lgamma(k )))
+ }, list( .link=link ))),
+ vfamily=c("lgammaff"),
+ deriv=eval(substitute(expression({
+ k = eta2theta(eta, .link)
+ dl.dk = y - digamma(k)
+ dk.deta = dtheta.deta(k, .link)
+ w * dl.dk * dk.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ ed2l.dk2 = trigamma(k)
+ wz = w * dk.deta^2 * ed2l.dk2
+ wz
+ }), list( .link=link ))))
+}
+
+
+dlgamma = function(x, location=0, scale=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ z = (x-location) / scale
+ exp(k * z - exp(z)) / (scale * gamma(k))
+}
+plgamma = function(q, location=0, scale=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ z = (q-location)/scale
+ pgamma(exp(z), k)
+}
+qlgamma = function(p, location=0, scale=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ q = qgamma(p, k)
+ location + scale * log(q)
+}
+rlgamma = function(n, location=0, scale=1, k=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ y = rgamma(n, k)
+ location + scale * log(y)
+}
+
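+# Consistency checks for the [dpqr]lgamma() helpers above, plus a fit
+# sketch for lgammaff() (hypothetical values; assumes VGAM is loaded):
+# p = c(0.1, 0.5, 0.9)
+# all.equal(plgamma(qlgamma(p, location=1, scale=2, k=3),
+#           location=1, scale=2, k=3), p)
+# set.seed(5)
+# ldat = data.frame(y = rlgamma(1000, k=5)) # standard log-gamma
+# fit = vglm(y ~ 1, lgammaff, data=ldat, trace=TRUE)
+# Coef(fit) # k.hat should be near 5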
+
+lgamma3ff = function(llocation="identity", lscale="loge", lshape="loge",
+ ilocation=NULL, iscale=NULL, ishape=1, zero=NULL)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Log-gamma distribution",
+ " f(y) = exp(k(y-a)/b - e^((y-a)/b))/(b*gamma(k)), ",
+ "location=a, scale=b>0, shape=k>0\n\n",
+ "Links: ",
+ namesof("location", llocation), ", ",
+ namesof("scale", lscale), ", ",
+ namesof("shape", lshape), "\n\n",
+ "Mean: a + b*digamma(k)", "\n"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", .llocation, tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE),
+ namesof("shape", .lshape, tag= FALSE))
+ if(!length(etastart)) {
+ k.init = if(length( .ishape)) rep( .ishape, len=length(y)) else {
+ rep(exp(median(y)), len=length(y))
+ }
+ scale.init = if(length( .iscale)) rep( .iscale, len=length(y)) else {
+ rep(sqrt(var(y) / trigamma(k.init)), len=length(y))
+ }
+ loc.init = if(length( .iloc)) rep( .iloc, len=length(y)) else {
+ rep(median(y) - scale.init * digamma(k.init), len=length(y))
+ }
+ etastart = cbind(theta2eta(loc.init, .llocation),
+ theta2eta(scale.init, .lscale),
+ theta2eta(k.init, .lshape))
+ }
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .iloc=ilocation, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .llocation) + eta2theta(eta[,2], .lscale) *
+ digamma(eta2theta(eta[,3], .lshape))
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation, scale= .lscale, shape= .lshape)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ a = eta2theta(eta[,1], .llocation)
+ b = eta2theta(eta[,2], .lscale)
+ k = eta2theta(eta[,3], .lshape)
+ zedd = (y-a)/b
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (k * zedd - exp(zedd) - lgamma(k) - log(b )))
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ vfamily=c("lgamma3ff"),
+ deriv=eval(substitute(expression({
+ a = eta2theta(eta[,1], .llocation)
+ b = eta2theta(eta[,2], .lscale)
+ k = eta2theta(eta[,3], .lshape)
+ zedd = (y-a)/b
+ dl.da = (exp(zedd) - k) / b
+ dl.db = (zedd * (exp(zedd) - k) - 1) / b
+ dl.dk = zedd - digamma(k)
+ da.deta = dtheta.deta(a, .llocation)
+ db.deta = dtheta.deta(b, .lscale)
+ dk.deta = dtheta.deta(k, .lshape)
+ w * cbind(dl.da * da.deta, dl.db * db.deta, dl.dk * dk.deta)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ ed2l.da2 = k / b^2
+ ed2l.db2 = (1 + k*(trigamma(k+1) + (digamma(k+1))^2)) / b^2
+ ed2l.dk2 = trigamma(k)
+ ed2l.dadb = (1 + k*digamma(k)) / b^2
+ ed2l.dadk = 1 / b
+ ed2l.dbdk = digamma(k) / b
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ wz[,iam(1,1,M)] = ed2l.da2 * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.db2 * db.deta^2
+ wz[,iam(3,3,M)] = ed2l.dk2 * dk.deta^2
+ wz[,iam(1,2,M)] = ed2l.dadb * da.deta * db.deta
+ wz[,iam(1,3,M)] = ed2l.dadk * da.deta * dk.deta
+ wz[,iam(2,3,M)] = ed2l.dbdk * db.deta * dk.deta
+ wz = w * wz
+ wz
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))))
+}
+
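+# Three-parameter fit sketch using rlgamma() above (hypothetical;
+# assumes VGAM is loaded):
+# set.seed(6)
+# ldat = data.frame(y = rlgamma(2000, location=1, scale=2, k=3))
+# fit = vglm(y ~ 1, lgamma3ff, data=ldat, trace=TRUE)
+# Coef(fit) # (location, scale, shape) near (1, 2, 3)
+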
+prentice74 = function(llocation="identity", lscale="loge", lshape="identity",
+ ilocation=NULL, iscale=NULL, ishape=NULL, zero=NULL)
+{
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Log-gamma distribution (Prentice, 1974)",
+ " f(y) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)) ,\n",
+ "w=(y-a)*q/b + digamma(1/q^2), location=a, scale=b>0, shape=q\n\n",
+ "Links: ",
+ namesof("location", llocation), ", ",
+ namesof("scale", lscale), ", ",
+ namesof("shape", lshape), "\n", "\n",
+ "Mean: a", "\n"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", .llocation, tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE),
+ namesof("shape", .lshape, tag= FALSE))
+ if(!length(etastart)) {
+ sdy = sqrt(var(y))
+ k.init = if(length( .ishape)) rep( .ishape, len=length(y)) else {
+ skewness = mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
+ rep(-skewness, len=length(y))
+ }
+ scale.init = if(length( .iscale)) rep( .iscale, len=length(y)) else {
+ rep(sdy, len=length(y))
+ }
+ loc.init = if(length( .iloc)) rep( .iloc, len=length(y)) else {
+ rep(median(y), len=length(y))
+ }
+ etastart = cbind(theta2eta(loc.init, .llocation),
+ theta2eta(scale.init, .lscale),
+ theta2eta(k.init, .lshape))
+ }
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape,
+ .iloc=ilocation, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .llocation)
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= .llocation, scale= .lscale, shape= .lshape)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ a = eta2theta(eta[,1], .llocation)
+ b = eta2theta(eta[,2], .lscale)
+ k = eta2theta(eta[,3], .lshape)
+ tmp55 = k^(-2)
+ doubw = (y-a)*k/b + digamma(tmp55)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(abs(k)) -log(b) -lgamma(tmp55) + doubw*tmp55 -exp(doubw )))
+ }, list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ vfamily=c("prentice74"),
+ deriv=eval(substitute(expression({
+ a = eta2theta(eta[,1], .llocation)
+ b = eta2theta(eta[,2], .lscale)
+ k = eta2theta(eta[,3], .lshape)
+ tmp55 = k^(-2)
+ mustar = digamma(tmp55)
+ doubw = (y-a)*k/b + mustar
+ sigmastar2 = trigamma(tmp55)
+ dl.da = k*(exp(doubw) - tmp55) / b
+ dl.db = ((doubw - mustar) * (exp(doubw) - tmp55) - 1) / b
+ dl.dk = 1/k - 2 * (doubw - mustar) / k^3 - (exp(doubw) - tmp55) *
+ ((doubw - mustar) / k - 2 * sigmastar2 / k^3)
+ da.deta = dtheta.deta(a, .llocation)
+ db.deta = dtheta.deta(b, .lscale)
+ dk.deta = dtheta.deta(k, .lshape)
+ w * cbind(dl.da * da.deta, dl.db * db.deta, dl.dk * dk.deta)
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ ed2l.da2 = 1 / b^2
+ ed2l.db2 = (1 + sigmastar2*tmp55) / b^2
+ ed2l.dk2 = tmp55 - 3*sigmastar2*tmp55^2 + 4*sigmastar2*tmp55^4 *
+ (sigmastar2 - k^2)
+ ed2l.dadb = k / b^2
+ ed2l.dadk = (2*(sigmastar2*tmp55^2 - tmp55) - 1) / b
+ ed2l.dbdk = (sigmastar2*tmp55 - 1) / (b*k)
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ wz[,iam(1,1,M)] = ed2l.da2 * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.db2 * db.deta^2
+ wz[,iam(3,3,M)] = ed2l.dk2 * dk.deta^2
+ wz[,iam(1,2,M)] = ed2l.dadb * da.deta * db.deta
+ wz[,iam(1,3,M)] = ed2l.dadk * da.deta * dk.deta
+ wz[,iam(2,3,M)] = ed2l.dbdk * db.deta * dk.deta
+ wz = w * wz
+ wz
+ }), list( .llocation=llocation, .lscale=lscale, .lshape=lshape ))))
+}
+
+
+
+dggamma = function(x, scale=1, d=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument \"d\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ N = max(length(x), length(scale), length(d), length(k))
+ x = rep(x, len=N); scale = rep(scale, len=N);
+ d = rep(d, len=N); k = rep(k, len=N);
+ ans = rep(0.0, len=N)
+ ind = x > 0
+ if(any(ind)) {
+ z = (x[ind]/scale[ind])^d[ind]
+ ans[ind] = d[ind] * scale[ind]^(-d[ind]*k[ind]) *
+ x[ind]^(d[ind]*k[ind]-1) * exp(-z) / gamma(k[ind])
+ }
+ ans
+}
+pggamma = function(q, scale=1, d=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument \"d\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ z = (q/scale)^d
+ pgamma(z, k)
+}
+qggamma = function(p, scale=1, d=1, k=1) {
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for argument \"d\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for argument \"k\"")
+ q = qgamma(p, k)
+ scale * q^(1/d)
+}
+rggamma = function(n, scale=1, d=1, k=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for \"n\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for \"scale\"")
+ if(!is.Numeric(d, posit=TRUE)) stop("bad input for \"d\"")
+ if(!is.Numeric(k, posit=TRUE)) stop("bad input for \"k\"")
+ y = rgamma(n, k)
+ scale * y^(1/d)
+}
+
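+# The generalized gamma nests two familiar families; quick checks
+# against R's built-in densities (a sketch):
+# y = seq(0.1, 5, by=0.1)
+# all.equal(dggamma(y, scale=2, d=1, k=3), # d=1: gamma
+#           dgamma(y, shape=3, scale=2))
+# all.equal(dggamma(y, scale=2, d=1.5, k=1), # k=1: Weibull
+#           dweibull(y, shape=1.5, scale=2))
+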
+ggamma = function(lscale="loge", ld="loge", lk="loge",
+ iscale=NULL, id=NULL, ik=NULL, zero=NULL)
+{
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(ld) != "character" && mode(ld) != "name")
+ ld = as.character(substitute(ld))
+ if(mode(lk) != "character" && mode(lk) != "name")
+ lk = as.character(substitute(lk))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Generalized gamma distribution",
+ " f(y) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k),\n",
+ "scale=b>0, d>0, k>0, y>0\n\n",
+ "Links: ",
+ namesof("scale", lscale), ", ",
+ namesof("d", ld), ", ",
+ namesof("k", lk), "\n", "\n",
+ "Mean: b*k", "\n"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(any(y <= 0)) stop("response must have positive values only")
+ predictors.names = c(namesof("scale", .lscale, tag= FALSE),
+ namesof("d", .ld, tag= FALSE),
+ namesof("k", .lk, tag= FALSE))
+ if(!length(etastart)) {
+ b.init = if(length( .iscale)) rep( .iscale, len=length(y)) else {
+ rep(mean(y^2) / mean(y), len=length(y))
+ }
+ k.init = if(length( .ik)) rep( .ik, len=length(y)) else {
+ rep(mean(y) / b.init, len=length(y))
+ }
+ d.init = if(length( .id)) rep( .id, len=length(y)) else {
+ rep(digamma(k.init) / mean(log(y/b.init)), len=length(y))
+ }
+ etastart = cbind(theta2eta(b.init, .lscale),
+ theta2eta(d.init, .ld),
+ theta2eta(k.init, .lk))
+ }
+ }), list( .lscale=lscale, .ld=ld, .lk=lk,
+ .iscale=iscale, .id=id, .ik=ik ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ b = eta2theta(eta[,1], .lscale)
+ d = eta2theta(eta[,2], .ld)
+ k = eta2theta(eta[,3], .lk)
+ # If Z ~ Gamma(k) then Y = b * Z^(1/d), so the mean is
+ # E(Y) = b * gamma(k + 1/d) / gamma(k) (reduces to b*k when d=1).
+ b * gamma(k + 1/d) / gamma(k)
+ }, list( .ld=ld, .lscale=lscale, .lk=lk ))),
+ last=eval(substitute(expression({
+ misc$link = c(scale= .lscale, d= .ld, k= .lk)
+ }), list( .lscale=lscale, .ld=ld, .lk=lk ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ b = eta2theta(eta[,1], .lscale)
+ d = eta2theta(eta[,2], .ld)
+ k = eta2theta(eta[,3], .lk)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(d) - lgamma(k) + (d*k-1) * log(y) - d*k*log(b) - (y/b)^d))
+ }, list( .lscale=lscale, .ld=ld, .lk=lk ))),
+ vfamily=c("ggamma"),
+ deriv=eval(substitute(expression({
+ b = eta2theta(eta[,1], .lscale)
+ d = eta2theta(eta[,2], .ld)
+ k = eta2theta(eta[,3], .lk)
+ tmp22 = (y/b)^d
+ tmp33 = log(y/b)
+ dl.db = d * (tmp22 - k) / b
+ dl.dd = 1/d + tmp33 * (k - tmp22)
+ dl.dk = d * tmp33 - digamma(k)
+ db.deta = dtheta.deta(b, .lscale)
+ dd.deta = dtheta.deta(d, .ld)
+ dk.deta = dtheta.deta(k, .lk)
+ w * cbind(dl.db * db.deta, dl.dd * dd.deta, dl.dk * dk.deta)
+ }), list( .lscale=lscale, .ld=ld, .lk=lk ))),
+ weight=eval(substitute(expression({
+ ed2l.db2 = k * (d/b)^2
+ ed2l.dd2 = (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
+ ed2l.dk2 = trigamma(k)
+ ed2l.dbdd = -(1 + k*digamma(k)) / b
+ ed2l.dbdk = d / b
+ ed2l.dddk = -digamma(k) / d
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ wz[,iam(1,1,M)] = ed2l.db2 * db.deta^2
+ wz[,iam(2,2,M)] = ed2l.dd2 * dd.deta^2
+ wz[,iam(3,3,M)] = ed2l.dk2 * dk.deta^2
+ wz[,iam(1,2,M)] = ed2l.dbdd * db.deta * dd.deta
+ wz[,iam(1,3,M)] = ed2l.dbdk * db.deta * dk.deta
+ wz[,iam(2,3,M)] = ed2l.dddk * dd.deta * dk.deta
+ wz = w * wz
+ wz
+ }), list( .lscale=lscale, .ld=ld, .lk=lk ))))
+}
+
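+# Fit sketch for ggamma() using rggamma() above (hypothetical; assumes
+# VGAM is loaded); all three parameters are positive, hence the loge
+# links.
+# set.seed(7)
+# gdat = data.frame(y = rggamma(1000, scale=2, d=1.5, k=3))
+# fit = vglm(y ~ 1, ggamma, data=gdat, trace=TRUE)
+# Coef(fit) # (scale, d, k) near (2, 1.5, 3)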
+
+dlog = function(x, prob) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
+ stop("bad input for argument \"prob\"")
+ N = max(length(x), length(prob))
+ x = rep(x, len=N); prob = rep(prob, len=N);
+ ox = !is.finite(x)
+ zero = round(x) != x | x < 1
+ ans = rep(0.0, len=length(x))
+ if(any(!zero))
+ ans[!zero] = -(prob[!zero]^(x[!zero])) / (x[!zero] * log(1-prob[!zero]))
+ if(any(ox))
+ ans[ox] = NA
+ ans
+}
+
+plog = function(q, prob, log.p=FALSE) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(prob, posit=TRUE) || max(prob) >= 1)
+ stop("bad input for argument \"prob\"")
+ N = max(length(q), length(prob))
+ q = rep(q, len=N); prob = rep(prob, len=N);
+ ans = q * 0 # Retains names(q)
+ if(max(abs(prob-prob[1])) < 1.0e-08) {
+ qstar = floor(q)
+ temp = if(max(qstar) >= 1) dlog(x=1:max(qstar),
+ prob=prob[1]) else 0*qstar
+ unq = unique(qstar)
+ for(i in unq) {
+ index = qstar == i
+ ans[index] = if(i >= 1) sum(temp[1:i]) else 0
+ }
+ } else
+ for(i in 1:N) {
+ qstar = floor(q[i])
+ ans[i] = if(qstar >= 1) sum(dlog(x=1:qstar, prob=prob[i])) else 0
+ }
+ if(log.p) log(ans) else ans
+}
+
+rlog = function(n, prob, Smallno=1.0e-6) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(prob, allow=1, posit=TRUE) || max(prob) >= 1)
+ stop("bad input for argument \"prob\"")
+ if(!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument \"Smallno\"")
+ ans = rep(0.0, len=n)
+
+ ptr1 = 1; ptr2 = 0
+ a = -1 / log(1 - prob)
+ mean = a*prob/(1-prob) # E(Y)
+ sigma = sqrt(a*prob*(1-a*prob)) / (1-prob) # sd(Y)
+ ymax = dlog(x=1, prob)
+ while(ptr2 < n) {
+ Lower = 0.5 # = 1 - 0.5; a continuity correction is used
+ Upper = mean + 5 * sigma
+ while(plog(q=Upper, prob) < 1-Smallno)
+ Upper = Upper + sigma
+ Upper = Upper + 0.5
+ x = round(runif(2*n, min=Lower, max=Upper))
+ index = runif(2*n, max=ymax) < dlog(x,prob)
+ sindex = sum(index)
+ if(sindex) {
+ ptr2 = min(n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 = ptr2 + 1
+ }
+ }
+ ans
+}
+
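+# Quick checks for the logarithmic distribution helpers above (sketch):
+# sum(dlog(x=1:1000, prob=0.5)) # should be very close to 1
+# all.equal(plog(q=5, prob=0.5), sum(dlog(x=1:5, prob=0.5)))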
+
+logff = function(link="logit", init.c=NULL)
+{
+ if(length(init.c) &&
+ (!is.Numeric(init.c, posit=TRUE) || max(init.c) >= 1))
+ stop("init.c must be in (0,1)")
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Logarithmic distribution f(y) = a * c^y / y, y=1,2,3,...,\n",
+ " 0 < c < 1, a = -1 / log(1-c) \n\n",
+ "Link: ", namesof("c", link), "\n", "\n",
+ "Mean: a * c / (1 - c)", "\n"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("c", .link, tag= FALSE)
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(!length(etastart)) {
+ llfun = function(cc, y, w) {
+ a = -1 / log(1-cc)
+ sum(w * (log(a) + y * log(cc) - log(y)))
+ }
+ c.init = if(length( .init.c )) .init.c else
+ getInitVals(gvals=seq(0.05, 0.95, len=9), llfun=llfun, y=y, w=w)
+ c.init = rep(c.init, length=length(y))
+ etastart = theta2eta(c.init, .link)
+ }
+ }), list( .link=link, .init.c=init.c ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ cc = eta2theta(eta, .link)
+ a = -1 / log(1-cc)
+ a * cc / (1-cc)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(c= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ cc = eta2theta(eta, .link)
+ a = -1 / log(1-cc)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(a) + y * log(cc) - log(y )))
+ }, list( .link=link ))),
+ vfamily=c("logff"),
+ deriv=eval(substitute(expression({
+ cc = eta2theta(eta, .link)
+ a = -1 / log(1-cc)
+ dl.dc = 1 / ((1-cc) * log(1-cc)) + y / cc
+ dc.deta = dtheta.deta(cc, .link)
+ w * dl.dc * dc.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ ed2l.dc2 = a * (1 - a * cc) / (cc * (1-cc)^2)
+ wz = w * dc.deta^2 * ed2l.dc2
+ wz
+ }), list( .link=link ))))
+}
+
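+# Fit sketch for logff() using rlog() above (hypothetical; assumes
+# VGAM is loaded):
+# set.seed(8)
+# ldat = data.frame(y = rlog(n=1000, prob=0.5))
+# fit = vglm(y ~ 1, logff, data=ldat, trace=TRUE)
+# Coef(fit) # c.hat should be near 0.5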
+
+levy = function(delta=NULL, link.gamma="loge", idelta=NULL, igamma=NULL)
+{
+
+
+
+ delta.known = is.Numeric(delta, allow=1)
+ if(mode(link.gamma) != "character" && mode(link.gamma) != "name")
+ link.gamma = as.character(substitute(link.gamma))
+
+ new("vglmff",
+ blurb=c("Levy distribution f(y) = sqrt(gamma/(2*pi)) * ",
+ "(y-delta)^(-3/2) * \n",
+ " exp(-gamma / (2*(y-delta ))),\n",
+ " delta < y, gamma > 0",
+ if(delta.known) paste(", delta = ", delta, ",", sep=""),
+ "\n\n",
+ if(delta.known) "Link: " else "Links: ",
+ namesof("gamma", link.gamma),
+ if(! delta.known)
+ c(", ", namesof("delta", "identity")),
+ "\n\n",
+ "Mean: NA",
+ "\n"),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("gamma", .link.gamma, tag= FALSE),
+ if( .delta.known) NULL else
+ namesof("delta", "identity", tag= FALSE))
+
+ if(!length(etastart)) {
+ delta.init = if( .delta.known) {
+ if(min(y,na.rm= TRUE) <= .delta)
+ stop("delta must be < min(y)")
+ .delta
+ } else {
+ if(length( .idelta)) .idelta else
+ min(y,na.rm= TRUE) - 1.0e-4 *
+ diff(range(y,na.rm= TRUE))
+ }
+ gamma.init = if(length( .igamma)) .igamma else
+ median(y - delta.init) # = 1/median(1/(y-delta.init))
+ gamma.init = rep(gamma.init, length=length(y))
+ etastart = cbind(theta2eta(gamma.init, .link.gamma),
+ if( .delta.known) NULL else delta.init)
+
+ }
+ }), list( .link.gamma=link.gamma,
+ .delta.known=delta.known,
+ .delta=delta,
+ .idelta=idelta,
+ .igamma=igamma ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta = as.matrix(eta)
+ mygamma = eta2theta(eta[,1], .link.gamma)
+ delta = if( .delta.known) .delta else eta[,2]
+
+ # The mean of the Levy distribution does not exist, so NAs of the
+ # correct length are returned as the fitted values.
+ NA * mygamma
+ }, list( .link.gamma=link.gamma,
+ .delta.known=delta.known,
+ .delta=delta ))),
+ last=eval(substitute(expression({
+ misc$link = if( .delta.known) NULL else c(delta="identity")
+ misc$link = c(gamma = .link.gamma, misc$link)
+ if( .delta.known)
+ misc$delta = .delta
+ }), list( .link.gamma=link.gamma,
+ .delta.known=delta.known,
+ .delta=delta ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ eta = as.matrix(eta)
+ mygamma = eta2theta(eta[,1], .link.gamma)
+ delta = if( .delta.known) .delta else eta[,2]
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * 0.5 * (log(mygamma) -3*log(y-delta) - mygamma / (y-delta )))
+ }, list( .link.gamma=link.gamma,
+ .delta.known=delta.known,
+ .delta=delta ))),
+ vfamily=c("levy"),
+ deriv=eval(substitute(expression({
+ eta = as.matrix(eta)
+ mygamma = eta2theta(eta[,1], .link.gamma)
+ delta = if( .delta.known) .delta else eta[,2]
+ if(! .delta.known)
+ dl.ddelta = (3 - mygamma / (y-delta)) / (2 * (y-delta))
+ dl.dgamma = 0.5 * (1 / mygamma - 1 / (y-delta))
+ dgamma.deta = dtheta.deta(mygamma, .link.gamma)
+ w * cbind(dl.dgamma * dgamma.deta,
+ if( .delta.known) NULL else dl.ddelta)
+ }), list( .link.gamma=link.gamma,
+ .delta.known=delta.known,
+ .delta=delta ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) # M = if(delta is known) 1 else 2
+ # Expected information entries; after the division by 2*mygamma^2
+ # below these are 1/(2*gamma^2), 3/(2*gamma^2) and 21/(2*gamma^2).
+ wz[,iam(1,1,M)] = 1 * dgamma.deta^2
+ if(! .delta.known) {
+ wz[,iam(1,2,M)] = 3 * dgamma.deta
+ wz[,iam(2,2,M)] = 21
+ }
+ wz = w * wz / (2 * mygamma^2)
+ wz
+ }), list( .link.gamma=link.gamma,
+ .delta.known=delta.known,
+ .delta=delta ))))
+}
+
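+# Simulation sketch: if W ~ Gamma(shape=1/2, rate=gamma/2) then 1/W is
+# Levy with delta=0, which gives an easy test of levy() (hypothetical;
+# assumes VGAM is loaded):
+# set.seed(9)
+# mygamma = 2
+# ldat = data.frame(y = 1 / rgamma(500, shape=0.5, rate=mygamma/2))
+# fit = vglm(y ~ 1, levy(delta=0), data=ldat, trace=TRUE)
+# Coef(fit) # gamma.hat should be near 2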
+
+
+
+# Disabled: the stoppa() family function below is not yet ready for use.
+if(FALSE)
+stoppa = function(y0,
+ link.alpha="loge",
+ link.theta="loge",
+ ialpha=NULL,
+ itheta=1.0,
+ zero=NULL)
+{
+ if(!is.Numeric(y0, allo=1) || y0 <= 0)
+ stop("y0 must be a positive value")
+
+ if(mode(link.alpha) != "character" && mode(link.alpha) != "name")
+ link.alpha = as.character(substitute(link.alpha))
+ if(mode(link.theta) != "character" && mode(link.theta) != "name")
+ link.theta = as.character(substitute(link.theta))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Stoppa distribution\n\n",
+ "Links: ",
+ namesof("alpha", link.alpha), ", ",
+ namesof("theta", link.theta), "\n",
+ if(is.R()) "Mean: theta*y0*beta(1-1/alpha, theta)" else
+ "Mean: theta*y0*beta(1-1/alpha, theta)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("alpha", .link.alpha, tag= FALSE),
+ namesof("theta", .link.theta, tag= FALSE))
+
+ y0 = .y0
+ if(min(y) < y0) stop("y0 must lie in the interval (0, min(y))")
+ if(!length( .ialpha) || !length( .itheta)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.theta = if(length( .itheta)) .itheta else 1
+ xvec = log(1-qvec^(1/ init.theta))
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec))-log(y0), intercept=FALSE)
+ }
+
+ extra$y0 = y0
+ if(!length(etastart)) {
+ alpha = rep(if(length( .ialpha)) .ialpha else -1/fit0$coef[1], length=n)
+ theta = rep(if(length( .itheta)) .itheta else 1.0, length=n)
+ etastart = cbind(theta2eta(alpha, .link.alpha),
+ theta2eta(theta, .link.theta))
+ }
+ }), list( .link.theta=link.theta, .link.alpha=link.alpha,
+ .y0=y0,
+ .itheta=itheta, .ialpha=ialpha ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha)
+ theta = eta2theta(eta[,2], .link.theta)
+ theta * extra$y0 * beta(1-1/alpha, theta)
+ }, list( .link.theta=link.theta, .link.alpha=link.alpha ))),
+ last=eval(substitute(expression({
+ misc$link = c(alpha= .link.alpha, theta= .link.theta)
+ }), list( .link.theta=link.theta, .link.alpha=link.alpha ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ alpha = eta2theta(eta[,1], .link.alpha)
+ theta = eta2theta(eta[,2], .link.theta)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(theta*alpha) + alpha*log(extra$y0) -(alpha+1)*log(y)+
+ (theta-1) * log(1 - (y/extra$y0)^(-alpha ))))
+ }, list( .link.theta=link.theta, .link.alpha=link.alpha ))),
+ vfamily=c("stoppa"),
+ deriv=eval(substitute(expression({
+ alpha = eta2theta(eta[,1], .link.alpha)
+ theta = eta2theta(eta[,2], .link.theta)
+ temp8 = (y / extra$y0)^(-alpha)
+ temp8a = log(temp8)
+ temp8b = log(1-temp8)
+ dl.dalpha = 1/alpha - log(y/extra$y0) + (theta-1) * temp8 *
+ log(y / extra$y0) / (1-temp8)
+ dl.dtheta = 1/theta + temp8b
+ dtheta.deta = dtheta.deta(theta, .link.theta)
+ dalpha.deta = dtheta.deta(alpha, .link.alpha)
+ w * cbind( dl.dalpha * dalpha.deta, dl.dtheta * dtheta.deta )
+ }), list( .link.theta=link.theta, .link.alpha=link.alpha ))),
+ weight=eval(substitute(expression({
+ ed2l.dalpha = 1/alpha^2 + theta * (2 * log(extra$y0) * (digamma(2)-
+ digamma(theta+4)) -
+ (trigamma(1)+trigamma(theta+3)) / alpha^3) /
+ (alpha * (theta+1) * (theta+2) / n) # zz / sum(w)
+ ed2l.dtheta = 1 / theta^2
+ ed2l.dalphatheta = (digamma(2)-digamma(theta+2)) / (alpha*(theta+1))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.dalpha * dalpha.deta^2
+ wz[,iam(2,2,M)] = ed2l.dtheta * dtheta.deta^2
+ wz[,iam(1,2,M)] = ed2l.dalphatheta * dtheta.deta * dalpha.deta
+ wz = w * wz
+ wz
+ }), list( .link.theta=link.theta, .link.alpha=link.alpha ))) )
+}
+
+
+
+dlino = function(x, shape1, shape2, lambda=1) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(shape1, posit=TRUE))
+ stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, posit=TRUE))
+ stop("bad input for argument \"shape2\"")
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ dbeta(x=x, shape1=shape1, shape2=shape2) * lambda^shape1 /
+ (1 - (1-lambda)*x)^(shape1+shape2)
+}
+
+plino = function(q, shape1, shape2, lambda=1) {
+ if(!is.Numeric(q)) stop("bad input for \"q\"")
+ if(!is.Numeric(shape1, posit=TRUE))
+ stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, posit=TRUE))
+ stop("bad input for argument \"shape2\"")
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ pbeta(q=lambda*q/(1 - (1-lambda)*q), shape1=shape1, shape2=shape2)
+}
+
+qlino = function(p, shape1, shape2, lambda=1) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(shape1, posit=TRUE))
+ stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, posit=TRUE))
+ stop("bad input for argument \"shape2\"")
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ Y = qbeta(p=p, shape1=shape1, shape2=shape2)
+ Y / (lambda + (1-lambda)*Y)
+}
+
+
+rlino = function(n, shape1, shape2, lambda=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(shape1, posit=TRUE))
+ stop("bad input for argument \"shape1\"")
+ if(!is.Numeric(shape2, posit=TRUE))
+ stop("bad input for argument \"shape2\"")
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop("bad input for argument \"lambda\"")
+ Y = rbeta(n=n, shape1=shape1, shape2=shape2)
+ Y / (lambda + (1-lambda)*Y)
+}
+
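+# With lambda=1 the Libby-Novick generalized beta reduces to the
+# ordinary beta distribution; quick checks (a sketch):
+# y = seq(0.05, 0.95, by=0.05)
+# all.equal(dlino(y, shape1=2, shape2=3, lambda=1), dbeta(y, 2, 3))
+# all.equal(plino(y, shape1=2, shape2=3, lambda=1), pbeta(y, 2, 3))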
+
+
+lino = function(lshape1="loge",
+ lshape2="loge",
+ llambda="loge",
+ ishape1=NULL, ishape2=NULL, ilambda=1, zero=NULL)
+{
+ if(mode(lshape1) != "character" && mode(lshape1) != "name")
+ lshape1 = as.character(substitute(lshape1))
+ if(mode(lshape2) != "character" && mode(lshape2) != "name")
+ lshape2 = as.character(substitute(lshape2))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.Numeric(ilambda, positive=TRUE))
+ stop("bad input for argument \"ilambda\"")
+
+ new("vglmff",
+ blurb=c("Generalized Beta distribution (Libby and Novick, 1982)\n\n",
+ "Links: ",
+ namesof("shape1", lshape1), ", ",
+ namesof("shape2", lshape2), ", ",
+ namesof("lambda", llambda), "\n",
+ "Mean: something complicated"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("shape1", .lshape1, tag= FALSE),
+ namesof("shape2", .lshape2, tag= FALSE),
+ namesof("lambda", .llambda, tag= FALSE))
+ if(min(y) <= 0 || max(y) >= 1)
+ stop("values of the response must be between 0 and 1 (0,1)")
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if(!length(etastart)) {
+ lambda.init = rep(if(length( .ilambda )) .ilambda else 1, length=n)
+ sh1.init = if(length( .ishape1 )) rep( .ishape1, length=n) else NULL
+ sh2.init = if(length( .ishape2 )) rep( .ishape2, length=n) else NULL
+ txY.init = lambda.init * y / (1+lambda.init*y - y)
+ mean1 = mean(txY.init)
+ mean2 = mean(1/txY.init)
+ if(!is.Numeric(sh1.init))
+ sh1.init = rep((mean2 - 1) / (mean2 - 1/mean1), length=n)
+ if(!is.Numeric(sh2.init))
+ sh2.init = rep(sh1.init * (1-mean1) / mean1, length=n)
+ etastart = cbind(theta2eta(sh1.init, .lshape1),
+ theta2eta(sh2.init, .lshape2),
+ theta2eta(lambda.init, .llambda))
+ }
+ }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda,
+ .ishape1=ishape1, .ishape2=ishape2, .ilambda=ilambda ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ sh1 = eta2theta(eta[,1], .lshape1)
+ sh2 = eta2theta(eta[,2], .lshape2)
+ lambda = eta2theta(eta[,3], .llambda)
+ rep(as.numeric(NA), length=nrow(eta))
+ }, list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape1 = .lshape1, shape2 = .lshape2, lambda = .llambda)
+ }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ sh1 = eta2theta(eta[,1], .lshape1)
+ sh2 = eta2theta(eta[,2], .lshape2)
+ lambda = eta2theta(eta[,3], .llambda)
+ if(!is.R()) lbeta = function(a,b) lgamma(a) + lgamma(b) - lgamma(a+b)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(sh1*log(lambda) + (sh1-1)*log(y) + (sh2-1)*log(1-y) -
+ lbeta(sh1,sh2) -(sh1+sh2)*log(1-(1-lambda)*y)) )
+ }, list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda ))),
+ vfamily=c("lino"),
+ deriv=eval(substitute(expression({
+ sh1 = eta2theta(eta[,1], .lshape1)
+ sh2 = eta2theta(eta[,2], .lshape2)
+ lambda = eta2theta(eta[,3], .llambda)
+ temp1 = log(1 - (1-lambda) * y)
+ temp2 = digamma(sh1+sh2)
+ dl.dsh1 = log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
+ dl.dsh2 = log(1-y) - digamma(sh2) + temp2 - temp1
+ dl.dlambda = sh1/lambda - (sh1+sh2) * y / (1 - (1-lambda) * y)
+ dsh1.deta = dtheta.deta(sh1, .lshape1)
+ dsh2.deta = dtheta.deta(sh2, .lshape2)
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ w * cbind( dl.dsh1 * dsh1.deta,
+ dl.dsh2 * dsh2.deta,
+ dl.dlambda * dlambda.deta)
+ }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ if(!is.R()) beta = function(a,b) (gamma(a) / gamma(a+b)) * gamma(b)
+ temp3 = trigamma(sh1+sh2)
+ ed2l.dsh1 = trigamma(sh1) - temp3
+ ed2l.dsh2 = trigamma(sh2) - temp3
+ ed2l.dlambda2 = sh1 * sh2 / (lambda^2 * (sh1+sh2+1))
+ ed2l.dsh1sh2 = -temp3
+ ed2l.dsh1lambda = -sh2 / ((sh1+sh2)*lambda)
+ ed2l.dsh2lambda = sh1 / ((sh1+sh2)*lambda)
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.dsh1 * dsh1.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsh2 * dsh2.deta^2
+ wz[,iam(3,3,M)] = ed2l.dlambda2 * dlambda.deta^2
+ wz[,iam(1,2,M)] = ed2l.dsh1sh2 * dsh1.deta * dsh2.deta
+ wz[,iam(1,3,M)] = ed2l.dsh1lambda * dsh1.deta * dlambda.deta
+ wz[,iam(2,3,M)] = ed2l.dsh2lambda * dsh2.deta * dlambda.deta
+ wz = w * wz
+ wz
+ }), list( .lshape1=lshape1, .lshape2=lshape2, .llambda=llambda ))))
+}
+
+
+genbetaII = function(link.a="loge",
+ link.scale="loge",
+ link.p="loge",
+ link.q="loge",
+ init.a=NULL,
+ init.scale=NULL,
+ init.p=1.0,
+ init.q=1.0,
+ zero=NULL)
+{
+
+ if(mode(link.a) != "character" && mode(link.a) != "name")
+ link.a = as.character(substitute(link.a))
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(mode(link.p) != "character" && mode(link.p) != "name")
+ link.p = as.character(substitute(link.p))
+ if(mode(link.q) != "character" && mode(link.q) != "name")
+ link.q = as.character(substitute(link.q))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Generalized Beta II distribution\n\n",
+ "Links: ",
+ namesof("a", link.a), ", ",
+ namesof("scale", link.scale), ", ",
+ namesof("p", link.p), ", ",
+ namesof("q", link.q), "\n",
+ "Mean: scale*gamma(p + 1/a)*gamma(q - 1/a)/(gamma(p)*gamma(q))"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("a", .link.a, tag= FALSE),
+ namesof("scale", .link.scale, tag= FALSE),
+ namesof("p", .link.p, tag= FALSE),
+ namesof("q", .link.q, tag= FALSE))
+
+ if(!length(.init.a) || !length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if(length(.init.q)) .init.q else 1
+ xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ aa = rep(if(length(.init.a)) .init.a else 1/fit0$coef[2], length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else
+ exp(fit0$coef[1]), length=n)
+ qq = rep(if(length(.init.q)) .init.q else 1.0, length=n)
+ parg = rep(if(length(.init.p)) .init.p else 1.0, length=n)
+ etastart = cbind(theta2eta(aa, .link.a),
+ theta2eta(scale, .link.scale),
+ theta2eta(parg, .link.p),
+ theta2eta(qq, .link.q))
+ }
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q,
+ .init.a=init.a, .init.scale=init.scale,
+ .init.p=init.p, .init.q=init.q ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = eta2theta(eta[,3], .link.p)
+ qq = eta2theta(eta[,4], .link.q)
+ scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
+ }, list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link.a, scale= .link.scale,
+ p= .link.p, q= .link.q)
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = eta2theta(eta[,3], .link.p)
+ qq = eta2theta(eta[,4], .link.q)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ vfamily=c("genbetaII"),
+ deriv=eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = eta2theta(eta[,3], .link.p)
+ qq = eta2theta(eta[,4], .link.q)
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3 = digamma(parg + qq)
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+ temp4 = log(1+temp2)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp = aa * temp1 + temp3 - temp3a - temp4
+ dl.dq = temp3 - temp3b - temp4
+ da.deta = dtheta.deta(aa, .link.a)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ dp.deta = dtheta.deta(parg, .link.p)
+ dq.deta = dtheta.deta(qq, .link.q)
+ w * cbind( dl.da * da.deta, dl.dscale * dscale.deta,
+ dl.dp * dp.deta, dl.dq * dq.deta )
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ weight=eval(substitute(expression({
+ temp5 = trigamma(parg + qq)
+ temp5a = trigamma(parg)
+ temp5b = trigamma(qq)
+ ed2l.da = (1 + parg+qq + parg * qq * (temp5a + temp5b +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dp = temp5a - temp5
+ ed2l.dq = temp5b - temp5
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ ed2l.dap= -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
+ ed2l.daq= -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
+ ed2l.dscalep = aa * qq / (scale*(parg+qq))
+ ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
+ ed2l.dpq = -temp5
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==4 means 10=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(3,3,M)] = ed2l.dp * dp.deta^2
+ wz[,iam(4,4,M)] = ed2l.dq * dq.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[,iam(1,3,M)] = ed2l.dap * da.deta * dp.deta
+ wz[,iam(1,4,M)] = ed2l.daq * da.deta * dq.deta
+ wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz[,iam(2,4,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz[,iam(3,4,M)] = ed2l.dpq * dp.deta * dq.deta
+ wz = w * wz
+ wz
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))))
+}
+
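+# The Singh-Maddala (p=1), Dagum (q=1), Fisk (p=q=1), Lomax (a=p=1),
+# inverse Lomax (a=q=1) and (inverse) paralogistic families below are
+# all special cases of genbetaII(). A fit sketch using the rsinmad()
+# helper defined below (hypothetical; assumes VGAM is loaded):
+# set.seed(10)
+# gdat = data.frame(y = rsinmad(1000, a=2, scale=3, q.arg=4))
+# fit = vglm(y ~ 1, sinmad, data=gdat, trace=TRUE)
+# Coef(fit) # (a, scale, q) near (2, 3, 4)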
+
+rsinmad = function(n, a, scale, q.arg)
+ qsinmad(runif(n), a, scale, q.arg)
+
+rlomax = function(n, scale, q.arg)
+ rsinmad(n, a=1, scale, q.arg)
+
+rfisk = function(n, a, scale)
+ rsinmad(n, a, scale, q.arg=1)
+
+rparalogistic = function(n, a, scale)
+ rsinmad(n, a, scale, a)
+
+rdagum = function(n, a, scale, p.arg)
+ qdagum(runif(n), a, scale, p.arg)
+
+rinvlomax = function(n, scale, p.arg)
+ rdagum(n, a=1, scale, p.arg)
+
+rinvparalogistic = function(n, a, scale)
+ rdagum(n, a, scale, a)
+
+
+
+
+qsinmad = function(p, a, scale, q.arg) {
+ bad = (p < 0) | (p > 1)
+ ans = NA * p
+ a = rep(a, len=length(p))[!bad]
+ scale = rep(scale, len=length(p))[!bad]
+ q = rep(q.arg, len=length(p))[!bad]
+ xx = p[!bad]
+ ans[!bad] = scale* ((1 - xx)^(-1/q) - 1)^(1/a)
+ ans
+}
+
+qlomax = function(p, scale, q.arg)
+ qsinmad(p, a=1, scale, q.arg)
+
+qfisk = function(p, a, scale)
+ qsinmad(p, a, scale, q.arg=1)
+
+qparalogistic = function(p, a, scale)
+ qsinmad(p, a, scale, a)
+
+qdagum = function(p, a, scale, p.arg) {
+ bad = (p < 0) | (p > 1)
+ ans = NA * p
+ a = rep(a, len=length(p))[!bad]
+ scale = rep(scale, len=length(p))[!bad]
+ p.arg = rep(p.arg, len=length(p))[!bad]
+ xx = p[!bad]
+ ans[!bad] = scale* (xx^(-1/p.arg) - 1)^(-1/a)
+ ans
+}
+
+qinvlomax = function(p, scale, p.arg)
+ qdagum(p, a=1, scale, p.arg)
+
+qinvparalogistic = function(p, a, scale)
+ qdagum(p, a, scale, a)
+
+
+
+
+
+
+psinmad = function(q, a, scale, q.arg) {
+ zero = q <= 0
+ a = rep(a, len=length(q))[!zero]
+ scale = rep(scale, len=length(q))[!zero]
+ q.arg = rep(q.arg, len=length(q))[!zero]
+ ans = 0 * q
+ xx = q[!zero]
+ ans[!zero] = 1 - (1 + (xx/scale)^a)^(-q.arg)
+ ans
+}
+
+plomax = function(q, scale, q.arg)
+ psinmad(q, a=1, scale, q.arg)
+
+pfisk = function(q, a, scale)
+ psinmad(q, a, scale, q.arg=1)
+
+pparalogistic = function(q, a, scale)
+ psinmad(q, a, scale, a)
+
+
+
+pdagum = function(q, a, scale, p.arg) {
+ zero = q <= 0
+ a = rep(a, len=length(q))[!zero]
+ scale = rep(scale, len=length(q))[!zero]
+ p = rep(p.arg, len=length(q))[!zero]
+ ans = 0 * q
+ xx = q[!zero]
+ ans[!zero] = (1 + (xx/scale)^(-a))^(-p)
+ ans
+}
+
+pinvlomax = function(q, scale, p.arg)
+ pdagum(q, a=1, scale, p.arg)
+
+pinvparalogistic = function(q, a, scale)
+ pdagum(q, a, scale, a)
+
+
+
+dsinmad = function(x, a, scale, q.arg) {
+ zero = x <= 0
+ a = rep(a, len=length(x))[!zero]
+ scale = rep(scale, len=length(x))[!zero]
+ q = rep(q.arg, len=length(x))[!zero]
+ ans = 0 * x
+ xx = x[!zero]
+ ans[!zero] = a * q * xx^(a-1) / (scale^a * (1 + (xx/scale)^a)^(1+q))
+ ans
+}
+
+dlomax = function(x, scale, q.arg)
+ dsinmad(x, a=1, scale, q.arg)
+
+dfisk = function(x, a, scale)
+ dsinmad(x, a, scale, q.arg=1)
+
+dparalogistic = function(x, a, scale)
+ dsinmad(x, a, scale, a)
+
+
+
+ddagum = function(x, a, scale, p.arg) {
+ zero = x <= 0
+ a = rep(a, len=length(x))[!zero]
+ scale = rep(scale, len=length(x))[!zero]
+ p = rep(p.arg, len=length(x))[!zero]
+ ans = 0 * x
+ xx = x[!zero]
+ ans[!zero] = a * p * xx^(a*p-1) / (scale^(a*p) * (1 + (xx/scale)^a)^(1+p))
+ ans
+}
+
+dinvlomax = function(x, scale, p.arg)
+ ddagum(x, a=1, scale, p.arg)
+
+dinvparalogistic = function(x, a, scale)
+ ddagum(x, a, scale, a)
+
+
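+# Consistency checks for the [dpqr] helpers above (a sketch):
+# p = c(0.25, 0.5, 0.75)
+# all.equal(psinmad(qsinmad(p, a=2, scale=3, q.arg=4),
+#           a=2, scale=3, q.arg=4), p)
+# The Fisk (log-logistic) density is the q.arg=1 special case:
+# x = 1:5
+# all.equal(dfisk(x, a=2, scale=3),
+#           (2/3) * (x/3) / (1 + (x/3)^2)^2)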
+
+sinmad = function(link.a="loge",
+ link.scale="loge",
+ link.q="loge",
+ init.a=NULL,
+ init.scale=NULL,
+ init.q=1.0,
+ zero=NULL)
+{
+
+ if(mode(link.a) != "character" && mode(link.a) != "name")
+ link.a = as.character(substitute(link.a))
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(mode(link.q) != "character" && mode(link.q) != "name")
+ link.q = as.character(substitute(link.q))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Singh-Maddala distribution\n\n",
+ "Links: ",
+ namesof("a", link.a), ", ",
+ namesof("scale", link.scale), ", ",
+ namesof("q", link.q), "\n",
+ "Mean: scale*gamma(1 + 1/a)*gamma(q - 1/a)/gamma(q)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("a", .link.a, tag= FALSE),
+ namesof("scale", .link.scale, tag= FALSE),
+ namesof("q", .link.q, tag= FALSE))
+ parg = 1
+
+ if(!length(.init.a) || !length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if(length(.init.q)) .init.q else 1
+ xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ aa = rep(if(length(.init.a)) .init.a else 1/fit0$coef[2], length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else
+ exp(fit0$coef[1]), length=n)
+ qq = rep(if(length(.init.q)) .init.q else 1.0, length=n)
+ etastart = cbind(theta2eta(aa, .link.a),
+ theta2eta(scale, .link.scale),
+ theta2eta(qq, .link.q))
+ }
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.q=link.q,
+ .init.a=init.a, .init.scale=init.scale,
+ .init.q=init.q ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ qq = eta2theta(eta[,3], .link.q)
+ scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
+ }, list( .link.a=link.a, .link.scale=link.scale,
+ .link.q=link.q ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link.a, scale= .link.scale, q= .link.q)
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.q=link.q ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE,eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = 1
+ qq = eta2theta(eta[,3], .link.q)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.a=link.a, .link.scale=link.scale,
+ .link.q=link.q ))),
+ vfamily=c("sinmad"),
+ deriv=eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = 1
+ qq = eta2theta(eta[,3], .link.q)
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dq = digamma(parg + qq) - temp3b - log(1+temp2)
+ da.deta = dtheta.deta(aa, .link.a)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ dq.deta = dtheta.deta(qq, .link.q)
+ w * cbind( dl.da * da.deta, dl.dscale * dscale.deta,
+ dl.dq * dq.deta )
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.q=link.q ))),
+ weight=eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dq = 1/qq^2
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ ed2l.daq = -(parg * (temp3b - temp3a) - 1) / (aa*(parg+qq))
+ ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(3,3,M)] = ed2l.dq * dq.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[,iam(1,3,M)] = ed2l.daq * da.deta * dq.deta
+ wz[,iam(2,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz = w * wz
+ wz
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.q=link.q ))))
+}
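+
+# A comment-only usage sketch (kept as comments so nothing runs at
+# package load). The sample is simulated by inverting the Singh-Maddala
+# CDF, F(y) = 1 - (1 + (y/scale)^a)^(-q):
+# u = runif(2000)
+# y = 6 * ((1-u)^(-1/2) - 1)^(1/4)    # a=4, scale=6, q=2
+# fit = vglm(y ~ 1, sinmad, trace=TRUE)
+# Coef(fit)    # should be near c(4, 6, 2)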
+
+
+ dagum = function(link.a="loge",
+ link.scale="loge",
+ link.p="loge",
+ init.a=NULL,
+ init.scale=NULL,
+ init.p=1.0,
+ zero=NULL)
+{
+
+ if(mode(link.a) != "character" && mode(link.a) != "name")
+ link.a = as.character(substitute(link.a))
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(mode(link.p) != "character" && mode(link.p) != "name")
+ link.p = as.character(substitute(link.p))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Dagum distribution\n\n",
+ "Links: ",
+ namesof("a", link.a), ", ",
+ namesof("scale", link.scale), ", ",
+ namesof("p", link.p), "\n",
+ "Mean: scale*gamma(p + 1/a)*gamma(1 - 1/a)/gamma(p)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("a", .link.a, tag= FALSE),
+ namesof("scale", .link.scale, tag= FALSE),
+ namesof("p", .link.p, tag= FALSE))
+
+ if(!length(.init.a) || !length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.p = if(length(.init.p)) .init.p else 1
+ xvec = log( qvec^(-1/ init.p ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ parg = rep(if(length(.init.p)) .init.p else 1.0, length=n)
+ aa = rep(if(length(.init.a)) .init.a else -1/fit0$coef[2], length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else
+ exp(fit0$coef[1]), length=n)
+ etastart = cbind(theta2eta(aa, .link.a),
+ theta2eta(scale, .link.scale),
+ theta2eta(parg, .link.p))
+ }
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p,
+ .init.a=init.a, .init.scale=init.scale,
+ .init.p=init.p ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = eta2theta(eta[,3], .link.p)
+ qq = 1
+ scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
+ }, list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link.a, scale= .link.scale, p= .link.p )
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = eta2theta(eta[,3], .link.p)
+ qq = 1
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p ))),
+ vfamily=c("dagum"),
+ deriv=eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = eta2theta(eta[,3], .link.p)
+ qq = 1
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp = aa * temp1 + digamma(parg + qq) - temp3a - log(1+temp2)
+ da.deta = dtheta.deta(aa, .link.a)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ dp.deta = dtheta.deta(parg, .link.p)
+ w * cbind( dl.da * da.deta, dl.dscale * dscale.deta,
+ dl.dp * dp.deta )
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p ))),
+ weight=eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dp = 1/parg^2
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ ed2l.dap = -(qq * (temp3a - temp3b) - 1) / (aa*(parg+qq))
+ ed2l.dscalep = aa * qq / (scale*(parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(3,3,M)] = ed2l.dp * dp.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[,iam(1,3,M)] = ed2l.dap * da.deta * dp.deta
+ wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz = w * wz
+ wz
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .link.p=link.p ))))
+}
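+
+# Comment-only usage sketch, inverting the Dagum CDF
+# F(y) = (1 + (y/scale)^(-a))^(-p):
+# u = runif(2000)
+# y = 6 * (u^(-1/2) - 1)^(-1/3)    # a=3, scale=6, p=2
+# fit = vglm(y ~ 1, dagum, trace=TRUE)
+# Coef(fit)    # should be near c(3, 6, 2)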
+
+
+
+betaII = function(link.scale="loge",
+ link.p="loge",
+ link.q="loge",
+ init.scale=NULL,
+ init.p=1.0,
+ init.q=1.0,
+ zero=NULL)
+{
+
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(mode(link.p) != "character" && mode(link.p) != "name")
+ link.p = as.character(substitute(link.p))
+ if(mode(link.q) != "character" && mode(link.q) != "name")
+ link.q = as.character(substitute(link.q))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Beta II distribution\n\n",
+ "Links: ",
+ namesof("scale", link.scale), ", ",
+ namesof("p", link.p), ", ",
+ namesof("q", link.q), "\n",
+ "Mean: scale*gamma(p + 1)*gamma(q - 1)/(gamma(p)*gamma(q))"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .link.scale, tag= FALSE),
+ namesof("p", .link.p, tag= FALSE),
+ namesof("q", .link.q, tag= FALSE))
+
+ if(!length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if(length(.init.q)) .init.q else 1
+ xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ scale = rep(if(length(.init.scale)) .init.scale else
+ exp(fit0$coef[1]), length=n)
+ qq = rep(if(length(.init.q)) .init.q else 1.0, length=n)
+ parg = rep(if(length(.init.p)) .init.p else 1.0, length=n)
+ etastart = cbind(theta2eta(scale, .link.scale),
+ theta2eta(parg, .link.p),
+ theta2eta(qq, .link.q))
+ }
+ }), list( .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q,
+ .init.scale=init.scale,
+ .init.p=init.p, .init.q=init.q ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = eta2theta(eta[,2], .link.p)
+ qq = eta2theta(eta[,3], .link.q)
+ scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
+ }, list( .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ last=eval(substitute(expression({
+ misc$link = c(scale= .link.scale, p= .link.p, q= .link.q)
+ }), list( .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = eta2theta(eta[,2], .link.p)
+ qq = eta2theta(eta[,3], .link.q)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ vfamily=c("betaII"),
+ deriv=eval(substitute(expression({
+ aa = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = eta2theta(eta[,2], .link.p)
+ qq = eta2theta(eta[,3], .link.q)
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3 = digamma(parg + qq)
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+ temp4 = log(1+temp2)
+
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp = aa * temp1 + temp3 - temp3a - temp4
+ dl.dq = temp3 - temp3b - temp4
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ dp.deta = dtheta.deta(parg, .link.p)
+ dq.deta = dtheta.deta(qq, .link.q)
+ w * cbind( dl.dscale * dscale.deta,
+ dl.dp * dp.deta, dl.dq * dq.deta )
+ }), list( .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))),
+ weight=eval(substitute(expression({
+ temp5 = trigamma(parg + qq)
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dp = trigamma(parg) - temp5
+ ed2l.dq = trigamma(qq) - temp5
+ ed2l.dscalep = aa * qq / (scale*(parg+qq))
+ ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
+ ed2l.dpq = -temp5
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(2,2,M)] = ed2l.dp * dp.deta^2
+ wz[,iam(3,3,M)] = ed2l.dq * dq.deta^2
+ wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz[,iam(1,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz[,iam(2,3,M)] = ed2l.dpq * dp.deta * dq.deta
+ wz = w * wz
+ wz
+ }), list( .link.scale=link.scale,
+ .link.p=link.p, .link.q=link.q ))))
+}
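+
+# Comment-only usage sketch. With a=1 the density above is a scaled
+# beta-prime, so a sample can be simulated directly from rbeta():
+# z = rbeta(2000, shape1=2, shape2=3)    # p=2, q=3
+# y = 6 * z / (1 - z)                    # scale=6
+# fit = vglm(y ~ 1, betaII, trace=TRUE)
+# Coef(fit)    # should be near c(6, 2, 3)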
+
+
+
+lomax = function(link.scale="loge",
+ link.q="loge",
+ init.scale=NULL,
+ init.q=1.0,
+ zero=NULL)
+{
+
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(mode(link.q) != "character" && mode(link.q) != "name")
+ link.q = as.character(substitute(link.q))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Lomax distribution\n\n",
+ "Links: ",
+ namesof("scale", link.scale), ", ",
+ namesof("q", link.q), "\n",
+ "Mean: scale/(q-1)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .link.scale, tag= FALSE),
+ namesof("q", .link.q, tag= FALSE))
+ aa = parg = 1
+
+ if(!length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.q = if(length(.init.q)) .init.q else 1
+ xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ qq = rep(if(length(.init.q)) .init.q else 1.0, length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else exp(fit0$coef[1]), length=n)
+ etastart = cbind(theta2eta(scale, .link.scale),
+ theta2eta(qq, .link.q))
+ }
+ }), list( .link.scale=link.scale,
+ .link.q=link.q,
+ .init.scale=init.scale,
+ .init.q=init.q ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ scale = eta2theta(eta[,1], .link.scale)
+ qq = eta2theta(eta[,2], .link.q)
+ scale/(qq-1)
+ }, list( .link.scale=link.scale,
+ .link.q=link.q ))),
+ last=eval(substitute(expression({
+ misc$link = c(scale= .link.scale, q= .link.q)
+ }), list( .link.scale=link.scale,
+ .link.q=link.q ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = 1
+ qq = eta2theta(eta[,2], .link.q)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.scale=link.scale,
+ .link.q=link.q ))),
+ vfamily=c("lomax"),
+ deriv=eval(substitute(expression({
+ aa = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = 1
+ qq = eta2theta(eta[,2], .link.q)
+ temp2 = (y/scale)^aa
+
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dq = digamma(parg + qq) - digamma(qq) - log(1+temp2)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ dq.deta = dtheta.deta(qq, .link.q)
+ w * cbind( dl.dscale * dscale.deta,
+ dl.dq * dq.deta )
+ }), list( .link.scale=link.scale,
+ .link.q=link.q ))),
+ weight=eval(substitute(expression({
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dq = 1/qq^2
+ ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(2,2,M)] = ed2l.dq * dq.deta^2
+ wz[,iam(1,2,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz = w * wz
+ wz
+ }), list( .link.scale=link.scale,
+ .link.q=link.q ))))
+}
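+
+# Comment-only usage sketch, inverting the Lomax CDF
+# F(y) = 1 - (1 + y/scale)^(-q):
+# u = runif(2000)
+# y = 6 * ((1-u)^(-1/2) - 1)    # scale=6, q=2
+# fit = vglm(y ~ 1, lomax, trace=TRUE)
+# Coef(fit)    # near c(6, 2); the mean exists only for q>1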
+
+
+ fisk = function(link.a="loge",
+ link.scale="loge",
+ init.a=NULL,
+ init.scale=NULL,
+ zero=NULL)
+{
+
+ if(mode(link.a) != "character" && mode(link.a) != "name")
+ link.a = as.character(substitute(link.a))
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Fisk distribution\n\n",
+ "Links: ",
+ namesof("a", link.a), ", ",
+ namesof("scale", link.scale), "\n",
+ "Mean: scale * gamma(1 + 1/a) * gamma(1 - 1/a)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("a", .link.a, tag= FALSE),
+ namesof("scale", .link.scale, tag= FALSE))
+ qq = parg = 1
+
+ if(!length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ xvec = log( 1/qvec - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ aa = rep(if(length(.init.a)) .init.a else -1/fit0$coef[2], length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else exp(fit0$coef[1]), length=n)
+ etastart = cbind(theta2eta(aa, .link.a),
+ theta2eta(scale, .link.scale))
+ }
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .init.a=init.a, .init.scale=init.scale
+ ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ qq = 1
+ scale*gamma(1 + 1/aa)*gamma(1-1/aa)
+ }, list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link.a, scale= .link.scale)
+ }), list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = qq = 1
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.a=link.a, .link.scale=link.scale ))),
+ vfamily=c("fisk"),
+ deriv=eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = qq = 1
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ da.deta = dtheta.deta(aa, .link.a)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ w * cbind( dl.da * da.deta, dl.dscale * dscale.deta )
+ }), list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ weight=eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = w * wz
+ wz
+ }), list( .link.a=link.a, .link.scale=link.scale ))))
+}
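+
+# Comment-only usage sketch, inverting the Fisk (log-logistic) CDF
+# F(y) = 1/(1 + (y/scale)^(-a)):
+# u = runif(2000)
+# y = 6 * (1/u - 1)^(-1/3)    # a=3, scale=6
+# fit = vglm(y ~ 1, fisk, trace=TRUE)
+# Coef(fit)    # should be near c(3, 6)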
+
+
+invlomax = function(link.scale="loge",
+ link.p="loge",
+ init.scale=NULL,
+ init.p=1.0,
+ zero=NULL)
+{
+
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(mode(link.p) != "character" && mode(link.p) != "name")
+ link.p = as.character(substitute(link.p))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Inverse Lomax distribution\n\n",
+ "Links: ",
+ namesof("scale", link.scale), ", ",
+ namesof("p", link.p), "\n",
+ "Mean: does not exist"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .link.scale, tag= FALSE),
+ namesof("p", .link.p, tag= FALSE))
+ qq = aa = 1
+
+ if(!length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.p = if(length(.init.p)) .init.p else 1
+ xvec = log( qvec^(-1/ init.p ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+ if(!length(etastart)) {
+ scale = rep(if(length(.init.scale)) .init.scale else exp(fit0$coef[1]), length=n)
+ parg = rep(if(length(.init.p)) .init.p else 1.0, length=n)
+ etastart = cbind(theta2eta(scale, .link.scale),
+ theta2eta(parg, .link.p))
+ }
+ }), list( .link.scale=link.scale,
+ .link.p=link.p,
+ .init.scale=init.scale,
+ .init.p=init.p ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ rep(as.numeric(NA), len=nrow(eta))
+ }, list( .link.scale=link.scale,
+ .link.p=link.p ))),
+ last=eval(substitute(expression({
+ misc$link = c(scale= .link.scale, p= .link.p )
+ }), list( .link.scale=link.scale,
+ .link.p=link.p ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = qq = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = eta2theta(eta[,2], .link.p)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.scale=link.scale,
+ .link.p=link.p ))),
+ vfamily=c("invlomax"),
+ deriv=eval(substitute(expression({
+ aa = qq = 1
+ scale = eta2theta(eta[,1], .link.scale)
+ parg = eta2theta(eta[,2], .link.p)
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp = aa * temp1 + digamma(parg + qq) - digamma(parg) - log(1+temp2)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ dp.deta = dtheta.deta(parg, .link.p)
+ w * cbind( dl.dscale * dscale.deta,
+ dl.dp * dp.deta )
+ }), list( .link.scale=link.scale,
+ .link.p=link.p ))),
+ weight=eval(substitute(expression({
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dp = 1/parg^2
+ ed2l.dscalep = aa * qq / (scale*(parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(2,2,M)] = ed2l.dp * dp.deta^2
+ wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz = w * wz
+ wz
+ }), list( .link.scale=link.scale,
+ .link.p=link.p ))))
+}
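+
+# Comment-only usage sketch, inverting the inverse Lomax CDF
+# F(y) = (1 + scale/y)^(-p):
+# u = runif(2000)
+# y = 6 / (u^(-1/2) - 1)    # scale=6, p=2
+# fit = vglm(y ~ 1, invlomax, trace=TRUE)
+# Coef(fit)    # near c(6, 2); the mean of this distribution does not exist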
+
+
+paralogistic = function(link.a="loge",
+ link.scale="loge",
+ init.a=1.0,
+ init.scale=NULL,
+ zero=NULL)
+{
+
+ if(mode(link.a) != "character" && mode(link.a) != "name")
+ link.a = as.character(substitute(link.a))
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Paralogistic distribution\n\n",
+ "Links: ",
+ namesof("a", link.a), ", ",
+ namesof("scale", link.scale), "\n",
+ "Mean: scale*gamma(1 + 1/a)*gamma(a - 1/a)/gamma(a)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("a", .link.a, tag= FALSE),
+ namesof("scale", .link.scale, tag= FALSE))
+ parg = 1
+
+ if(!length(.init.a) || !length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.a = if(length(.init.a)) .init.a else 1
+ xvec = log( (1-qvec)^(-1/ init.a ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ if(!length(etastart)) {
+ aa = rep(if(length(.init.a)) .init.a else 1/fit0$coef[2], length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else
+ exp(fit0$coef[1]), length=n)
+ etastart = cbind(theta2eta(aa, .link.a),
+ theta2eta(scale, .link.scale))
+ }
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .init.a=init.a, .init.scale=init.scale
+ ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ qq = aa
+ scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
+ }, list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link.a, scale= .link.scale)
+ }), list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = 1
+ qq = aa
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ vfamily=c("paralogistic"),
+ deriv=eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = 1
+ qq = aa
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ da.deta = dtheta.deta(aa, .link.a)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ w * cbind( dl.da * da.deta, dl.dscale * dscale.deta)
+ }), list( .link.a=link.a, .link.scale=link.scale
+ ))),
+ weight=eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = w * wz
+ wz
+ }), list( .link.a=link.a, .link.scale=link.scale
+ ))))
+}
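+
+# Comment-only usage sketch, inverting the paralogistic CDF
+# F(y) = 1 - (1 + (y/scale)^a)^(-a):
+# u = runif(2000)
+# y = 6 * ((1-u)^(-1/2) - 1)^(1/2)    # a=2, scale=6
+# fit = vglm(y ~ 1, paralogistic, trace=TRUE)
+# Coef(fit)    # should be near c(2, 6)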
+
+
+ invparalogistic = function(link.a="loge",
+ link.scale="loge",
+ init.a=1.0,
+ init.scale=NULL,
+ zero=NULL)
+{
+
+ if(mode(link.a) != "character" && mode(link.a) != "name")
+ link.a = as.character(substitute(link.a))
+ if(mode(link.scale) != "character" && mode(link.scale) != "name")
+ link.scale = as.character(substitute(link.scale))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Inverse paralogistic distribution\n\n",
+ "Links: ",
+ namesof("a", link.a), ", ",
+ namesof("scale", link.scale), "\n",
+ "Mean: scale*gamma(a + 1/a)*gamma(1 - 1/a)/gamma(a)"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("a", .link.a, tag= FALSE),
+ namesof("scale", .link.scale, tag= FALSE))
+
+ if(!length(.init.a) || !length(.init.scale)) {
+ qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
+ init.p = if(length(.init.a)) .init.a else 1
+ xvec = log( qvec^(-1/ init.p ) - 1 )
+ fit0 = lsfit(x=xvec, y=log(quantile(y, qvec )))
+ }
+
+ qq = 1
+ if(!length(etastart)) {
+ aa = rep(if(length(.init.a)) .init.a else -1/fit0$coef[2], length=n)
+ scale = rep(if(length(.init.scale)) .init.scale else
+ exp(fit0$coef[1]), length=n)
+ etastart = cbind(theta2eta(aa, .link.a),
+ theta2eta(scale, .link.scale))
+ }
+ }), list( .link.a=link.a, .link.scale=link.scale,
+ .init.a=init.a, .init.scale=init.scale ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = aa
+ qq = 1
+ scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
+ }, list( .link.a=link.a, .link.scale=link.scale ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link.a, scale= .link.scale )
+ }), list( .link.a=link.a, .link.scale=link.scale ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = aa
+ qq = 1
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (if(is.R()) -lbeta(parg, qq) else lgamma(parg+qq)-lgamma(parg)-lgamma(qq))-
+ (parg+qq)*log(1 + (y/scale)^aa )))
+ }, list( .link.a=link.a, .link.scale=link.scale ))),
+ vfamily=c("invparalogistic"),
+ deriv=eval(substitute(expression({
+ aa = eta2theta(eta[,1], .link.a)
+ scale = eta2theta(eta[,2], .link.scale)
+ parg = aa
+ qq = 1
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ da.deta = dtheta.deta(aa, .link.a)
+ dscale.deta = dtheta.deta(scale, .link.scale)
+ w * cbind( dl.da * da.deta, dl.dscale * dscale.deta )
+ }), list( .link.a=link.a, .link.scale=link.scale ))),
+ weight=eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) # M==2 means 3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = w * wz
+ wz
+ }), list( .link.a=link.a, .link.scale=link.scale ))))
+}
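+
+# Comment-only usage sketch, inverting the inverse paralogistic CDF
+# F(y) = (1 + (y/scale)^(-a))^(-a):
+# u = runif(2000)
+# y = 6 * (u^(-1/2) - 1)^(-1/2)    # a=2, scale=6
+# fit = vglm(y ~ 1, invparalogistic, trace=TRUE)
+# Coef(fit)    # should be near c(2, 6)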
+
+
+
+if(FALSE)
+genlognormal = function(link.sigma="loge",
+ link.r="loge",
+ init.sigma=1, init.r=1, zero=NULL)
+{
+warning(paste("2/4/04; doesn't work, possibly because first derivs are",
+ "not continuous (sign() is used). Certainly, the derivs wrt",
+ "mymu are problematic (run with maxit=4:9 and look at weight",
+ "matrices). Possibly fundamentally cannot be estimated by IRLS.",
+ "Pooling doesn't seem to help."))
+
+ if(mode(link.sigma) != "character" && mode(link.sigma) != "name")
+ link.sigma = as.character(substitute(link.sigma))
+ if(mode(link.r) != "character" && mode(link.r) != "name")
+ link.r = as.character(substitute(link.r))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Three-parameter generalized lognormal distribution\n\n",
+ "Links: ",
+ "loc; ", namesof("sigma", link.sigma, tag= TRUE),
+ ", ", namesof("r", link.r, tag= TRUE)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c("loc", # zz call it "mean" or "mymu" ?
+ namesof("sigma", .link.sigma, tag= FALSE),
+ namesof("r", .link.r, tag= FALSE))
+
+ if(any(y <= 0)) stop("y must be positive") # check before log(y) is taken
+ if(!length(.init.sigma) || !length(.init.r)) {
+ init.r = if(length(.init.r)) .init.r else 1
+ sigma.init = (0.5 * sum(abs(log(y) - mean(log(y)))^init.r))^(1/init.r)
+ }
+
+ if(!length(etastart)) {
+ sigma.init = rep(if(length( .init.sigma)) .init.sigma else sigma.init, len=n)
+ r.init = if(length( .init.r)) .init.r else init.r
+ etastart = cbind(mu=rep(log(median(y)), len=n),
+ sigma=sigma.init,
+ r = r.init)
+ }
+ }), list( .link.sigma=link.sigma, .link.r=link.r,
+ .init.sigma=init.sigma, .init.r=init.r ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], "identity")
+ sigma = eta2theta(eta[,2], .link.sigma)
+ r = eta2theta(eta[,3], .link.r)
+ r
+ }, list( .link.sigma=link.sigma, .link.r=link.r ))),
+ last=eval(substitute(expression({
+ misc$link = c(loc="identity", "sigma"= .link.sigma, r= .link.r )
+ misc$expected = TRUE
+ }), list( .link.sigma=link.sigma, .link.r=link.r ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ mymu = eta2theta(eta[,1], "identity")
+ sigma = eta2theta(eta[,2], .link.sigma)
+ r = eta2theta(eta[,3], .link.r)
+ temp89 = (abs(log(y)-mymu)/sigma)^r
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
+ }, list( .link.sigma=link.sigma, .link.r=link.r ))),
+ vfamily=c("genlognormal3"),
+ deriv=eval(substitute(expression({
+ mymu = eta2theta(eta[,1], "identity")
+ sigma = eta2theta(eta[,2], .link.sigma)
+ r = eta2theta(eta[,3], .link.r)
+ ss = 1 + 1/r
+ temp33 = (abs(log(y)-mymu)/sigma)
+ temp33r1 = temp33^(r-1)
+ dl.dmymu = temp33r1 * sign(log(y)-mymu) / sigma
+ dl.dsigma = (temp33*temp33r1 - 1) / sigma
+ dl.dr = (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
+ temp33r1 * log(temp33r1) / r
+
+ dmymu.deta = dtheta.deta(mymu, "identity")
+ dsigma.deta = dtheta.deta(sigma, .link.sigma)
+ dr.deta = dtheta.deta(r, .link.r)
+ w * cbind(dl.dmymu * dmymu.deta,
+ dl.dsigma * dsigma.deta,
+ dl.dr * dr.deta)
+ }), list( .link.sigma=link.sigma, .link.r=link.r ))),
+ weight=expression({
+ wz = matrix(0, n, 6) # dimm(M)=6; using 5 columns would give a small saving of one column
+ B = log(r) + digamma(ss)
+ ed2l.dmymu2 = (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
+ ed2l.dsigma2 = r / sigma^2
+ ed2l.dr2 = (ss * trigamma(ss) + B^2 - 1) / r^3
+ ed2l.dsigmar = -B / (r * sigma)
+ wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsigma2 * dsigma.deta^2
+ wz[,iam(3,3,M)] = ed2l.dr2 * dr.deta^2
+ wz[,iam(2,3,M)] = ed2l.dsigmar * dsigma.deta * dr.deta
+ wz = w * wz
+ wz
+ }))
+}
+
+
+betaprime = function(link="loge", i1=2, i2=NULL, zero=NULL)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Beta-prime distribution\n",
+ "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
+ " y>0, shape1>0, shape2>0\n\n",
+ "Links: ",
+ namesof("shape1", link), ", ",
+ namesof("shape2", link), "\n",
+ "Mean: shape1/(shape2-1) provided shape2>1"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(y <- as.matrix(y)) > 1)
+ stop("betaprime cannot handle matrix responses yet")
+ if(min(y) <= 0)
+ stop("response must be positive")
+ predictors.names = c(namesof("shape1", .link, short= TRUE),
+ namesof("shape2", .link, short= TRUE))
+ if(is.numeric(.i1) && is.numeric(.i2)) {
+ vec = c(.i1, .i2)
+ vec = c(theta2eta(vec[1], .link), theta2eta(vec[2], .link))
+ etastart = matrix(vec, n, 2, byrow= TRUE)
+ }
+ if(!length(etastart)) {
+ init1 = if(length( .i1)) rep( .i1, len=n) else rep(1, len=n)
+ init2 = if(length( .i2)) rep( .i2, len=n) else 1 + init1 / (y + 0.1)
+ etastart = cbind(theta2eta(init1, .link), theta2eta(init2, .link))
+ }
+ }), list( .link=link, .i1=i1, .i2=i2 ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shapes = eta2theta(eta, .link)
+ ifelse(shapes[,2] > 1, shapes[,1]/(shapes[,2]-1), NA)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape1 = .link, shape2 = .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ shapes = eta2theta(eta, .link)
+ temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
+ lgamma(shapes[,1]) + lgamma(shapes[,2]) -
+ lgamma(shapes[,1]+shapes[,2])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w *((shapes[,1]-1)*log(y)-(shapes[,2]+shapes[,1])*log(1+y)-temp))
+ }, list( .link=link ))),
+ vfamily="betaprime",
+ deriv=eval(substitute(expression({
+ shapes = eta2theta(eta, .link)
+ dshapes.deta = dtheta.deta(shapes, .link)
+ dl.dshapes = cbind(log(y) - log(1+y) - digamma(shapes[,1]) +
+ digamma(shapes[,1]+shapes[,2]),
+ - log(1+y) - digamma(shapes[,2]) +
+ digamma(shapes[,1]+shapes[,2]))
+ w * dl.dshapes * dshapes.deta
+ }), list( .link=link ))),
+ weight=expression({
+ temp2 = trigamma(shapes[,1]+shapes[,2])
+ d2l.dshape12 = temp2 - trigamma(shapes[,1])
+ d2l.dshape22 = temp2 - trigamma(shapes[,2])
+ d2l.dshape1shape2 = temp2
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[,1]^2
+ wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[,2]^2
+ wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[,1] * dshapes.deta[,2]
+
+ -w * wz
+ }))
+}
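+
+# Comment-only usage sketch. If Z ~ Beta(shape1, shape2) then
+# Z/(1-Z) is beta-prime, which gives an easy simulation:
+# z = rbeta(1000, shape1=4, shape2=3)
+# y = z / (1 - z)
+# fit = vglm(y ~ 1, betaprime, trace=TRUE)
+# Coef(fit)    # near c(4, 3); fitted mean = shape1/(shape2-1) = 2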
+
+
+
+maxwell = function(link="loge") {
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Maxwell distribution f(y) = sqrt(2/pi) * a^(3/2) * y^2 *",
+ " exp(-0.5*a*y^2), y>0, a>0\n",
+ "Link: ", namesof("a", link), "\n", "\n",
+ "Mean: sqrt(8 / (a * pi))"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("a", .link, tag= FALSE)
+ if(!length(etastart)) {
+ a.init = rep(8 / (pi*(y+0.1)^2), length=length(y))
+ etastart = theta2eta(a.init, .link)
+ }
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ sqrt(8 / (a * pi))
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (1.5 * log(a) + 2 * log(y) - 0.5 * a * y^2 + 0.5*log(2/pi )))
+ }, list( .link=link ))),
+ vfamily=c("maxwell"),
+ deriv=eval(substitute(expression({
+ a = eta2theta(eta, .link)
+ dl.da = 1.5 / a - 0.5 * y^2
+ da.deta = dtheta.deta(a, .link)
+ w * dl.da * da.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ ed2l.da2 = 1.5 / a^2
+ wz = w * da.deta^2 * ed2l.da2
+ wz
+ }), list( .link=link ))))
+}
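+
+# Comment-only usage sketch; rmaxwell() is defined just below:
+# y = rmaxwell(1000, a=2)
+# fit = vglm(y ~ 1, maxwell, trace=TRUE)
+# Coef(fit)    # near 2; the fitted mean is sqrt(8/(a*pi))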
+
+
+dmaxwell = function(x, a) {
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ L = max(length(x), length(a))
+ x = rep(x, len=L); a = rep(a, len=L);
+ ifelse(x>0, sqrt(2/pi) * a^(1.5) * x^2 * exp(-0.5*a*x^2), 0)
+}
+
+pmaxwell = function(q, a) {
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ L = max(length(q), length(a))
+ q = rep(q, len=L); a = rep(a, len=L);
+ ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
+}
+
+rmaxwell = function(n, a) {
+ if(!is.Numeric(n, posit=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ sqrt(2 * rgamma(n=n, 1.5) / a)
+}
+
+qmaxwell = function(p, a) {
+ if(!is.Numeric(p, posit=TRUE) || any(p>=1))
+ stop("bad input for argument \"p\"")
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ N = max(length(p), length(a)); p = rep(p, len=N); a = rep(a, len=N)
+ sqrt(2 * qgamma(p=p, 1.5) / a)
+}
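+
+# Quick self-consistency checks for the d/p/q/r functions above:
+# pmaxwell(qmaxwell(0.75, a=2), a=2)    # should return 0.75
+# mean(rmaxwell(10000, a=2))            # should be near sqrt(8/(2*pi))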
+
+
+
+
+dnaka = function(x, shape, scale=1) {
+ L = max(length(x), length(shape), length(scale))
+ x = rep(x, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
+ ifelse(x <= 0, 0, dgamma(x=x^2, shape=shape, scale=scale/shape) * 2 * x)
+}
+
+
+pnaka = function(q, shape, scale=1) {
+ if(!is.Numeric(q))
+ stop("bad input for argument \"q\"")
+ if(!is.Numeric(shape, posit=TRUE))
+ stop("bad input for argument \"shape\"")
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("bad input for argument \"scale\"")
+ L = max(length(q), length(shape), length(scale))
+ q = rep(q, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
+ ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
+}
+
+
+qnaka = function(p, shape, scale=1, ...) {
+ if(!is.Numeric(p, posit=TRUE) || max(p) >= 1)
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(shape, posit=TRUE))
+ stop("bad input for argument \"shape\"")
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("bad input for argument \"scale\"")
+ L = max(length(p), length(shape), length(scale))
+ p = rep(p, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
+ ans = rep(0.0, len=L)
+ myfun = function(x, shape, scale=1, p)
+ pnaka(q=x, shape=shape, scale=scale) - p
+ for(i in 1:L) {
+ EY = sqrt(scale[i]/shape[i]) * gamma(shape[i]+0.5) / gamma(shape[i])
+ Upper = 5 * EY
+ while(pnaka(q=Upper, shape=shape[i], scale=scale[i]) < p[i])
+ Upper = Upper + scale[i]
+ ans[i] = uniroot(f=myfun, lower=0, upper=Upper,
+ shape=shape[i], scale=scale[i], p=p[i], ...)$root
+ }
+ ans
+}
+
+
+rnaka = function(n, shape, scale=1, Smallno=1.0e-6) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(scale, posit=TRUE, allow=1))
+ stop("bad input for argument \"scale\"")
+ if(!is.Numeric(shape, posit=TRUE, allow=1))
+ stop("bad input for argument \"shape\"")
+ if(!is.Numeric(Smallno, posit=TRUE, allow=1) || Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument \"Smallno\"")
+ ans = rep(0.0, len=n)
+
+ ptr1 = 1; ptr2 = 0
+ ymax = dnaka(x=sqrt(scale*(1 - 0.5/shape)), shape=shape, scale=scale)
+ while(ptr2 < n) {
+ EY = sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
+ Upper = EY + 5 * scale
+ while(pnaka(q=Upper, shape=shape, scale=scale) < 1-Smallno)
+ Upper = Upper + scale
+ x = runif(2*n, min=0, max=Upper)
+ index = runif(2*n, max=ymax) < dnaka(x, shape=shape, scale=scale)
+ sindex = sum(index)
+ if(sindex) {
+ ptr2 = min(n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 = ptr2 + 1
+ }
+ }
+ ans
+}
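+
+# Quick self-consistency checks for the Nakagami d/p/q/r functions:
+# pnaka(qnaka(0.5, shape=2, scale=3), shape=2, scale=3)    # should be 0.5
+# mean(rnaka(5000, shape=2, scale=3)^2)    # E(Y^2) = scale = 3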
+
+
+
+
+
+
+
+
+nakagami = function(lshape="loge", lscale="loge", ishape=NULL, iscale=1) {
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.null(iscale) && !is.Numeric(iscale, positi=TRUE))
+ stop("argument \"iscale\" must be a positive number or NULL")
+
+ new("vglmff",
+ blurb=c("Nakagami distribution f(y) = 2 * (shape/scale)^shape *\n",
+ " ",
+ "y^(2*shape-1) * exp(-shape*y^2/scale) / gamma(shape),\n",
+ " ",
+ "y>0, shape>0, scale>0\n",
+ "Links: ",
+ namesof("shape", lshape), ", ",
+ namesof("scale", lscale),
+ "\n",
+ "\n",
+ "Mean: sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names = c(namesof("shape", .lshape, tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE))
+ if(!length(etastart)) {
+ init2 = if(is.Numeric( .iscale, posit=TRUE))
+ rep( .iscale, len=n) else rep(1, len=n)
+ init1 = if(is.Numeric( .ishape, posit=TRUE))
+ rep( .ishape, len=n) else
+ rep(init2 / (y+1/8)^2, len=n)
+ etastart = cbind(theta2eta(init1, .lshape),
+ theta2eta(init2, .lscale))
+ }
+ }), list( .lscale=lscale, .lshape=lshape, .ishape=ishape, .iscale=iscale ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta[,1], .lshape)
+ scale = eta2theta(eta[,2], .lscale)
+ sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
+ }, list( .lscale=lscale, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .lshape, scale= .lscale)
+ misc$expected = TRUE
+ }), list( .lscale=lscale, .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ shape = eta2theta(eta[,1], .lshape)
+ scale = eta2theta(eta[,2], .lscale)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(2) + shape * log(shape/scale) - lgamma(shape) +
+ (2*shape-1) * log(y) - shape * y^2 / scale))
+ }, list( .lscale=lscale, .lshape=lshape ))),
+ vfamily=c("nakagami"),
+ deriv=eval(substitute(expression({
+ shape = eta2theta(eta[,1], .lshape)
+ Scale = eta2theta(eta[,2], .lscale)
+ dl.dshape = 1 + log(shape/Scale) - digamma(shape) +
+ 2 * log(y) - y^2 / Scale
+ dl.dscale = -shape/Scale + shape * (y/Scale)^2
+ dshape.deta = dtheta.deta(shape, .lshape)
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ w * cbind(dl.dshape * dshape.deta, dl.dscale * dscale.deta)
+ }), list( .lscale=lscale, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ d2l.dshape2 = trigamma(shape) - 1/shape
+ d2l.dscale2 = shape / Scale^2
+ wz = matrix(as.numeric(NA), n, M) # diagonal
+ wz[,iam(1,1,M)] = d2l.dshape2 * dshape.deta^2
+ wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
+ w * wz
+ }), list( .lscale=lscale, .lshape=lshape ))))
+}
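+
+# Comment-only usage sketch; rnaka() is defined earlier in this file:
+# y = rnaka(1000, shape=2, scale=3)
+# fit = vglm(y ~ 1, nakagami, trace=TRUE)
+# Coef(fit)    # should be near c(2, 3)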
+
+
+
+
+rayleigh = function(link="loge") {
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ new("vglmff",
+ blurb=c("Rayleigh distribution f(y) = y*exp(-0.5*(y/a)^2)/a^2, y>0, a>0\n",
+ "Link: ",
+ namesof("a", link), "\n\n",
+ "Mean: a * sqrt(pi / 2)"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("a", .link, tag= FALSE)
+ if(!length(etastart)) {
+ a.init = (y+1/8) / sqrt(pi/2)
+ etastart = theta2eta(a.init, .link)
+ }
+ }), list( .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ a * sqrt(pi/2)
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(a= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ a = eta2theta(eta, .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(y) - 2 * log(a) - 0.5 * (y/a)^2))
+ }, list( .link=link ))),
+ vfamily=c("rayleigh"),
+ deriv=eval(substitute(expression({
+ a = eta2theta(eta, .link)
+ dl.da = ((y/a)^2 - 2) / a
+ da.deta = dtheta.deta(a, .link)
+ w * dl.da * da.deta
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ ed2l.da2 = 4 / a^2
+ wz = w * da.deta^2 * ed2l.da2
+ wz
+ }), list( .link=link ))))
+}
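+
+# Comment-only usage sketch; rrayleigh() is defined just below:
+# y = rrayleigh(1000, a=3)
+# fit = vglm(y ~ 1, rayleigh, trace=TRUE)
+# Coef(fit)    # near 3; the fitted mean is a*sqrt(pi/2)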
+
+
+drayleigh = function(x, a) {
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ L = max(length(x), length(a))
+ x = rep(x, len=L); a = rep(a, len=L);
+ ifelse(x>0, x*exp(-0.5*(x/a)^2)/a^2, 0)
+}
+
+prayleigh = function(q, a) {
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ L = max(length(q), length(a))
+ q = rep(q, len=L); a = rep(a, len=L);
+ ifelse(q > 0, 1 - exp(-0.5*(q/a)^2), 0)
+}
+
+qrayleigh = function(p, a) {
+ if(any(a <= 0)) stop("argument \"a\" must be positive")
+ if(any(p <= 0) || any(p >= 1)) stop("argument \"p\" must be between 0 and 1")
+ a * sqrt(-2 * log(1-p))
+}
+
+rrayleigh = function(n, a) {
+ if(!is.Numeric(n, posit=TRUE, allow=1, integ=TRUE))
+ stop("bad input for argument \"n\"")
+ if(any(a <= 0)) stop("a must be positive")
+ a * sqrt(-2 * log(runif(n )))
+}
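+
+# Quick self-consistency check for the d/p/q/r functions above:
+# prayleigh(qrayleigh(0.9, a=3), a=3)    # should return 0.9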
+
+
+
+
+
+dparetoIV = function(x, location=0, scale=1, inequality=1, shape=1) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(inequality, posi=TRUE))
+ stop("bad input for argument \"inequality\"")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ N = max(length(x), length(location), length(scale), length(inequality),
+ length(shape))
+ x = rep(x, len=N); location = rep(location, len=N)
+ scale = rep(scale, len=N); inequality = rep(inequality, len=N)
+ shape = rep(shape, len=N)
+ answer = x * 0
+ ii = x > location
+ zedd = (x[ii] - location[ii]) / scale[ii]
+ answer[ii] = (shape[ii]/(scale[ii]*inequality[ii])) *
+ zedd^(1/inequality[ii]-1) / (1+zedd^(1/inequality[ii]))^(shape[ii]+1)
+ answer
+}
+
+pparetoIV = function(q, location=0, scale=1, inequality=1, shape=1) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("bad input for argument \"scale\"")
+ if(!is.Numeric(inequality, posi=TRUE))
+ stop("bad input for argument \"inequality\"")
+ if(!is.Numeric(shape, posit=TRUE))
+ stop("bad input for argument \"shape\"")
+ N = max(length(q), length(location), length(scale), length(inequality),
+ length(shape))
+ q = rep(q, len=N); location = rep(location, len=N)
+ scale = rep(scale, len=N); inequality = rep(inequality, len=N)
+ shape = rep(shape, len=N)
+ answer = q * 0
+ ii = q > location
+ zedd = (q[ii] - location[ii]) / scale[ii]
+ answer[ii] = 1 - (1 + zedd^(1/inequality[ii]))^(-shape[ii])
+ answer
+}
+
+qparetoIV = function(p, location=0, scale=1, inequality=1, shape=1) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("bad input for argument \"scale\"")
+ if(!is.Numeric(inequality, posi=TRUE))
+ stop("bad input for argument \"inequality\"")
+ if(!is.Numeric(shape, posit=TRUE))
+ stop("bad input for argument \"shape\"")
+ location + scale * (-1 + (1-p)^(-1/shape))^inequality
+}
+
+rparetoIV = function(n, location=0, scale=1, inequality=1, shape=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument n")
+ if(!is.Numeric(scale, posit=TRUE)) stop("bad input for argument \"scale\"")
+ if(!is.Numeric(inequality, posi=TRUE))
+ stop("bad input for argument \"inequality\"")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ location + scale * (-1 + runif(n)^(-1/shape))^inequality
+}
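+
+# Quick self-consistency check for the Pareto(IV) d/p/q/r functions,
+# with positional arguments (location, scale, inequality, shape):
+# pparetoIV(qparetoIV(0.5, 0, 1, 2, 3), 0, 1, 2, 3)    # should be 0.5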
+
+
+dparetoIII = function(x, location=0, scale=1, inequality=1)
+ dparetoIV(x=x, location=location, scale=scale, inequality=inequality, shape=1)
+
+pparetoIII = function(q, location=0, scale=1, inequality=1)
+ pparetoIV(q=q, location=location, scale=scale, inequality=inequality, shape=1)
+
+qparetoIII = function(p, location=0, scale=1, inequality=1)
+ qparetoIV(p=p, location=location, scale=scale, inequality=inequality, shape=1)
+
+rparetoIII = function(n, location=0, scale=1, inequality=1)
+ rparetoIV(n=n, location=location, scale=scale, inequality=inequality, shape=1)
+
+
+
+dparetoII = function(x, location=0, scale=1, shape=1)
+ dparetoIV(x=x, location=location, scale=scale, inequality=1, shape=shape)
+
+pparetoII = function(q, location=0, scale=1, shape=1)
+ pparetoIV(q=q, location=location, scale=scale, inequality=1, shape=shape)
+
+qparetoII = function(p, location=0, scale=1, shape=1)
+ qparetoIV(p=p, location=location, scale=scale, inequality=1, shape=shape)
+
+rparetoII = function(n, location=0, scale=1, shape=1)
+ rparetoIV(n=n, location=location, scale=scale, inequality=1, shape=shape)
+
+
+dparetoI = function(x, scale=1, shape=1)
+ dparetoIV(x=x, location=scale, scale=scale, inequality=1, shape=shape)
+
+pparetoI = function(q, scale=1, shape=1)
+ pparetoIV(q=q, location=scale, scale=scale, inequality=1, shape=shape)
+
+qparetoI = function(p, scale=1, shape=1)
+ qparetoIV(p=p, location=scale, scale=scale, inequality=1, shape=shape)
+
+rparetoI = function(n, scale=1, shape=1)
+ rparetoIV(n=n, location=scale, scale=scale, inequality=1, shape=shape)
+
+
+
+paretoIV = function(location=0,
+ lscale="loge",
+ linequality="loge",
+ lshape="loge",
+ iscale=1, iinequality=1, ishape=NULL,
+ method.init=1) {
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(linequality) != "character" && mode(linequality) != "name")
+ linequality = as.character(substitute(linequality))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(!is.Numeric(location))
+ stop("argument \"location\" must be numeric")
+ if(is.Numeric(iscale) && any(iscale <= 0))
+ stop("argument \"iscale\" must be positive")
+ if(is.Numeric(iinequality) && any(iinequality <= 0))
+ stop("argument \"iinequality\" must be positive")
+ if(is.Numeric(ishape) && any(ishape <= 0))
+ stop("argument \"ishape\" must be positive")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE) || method.init>2)
+ stop("bad input for argument \"method.init\"")
+ if(linequality == "nloge" && location != 0)
+ warning("The Burr distribution has location=0 and linequality=nloge")
+
+ new("vglmff",
+ blurb=c("Pareto(IV) distribution F(y)=1-[1+((y - ", location,
+ ")/scale)^(1/inequality)]^(-shape),",
+ "\n", " y > ",
+ location, ", scale > 0, inequality > 0, shape > 0,\n",
+ "Links: ", namesof("scale", lscale ), ", ",
+ namesof("inequality", linequality ), ", ",
+ namesof("shape", lshape ), "\n",
+ "Mean: location + scale * NA"), # zz
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .lscale, tag= FALSE),
+ namesof("inequality", .linequality, tag= FALSE),
+ namesof("shape", .lshape, tag= FALSE))
+ extra$location = location = .location
+ if(ncol(cbind(y)) != 1)
+ stop("the response must be a vector or a one-column matrix")
+ if(any(y <= location))
+ stop("the response must have values > than the \"location\" argument")
+ if(!length(etastart)) {
+ inequality.init = if(length(.iinequality)) .iinequality else 1
+ scale.init = if(length( .iscale)) .iscale else 1
+ shape.init = if(length( .ishape)) .ishape else NULL
+ if(!length(shape.init)) {
+ zedd = (y - location) / scale.init
+ if( .method.init == 1) {
+ A1 = weighted.mean(1/(1 + zedd^(1/inequality.init)), w=w)
+ A2 = weighted.mean(1/(1 + zedd^(1/inequality.init))^2, w=w)
+ } else {
+ A1 = median(1/(1 + zedd^(1/inequality.init )))
+ A2 = median(1/(1 + zedd^(1/inequality.init))^2)
+ }
+ shape.init = max(0.01, (2*A2-A1)/(A1-A2))
+ }
+ etastart=cbind(theta2eta(rep(scale.init, len=n), .lscale),
+ theta2eta(rep(inequality.init, len=n), .linequality),
+ theta2eta(rep(shape.init, len=n), .lshape))
+ }
+ }), list( .location=location, .lscale=lscale,
+ .linequality=linequality, .lshape=lshape, .method.init=method.init,
+ .iscale=iscale, .iinequality=iinequality, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ inequality = eta2theta(eta[,2], .linequality)
+ shape = eta2theta(eta[,3], .lshape)
+ location + Scale * NA
+ }, list( .lscale=lscale, .linequality=linequality, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link=c("scale"= .lscale, "inequality"= .linequality,
+ "shape"= .lshape)
+ misc$location = extra$location # Use this for prediction
+ }), list( .lscale=lscale, .linequality=linequality, .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ inequality = eta2theta(eta[,2], .linequality)
+ shape = eta2theta(eta[,3], .lshape)
+ zedd = (y - location) / Scale
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) - log(inequality) - log(Scale) + (1/inequality -1) *
+ log(zedd) - (shape+1) * log(1 + zedd^(1/inequality ))))
+ }, list( .lscale=lscale, .linequality=linequality, .lshape=lshape ))),
+ vfamily=c("paretoIV"),
+ deriv=eval(substitute(expression({
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ inequality = eta2theta(eta[,2], .linequality)
+ shape = eta2theta(eta[,3], .lshape)
+ zedd = (y - location) / Scale
+ temp100 = 1 + zedd^(1/inequality)
+ dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
+ dl.dinequality = ((log(zedd) * (shape - (1+shape)/temp100)) /
+ inequality - 1) / inequality
+ dl.dshape = -log(temp100) + 1/shape
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ dinequality.deta = dtheta.deta(inequality, .linequality)
+ dshape.deta = dtheta.deta(shape, .lshape)
+ w * cbind(dl.dscale * dscale.deta,
+ dl.dinequality * dinequality.deta,
+ dl.dshape * dshape.deta)
+ }), list( .lscale=lscale, .linequality=linequality, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ temp200 = digamma(shape) - digamma(1) - 1
+ d2scale.deta2 = shape / ((inequality*Scale)^2 * (shape+2))
+ d2inequality.deta2 = (shape * (temp200^2 + trigamma(shape) + trigamma(1)
+ ) + 2*(temp200+1)) / (inequality^2 * (shape+2))
+ d2shape.deta2 = 1 / shape^2
+ d2si.deta2 = (shape*(-temp200) -1) / (inequality^2 * Scale * (shape+2))
+ d2ss.deta2 = -1 / ((inequality*Scale) * (shape+1))
+ d2is.deta2 = temp200 / (inequality*(shape+1))
+ wz = matrix(0, n, dimm(M))
+ wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
+ wz[,iam(2,2,M)] = dinequality.deta^2 * d2inequality.deta2
+ wz[,iam(3,3,M)] = dshape.deta^2 * d2shape.deta2
+ wz[,iam(1,2,M)] = dscale.deta * dinequality.deta * d2si.deta2
+ wz[,iam(1,3,M)] = dscale.deta * dshape.deta * d2ss.deta2
+ wz[,iam(2,3,M)] = dinequality.deta * dshape.deta * d2is.deta2
+ w * wz
+ }), list( .lscale=lscale, .linequality=linequality, .lshape=lshape ))))
+}
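+
+# Comment-only usage sketch, simulating with rparetoIV() from above;
+# location is held fixed at its argument value, not estimated:
+# y = rparetoIV(2000, location=0, scale=1, inequality=0.5, shape=3)
+# fit = vglm(y ~ 1, paretoIV, trace=TRUE)
+# Coef(fit)    # should be near c(1, 0.5, 3)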
+
+paretoIII = function(location=0,
+ lscale="loge",
+ linequality="loge",
+ iscale=NULL, iinequality=NULL) {
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(linequality) != "character" && mode(linequality) != "name")
+ linequality = as.character(substitute(linequality))
+ if(!is.Numeric(location))
+ stop("argument \"location\" must be numeric")
+ if(is.Numeric(iscale) && any(iscale <= 0))
+ stop("argument \"iscale\" must be positive")
+ if(is.Numeric(iinequality) && any(iinequality <= 0))
+ stop("argument \"iinequality\" must be positive")
+
+ new("vglmff",
+ blurb=c("Pareto(III) distribution F(y)=1-[1+((y - ", location,
+ ")/scale)^(1/inequality)]^(-1),",
+ "\n", " y > ",
+ location, ", scale > 0, inequality > 0, \n",
+ "Links: ", namesof("scale", lscale ), ", ",
+ namesof("inequality", linequality ), "\n",
+ "Mean: location + scale * NA"), # zz
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .lscale, tag= FALSE),
+ namesof("inequality", .linequality, tag= FALSE))
+ extra$location = location = .location
+ if(ncol(cbind(y)) != 1)
+ stop("the response must be a vector or a one-column matrix")
+ if(any(y <= location))
+ stop("the response must have values > than the \"location\" argument")
+ if(!length(etastart)) {
+ inequality.init = if(length(.iinequality)) .iinequality else NULL
+ scale.init = if(length( .iscale)) .iscale else NULL
+ if(!length(inequality.init) || !length(scale.init)) {
+ probs = (1:4)/5
+ ytemp = quantile(x=log(y-location), probs=probs)
+ fittemp = lsfit(x=logit(probs), y=ytemp, int=TRUE)
+ if(!length(inequality.init))
+ inequality.init = max(fittemp$coef["X"], 0.01)
+ if(!length(scale.init))
+ scale.init = exp(fittemp$coef["Intercept"])
+ }
+ etastart=cbind(theta2eta(rep(scale.init, len=n), .lscale),
+ theta2eta(rep(inequality.init, len=n), .linequality))
+ }
+ }), list( .location=location, .lscale=lscale, .linequality=linequality,
+ .iscale=iscale, .iinequality=iinequality ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ inequality = eta2theta(eta[,2], .linequality)
+ location + Scale * NA
+ }, list( .lscale=lscale, .linequality=linequality ))),
+ last=eval(substitute(expression({
+ misc$link=c("scale"= .lscale, "inequality"= .linequality)
+ misc$location = extra$location # Use this for prediction
+ }), list( .lscale=lscale, .linequality=linequality ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals=FALSE, eta, extra=NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ inequality = eta2theta(eta[,2], .linequality)
+ zedd = (y - location) / Scale
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-log(inequality) - log(Scale) + (1/inequality -1) *
+ log(zedd) - (1+1) * log(1 + zedd^(1/inequality ))))
+ }, list( .lscale=lscale, .linequality=linequality ))),
+ vfamily=c("paretoIII"),
+ deriv=eval(substitute(expression({
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ inequality = eta2theta(eta[,2], .linequality)
+ shape = 1
+ zedd = (y - location) / Scale
+ temp100 = 1 + zedd^(1/inequality)
+ dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
+ dl.dinequality = ((log(zedd) * (shape - (1+shape)/temp100)) /
+ inequality - 1) / inequality
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ dinequality.deta = dtheta.deta(inequality, .linequality)
+ w * cbind(dl.dscale * dscale.deta,
+ dl.dinequality * dinequality.deta)
+ }), list( .lscale=lscale, .linequality=linequality ))),
+ weight=eval(substitute(expression({
+ d2scale.deta2 = 1 / ((inequality*Scale)^2 * 3)
+ d2inequality.deta2 = (1 + 2* trigamma(1)) / (inequality^2 * 3)
+ wz = matrix(0, n, M) # It is diagonal
+ wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
+ wz[,iam(2,2,M)] = dinequality.deta^2 * d2inequality.deta2
+ w * wz
+ }), list( .lscale=lscale, .linequality=linequality ))))
+}
+
+
+paretoII = function(location=0,
+ lscale="loge",
+ lshape="loge",
+ iscale=NULL, ishape=NULL) {
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(!is.Numeric(location))
+ stop("argument \"location\" must be numeric")
+ if(is.Numeric(iscale) && any(iscale <= 0))
+ stop("argument \"iscale\" must be positive")
+ if(is.Numeric(ishape) && any(ishape <= 0))
+ stop("argument \"ishape\" must be positive")
+
+ new("vglmff",
+ blurb=c("Pareto(II) distribution F(y)=1-[1+(y - ", location,
+ ")/scale]^(-shape),",
+ "\n", " y > ",
+ location, ", scale > 0, shape > 0,\n",
+ "Links: ", namesof("scale", lscale ), ", ",
+ namesof("shape", lshape ), "\n",
+ "Mean: location + scale * NA"), # zz
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("scale", .lscale, tag= FALSE),
+ namesof("shape", .lshape, tag= FALSE))
+ extra$location = location = .location
+ if(ncol(cbind(y)) != 1)
+ stop("the response must be a vector or a one-column matrix")
+ if(any(y <= location))
+ stop("the response must have values > than the \"location\" argument")
+ if(!length(etastart)) {
+ scale.init = if(length( .iscale)) .iscale else NULL
+ shape.init = if(length( .ishape)) .ishape else NULL
+ if(!length(shape.init) || !length(scale.init)) {
+ probs = (1:4)/5
+ scale.init.0 = 1 # zz; have to put some value here...
+ ytemp = quantile(x=log(y-location+scale.init.0), probs=probs)
+ fittemp = lsfit(x=log(1-probs), y=ytemp, int=TRUE)
+ if(!length(shape.init))
+ shape.init = max(-1/fittemp$coef["X"], 0.01)
+ if(!length(scale.init))
+ scale.init = exp(fittemp$coef["Intercept"])
+ }
+ etastart=cbind(theta2eta(rep(scale.init, len=n), .lscale),
+ theta2eta(rep(shape.init, len=n), .lshape))
+ }
+ }), list( .location=location, .lscale=lscale,
+ .lshape=lshape, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ shape = eta2theta(eta[,2], .lshape)
+ location + Scale * NA
+ }, list( .lscale=lscale, .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link=c("scale"= .lscale, "shape"= .lshape)
+ misc$location = extra$location # Use this for prediction
+ }), list( .lscale=lscale, .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ shape = eta2theta(eta[,2], .lshape)
+ zedd = (y - location) / Scale
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) - log(Scale) - (shape+1) * log(1 + zedd )))
+ }, list( .lscale=lscale, .lshape=lshape ))),
+ vfamily=c("paretoII"),
+ deriv=eval(substitute(expression({
+ location = extra$location
+ Scale = eta2theta(eta[,1], .lscale)
+ shape = eta2theta(eta[,2], .lshape)
+ zedd = (y - location) / Scale
+ temp100 = 1 + zedd
+ dl.dscale = (shape - (1+shape) / temp100) / (1 * Scale)
+ dl.dshape = -log(temp100) + 1/shape
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ dshape.deta = dtheta.deta(shape, .lshape)
+ w * cbind(dl.dscale * dscale.deta,
+ dl.dshape * dshape.deta)
+ }), list( .lscale=lscale, .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ d2scale.deta2 = shape / (Scale^2 * (shape+2))
+ d2shape.deta2 = 1 / shape^2
+ d2ss.deta2 = -1 / (Scale * (shape+1))
+ wz = matrix(0, n, dimm(M))
+ wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
+ wz[,iam(2,2,M)] = dshape.deta^2 * d2shape.deta2
+ wz[,iam(1,2,M)] = dscale.deta * dshape.deta * d2ss.deta2
+ w * wz
+ }), list( .lscale=lscale, .lshape=lshape ))))
+}
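+
+# Comment-only usage sketch (kept as a comment so nothing executes at
+# package load). It exercises paretoII() via inverse-CDF simulation, since
+# F(y) = 1 - (1 + (y-location)/scale)^(-shape); location=1, scale=2 and
+# shape=3 below are arbitrary illustrative values, not package defaults.
+# set.seed(1)
+# U = runif(2000)
+# pdat = data.frame(y = 1 + 2 * ((1-U)^(-1/3) - 1))
+# Coef(vglm(y ~ 1, paretoII(location=1), data=pdat)) # near c(scale=2, shape=3)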
+
+
+
+pareto1 = function(lshape="loge", location=NULL) {
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(is.Numeric(location) && location <= 0)
+ stop("argument \"location\" must be positive")
+
+ new("vglmff",
+ blurb=c("Pareto distribution f(y) = shape * location^shape / y^(shape+1),",
+ " y>location>0, shape>0\n",
+ "Link: ", namesof("shape", lshape), "\n", "\n",
+ "Mean: location*shape/(shape-1) for shape>1"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("shape", .lshape, tag= FALSE)
+ locationhat = if(!length( .location)) {
+ locationEstimated = TRUE
+ min(y)
+ } else {
+ locationEstimated = FALSE
+ .location
+ }
+ if(any(y < locationhat))
+ stop("the value of location is too high (requires 0 < location < min(y))")
+ extra$location = locationhat
+ extra$locationEstimated = locationEstimated
+ if(!length(etastart)) {
+ k.init = (y + 1/8) / (y - locationhat + 1/8)
+ etastart = theta2eta(k.init, .lshape)
+ }
+ }), list( .lshape=lshape, .location=location ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ k = eta2theta(eta, .lshape)
+ location = extra$location
+ ifelse(k>1, k * location / (k-1), NA)
+ }, list( .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(k= .lshape)
+ misc$location = extra$location # Use this for prediction
+ }), list( .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ k = eta2theta(eta, .lshape)
+ location = extra$location
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(k) + k * log(location) - (k+1) * log(y )))
+ }, list( .lshape=lshape ))),
+ vfamily=c("pareto1"),
+ deriv=eval(substitute(expression({
+ location = extra$location
+ k = eta2theta(eta, .lshape)
+ dl.dk = 1/k + log(location/y)
+ dk.deta = dtheta.deta(k, .lshape)
+ w * dl.dk * dk.deta
+ }), list( .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ ed2l.dk2 = 1 / k^2
+ wz = w * dk.deta^2 * ed2l.dk2
+ wz
+ }), list( .lshape=lshape ))))
+}
+
+
+dpareto = function(x, location, shape) {
+ if(any(location <= 0)) stop("argument \"location\" must be positive")
+ if(any(shape <= 0)) stop("argument \"shape\" must be positive")
+ L = max(length(x), length(location), length(shape))
+ x = rep(x, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
+ ifelse(x>location, shape * location^shape / x^(shape+1), 0)
+}
+
+ppareto = function(q, location, shape) {
+ if(any(location <= 0)) stop("argument \"location\" must be positive")
+ if(any(shape <= 0)) stop("argument \"shape\" must be positive")
+ L = max(length(q), length(location), length(shape))
+ q = rep(q, len=L); location = rep(location, len=L); shape= rep(shape, len=L)
+ ifelse(q > location, 1 - (location/q)^shape, 0)
+}
+
+qpareto = function(p, location, shape) {
+ if(any(location <= 0)) stop("argument \"location\" must be positive")
+ if(any(shape <= 0)) stop("argument \"shape\" must be positive")
+ if(any(p <= 0) || any(p >= 1)) stop("argument \"p\" must be between 0 and 1")
+ location / (1 - p)^(1/shape)
+}
+
+rpareto = function(n, location, shape) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(any(location <= 0)) stop("argument \"location\" must be positive")
+ if(any(shape <= 0)) stop("argument \"shape\" must be positive")
+ location / runif(n)^(1/shape)
+}
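+
+# Comment-only consistency sketch for the d/p/q/rpareto functions above
+# (arbitrary parameter values): qpareto() should invert ppareto(), and
+# pareto1() should recover the shape from rpareto() draws.
+# pp = (1:9)/10
+# max(abs(ppareto(qpareto(pp, location=2, shape=3),
+#                 location=2, shape=3) - pp)) # should be ~0
+# set.seed(1)
+# pdat = data.frame(y = rpareto(500, location=2, shape=3))
+# Coef(vglm(y ~ 1, pareto1(location=2), data=pdat)) # shape near 3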
+
+
+tpareto1 = function(lower, upper, lshape="loge", ishape=NULL) {
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(!is.Numeric(lower, posit=TRUE, allow=1))
+ stop("bad input for argument \"lower\"")
+ if(!is.Numeric(upper, posit=TRUE, allow=1))
+ stop("bad input for argument \"upper\"")
+ if(lower >= upper)
+ stop("lower < upper is required")
+ if(length(ishape) && !is.Numeric(ishape, posit=TRUE))
+ stop("bad input for argument \"ishape\"")
+
+ new("vglmff",
+ blurb=c("Truncated Pareto distribution f(y) = shape * lower^shape /",
+ "(y^(shape+1) * (1-(lower/upper)^shape)),",
+ " 0 < lower < y < upper < Inf, shape>0\n",
+ "Link: ", namesof("shape", lshape), "\n", "\n",
+ "Mean: shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
+ " ((1-shape) * (1-(lower/upper)^shape))"),
+ initialize=eval(substitute(expression({
+ if(ncol(cbind(y)) != 1)
+ stop("response must be a vector or a 1-column matrix")
+ predictors.names = namesof("shape", .lshape, tag= FALSE)
+ if(any(y <= .lower))
+ stop(paste("the value of argument \"lower\" is too high",
+ "(requires 0 < lower < min(y))"))
+ extra$lower = .lower
+ if(any(y >= .upper))
+ stop(paste("the value of argument \"upper\" is too low",
+ "(requires max(y) < upper)"))
+ extra$upper = .upper
+ if(!length(etastart)) {
+ shape.init = if(is.Numeric( .ishape)) 0 * y + .ishape else
+ (y + 1/8) / (y - .lower + 1/8)
+ etastart = theta2eta(shape.init, .lshape)
+ }
+ }), list( .ishape=ishape, .lshape=lshape, .lower=lower, .upper=upper ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta, .lshape)
+ myratio = .lower / .upper
+ constprop = shape * .lower^shape / (1 - myratio^shape)
+ constprop * ( .upper^(1-shape) - .lower^(1-shape)) / (1-shape)
+ }, list( .lshape=lshape, .lower=lower, .upper=upper ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .lshape)
+ misc$lower = extra$lower
+ misc$upper = extra$upper
+ misc$expected = TRUE
+ }), list( .lshape=lshape, .lower=lower, .upper=upper ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ shape = eta2theta(eta, .lshape)
+ myratio = .lower / .upper
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) + shape * log( .lower) - (shape+1) * log(y) -
+ log(1 - myratio^shape )))
+ }, list( .lshape=lshape, .lower=lower, .upper=upper ))),
+ vfamily=c("tpareto1"),
+ deriv=eval(substitute(expression({
+ shape = eta2theta(eta, .lshape)
+ myratio = .lower / .upper
+ myratio2 = myratio^shape
+ tmp330 = myratio2 * log(myratio) / (1 - myratio2)
+ dl.dshape = 1/shape + log( .lower) - log(y) + tmp330
+ dshape.deta = dtheta.deta(shape, .lshape)
+ w * dl.dshape * dshape.deta
+ }), list( .lshape=lshape, .lower=lower, .upper=upper ))),
+ weight=eval(substitute(expression({
+ ed2l.dshape2 = 1 / shape^2 - tmp330^2 / myratio2
+ wz = w * dshape.deta^2 * ed2l.dshape2
+ wz
+ }), list( .lshape=lshape, .lower=lower, .upper=upper ))))
+}
+
+dtpareto = function(x, lower, upper, shape) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
+ L = max(length(x), length(lower), length(upper), length(shape))
+ x = rep(x, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
+ shape= rep(shape, len=L)
+ ifelse(x > lower & x < upper,
+ shape * lower^shape / (x^(shape+1) * (1-(lower/upper)^shape)), 0)
+}
+
+ptpareto = function(q, lower, upper, shape) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
+ L = max(length(q), length(lower), length(upper), length(shape))
+ q = rep(q, len=L); lower = rep(lower, len=L); upper = rep(upper, len=L);
+ shape= rep(shape, len=L)
+ ans = q * 0
+ ans[q > lower & q < upper] = (1-(lower/q)^shape) / (1-(lower/upper)^shape)
+ ans[q >= upper] = 1
+ ans
+}
+
+qtpareto = function(p, lower, upper, shape) {
+ if(!is.Numeric(p, posit=TRUE)) stop("bad input for argument \"p\"")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
+ if(max(p) >= 1) stop("argument \"p\" must be between 0 and 1")
+ lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
+}
+
+rtpareto = function(n, lower, upper, shape) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(lower, pos=TRUE)) stop("argument \"lower\" must be positive")
+ if(!is.Numeric(upper, pos=TRUE)) stop("argument \"upper\" must be positive")
+ if(!is.Numeric(shape, pos=TRUE)) stop("argument \"shape\" must be positive")
+ lower / (1 - runif(n)*(1-(lower/upper)^shape))^(1/shape)
+}
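+
+# Comment-only sanity sketch for the truncated Pareto functions above
+# (arbitrary lower=1, upper=9, shape=2): qtpareto() should invert
+# ptpareto(), and tpareto1() should roughly recover the shape.
+# pp = (1:9)/10
+# max(abs(ptpareto(qtpareto(pp, lower=1, upper=9, shape=2),
+#                  lower=1, upper=9, shape=2) - pp)) # should be ~0
+# set.seed(1)
+# tdat = data.frame(y = rtpareto(1000, lower=1, upper=9, shape=2))
+# Coef(vglm(y ~ 1, tpareto1(lower=1, upper=9), data=tdat)) # shape near 2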
+
+
+erf = function(x)
+ 2 * pnorm(x * sqrt(2)) - 1
+
+erfc = function(x)
+ 2 * pnorm(x * sqrt(2), lower=FALSE)
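+
+# Comment-only checks of the two identities above:
+# erf(1) # approximately 0.8427007
+# erf(2) + erfc(2) # exactly 1, since erfc(x) = 1 - erf(x)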
+
+
+
+wald <- function(link.lambda="loge", init.lambda=NULL)
+{
+ if(mode(link.lambda) != "character" && mode(link.lambda) != "name")
+ link.lambda = as.character(substitute(link.lambda))
+
+ new("vglmff",
+ blurb=c("Standard Wald distribution\n\n",
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
+ "\n",
+ "Link: ",
+ namesof("lambda", link.lambda), "\n",
+ "Mean: ", "1\n",
+ "Variance: 1 / lambda"),
+ initialize=eval(substitute(expression({
+ if(any(y <= 0)) stop("Require the response to have positive values")
+ predictors.names = c(namesof("lambda", .link.lambda, short= TRUE))
+ if(!length(etastart)) {
+ initlambda = if(length( .init.lambda)) .init.lambda else
+ 1 / (0.01 + (y-1)^2)
+ initlambda = rep(initlambda, len=n)
+ etastart = cbind(theta2eta(initlambda, link=.link.lambda))
+ }
+ }), list( .link.lambda=link.lambda,
+ .init.lambda=init.lambda ))),
+ inverse=function(eta, extra=NULL) {
+ 0*eta + 1
+ },
+ last=eval(substitute(expression({
+ misc$link = c(lambda = .link.lambda)
+ }), list( .link.lambda=link.lambda ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ lambda = eta2theta(eta, link=.link.lambda)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w*(0.5 * log(lambda / (2 * pi * y^3)) -
+ lambda *(y-1)^2 / (2* y )))
+ }, list( .link.lambda=link.lambda ))),
+ vfamily="wald",
+ deriv=eval(substitute(expression({
+ lambda = eta2theta(eta, link=.link.lambda)
+ dl.dlambda = 0.5 / lambda + 1 - 0.5 * (y + 1/y)
+ dlambda.deta = dtheta.deta(theta=lambda, link=.link.lambda)
+ w * cbind(dl.dlambda * dlambda.deta)
+ }), list( .link.lambda=link.lambda ))),
+ weight=eval(substitute(expression({
+ d2l.dlambda2 = 0.5 / (lambda^2)
+ w * cbind(dlambda.deta^2 * d2l.dlambda2)
+ }), list( .link.lambda=link.lambda ))))
+}
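+
+# Comment-only fitting sketch for wald(). rwald1() is a hypothetical helper,
+# not part of VGAM: it simulates an inverse Gaussian with unit mean via the
+# Michael-Schucany-Haas transformation.
+# rwald1 = function(n, lambda) {
+#   nu = rnorm(n)^2 # chi-squared(1) draws
+#   yy = 1 + (nu - sqrt(4*lambda*nu + nu^2)) / (2*lambda)
+#   ifelse(runif(n) <= 1/(1+yy), yy, 1/yy) # accept yy, else take 1/yy
+# }
+# set.seed(1)
+# wdat = data.frame(y = rwald1(1000, lambda=2))
+# Coef(vglm(y ~ 1, wald(init.lambda=1), data=wdat)) # lambda near 2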
+
+
+expexp = function(lshape="loge", lscale="loge",
+ ishape=1.1, iscale=NULL, # ishape cannot be 1
+ tolerance = 1.0e-6,
+ zero=NULL) {
+
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+ if(!is.Numeric(tolerance, posit=TRUE, allow=1) || tolerance>1.0e-2)
+ stop("bad input for argument \"tolerance\"")
+ if(!is.Numeric(ishape, posit=TRUE))
+ stop("bad input for argument \"ishape\"")
+ ishape[ishape==1] = 1.1 # Fails in @deriv
+
+ new("vglmff",
+ blurb=c("Exponentiated Exponential Distribution\n",
+ "Links: ",
+ namesof("shape", lshape), ", ",
+ namesof("scale", lscale),"\n",
+ "Mean: (digamma(shape+1)-digamma(1))/scale"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("shape", .lshape, short=TRUE),
+ namesof("scale", .lscale, short=TRUE))
+ if(!length(etastart)) {
+ shape.init = if(!is.Numeric( .ishape, posit=TRUE))
+ stop("argument \"ishape\" must be positive") else
+ rep(.ishape, len=n)
+ scale.init = if(length( .iscale)) rep(.iscale, len=n) else
+ (digamma(shape.init+1) - digamma(1)) / (y+1/8)
+ scale.init = rep(weighted.mean(scale.init, w=w), len=n)
+ etastart = cbind(theta2eta(shape.init, .lshape),
+ theta2eta(scale.init, .lscale))
+ }
+ }), list( .lshape=lshape, .lscale=lscale, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta[,1], .lshape)
+ scale = eta2theta(eta[,2], .lscale)
+ (digamma(shape+1)-digamma(1)) / scale
+ }, list( .lshape=lshape, .lscale=lscale ))),
+ last=eval(substitute(expression({
+ misc$link = c("shape"= .lshape, "scale"= .lscale)
+ misc$expected = TRUE
+ }), list( .lshape=lshape, .lscale=lscale ))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ shape = eta2theta(eta[,1], .lshape)
+ scale = eta2theta(eta[,2], .lscale)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) + log(scale) +
+ (shape-1)*log(1-exp(-scale*y)) - scale*y))
+ }, list( .lscale=lscale, .lshape=lshape ))),
+ vfamily=c("expexp"),
+ deriv=eval(substitute(expression({
+ shape = eta2theta(eta[,1], .lshape)
+ scale = eta2theta(eta[,2], .lscale)
+ dl.dscale = 1/scale + (shape-1)*y*exp(-scale*y) / (1-exp(-scale*y)) - y
+ dl.dshape = 1/shape + log(1-exp(-scale*y))
+ dscale.deta = dtheta.deta(scale, .lscale)
+ dshape.deta = dtheta.deta(shape, .lshape)
+ w * cbind(dl.dshape * dshape.deta, dl.dscale * dscale.deta)
+ }), list( .lshape=lshape, .lscale=lscale ))),
+ weight=eval(substitute(expression({
+ d11 = 1 / shape^2 # True for all shape
+ d22 = d12 = rep(as.numeric(NA), len=n)
+ index2 = abs(shape - 2) > .tolerance # index2 is TRUE iff shape != 2
+ largeno = 10000
+ if(any(index2)) {
+ Shape = shape[index2]
+ Shape[abs(Shape-1) < .tolerance] = 1.001 # digamma(0) is undefined
+ Scale = scale[index2]
+ tmp200 = trigamma(1)-trigamma(Shape-1) +
+ (digamma(Shape-1)-digamma(1))^2 # Fails when Shape==1
+ tmp300 = trigamma(1)-trigamma(Shape)+(digamma(Shape)-digamma(1))^2 # cf. tmp200
+ d22[index2] = (1 + Shape*(Shape-1)*tmp200/(Shape-2)) / Scale^2 +
+ Shape*tmp300 / Scale^2
+ }
+ if(any(!index2)) {
+ Scale = scale[!index2]
+ d22[!index2] = (1 + 4 * sum(1/(2 + (0:largeno))^3)) / Scale^2
+ }
+
+ index1 = abs(shape - 1) > .tolerance # index1 = shape != 1
+ if(any(index1)) {
+ Shape = shape[index1]
+ Scale = scale[index1]
+ d12[index1] = -(Shape*(digamma(Shape)-digamma(1))/(Shape-1) -
+ digamma(Shape+1) + digamma(1)) / Scale
+ }
+ if(any(!index1)) {
+ Scale = scale[!index1]
+ d12[!index1] = -sum(1/(2 + (0:largeno))^2) / Scale
+ }
+ wz = matrix(0, n, dimm(M))
+ wz[,iam(1,1,M)] = dshape.deta^2 * d11
+ wz[,iam(2,2,M)] = dscale.deta^2 * d22
+ wz[,iam(1,2,M)] = dscale.deta * dshape.deta * d12
+ w * wz
+ }), list( .tolerance=tolerance ))))
+}
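+
+# Comment-only fitting sketch for expexp(). Since F(y) =
+# (1-exp(-scale*y))^shape, inverse-CDF simulation is straightforward;
+# shape=2 and scale=0.5 below are arbitrary illustrative values.
+# set.seed(1)
+# U = runif(1000)
+# edat = data.frame(y = -log(1 - U^(1/2)) / 0.5)
+# Coef(vglm(y ~ 1, expexp(ishape=2), data=edat)) # near c(shape=2, scale=0.5)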
+
+
+expexp1 = function(lscale="loge",
+ iscale=NULL,
+ ishape=1) {
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+
+ new("vglmff",
+ blurb=c("Exponentiated Exponential Distribution",
+ " (profile likelihood estimation)\n",
+ "Links: ",
+ namesof("scale", lscale), "\n",
+ "Mean: (digamma(shape+1)-digamma(1))/scale"),
+ initialize=eval(substitute(expression({
+ predictors.names = namesof("scale", .lscale, short=TRUE)
+ if(length(w) != n || !is.Numeric(w, integer=TRUE, posit=TRUE))
+ stop("weights must be a vector of positive integers")
+ if(!intercept.only)
+ stop("this family function only works for an intercept-only, i.e., y ~ 1")
+ extra$yvector = y
+ extra$sumw = sum(w)
+ extra$w = w
+ if(!length(etastart)) {
+ shape.init = if(!is.Numeric( .ishape, posit=TRUE))
+ stop("argument \"ishape\" must be positive") else
+ rep(.ishape, len=n)
+ scaleinit = if(length( .iscale)) rep(.iscale, len=n) else
+ (digamma(shape.init+1) - digamma(1)) / (y+1/8)
+ etastart = cbind(theta2eta(scaleinit, .lscale))
+ }
+ }), list( .lscale=lscale, .iscale=iscale, .ishape=ishape ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ scale = eta2theta(eta, .lscale)
+ temp7 = 1 - exp(-scale*extra$yvector)
+ shape = -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
+ (digamma(shape+1)-digamma(1)) / scale
+ }, list( .lscale=lscale ))),
+ last=eval(substitute(expression({
+ misc$link = c("scale"= .lscale)
+ temp7 = 1 - exp(-scale*y)
+ shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ misc$shape = shape # Store the ML estimate here
+ misc$pooled.weight = pooled.weight
+ }), list( .lscale=lscale ))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ scale = eta2theta(eta, .lscale)
+ temp7 = 1 - exp(-scale*y)
+ shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) + log(scale) +
+ (shape-1)*log(1-exp(-scale*y)) - scale*y))
+ }, list( .lscale=lscale ))),
+ vfamily=c("expexp1"),
+ deriv=eval(substitute(expression({
+ scale = eta2theta(eta, .lscale)
+ temp6 = exp(-scale*y)
+ temp7 = 1-temp6
+ shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ d1 = 1/scale + (shape-1)*y*temp6/temp7 - y
+ w * cbind(d1 * dtheta.deta(scale, .lscale))
+ }), list( .lscale=lscale ))),
+ weight=eval(substitute(expression({
+ d11 = 1/scale^2 + y*(temp6/temp7^2) * ((shape-1) *
+ (y*temp7+temp6) - y*temp6 / (log(temp7))^2)
+ wz = matrix(0, n, dimm(M))
+ wz[,iam(1,1,M)] = dtheta.deta(scale, .lscale)^2 * d11 -
+ d2theta.deta2(scale, .lscale) * d1
+
+ if(FALSE && intercept.only) {
+ sumw = sum(w)
+ for(i in 1:ncol(wz))
+ wz[,i] = sum(wz[,i]) / sumw
+ pooled.weight = TRUE
+ wz = w * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+ w * wz
+ }), list( .lscale=lscale ))))
+}
+
+
+
+betaffqn.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+
+
+betaffqn = function(link="loge", i1=NULL, i2=NULL, trim=0.05, A=0, B=1)
+{
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ if(!is.Numeric(A, allow=1) || !is.Numeric(B, allow=1) || A >= B)
+ stop("A must be < B, and both must be of length one")
+ stdbeta = (A==0 && B==1) # stdbeta==T iff standard beta distribution
+
+ new("vglmff",
+ blurb=c("Two-parameter Beta distribution\n",
+ if(stdbeta)
+ "y^(shape1-1) * (1-y)^(shape2-1), 0<=y<=1, shape1>0, shape2>0\n\n"
+ else
+ paste("(y-",A,")^(shape1-1) * (",B,
+ "-y)^(shape2-1), ",A,"<=y<=",B," shape1>0, shape2>0\n\n", sep=""),
+ "Links: ",
+ namesof("shape1", link), ", ",
+ namesof("shape2", link)),
+ initialize=eval(substitute(expression({
+ if(min(y) <= .A || max(y) >= .B)
+ stop("data not within (A, B)")
+ predictors.names = c(namesof("shape1", .link, short= TRUE),
+ namesof("shape2", .link, short= TRUE))
+ if(is.numeric(.i1) && is.numeric(.i2)) {
+ vec = c(.i1, .i2)
+ vec = c(theta2eta(vec[1], .link), theta2eta(vec[2], .link))
+ etastart = matrix(vec, n, 2, byrow= TRUE)
+ }
+
+ # For QN update below
+ if(length(w) != n || !is.Numeric(w, posit=TRUE))
+ stop("weights must be a vector of positive weights")
+
+ if(!length(etastart)) {
+ mu1d = mean(y, trim=.trim)
+ uu = (mu1d-.A) / (.B - .A)
+ DD = (.B - .A)^2
+ pinit = uu^2 * (1-uu)*DD/var(y) - uu # But var(y) is not robust
+ qinit = pinit * (1-uu) / uu
+ etastart = matrix(theta2eta(c(pinit,qinit), .link),n,2,byrow=TRUE)
+ }
+ }), list( .link=link, .i1=i1, .i2=i2, .trim=trim, .A=A, .B=B ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shapes = eta2theta(eta, .link)
+ .A + (.B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ }, list( .link=link, .A=A, .B=B ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape1 = .link, shape2 = .link)
+ misc$limits = c(.A, .B)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list( .link=link, .A=A, .B=B ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals= FALSE, eta, extra=NULL){
+ shapes = eta2theta(eta, .link)
+ temp = if(is.R()) lbeta(shapes[,1], shapes[,2]) else
+ lgamma(shapes[,1]) + lgamma(shapes[,2]) -
+ lgamma(shapes[,1]+shapes[,2])
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log(.B-y) - temp -
+ (shapes[,1]+shapes[,2]-1)*log(.B-.A )))
+ }, list( .link=link, .A=A, .B=B ))),
+ vfamily="betaffqn",
+ deriv=eval(substitute(expression({
+ shapes = eta2theta(eta, .link)
+ dshapes.deta = dtheta.deta(shapes, .link)
+ dl.dshapes = cbind(log(y-.A), log(.B-y)) - digamma(shapes) +
+ digamma(shapes[,1] + shapes[,2]) - log(.B - .A)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * dl.dshapes * dshapes.deta
+ derivnew
+ }), list( .link=link, .A=A, .B=B ))),
+ weight=expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }))
+}
+
+
+
+logistic2 = function(llocation="identity",
+ lscale="loge",
+ ilocation=NULL, iscale=NULL,
+ method.init=1, zero=NULL) {
+ if(mode(llocation) != "character" && mode(llocation) != "name")
+ llocation = as.character(substitute(llocation))
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Two-parameter logistic distribution\n\n",
+ "Links: ",
+ namesof("location", llocation), ", ",
+ namesof("scale", lscale),
+ "\n", "\n",
+ "Mean: location", "\n",
+ "Variance: (pi*scale)^2 / 3"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", .llocation, tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE))
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ location.init = y
+ scale.init = sqrt(3) * sd(y) / pi
+ } else {
+ location.init = median(rep(y, w))
+ scale.init = sqrt(3 * sum(w*(y-location.init)^2) / sum(w)) / pi
+ }
+ location.init = if(length(.ilocation)) rep(.ilocation, len=n) else
+ rep(location.init, len=n)
+ if(.llocation == "loge") location.init = abs(location.init) + 0.001
+ scale.init = if(length(.iscale)) rep(.iscale, len=n) else
+ rep(scale.init, len=n) # use the data-based initial value computed above
+ etastart = cbind(theta2eta(location.init, .llocation),
+ theta2eta(scale.init, .lscale))
+ }
+ }), list( .method.init=method.init, .ilocation=ilocation,
+ .llocation=llocation, .iscale=iscale, .lscale=lscale ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], .llocation)
+ }, list( .llocation=llocation ))),
+ last=eval(substitute(expression({
+ misc$link = c(location=.llocation, scale= .lscale)
+ }), list( .llocation=llocation, .lscale=lscale ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ location = eta2theta(eta[,1], .llocation)
+ Scale = eta2theta(eta[,2], .lscale)
+ zedd = (y-location) / Scale
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-zedd - 2 * log(1+exp(-zedd)) - log(Scale )))
+ }, list( .llocation=llocation, .lscale=lscale ))),
+ vfamily=c("logistic2"),
+ deriv=eval(substitute(expression({
+ location = eta2theta(eta[,1], .llocation)
+ Scale = eta2theta(eta[,2], .lscale)
+ zedd = (y-location) / Scale
+ ezedd = exp(-zedd)
+ dl.dlocation = (1-ezedd) / ((1 + ezedd) * Scale)
+ dlocation.deta = dtheta.deta(location, .llocation)
+ dl.dscale = zedd * (1-ezedd) / ((1 + ezedd) * Scale) - 1/Scale
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ w * cbind(dl.dlocation * dlocation.deta,
+ dl.dscale * dscale.deta)
+ }), list( .llocation=llocation, .lscale=lscale ))),
+ weight=eval(substitute(expression({
+ d2l.location2 = 1 / (3*Scale^2)
+ d2l.dscale2 = (3 + pi^2) / (9*Scale^2)
+ wz = matrix(as.numeric(NA), nrow=n, ncol=M) # diagonal
+ wz[,iam(1,1,M)] = d2l.location2 * dlocation.deta^2
+ wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
+ w * wz
+ }), list( .llocation=llocation, .lscale=lscale ))))
+}
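+
+# Comment-only fitting sketch for logistic2(), using base R's rlogis():
+# set.seed(1)
+# ldat = data.frame(y = rlogis(500, location=3, scale=2))
+# Coef(vglm(y ~ 1, logistic2(), data=ldat)) # near c(location=3, scale=2)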
+
+
+
+
+
+if(FALSE)
+laplace.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+
+if(FALSE)
+laplace = function(lscale="loge",
+ ilocation=NULL, iscale=NULL,
+ method.init=1, zero=NULL) {
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Two-parameter Laplace distribution\n\n",
+ "Links: ",
+ namesof("location", "identity"), ", ",
+ namesof("scale", lscale),
+ "\n", "\n",
+ "Mean: location", "\n",
+ "Variance: 2*scale^2"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", "identity", tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE))
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ location.init = median(y)
+ scale.init = sqrt(var(y) / 2)
+ } else {
+ location.init = y
+ scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
+ }
+ location.init = if(length(.ilocation)) rep(.ilocation, len=n) else
+ rep(location.init, len=n)
+ scale.init = if(length(.iscale)) rep(.iscale, len=n) else
+ rep(1, len=n)
+ etastart = cbind(theta2eta(location.init, "identity"),
+ theta2eta(scale.init, .lscale))
+ }
+ }), list( .method.init=method.init, .ilocation=ilocation,
+ .iscale=iscale, .lscale=lscale ))),
+ inverse=function(eta, extra=NULL) {
+ eta[,1]
+ },
+ last=eval(substitute(expression({
+ misc$link = c(location="identity", scale= .lscale)
+ misc$expected = FALSE
+ misc$BFGS = TRUE
+ }), list( .lscale=lscale ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ location = eta2theta(eta[,1], "identity")
+ Scale = eta2theta(eta[,2], .lscale)
+ zedd = abs(y-location) / Scale
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (-zedd - log(Scale )))
+ }, list( .lscale=lscale ))),
+ vfamily=c("laplace"),
+ deriv=eval(substitute(expression({
+ location = eta2theta(eta[,1], "identity")
+ Scale = eta2theta(eta[,2], .lscale)
+ zedd = abs(y-location) / Scale
+ dl.dlocation = sign(y-location) / Scale
+ dlocation.deta = dtheta.deta(location, "identity")
+ dl.dscale = zedd / Scale - 1/Scale
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.dlocation * dlocation.deta,
+ dl.dscale * dscale.deta)
+ derivnew
+ }), list( .lscale=lscale ))),
+ weight=eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list( .lscale=lscale ))))
+}
+
+dlaplace = function(x, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("argument \"scale\" must be positive")
+ exp(-abs(x-location)/scale) / (2*scale)
+}
+
+plaplace = function(q, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("argument \"scale\" must be positive")
+ L = max(length(q), length(location), length(scale))
+ q = rep(q, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
+ zedd = (q-location) / scale
+ ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
+}
+
+qlaplace = function(p, location=0, scale=1) {
+ if(!is.Numeric(scale, posit=TRUE))
+ stop("argument \"scale\" must be positive")
+ L = max(length(p), length(location), length(scale))
+ p = rep(p, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
+ location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
+}
+
+rlaplace = function(n, location=0, scale=1) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(scale, posit=TRUE)) stop("\"scale\" must be positive")
+ r = runif(n)
+ location - sign(r-0.5) * scale * log(2*ifelse(r < 0.5, r, 1-r))
+}
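+
+# Comment-only check of the Laplace functions above (this relies on
+# qlaplace() using "location", as corrected): qlaplace() inverts plaplace().
+# pp = (1:9)/10
+# max(abs(plaplace(qlaplace(pp, location=1, scale=2),
+#                  location=1, scale=2) - pp)) # should be ~0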
+
+
+
+
+fff.control <- function(save.weight=TRUE, ...)
+{
+ list(save.weight=save.weight)
+}
+
+fff = function(link="loge",
+ idf1=NULL, idf2=NULL,
+ method.init=1, zero=NULL) {
+ if(mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("F-distribution\n\n",
+ "Links: ",
+ namesof("df1", link), ", ",
+ namesof("df2", link),
+ "\n", "\n",
+ "Mean: df2/(df2-2) provided df2>2", "\n",
+ "Variance: 2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) provided df2>4"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("df1", .link, tag= FALSE),
+ namesof("df2", .link, tag= FALSE))
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ df2.init = b = 2*mean(y) / (mean(y)-1)
+ df1.init = 2*b^2*(b-2)/(var(y)*(b-2)^2 * (b-4) - 2*b^2)
+ if(df2.init > 4) df2.init = 5
+ if(df1.init > 2) df1.init = 3
+ } else {
+ df2.init = b = 2*median(y) / (median(y)-1)
+ summy = summary(y)
+ var.est = summy[5] - summy[2]
+ df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
+ }
+ df1.init = if(length(.idf1)) rep(.idf1, len=n) else
+ rep(df1.init, len=n)
+ df2.init = if(length(.idf2)) rep(.idf2, len=n) else rep(1, len=n)
+ etastart = cbind(theta2eta(df1.init, .link),
+ theta2eta(df2.init, .link))
+ }
+ }), list( .method.init=method.init, .idf1=idf1,
+ .idf2=idf2, .link=link ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ df2 = eta2theta(eta[,2], .link)
+ ans = df2 * NA
+ ans[df2>2] = df2[df2>2] / (df2[df2>2]-2)
+ ans
+ }, list( .link=link ))),
+ last=eval(substitute(expression({
+ misc$link = c(df1= .link, df2= .link)
+ }), list( .link=link ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ df1 = eta2theta(eta[,1], .link)
+ df2 = eta2theta(eta[,2], .link)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (lgamma(0.5*(df1+df2)) + 0.5*df1*log(df1/df2) +
+ 0.5*(df1-2) * log(y) - lgamma(df1/2) - lgamma(df2/2) -
+ 0.5*(df1+df2)*log(1 + df1*y/df2 )))
+ }, list( .link=link ))),
+ vfamily=c("fff"),
+ deriv=eval(substitute(expression({
+ df1 = eta2theta(eta[,1], .link)
+ df2 = eta2theta(eta[,2], .link)
+ dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ 0.5*log(y) - 0.5*digamma(0.5*df1) -
+ 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
+ 0.5*log(1 + df1*y/df2)
+ ddf1.deta = dtheta.deta(df1, .link)
+ dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ 0.5*digamma(0.5*df2) -
+ 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
+ 0.5*log(1 + df1*y/df2)
+ ddf2.deta = dtheta.deta(df2, .link)
+ if(iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = w * cbind(dl.ddf1 * ddf1.deta,
+ dl.ddf2 * ddf2.deta)
+ derivnew
+ }), list( .link=link ))),
+ weight=eval(substitute(expression({
+ if(iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M=M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list( .link=link ))))
+}
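+
+# Comment-only fitting sketch for fff(), using base R's rf(); the F family
+# is hard to estimate well, so expect rough answers on modest samples.
+# set.seed(1)
+# fdat = data.frame(y = rf(2000, df1=4, df2=8))
+# Coef(vglm(y ~ 1, fff(), data=fdat)) # very roughly c(df1=4, df2=8)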
+
+
+
+vonmises = function(lscale="loge",
+ ilocation=NULL, iscale=NULL,
+ method.init=1, zero=NULL) {
+ if(mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ if(length(zero) && !is.Numeric(zero, integer=TRUE, posit=TRUE))
+ stop("bad input for argument \"zero\"")
+
+ new("vglmff",
+ blurb=c("Von Mises distribution\n\n",
+ "Links: ",
+ namesof("location", "identity"), ", ",
+ namesof("scale", lscale),
+ "\n", "\n",
+ "Mean: location"),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("location", "identity", tag= FALSE),
+ namesof("scale", .lscale, tag= FALSE))
+ if(!length(etastart)) {
+ if( .method.init == 1) {
+ location.init = mean(y)
+ rat10 = sqrt((sum(w*cos(y )))^2 + sum(w*sin(y))^2) / sum(w)
+ scale.init = sqrt(1 - rat10)
+ } else {
+ location.init = median(y)
+ scale.init = sqrt(sum(w*abs(y - location.init)) / sum(w))
+ }
+ location.init = if(length(.ilocation)) rep(.ilocation, len=n) else
+ rep(location.init, len=n)
+ scale.init = if(length(.iscale)) rep(.iscale, len=n) else rep(1, len=n)
+ etastart = cbind(theta2eta(location.init, "identity"),
+ theta2eta(scale.init, .lscale))
+ }
+ y = y %% (2*pi) # Coerce after initial values have been computed
+ }), list( .method.init=method.init, .ilocation=ilocation,
+ .iscale=iscale, .lscale=lscale ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta[,1], "identity") %% (2*pi)
+ }, list( .lscale=lscale ))),
+ last=eval(substitute(expression({
+ misc$link = c(location= "identity", scale= .lscale)
+ }), list( .lscale=lscale ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ location = eta2theta(eta[,1], "identity")
+ Scale = eta2theta(eta[,2], .lscale)
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (Scale * cos(y - location) -
+ log(mbesselI0(x=Scale ))))
+ }, list( .lscale=lscale ))),
+ vfamily=c("vonmises"),
+ deriv=eval(substitute(expression({
+ location = eta2theta(eta[,1], "identity")
+ Scale = eta2theta(eta[,2], .lscale)
+ tmp6 = mbesselI0(x=Scale, deriv=2)
+ dl.dlocation = Scale * sin(y - location)
+ dlocation.deta = dtheta.deta(location, "identity")
+ dl.dscale = cos(y - location) - tmp6[,2] / tmp6[,1]
+ dscale.deta = dtheta.deta(Scale, .lscale)
+ w * cbind(dl.dlocation * dlocation.deta,
+ dl.dscale * dscale.deta)
+ }), list( .lscale=lscale ))),
+ weight=eval(substitute(expression({
+ d2l.location2 = Scale * tmp6[,2] / tmp6[,1]
+ d2l.dscale2 = tmp6[,3] / tmp6[,1] - (tmp6[,2] / tmp6[,1])^2
+ wz = matrix(as.numeric(NA), nrow=n, ncol=M) # diagonal; M == 2 here
+ wz[,iam(1,1,M)] = d2l.location2 * dlocation.deta^2
+ wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
+ w * wz
+ }), list( .lscale=lscale ))))
+}
+
+
+
+hyper = function(N=NULL, D=NULL,
+ lprob="logit",
+ iprob=NULL) {
+ if(mode(lprob) != "character" && mode(lprob) != "name")
+ lprob = as.character(substitute(lprob))
+ inputN = is.Numeric(N, positive=TRUE)
+ inputD = is.Numeric(D, positive=TRUE)
+ if(inputD && inputN)
+ stop("only one of \"N\" and \"D\" is to be inputted")
+ if(!inputD && !inputN)
+ stop("one of \"N\" and \"D\" needs to be inputted")
+
+ new("vglmff",
+ blurb=c("Hypergeometric distribution\n\n",
+ "Link: ",
+ namesof("prob", lprob), "\n",
+ "Mean: D/N\n"),
+ initialize=eval(substitute(expression({
+ NCOL = function (x)
+ if(is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+ if(NCOL(y) == 1) {
+ if(is.factor(y)) y = y != levels(y)[1]
+ nn = rep(1, len=n)
+ if(!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ mustart = (0.5 + w * y) / (1 + w)
+ no.successes = w * y
+ if(any(abs(no.successes - round(no.successes)) > 0.001))
+ stop("Number of successes must be integer-valued")
+ } else if(NCOL(y) == 2) {
+ if(any(abs(y - round(y)) > 0.001))
+ stop("Count data must be integer-valued")
+ nn = y[,1] + y[,2]
+ y = ifelse(nn > 0, y[,1]/nn, 0)
+ w = w * nn
+ mustart = (0.5 + nn * y) / (1 + nn)
+ } else
+ stop("Response not of the right form")
+
+ predictors.names = namesof("prob", .lprob, tag= FALSE)
+ extra$Nvector = .N
+ extra$Dvector = .D
+ extra$Nunknown = length(extra$Nvector) == 0
+ if(!length(etastart)) {
+ init.prob = if(length( .iprob)) rep( .iprob, len=n) else mustart
+ etastart = matrix(theta2eta(init.prob, .lprob), n, ncol(cbind(y)))
+ }
+ }), list( .lprob=lprob, .N=N, .D=D, .iprob=iprob ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ eta2theta(eta, .lprob)
+ }, list( .lprob=lprob ))),
+ last=eval(substitute(expression({
+ misc$link = c("prob"= .lprob)
+ misc$Dvector = .D
+ misc$Nvector = .N
+ }), list( .N=N, .D=D, .lprob=lprob ))),
+ link=eval(substitute(function(mu, extra=NULL) {
+ theta2eta(mu, .lprob)
+ }, list( .lprob=lprob ))),
+ loglikelihood=eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
+ N = extra$Nvector
+ Dvec = extra$Dvector
+ prob = mu
+ yvec = w * y
+ if(residuals) stop("loglikelihood residuals not implemented yet") else {
+ if(extra$Nunknown) {
+ tmp12 = Dvec * (1-prob) / prob
+ sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
+ lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
+ } else
+ sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
+ lgamma(1+N*prob-yvec) - lgamma(1+N*(1-prob) -w + yvec))
+ }
+ }, list( .lprob=lprob ))),
+ vfamily=c("hyper"),
+ deriv=eval(substitute(expression({
+ prob = mu # equivalently, eta2theta(eta, .lprob)
+ dprob.deta = dtheta.deta(prob, .lprob)
+ Dvec = extra$Dvector
+ Nvec = extra$Nvector
+ yvec = w * y
+ if(extra$Nunknown) {
+ tmp72 = -Dvec / prob^2
+ tmp12 = Dvec * (1-prob) / prob
+ dl.dprob = tmp72 * (digamma(1 + tmp12) + digamma(1 + Dvec/prob -w) -
+ digamma(1 + tmp12-w+yvec) - digamma(1 + Dvec/prob))
+ } else {
+ dl.dprob = Nvec * (digamma(1+Nvec*prob) - digamma(1+Nvec*(1-prob)) -
+ digamma(1+Nvec*prob-yvec) + digamma(1+Nvec*(1-prob)-w+yvec))
+ }
+ w * dl.dprob * dprob.deta
+ }), list( .lprob=lprob ))),
+ weight=eval(substitute(expression({
+ if(extra$Nunknown) {
+ tmp722 = tmp72^2
+ tmp13 = 2*Dvec / prob^3
+ d2l.dprob2 = tmp722 * (trigamma(1 + tmp12) +
+ trigamma(1 + Dvec/prob - w) -
+ trigamma(1 + tmp12 - w + yvec) -
+ trigamma(1 + Dvec/prob)) +
+ tmp13 * (digamma(1 + tmp12) +
+ digamma(1 + Dvec/prob - w) -
+ digamma(1 + tmp12 - w + yvec) -
+ digamma(1 + Dvec/prob))
+ } else {
+ d2l.dprob2 = Nvec^2 * (trigamma(1+Nvec*prob) +
+ trigamma(1+Nvec*(1-prob)) -
+ trigamma(1+Nvec*prob-yvec) -
+ trigamma(1+Nvec*(1-prob)-w+yvec))
+ }
+ d2prob.deta2 = d2theta.deta2(prob, .lprob)
+ wz = -(dprob.deta^2) * d2l.dprob2 - d2prob.deta2 * dl.dprob
+ wz = w * wz
+ wz[wz < .Machine$double.eps] = .Machine$double.eps
+ wz
+ }), list( .lprob=lprob ))))
+}
+
+
+
+dbenini = function(x, shape, y0) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ N = max(length(x), length(shape), length(y0))
+ x = rep(x, len=N); shape = rep(shape, len=N); y0 = rep(y0, len=N);
+ ok = x > y0
+ temp = log(x[ok]/y0[ok])
+ ans = y0 * 0
+ ans[ok] = 2 * shape[ok] * exp(-shape[ok] * temp^2) * temp / x[ok]
+ ans
+}
+
+pbenini = function(q, shape, y0) {
+ if(!is.Numeric(q)) stop("bad input for argument \"q\"")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ N = max(length(q), length(shape), length(y0))
+ q = rep(q, len=N); shape = rep(shape, len=N); y0 = rep(y0, len=N);
+ ans = y0 * 0
+ ok = q > y0
+ ans[ok] = 1 - exp(-shape[ok] * (log(q[ok]/y0[ok]))^2)
+ ans
+}
+
+qbenini = function(p, shape, y0) {
+ if(!is.Numeric(p, posit=TRUE) || any(p >= 1))
+ stop("bad input for argument \"p\"")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ y0 * exp(sqrt(-log(1-p) / shape))
+}
+
+rbenini = function(n, shape, y0) {
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("bad input for argument \"n\"")
+ if(!is.Numeric(shape, posit=TRUE)) stop("bad input for argument \"shape\"")
+ if(!is.Numeric(y0, posit=TRUE)) stop("bad input for argument \"y0\"")
+ y0 * exp(sqrt(-log(runif(n)) / shape))
+}
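+
+# Comment-only check of the Benini functions above (arbitrary shape=2,
+# y0=1): qbenini() should invert pbenini().
+# pp = (1:9)/10
+# max(abs(pbenini(qbenini(pp, shape=2, y0=1), shape=2, y0=1) - pp)) # ~0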
+
+benini = function(y0=stop("argument \"y0\" must be specified"),
+ lshape="loge",
+ ishape=NULL, method.init=1) {
+ if(mode(lshape) != "character" && mode(lshape) != "name")
+ lshape = as.character(substitute(lshape))
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 2) stop("argument \"method.init\" must be 1 or 2")
+ if(!is.Numeric(y0, allow=1, posit=TRUE))
+ stop("bad input for argument \"y0\"")
+
+ new("vglmff",
+ blurb=c("1-parameter Benini distribution\n\n",
+ "Link: ",
+ namesof("shape", lshape),
+ "\n", "\n",
+ "Mean: zz"),
+ initialize=eval(substitute(expression({
+ predictors.names = c(namesof("shape", .lshape, tag= FALSE))
+ extra$y0 = .y0
+ if(min(y) <= extra$y0) stop("argument \"y0\" is too large")
+ if(!length(etastart)) {
+ probs = (1:3) / 4
+ qofy= quantile(rep(y, times=w), probs=probs) # fails if w != integer
+ if( .method.init == 1) {
+ shape.init = mean(-log(1-probs) / (log(qofy))^2)
+ } else {
+ shape.init = median(-log(1-probs) / (log(qofy))^2)
+ }
+ shape.init = if(length(.ishape)) rep(.ishape, len=n) else
+ rep(shape.init, len=n)
+ etastart = cbind(theta2eta(shape.init, .lshape))
+ }
+ }), list( .method.init=method.init, .ishape=ishape, .lshape=lshape,
+ .y0=y0 ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ shape = eta2theta(eta, .lshape)
+ temp = 1/(4*shape)
+ extra$y0 * exp(temp) *
+ ((sqrt(pi) * (1 - pgamma(temp, 0.5 ))) / (2*sqrt(shape)) +
+ 1 - pgamma(temp, 1))
+ }, list( .lshape=lshape ))),
+ last=eval(substitute(expression({
+ misc$link = c(shape= .lshape)
+ }), list( .lshape=lshape ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra=NULL) {
+ shape = eta2theta(eta, .lshape)
+ y0 = extra$y0
+ if(residuals) stop("loglikelihood residuals not implemented yet") else
+ sum(w * (log(shape) - log(y) - shape*(log(y/y0))^2 + log(log(y/y0 ))))
+ }, list( .lshape=lshape ))),
+ vfamily=c("benini"),
+ deriv=eval(substitute(expression({
+ shape = eta2theta(eta, .lshape)
+ y0 = extra$y0
+ dl.dshape = 1/shape - (log(y/y0))^2
+ dshape.deta = dtheta.deta(shape, .lshape)
+ w * dl.dshape * dshape.deta
+ }), list( .lshape=lshape ))),
+ weight=eval(substitute(expression({
+ d2l.dshape2 = 1 / shape^2
+ wz = d2l.dshape2 * dshape.deta^2
+ w * wz
+ }), list( .lshape=lshape ))))
+}
+
+
+
+
+dpolono = function(x, meanlog=0, sdlog=1, ...) {
+ if(!is.Numeric(x)) stop("bad input for argument \"x\"")
+ if(!is.Numeric(meanlog)) stop("bad input for argument \"meanlog\"")
+ if(!is.Numeric(sdlog, posit=TRUE)) stop("bad input for argument \"sdlog\"")
+ N = max(length(x), length(meanlog), length(sdlog))
+ x = rep(x, len=N); meanlog = rep(meanlog, len=N); sdlog = rep(sdlog, len=N);
+ ans = x * 0
+ integrand = function(t, x, meanlog, sdlog)
+ exp(t*x - exp(t) - 0.5*((t-meanlog)/sdlog)^2)
+ for(i in 1:N) {
+ if(x[i] == round(x[i]) && x[i] >= 0) {
+ temp = integrate(f=integrand, lower=-Inf, upper=Inf,
+ x=x[i], meanlog=meanlog[i], sdlog=sdlog[i], ...)
+ if(temp$message == "OK") ans[i] = temp$value else {
+ warning(paste("could not integrate (numerically) observation", i))
+ ans[i] = NA
+ }
+ }
+ }
+ ans = ans / (sqrt(2*pi) * sdlog * gamma(x+1))
+ ifelse(x == round(x) & x >= 0, ans, 0)
+}
+
+
+rpolono = function(n, meanlog=0, sdlog=1) {
+ if(!is.Numeric(n, integ=TRUE,allow=1)) stop("bad input for argument \"n\"")
+ if(!is.Numeric(meanlog)) stop("bad input for argument \"meanlog\"")
+ if(!is.Numeric(sdlog)) stop("bad input for argument \"sdlog\"")
+ meanlog = rep(meanlog, len=n); sdlog = rep(sdlog, len=n);
+ lambda = if(is.R()) rlnorm(n=n, meanlog=meanlog, sdlog=sdlog) else
+ rlognormal(n=n, m=meanlog, s=sdlog, lambda=0)
+ rpois(n=n, lambda=lambda)
+}
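+
+# Comment-only check that dpolono() behaves like a probability mass
+# function: its values on 0:300 should sum to nearly 1 for these
+# (arbitrary) parameters, and rpolono() draws should match the mean of
+# the mixing lognormal, exp(meanlog + sdlog^2/2).
+# sum(dpolono(0:300, meanlog=0.5, sdlog=0.5)) # close to 1
+# set.seed(1); mean(rpolono(10000, meanlog=0.5, sdlog=0.5))
+# exp(0.5 + 0.5^2/2) # theoretical mean, approx 1.868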
+
+
+
+
diff --git a/R/family.vglm.q b/R/family.vglm.q
new file mode 100644
index 0000000..abfe7bd
--- /dev/null
+++ b/R/family.vglm.q
@@ -0,0 +1,30 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+family.vglm <- function(object, ...)
+ object$vfamily
+
+print.vfamily <- function(x, ...)
+{
+ f <- x$vfamily
+ if(is.null(f))
+ stop("not a VGAM family function")
+
+ nn <- x$blurb
+ if(is.null(nn))
+ invisible(return(x))
+
+ cat("Family: ", f[1], "\n")
+ if(length(f)>1) cat("Classes:", paste(f, collapse=", "), "\n")
+ cat("\n")
+
+ for(i in 1:length(nn))
+ cat(nn[i])
+ cat("\n")
+ invisible(return(x))
+}
+
+
+
diff --git a/R/family.zeroinf.q b/R/family.zeroinf.q
new file mode 100644
index 0000000..939631e
--- /dev/null
+++ b/R/family.zeroinf.q
@@ -0,0 +1,716 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+dzipois = function(x, lambda, phi=0) {
+ L = max(length(x), length(lambda), length(phi))
+ x = rep(x, len=L); lambda = rep(lambda, len=L); phi = rep(phi, len=L);
+ ans = dpois(x, lambda)
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ ifelse(x==0, phi + (1-phi) * ans, (1-phi) * ans)
+}
+
+pzipois = function(q, lambda, phi=0) {
+ ans = ppois(q, lambda)
+ phi = rep(phi, length=length(ans))
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ phi + (1-phi) * ans
+}
+
+qzipois = function(p, lambda, phi=0) {
+ nn = max(length(p), length(lambda), length(phi))
+ p = rep(p, len=nn)
+ lambda = rep(lambda, len=nn)
+ phi = rep(phi, len=nn)
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ ans = p
+ ans[p<=phi] = 0
+ ans[p>phi] = qpois((p[p>phi]-phi[p>phi])/(1-phi[p>phi]), lam=lambda[p>phi])
+ ans
+}
+
+rzipois = function(n, lambda, phi=0) {
+ if(!is.Numeric(n, positive=TRUE, integer=TRUE, allow=1))
+ stop("n must be a single positive integer")
+ ans = rpois(n, lambda)
+ phi = rep(phi, len=length(ans))
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ ifelse(runif(n) < phi, 0, ans)
+}
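+
+# Comment-only check of the zero-inflated Poisson functions above:
+# set.seed(1)
+# yy = rzipois(10000, lambda=4, phi=0.25)
+# c(mean(yy), (1-0.25)*4) # both near 3
+# c(mean(yy == 0), dzipois(0, lambda=4, phi=0.25)) # observed vs theoretical P(Y=0)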
+
+
+yip88 = function(link.lambda="loge", n.arg=NULL)
+{
+ if(mode(link.lambda) != "character" && mode(link.lambda) != "name")
+ link.lambda = as.character(substitute(link.lambda))
+
+ new("vglmff",
+ blurb=c("Zero-inflated Poisson (based on Yip (1988))\n\n",
+ "Link: ", namesof("lambda", link.lambda), "\n",
+ "Variance: (1-phi)*lambda"),
+ first=eval(substitute(expression({
+ zero <- y==0
+ if(any(zero)) {
+ if(length(extra)) extra$sumw = sum(w) else
+ extra = list(sumw=sum(w))
+ if(is.numeric(.n.arg) && extra$sumw != .n.arg)
+ stop(paste("value of n.arg conflicts with data",
+ "(it need not be specified anyway)"))
+ warning("trimming out the zero observations")
+
+ axa.save = attr(x, "assign")
+ x = x[!zero,,drop=FALSE]
+ attr(x, "assign") = axa.save # Don't lose these!!
+ w = w[!zero]
+ y = y[!zero]
+ } else
+ if(!is.numeric(.n.arg))
+ stop("n.arg must be supplied")
+
+ }), list( .n.arg=n.arg ))),
+ initialize=eval(substitute(expression({
+ narg = if(is.numeric(.n.arg)) .n.arg else extra$sumw
+ if(sum(w) > narg)
+ stop("sum(w) > narg")
+
+ predictors.names = namesof("lambda", .link.lambda, tag=FALSE)
+ if(!length(etastart)) {
+ lambda.init = rep(median(y), length=length(y))
+ etastart = theta2eta(lambda.init, .link.lambda)
+ }
+ if(length(extra)) {
+ extra$sumw = sum(w)
+ extra$narg = narg # For @inverse
+ } else
+ extra = list(sumw=sum(w), narg = narg)
+ }), list( .link.lambda=link.lambda, .n.arg=n.arg ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ lambda = eta2theta(eta, .link.lambda)
+ temp5 = exp(-lambda)
+ phi = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
+ if(any(phi) <= 0)
+ stop("non-positive value(s) of phi")
+ (1-phi) * lambda
+ }, list( .link.lambda=link.lambda ))),
+ last=eval(substitute(expression({
+ misc$link = c(lambda = .link.lambda)
+
+ if(ncol(x)==1 && dimnames(x)[[2]]=="(Intercept)") {
+ suma = extra$sumw
+ phi = (1 - temp5[1] - suma/narg) / (1 - temp5[1])
+ phi = if(phi < 0 || phi>1) NA else phi # phi is a probability
+ misc$phi = phi # zz call it $p0 = phi ??
+ }
+ }), list( .link.lambda=link.lambda ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
+ lambda = eta2theta(eta, .link.lambda)
+ lstar = -lambda + y * log(lambda) - log(1-exp(-lambda))
+ sum(w * lstar)
+ }, list( .link.lambda=link.lambda ))),
+ vfamily=c("yip88"),
+ deriv=eval(substitute(expression({
+ lambda = eta2theta(eta, .link.lambda)
+ temp5 = exp(-lambda)
+ dl.dlambda = -1 + y/lambda - temp5/(1-temp5)
+ dlambda.deta = dtheta.deta(lambda, .link.lambda)
+ w * dl.dlambda * dlambda.deta
+ }), list( .link.lambda=link.lambda ))),
+ weight=eval(substitute(expression({
+ d2lambda.deta2 = d2theta.deta2(lambda, .link.lambda)
+ d2l.dlambda2 = -y / lambda^2 + temp5 / (1-temp5)^2
+ -w * (d2l.dlambda2*dlambda.deta^2 + dl.dlambda*d2lambda.deta2)
+ }), list( .link.lambda=link.lambda ))))
+}
+
+
+
+
+zapoisson = function(lp0="logit", llambda="loge")
+{
+ if(mode(lp0) != "character" && mode(lp0) != "name")
+ lp0 = as.character(substitute(lp0))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+
+ new("vglmff",
+ blurb=c(
+ "Zero-altered Poisson (binomial and positive-Poisson conditional model)\n\n",
+ "Links: ",
+ namesof("p0", lp0, tag=FALSE), ", ",
+ namesof("lambda", llambda, tag=FALSE),
+ "\n"),
+ initialize=eval(substitute(expression({
+ y = as.matrix(y)
+ extra$y0 = y0 = ifelse(y==0, 1, 0)
+ extra$ymat = ymat = cbind(y0=y0, y=y)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+
+ mynames1 = if(ncoly==1) "p0" else paste("p0", 1:ncoly, sep="")
+ mynames2 = if(ncoly==1) "lambda" else paste("lambda", 1:ncoly, sep="")
+ predictors.names = c(namesof(mynames1, .lp0, tag=FALSE),
+ namesof(mynames2, .llambda, tag=FALSE))
+ if(!length(etastart)) {
+ etastart = cbind(theta2eta((0.5 + w * y0) / (1 + w), .lp0),
+ matrix(1, n, NOS)) # 1 here is any old value
+ for(spp. in 1:NOS)
+ etastart[!skip.these[,spp.],NOS+spp.] =
+ theta2eta(y[!skip.these[,spp.],spp.] /
+ (1-exp(-y[!skip.these[,spp.],spp.])), .llambda)
+ }
+ }), list( .lp0=lp0, .llambda=llambda ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ NOS = extra$NOS
+ p0 = eta2theta(eta[,1:NOS], .lp0)
+ lambda = eta2theta(eta[,NOS+(1:NOS)], .llambda)
+ (1-p0) * (lambda / (1-exp(-lambda)))
+ }, list( .lp0=lp0, .llambda=llambda ))),
+ last=eval(substitute(expression({
+ misc$link = c(rep( .lp0, len=NOS), rep( .llambda, len=NOS))
+ names(misc$link) = c(mynames1, mynames2)
+ }), list( .lp0=lp0, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
+ NOS = extra$NOS
+ p0 = cbind(eta2theta(eta[,1:NOS], .lp0))
+ skip = extra$skip.these
+ lambda = cbind(eta2theta(eta[,NOS+(1:NOS)], .llambda))
+ ans = 0
+ for(spp. in 1:NOS) {
+ ans = ans + sum(w[skip[,spp.]] * log(p0[skip[,spp.],spp.])) +
+ sum(w[!skip[,spp.]] * (log(1-p0[!skip[,spp.],spp.]) -
+ log(1-exp(-lambda[!skip[,spp.],spp.])) -
+ lambda[!skip[,spp.],spp.] +
+ y[!skip[,spp.],spp.]*log(lambda[!skip[,spp.],spp.])))
+ }
+ ans
+ }, list( .lp0=lp0, .llambda=llambda ))),
+ vfamily=c("zapoisson"),
+ deriv=eval(substitute(expression({
+ NOS = extra$NOS
+ y0 = extra$y0
+ skip = extra$skip.these
+ p0 = cbind(eta2theta(eta[,1:NOS], .lp0))
+ lambda = cbind(eta2theta(eta[,NOS+(1:NOS)], .llambda))
+ dl.dlambda = y/lambda - 1 - 1/(exp(lambda)-1)
+ for(spp. in 1:NOS)
+ dl.dlambda[skip[,spp.],spp.] = 0
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ mup0 = p0
+ temp3 = if(.lp0 == "logit") {
+ w * (y0 - mup0)
+ } else
+ w * dtheta.deta(mup0, link=.lp0) * (y0/mup0 - 1) / (1-mup0)
+ ans = cbind(temp3, w * dl.dlambda * dlambda.deta)
+ ans
+ }), list( .lp0=lp0, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ wz = matrix( .Machine$double.eps^0.8, n, 2*NOS)
+ for(spp. in 1:NOS) {
+ temp4 = exp(lambda[!skip[,spp.],spp.])
+ ed2l.dlambda2 = -temp4 * (1/lambda[!skip[,spp.],spp.] -
+ 1/(temp4-1)) / (temp4-1)
+ wz[!skip[,spp.],NOS+spp.] = -w[!skip[,spp.]] *
+ (dlambda.deta[!skip[,spp.],spp.]^2) *
+ ed2l.dlambda2
+ }
+
+ tmp100 = mup0*(1-mup0)
+ tmp200 = if(.lp0 == "logit") {
+ cbind(w * tmp100)
+ } else {
+ cbind(w * dtheta.deta(mup0, link= .lp0)^2 / tmp100)
+ }
+ for(ii in 1:NOS) {
+ index200 = abs(tmp200[,ii]) < .Machine$double.eps
+ if(any(index200)) {
+ tmp200[index200,ii] = .Machine$double.eps # Diagonal 0's are bad
+ }
+ }
+ wz[,1:NOS] = tmp200
+ wz
+ }), list( .lp0=lp0, .llambda=llambda ))))
+}
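+
+# Editor's sketch (not upstream code): simulate a zero-altered Poisson
+# response and fit it. The zeros are governed by p0 on the logit scale;
+# the positive part is positive-Poisson with lambda on the log scale.
+if(FALSE) {
+    set.seed(1)
+    n = 1000
+    x = runif(n)
+    p0 = logit(-1 + 2*x, inverse=TRUE)       # P(Y = 0)
+    lambda = loge(-0.5 + x, inverse=TRUE)
+    y = rpois(n, lambda)
+    while(any(ind <- (y == 0)))              # rejection step: positive-Poisson
+        y[ind] = rpois(sum(ind), lambda[ind])
+    y[runif(n) < p0] = 0
+    fit = vglm(y ~ x, zapoisson)
+    coef(fit, matrix=TRUE)
+}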
+
+
+
+zanegbinomial = function(lp0="logit", lmunb = "loge", lk = "loge",
+ ik = 1, zero = -3, cutoff = 0.995, method.init=3)
+{
+
+ if(!is.Numeric(cutoff, positiv=TRUE, allow=1) || cutoff<0.8 || cutoff>=1)
+ stop("range error in the argument cutoff")
+ if(!is.Numeric(ik, positiv=TRUE))
+ stop("ik must contain positive values only")
+ if(!is.Numeric(method.init, allow=1, integ=TRUE, posit=TRUE) ||
+ method.init > 3) stop("argument \"method.init\" must be 1, 2 or 3")
+
+ if(mode(lmunb) != "character" && mode(lmunb) != "name")
+ lmunb = as.character(substitute(lmunb))
+ if(mode(lk) != "character" && mode(lk) != "name")
+ lk = as.character(substitute(lk))
+ if(mode(lp0) != "character" && mode(lp0) != "name")
+ lp0 = as.character(substitute(lp0))
+
+ new("vglmff",
+ blurb=c("Zero-altered negative binomial (binomial and\n",
+ "positive-negative binomial conditional model)\n\n",
+ "Links: ",
+ namesof("p0", lp0, tag=FALSE), ", ",
+ namesof("munb", lmunb), ", ",
+ namesof("k", lk), "\n",
+ "Mean: (1-p0) * munb / [1 - (k/(k+munb))^k]"),
+ constraints=eval(substitute(expression({
+ temp752 = .zero
+ if(length(temp752) && all(temp752 == -3))
+ temp752 = 3*(1:ncol(y))
+ constraints = cm.zero.vgam(constraints, x, temp752, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ y = as.matrix(y)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ M = 3 * ncoly #
+
+ mynames1 = if(NOS==1) "p0" else paste("p0", 1:NOS, sep="")
+ mynames2 = if(NOS==1) "munb" else paste("munb", 1:NOS, sep="")
+ mynames3 = if(NOS==1) "k" else paste("k", 1:NOS, sep="")
+ predictors.names = c(namesof(mynames1, .lp0, tag= FALSE),
+ namesof(mynames2, .lmunb, tag= FALSE),
+ namesof(mynames3, .lk, tag= FALSE))
+ predictors.names = predictors.names[interleave.VGAM(3*NOS, M=3)]
+ extra$y0 = y0 = ifelse(y==0, 1, 0)
+ extra$ymat = ymat = cbind(y0=y0, y=y)
+ extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+
+ if(!length(etastart)) {
+ if( .method.init == 3) {
+ mu.init = y + 1/16
+ } else {
+ mu.init = y
+ for(iii in 1:ncol(y))
+ mu.init[,iii] = if( .method.init == 2)
+ weighted.mean(y[,iii], w=w) else
+ median(rep(y[,iii], w)) + 1/8
+ }
+ kmat0 = matrix( .ik, nrow(y), ncoly, byrow=TRUE) # Initial kmat
+ pnb0 = (kmat0 / (kmat0 + mu.init))^kmat0
+ etastart = cbind(theta2eta((0.5 + w * y0) / (1 + w), .lp0),
+ theta2eta(mu.init*(1-pnb0), .lmunb),
+ theta2eta(kmat0, .lk))
+ etastart = etastart[,interleave.VGAM(ncol(etastart),M=3)]
+ }
+ }), list( .lp0=lp0, .lmunb=lmunb, .lk=lk, .ik=ik,
+ .method.init=method.init ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ NOS = extra$NOS
+ p0 = eta2theta(eta[,3*(1:NOS)-2], .lp0) # p(0) from logistic regression
+ munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb)
+ kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk)
+ pnb0 = (kmat / (kmat + munb))^kmat # p(0) from negative binomial
+ (1 - p0) * munb / (1 - pnb0)
+ }, list( .lp0=lp0, .lk=lk, .lmunb=lmunb ))),
+ last=eval(substitute(expression({
+ misc$link = c(rep( .lp0, length=NOS), rep( .lmunb, length=NOS),
+ rep( .lk, length=NOS))
+ names(misc$link) = c(mynames1, mynames2, mynames3)
+ misc$cutoff = .cutoff
+ misc$method.init = .method.init
+ }), list( .lp0=lp0, .lmunb=lmunb, .lk=lk, .cutoff=cutoff,
+ .method.init=method.init ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta,extra=NULL) {
+ NOS = extra$NOS
+ p0 = eta2theta(eta[,3*(1:NOS)-2,drop=FALSE], .lp0)
+ munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb)
+ kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk)
+ skip = extra$skip.these
+ pnb0 = (kmat / (kmat + munb))^kmat
+ ans = 0.0
+ for(spp. in 1:NOS) {
+ i8 = skip[,spp.]
+ ans = ans + sum(w[i8] * log(p0[i8,spp.])) +
+ sum(w[!i8] * (log(1-p0[!i8,spp.]) + y[!i8,spp.] *
+ log(munb[!i8,spp.]/(munb[!i8,spp.]+
+ kmat[!i8,spp.])) + kmat[!i8,spp.]*log(kmat[!i8,spp.] /
+ (munb[!i8,spp.]+kmat[!i8,spp.])) +
+ lgamma(y[!i8,spp.]+kmat[!i8,spp.]) -
+ lgamma(kmat[!i8,spp.]) - lgamma(y[!i8,spp.]+1) -
+ (if(is.R())
+ log1p(-pnb0[!i8,spp.]) else log(1 - pnb0[!i8,spp.]))))
+ }
+ ans
+ }, list( .lp0=lp0, .lmunb=lmunb, .lk=lk ))),
+ vfamily=c("zanegbinomial"),
+ deriv=eval(substitute(expression({
+ NOS = extra$NOS
+ y0 = extra$y0
+
+ p0 = eta2theta(eta[,3*(1:NOS)-2], .lp0) # p(0) from logistic regression
+ munb = eta2theta(eta[,3*(1:NOS)-1,drop=FALSE], .lmunb)
+ kmat = eta2theta(eta[,3*(1:NOS),drop=FALSE], .lk)
+ skip = extra$skip.these
+
+ d3 = deriv3(~ -log(1 - (kmat. /(kmat. + munb. ))^kmat. ),
+ c("munb.", "kmat."), hessian= TRUE) # Extra term
+ dl0.dthetas = array(NA, c(n, NOS, 2))
+ d2l0.dthetas2 = array(NA, c(n, NOS, 3)) # matrix-band format
+ for(spp. in 1:NOS) {
+ kmat. = kmat[,spp.]
+ munb. = munb[,spp.]
+ eval.d3 = eval(d3) # Evaluated for one species
+ dl0.dthetas[,spp.,1] = attr(eval.d3, "gradient")[,1]
+ dl0.dthetas[,spp.,2] = attr(eval.d3, "gradient")[,2]
+ d2l0.dthetas2[,spp.,1] = attr(eval.d3, "hessian")[,1,1]
+ d2l0.dthetas2[,spp.,2] = attr(eval.d3, "hessian")[,2,2]
+ d2l0.dthetas2[,spp.,3] = attr(eval.d3, "hessian")[,1,2]
+ }
+ dl.dmunb = y/munb - (y+kmat)/(kmat+munb) + dl0.dthetas[,,1]
+ dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(munb+kmat) + 1 +
+ log(kmat/(kmat+munb)) + dl0.dthetas[,,2]
+ for(spp. in 1:NOS)
+ dl.dk[skip[,spp.],spp.] = dl.dmunb[skip[,spp.],spp.] = 0
+
+ dmunb.deta = dtheta.deta(munb, .lmunb)
+ dk.deta = dtheta.deta(kmat, .lk)
+ myderiv = w * cbind(dl.dmunb * dmunb.deta, dl.dk * dk.deta)
+
+ mup0 = p0
+ temp3 = if(.lp0 == "logit") {
+ w * (y0 - mup0)
+ } else
+ w * dtheta.deta(mup0, link=.lp0) * (y0/mup0 - 1) / (1-mup0)
+
+ ans = cbind(temp3, myderiv)
+ ans = ans[,interleave.VGAM(ncol(ans), M=3)]
+ ans
+ }), list( .lp0=lp0, .lmunb=lmunb, .lk=lk ))),
+ weight=eval(substitute(expression({
+ wz = matrix(0, n, 6*NOS-1) # wz is not 'diagonal'
+ pnb0 = (kmat / (kmat + munb))^kmat
+ ed2l.dmunb2 = (1/munb - (munb + kmat*(1-pnb0))/(munb+kmat)^2) /
+ (1-pnb0) - d2l0.dthetas2[,,1]
+ fred = dotFortran(name="enbin8",
+ ans=double(n*NOS), as.double(kmat),
+ as.double(kmat/(munb+kmat)), as.double(.cutoff),
+ as.integer(n), ok=as.integer(1), as.integer(NOS),
+ sumpdf=double(1), macheps=as.double(.Machine$double.eps))
+ if(fred$ok != 1)
+ stop("error in Fortran subroutine exnbin")
+ dim(fred$ans) = c(n, NOS)
+ ed2l.dk2 = -fred$ans/(1-pnb0) - 1/kmat + 1/(kmat+munb) -
+ munb * pnb0 / ((1-pnb0)*(munb+kmat)^2) - d2l0.dthetas2[,,2]
+ wz[,3*(1:NOS)-1] = w * dmunb.deta^2 * ed2l.dmunb2
+ wz[,3*(1:NOS)] = w * dk.deta^2 * ed2l.dk2
+
+ wz[,3*NOS+3*(1:NOS)-1] = -w * d2l0.dthetas2[,,3] * dmunb.deta * dk.deta
+
+ tmp100 = mup0*(1-mup0)
+ tmp200 = if(.lp0 == "logit") {
+ cbind(w * tmp100)
+ } else {
+ cbind(w * dtheta.deta(mup0, link= .lp0)^2 / tmp100)
+ }
+ for(ii in 1:NOS) {
+ index200 = abs(tmp200[,ii]) < .Machine$double.eps
+ if(any(index200)) {
+ tmp200[index200,ii] = .Machine$double.eps # Diagonal 0's are bad
+ }
+ }
+ wz[,3*(1:NOS)-2] = tmp200
+
+ for(spp. in 1:NOS) {
+ wz[skip[,spp.],3*spp. - 1] =
+ wz[skip[,spp.],3*spp.] = .Machine$double.eps^0.5
+ wz[skip[,spp.],3*NOS+3*(spp.)-1] = 0
+ }
+ wz
+ }), list( .lp0=lp0, .cutoff=cutoff ))))
+}
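+
+# Editor's sketch (not upstream code): a typical call. By default
+# zero = -3 makes every third linear predictor (the k's) intercept-only.
+# 'mydata', with columns y, x1 and x2, is hypothetical.
+if(FALSE) {
+    fit = vglm(y ~ x1 + x2, zanegbinomial(ik=2), data=mydata)
+    coef(fit, matrix=TRUE)   # interleaved predictors: p0, munb, k
+}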
+
+
+rposnegbin = function(n, munb, k) {
+ if(!is.Numeric(k, posit=TRUE))
+ stop("argument \"k\" must be positive")
+ if(!is.Numeric(munb, posit=TRUE))
+ stop("argument \"munb\" must be positive")
+ if(!is.Numeric(n, posit=TRUE, integ=TRUE, allow=1))
+ stop("argument \"n\" must be a positive integer")
+ ans = rnbinom(n=n, mu=munb, size=k)
+ munb = rep(munb, len=n)
+ k = rep(k, len=n)
+ index = ans == 0
+ while(any(index)) {
+ more = rnbinom(n=sum(index), mu=munb[index], size=k[index])
+ ans[index] = more
+ index = ans == 0
+ }
+ ans
+}
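+
+# Editor's note (not upstream): the rejection loop above redraws zeros,
+# so the result is a positive (zero-truncated) negative binomial sample.
+if(FALSE) {
+    y = rposnegbin(n=1000, munb=2, k=0.5)
+    min(y)    # always >= 1 by construction
+    mean(y)   # approximately munb / (1 - (k/(k+munb))^k)
+}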
+
+
+
+zipoisson = function(lphi="logit", llambda="loge", iphi=NULL, zero=NULL)
+{
+ if(mode(lphi) != "character" && mode(lphi) != "name")
+ lphi = as.character(substitute(lphi))
+ if(mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if(is.Numeric(iphi))
+ if(!is.Numeric(iphi, allow=1, posit=TRUE) || iphi >= 1)
+ stop("iphi must be a single number inside the interval (0,1)")
+
+ new("vglmff",
+ blurb=c("Zero-inflated Poisson\n\n",
+ "Links: ", namesof("phi", lphi), ", ",
+ namesof("lambda", llambda), "\n",
+ "Mean: (1-phi)*lambda"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ if(ncol(as.matrix(y)) != 1) stop("multivariate responses not allowed")
+ predictors.names = c( namesof("phi", .lphi, tag=FALSE),
+ namesof("lambda", .llambda, tag=FALSE))
+ if(!length(etastart)) {
+ phi.init = if(length( .iphi)) .iphi else {
+ sum(w[y==0]) / sum(w)
+ }
+ if(phi.init <= 0 || phi.init >=1) phi.init = 0.1 # Last resort
+ lambda.init = y + 1/8
+ etastart = cbind(theta2eta(rep(phi.init, len=n), .lphi),
+ theta2eta(lambda.init, .llambda))
+ }
+ }), list( .lphi=lphi, .llambda=llambda, .iphi=iphi ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ phi = eta2theta(eta[,1], .lphi)
+ lambda = eta2theta(eta[,2], .llambda)
+ (1-phi) * lambda
+ }, list( .lphi=lphi, .llambda=llambda ))),
+ last=eval(substitute(expression({
+ misc$link <- c("phi" = .lphi, "lambda" = .llambda)
+ if(intercept.only) {
+ phi = eta2theta(eta[1,1], .lphi)
+ lambda = eta2theta(eta[1,2], .llambda)
+ misc$prob0 = phi + (1-phi) * exp(-lambda) # P(Y=0)
+ }
+ }), list( .lphi=lphi, .llambda=llambda ))),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
+ phi = eta2theta(eta[,1], .lphi)
+ lambda = eta2theta(eta[,2], .llambda)
+ index = (y==0)
+ tmp8 = phi + (1-phi)*exp(-lambda)
+ ell0 = log(tmp8[index])
+ ell1 = log((1-phi[!index]) * dpois(y[!index], lambda= lambda[!index]))
+ sum(w[index] * ell0) + sum(w[!index] * ell1)
+ }, list( .lphi=lphi, .llambda=llambda ))),
+ vfamily=c("zipoisson"),
+ deriv=eval(substitute(expression({
+ phi = eta2theta(eta[,1], .lphi)
+ lambda = eta2theta(eta[,2], .llambda)
+ tmp8 = phi + (1-phi)*exp(-lambda)
+ index = (y==0)
+ dl.dphi = (1-exp(-lambda)) / tmp8
+ dl.dphi[!index] = -1 / (1-phi[!index])
+ dl.dlambda = -(1-phi) * exp(-lambda) / tmp8
+ dl.dlambda[!index] = (y[!index] - lambda[!index]) / lambda[!index]
+ dphi.deta = dtheta.deta(phi, .lphi)
+ dlambda.deta = dtheta.deta(lambda, .llambda)
+ ans = w * cbind(dl.dphi * dphi.deta, dl.dlambda * dlambda.deta)
+ if(.llambda == "loge" && (any(lambda[!index] < .Machine$double.eps))) {
+ ans[!index,2] = w[!index] * (y[!index] - lambda[!index])
+ }
+ ans
+ }), list( .lphi=lphi, .llambda=llambda ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), nrow=n, ncol=dimm(M))
+ d2l.dphi2 = (1-exp(-lambda)) / ((1-phi)*tmp8)
+ d2l.dlambda2 = (1-phi)/lambda - phi*(1-phi)*exp(-lambda) / tmp8
+ d2l.dphilambda = -exp(-lambda) / tmp8
+ wz[,iam(1,1,M)] = d2l.dphi2 * dphi.deta^2
+ wz[,iam(2,2,M)] = d2l.dlambda2 * dlambda.deta^2
+ wz[,iam(1,2,M)] = d2l.dphilambda * dphi.deta * dlambda.deta
+ if(.llambda == "loge" && (any(lambda[!index] < .Machine$double.eps))) {
+ ind5 = !index & (lambda < .Machine$double.eps)
+ if(any(ind5))
+ wz[ind5,iam(2,2,M)] = (1-phi[ind5]) * .Machine$double.eps
+ }
+ w * wz
+ }), list( .lphi=lphi, .llambda=llambda ))))
+}
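+
+# Editor's sketch (not upstream code): an intercept-only zero-inflated
+# Poisson fit; Coef() reports phi and lambda on their original scales.
+if(FALSE) {
+    set.seed(2)
+    n = 2000
+    phi = 0.3; lambda = 4
+    y = ifelse(runif(n) < phi, 0, rpois(n, lambda))
+    fit = vglm(y ~ 1, zipoisson)
+    Coef(fit)         # roughly (phi, lambda)
+    fitted(fit)[1]    # (1-phi.hat) * lambda.hat
+}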
+
+
+
+
+zibinomial = function(lphi="logit", link.mu="logit",
+ iphi=NULL, zero=1, mv=FALSE)
+{
+ if(as.logical(mv)) stop("argument \"mv\" must be FALSE")
+ if(mode(lphi) != "character" && mode(lphi) != "name")
+ lphi = as.character(substitute(lphi))
+ if(mode(link.mu) != "character" && mode(link.mu) != "name")
+ link.mu = as.character(substitute(link.mu))
+ if(is.Numeric(iphi))
+ if(!is.Numeric(iphi, allow=1, posit=TRUE) || iphi >= 1)
+ stop("iphi must be a single number inside the interval (0,1)")
+
+ new("vglmff",
+ blurb=c("Zero-inflated binomial\n\n",
+ "Links: ", namesof("phi", lphi), ", ",
+ namesof("mu", link.mu), "\n",
+ "Mean: (1-phi) * mu / (1 - (1-mu)^w)"),
+ constraints=eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero=zero ))),
+ initialize=eval(substitute(expression({
+ {
+ NCOL = function (x)
+ if(is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+
+ if(NCOL(y) == 1) {
+ if(is.factor(y)) y = y != levels(y)[1]
+ nn = rep(1, n)
+ if(!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ mustart = (0.5 + w * y) / (1 + w)
+ no.successes = w * y
+ if(any(abs(no.successes - round(no.successes)) > 0.001))
+ stop("Number of successes must be integer-valued")
+ } else if(NCOL(y) == 2) {
+ if(any(abs(y - round(y)) > 0.001))
+ stop("Count data must be integer-valued")
+ nn = y[,1] + y[,2]
+ y = ifelse(nn > 0, y[,1]/nn, 0)
+ w = w * nn
+ mustart = (0.5 + nn * y) / (1 + nn)
+ } else
+ stop("Response not of the right form (1 or 2 columns required)")
+ }
+
+ predictors.names = c( namesof("phi", .lphi, tag=FALSE),
+ namesof("mu", .link.mu, tag=FALSE))
+ phi.init = if(length( .iphi)) .iphi else {
+ sum(w[y==0]) / sum(w)
+ }
+ if(phi.init <= 0 || phi.init >=1) phi.init = 0.1 # Last resort
+ mustart = cbind(rep(phi.init, len=n), mustart) # 1st coln not a real mu
+ }), list( .lphi=lphi, .link.mu=link.mu, .iphi=iphi ))),
+ inverse=eval(substitute(function(eta, extra=NULL) {
+ phi = eta2theta(eta[,1], .lphi)
+ mubin = eta2theta(eta[,2], .link.mu)
+ (1-phi) * mubin
+ }, list( .lphi=lphi, .link.mu=link.mu ))),
+ last=eval(substitute(expression({
+ misc$link <- c("phi" = .lphi, "mu" = .link.mu)
+ if(intercept.only && all(w==w[1])) {
+ phi = eta2theta(eta[1,1], .lphi)
+ mubin = eta2theta(eta[1,2], .link.mu)
+ misc$p0 = phi + (1-phi) * (1-mubin)^w[1] # P(Y=0)
+ }
+ }), list( .lphi=lphi, .link.mu=link.mu ))),
+ link=eval(substitute(function(mu, extra=NULL)
+ cbind(theta2eta(mu[,1], .lphi), theta2eta(mu[,2], .link.mu))
+ , list( .lphi=lphi, .link.mu=link.mu) )),
+ loglikelihood=eval(substitute(
+ function(mu,y,w,residuals=FALSE, eta, extra=NULL) {
+ phi = eta2theta(eta[,1], .lphi)
+ mubin = eta2theta(eta[,2], .link.mu)
+ index = (y==0)
+ tmp8 = phi + (1-phi)*(1-mubin)^w
+ ell0 = log(tmp8[index])
+ ell1 = log(1-phi[!index]) + dbinom(x=round(w[!index]*y[!index]),
+ size=w[!index], prob=mubin[!index], log=TRUE)
+ sum(ell0) + sum(ell1)
+ }, list( .lphi=lphi, .link.mu=link.mu ))),
+ vfamily=c("zibinomial"),
+ deriv=eval(substitute(expression({
+ phi = eta2theta(eta[,1], .lphi)
+ mubin = eta2theta(eta[,2], .link.mu)
+ prob0 = (1-mubin)^w # Actually q^w
+ tmp8 = phi + (1-phi)*prob0
+ index = (y==0)
+ dl.dphi = (1-prob0) / tmp8
+ dl.dphi[!index] = -1 / (1-phi[!index])
+ dl.dmubin = -w * (1-phi) * (1-mubin)^(w-1) / tmp8
+ dl.dmubin[!index] = w[!index] * (y[!index]/mubin[!index] -
+ (1-y[!index]) / (1-mubin[!index]))
+ dphi.deta = dtheta.deta(phi, .lphi)
+ dmubin.deta = dtheta.deta(mubin, .link.mu)
+ ans = cbind(dl.dphi * dphi.deta, dl.dmubin * dmubin.deta)
+ if(.link.mu == "logit") {
+ ans[!index,2] = w[!index] * (y[!index] - mubin[!index])
+ }
+ ans
+ }), list( .lphi=lphi, .link.mu=link.mu ))),
+ weight=eval(substitute(expression({
+ wz = matrix(as.numeric(NA), nrow=n, ncol=dimm(M))
+ d2l.dphi2 = (1 - prob0) / ((1-phi) * tmp8)
+ d2l.dmubin2 = w * (1-phi) * ((1 - mubin * (1-mubin)^(w-1)) /
+ (mubin*(1-mubin)) - mubin^(w-2) * (w*phi-tmp8) / tmp8)
+ d2l.dphimubin = -w * (1-mubin)^(w-1) / tmp8
+ wz[,iam(1,1,M)] = d2l.dphi2 * dphi.deta^2
+ wz[,iam(2,2,M)] = d2l.dmubin2 * dmubin.deta^2
+ wz[,iam(1,2,M)] = d2l.dphimubin * dphi.deta * dmubin.deta
+ if(TRUE) {
+ ind6 = wz[,iam(2,2,M)] < .Machine$double.eps
+ if(any(ind6))
+ wz[ind6,iam(2,2,M)] = .Machine$double.eps
+ }
+ wz
+ }), list( .lphi=lphi, .link.mu=link.mu ))))
+}
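+
+# Editor's sketch (not upstream code): the prior weights carry the
+# binomial size here, so they must be constant for misc$p0 to be
+# reported in the last slot.
+if(FALSE) {
+    n = 1000; size = 10
+    phi = 0.2; mu = 0.4
+    y = ifelse(runif(n) < phi, 0, rbinom(n, size=size, prob=mu)) / size
+    fit = vglm(y ~ 1, zibinomial, weights=rep(size, n))
+    Coef(fit)    # roughly (phi, mu)
+}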
+
+
+
+dzibinom = function(x, size, prob, log = FALSE, phi=0) {
+ L = max(length(x), length(size), length(prob), length(phi))
+ x = rep(x, len=L); size = rep(size, len=L);
+ prob = rep(prob, len=L); phi = rep(phi, len=L);
+ ans = dbinom(x, size, prob, log=log)
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ ifelse(x==0, phi + (1-phi) * ans, (1-phi) * ans)
+}
+
+pzibinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
+ ans = pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
+ phi = rep(phi, length=length(ans))
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ phi + (1-phi) * ans
+}
+
+qzibinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi=0) {
+ nn = max(length(p), length(size), length(prob), length(phi))
+ p = rep(p, len=nn)
+ size = rep(size, len=nn)
+ prob = rep(prob, len=nn)
+ phi = rep(phi, len=nn)
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ ans = p
+ ans[p<=phi] = 0
+ ans[p>phi] = qbinom((p[p>phi]-phi[p>phi])/(1-phi[p>phi]), size[p>phi],
+ prob[p>phi], lower.tail = lower.tail, log.p = log.p)
+ ans
+}
+
+rzibinom = function(n, size, prob, phi=0) {
+ if(!is.Numeric(n, positive=TRUE, integer=TRUE, allow=1))
+ stop("n must be a single positive integer")
+ ans = rbinom(n, size, prob)
+ phi = rep(phi, len=length(ans))
+ if(!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
+ stop("phi must be between 0 and 1 inclusive")
+ ifelse(runif(n) < phi, 0, ans)
+}
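+
+# Editor's sketch (not upstream code): quick consistency checks for the
+# d/p/q/r functions above. Note that the zero-inflation adjustment in
+# pzibinom() and qzibinom() presumes lower.tail=TRUE and log.p=FALSE.
+if(FALSE) {
+    dzibinom(0, size=10, prob=0.3, phi=0.2)   # phi + (1-phi)*dbinom(0, ...)
+    pzibinom(0, size=10, prob=0.3, phi=0.2)   # same value
+    mean(rzibinom(10000, size=10, prob=0.3, phi=0.2) == 0)  # close to both
+}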
+
+
diff --git a/R/fitted.vlm.q b/R/fitted.vlm.q
new file mode 100644
index 0000000..d08cfd5
--- /dev/null
+++ b/R/fitted.vlm.q
@@ -0,0 +1,79 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+fitted.vlm <- function(object, matrix=TRUE, ...)
+{
+
+ answer =
+ if(matrix)
+ object@fitted.values else
+ {
+ if(!is.matrix(object@fitted.values) || !length(object@fitted.values))
+ stop("object@fitted.values is not a matrix or is empty")
+ if(ncol(object@fitted.values) == 1)
+ c(object@fitted.values) else {
+ warning("ncol(object@fitted.values) is not 1")
+ c(object@fitted.values)
+ }
+ }
+
+ if(length(answer) && length(object@na.action)) {
+ napredict(object@na.action[[1]], answer)
+ } else {
+ answer
+ }
+}
+
+setMethod("fitted.values", "vlm",
+ function(object, ...)
+ fitted.vlm(object, ...))
+
+setMethod("fitted", "vlm",
+ function(object, ...)
+ fitted.vlm(object, ...))
+
+setMethod("fitted.values", "vglm",
+ function(object, ...)
+ fitted.vlm(object, ...))
+
+setMethod("fitted", "vglm",
+ function(object, ...)
+ fitted.vlm(object, ...))
+
+
+predictors.vglm <- function(object, matrix=TRUE, ...)
+{
+ answer =
+ if(matrix)
+ object@predictors else
+ {
+ if(!is.matrix(object@predictors) || !length(object@predictors))
+ stop("object@predictors is not a matrix or is empty")
+ if(ncol(object@predictors) == 1)
+ c(object@predictors) else {
+ warning("ncol(object@predictors) is not 1")
+ c(object@predictors)
+ }
+ }
+
+ if(length(answer) && length(object@na.action)) {
+ napredict(object@na.action[[1]], answer)
+ } else {
+ answer
+ }
+}
+
+
+if(!isGeneric("predictors"))
+ setGeneric("predictors", function(object, ...) standardGeneric("predictors"))
+
+setMethod("predictors", "vglm",
+ function(object, ...)
+ predictors.vglm(object, ...))
+
+
+
+
+
diff --git a/R/generic.q b/R/generic.q
new file mode 100644
index 0000000..2a40412
--- /dev/null
+++ b/R/generic.q
@@ -0,0 +1,38 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+add1.vgam <- function(...)
+ stop("no add1 method implemented for vgam() models (yet)")
+alias.vgam <- function(...)
+ stop("no alias method implemented for vgam() models (yet)")
+anova.vgam <- function(...)
+ stop("no anova method implemented for vgam() models (yet)")
+drop1.vgam <- function(...)
+ stop("no drop1 method implemented for vgam() models (yet)")
+effects.vgam <- function(...)
+ stop("no effects method implemented for vgam() models (yet)")
+proj.vgam <- function(...)
+ stop("no proj method implemented for vgam() models (yet)")
+step.vgam <- function(...)
+ stop("no step method implemented for vgam() models (yet)")
+update.vgam <- function(...)
+ stop("no update method implemented for vgam() models (yet)")
+
+add1.vglm <- function(...)
+ stop("no add1 method implemented for vglm() models (yet)")
+alias.vglm <- function(...)
+ stop("no alias method implemented for vglm() models (yet)")
+anova.vglm <- function(...)
+ stop("no anova method implemented for vglm() models (yet)")
+drop1.vglm <- function(...)
+ stop("no drop1 method implemented for vglm() models (yet)")
+plot.vglm <- function(...)
+ stop("no plot method implemented for vglm() models (yet)")
+proj.vglm <- function(...)
+ stop("no proj method implemented for vglm() models (yet)")
+step.vglm <- function(...)
+ stop("no step method implemented for vglm() models (yet)")
+update.vglm <- function(...)
+ stop("no update method implemented for vglm() models (yet)")
+
diff --git a/R/links.q b/R/links.q
new file mode 100644
index 0000000..8782a99
--- /dev/null
+++ b/R/links.q
@@ -0,0 +1,1105 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+
+
+TypicalVGAMlinkFunction <- function(theta,
+ earg=list(), inverse=FALSE, deriv=0, short=TRUE, tag=FALSE) {
+ NULL
+}
+
+
+
+namesof <- function(theta,
+ link,
+ earg=list(),
+ tag=FALSE,
+ short=TRUE)
+{
+
+
+ string <- paste(link,
+ "(theta=theta, earg=earg, short=short, tag=tag)", sep="")
+ calls <- parse(text=string)[[1]]
+ ans <- eval(calls)
+ return(ans)
+}
+
+theta2eta <- function(theta, link, earg=list()) {
+ string <- paste(link, "(theta=theta, earg=earg)", sep="")
+ calls <- parse(text=string)[[1]]
+ eval(calls)
+}
+
+
+
+
+eta2theta <- function(theta, link="identity", earg=list()) {
+ if(is.null(link))
+ link <- "identity"
+
+
+
+ llink <- length(link)
+ if(llink == 1) {
+ string <- paste(link, "(theta=theta, earg=earg, inverse=TRUE)", sep="")
+ calls <- parse(text=string)[[1]]
+ return(eval(calls))
+ } else
+ if(llink > 1) {
+ if(is.matrix(theta) && llink == ncol(theta)) {
+
+
+
+ ans <- NULL
+ for(iii in 1:llink) {
+ use.earg = if(is.list(earg) && length(earg)==llink &&
+ is.list(earg[[iii]])) earg[[iii]] else earg
+ string = paste(link[iii],
+ "(theta=theta[,iii], earg=use.earg, inverse=TRUE)",
+ sep="")
+ calls <- parse(text=string)[[1]]
+ ans <- cbind(ans, eval(calls))
+ }
+ } else {
+ if(length(theta) < llink)
+ theta = rep(theta, len=llink)
+
+ if(length(theta) != llink)
+ stop("length of theta and link don't match")
+
+ ans <- NULL
+ for(iii in 1:llink) {
+ string = paste(link[iii],
+ "(theta=theta[iii], earg=earg, inverse=TRUE)",
+ sep="")
+ calls <- parse(text=string)[[1]]
+ ans <- c(ans, eval(calls))
+ }
+ }
+ return(ans)
+ } else
+ stop("length(link)==0 not allowed")
+}
+
+
+
+dtheta.deta <- function(theta, link, earg=list()) {
+
+ string <- paste(link, "(theta=theta, earg=earg, deriv=1)", sep="")
+ calls <- parse(text=string)[[1]]
+ eval(calls)
+}
+
+
+d2theta.deta2 <- function(theta, link, earg=list())
+{
+
+ string <- paste(link, "(theta=theta, earg=earg, deriv=2)", sep="")
+ calls <- parse(text=string)[[1]]
+ eval(calls)
+}
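+
+# Editor's sketch (not upstream code): each link is both a labelling
+# function (character input) and a numeric transform, and the helpers
+# above dispatch on the link's name.
+if(FALSE) {
+    namesof("prob", "logit", tag=TRUE)   # "Logit: logit(prob)"
+    eta = theta2eta(0.25, "logit")       # log(0.25/0.75)
+    eta2theta(eta, "logit")              # back to 0.25
+    dtheta.deta(0.25, "logit")           # 0.25 * 0.75
+}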
+
+
+
+
+
+.all.links = c("cloglog",
+ "fisherz", "fsqrt", "identity", "inverse",
+ "logc", "loge", "logit", "loglog",
+ "logoff", "nreciprocal", "nloge",
+ "powl", "probit", "reciprocal", "rhobit",
+ "golf", "polf", "nbolf")
+
+
+loglog <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("loglog(",theta,")", sep="") else
+ paste("log(log(",theta,"))", sep="")
+ if(tag)
+ string <- paste("Log-Log:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg))
+ theta[theta <= 1.0] <- earg$bval
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ exp(exp(theta))
+ }
+ } else {
+ switch(deriv+1, {
+ log(log(theta))},
+ theta * log(theta),
+ { junk <- log(theta)
+ theta * junk * (1 + junk)
+ },
+ stop("'deriv' unmatched"))
+ }
+}
+
+
+
+
+cloglog <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("cloglog(",theta,")", sep="") else
+ paste("log(-log(1-",theta,"))", sep="")
+ if(tag)
+ string <- paste("Complementary log-log:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg)) {
+ theta[theta <= 0.0] <- earg$bval
+ theta[theta >= 1.0] <- 1.0 - earg$bval
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ junk <- exp(theta)
+ 1 - exp(-junk)
+ }
+ } else {
+ switch(deriv+1, {
+ log(-log(1-theta))},
+ -(1-theta) * log(1-theta),
+ { junk <- log(1 - theta)
+ -(1-theta) * (1 + junk) * junk
+ },
+ stop("'deriv' unmatched"))
+ }
+}
+
+
+
+
+probit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("probit(",theta,")", sep="") else
+ paste("qnorm(", theta, ")", sep="")
+ if(tag)
+ string <- paste("Probit:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg)) {
+ theta[theta <= 0.0] <- earg$bval
+ theta[theta >= 1.0] <- 1-earg$bval
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ ans <- pnorm(theta)
+ if(is.matrix(theta))
+ dim(ans) <- dim(theta)
+ ans
+ }
+ } else {
+ switch(deriv+1,{
+ ans <- qnorm(theta)
+ if(is.matrix(theta))
+ dim(ans) <- dim(theta)
+ ans
+ },
+ {
+ if(is.matrix(theta)) {
+ ans <- dnorm(qnorm(theta))
+ dim(ans) <- dim(theta)
+ ans
+ } else dnorm(qnorm(as.vector(theta)))
+ },
+ {
+ junk <- qnorm(theta)
+ ans <- -junk * dnorm(junk)
+ if(is.vector(theta)) ans else
+ if(is.matrix(theta)) {
+ dim(ans) <- dim(theta)
+ ans
+ } else {
+ warning("can only handle vectors and matrices; converting to vector")
+ ans
+ }
+ })
+ }
+}
+
+
+
+
+
+
+
+
+loge <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("log(",theta,")", sep="") else
+ paste("log(", theta, ")", sep="")
+ if(tag)
+ string <- paste("Log:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg))
+ theta[theta <= 0.0] <- earg$bval
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ exp(theta)
+ }
+ } else {
+ switch(deriv+1, {
+ log(theta)},
+ theta,
+ theta)
+ }
+}
+
+
+
+
+identity <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- theta
+ if(tag)
+ string <- paste("Identity:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ theta
+ }
+ } else {
+ switch(deriv+1,
+ theta,
+ theta*0 + 1,
+ theta*0)
+ }
+}
+
+nidentity <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- paste("-", theta, sep="")
+ if(tag)
+ string <- paste("Negative-Identity:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ -theta
+ }
+ } else {
+ switch(deriv+1,
+ -theta,
+ theta*0 - 1,
+ theta*0)
+ }
+}
+
+
+reciprocal <- function(theta, earg=list(), inverse.arg=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- paste("1/",theta, sep="")
+ if(tag)
+ string <- paste("Reciprocal:", string)
+ return(string)
+ }
+ if(!inverse.arg && is.list(earg) && length(earg))
+ theta[theta == 0.0] <- earg$bval
+ if(inverse.arg) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse.arg=FALSE, deriv)
+ } else {
+ 1/theta
+ }
+ } else {
+ switch(deriv+1,{
+ 1/theta},
+ -theta^2,
+ 2*theta^3)
+ }
+}
+
+
+nloge <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("-log(",theta,")", sep="") else
+ paste("-log(", theta, ")", sep="")
+ if(tag)
+ string <- paste("Negative log:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg))
+ theta[theta <= 0.0] <- earg$bval
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ exp(-theta)
+ }
+ } else {
+ switch(deriv+1, {
+ -log(theta)},
+ -theta,
+ theta)
+ }
+}
+
+
+
+nreciprocal <- function(theta, earg=list(), inverse.arg=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- paste("-1/",theta, sep="")
+ if(tag)
+ string <- paste("Negative reciprocal:", string)
+ return(string)
+ }
+ if(!inverse.arg && is.list(earg) && length(earg))
+ theta[theta == 0.0] <- earg$bval
+ if(inverse.arg) {
+ if(deriv>0) {
+ 1 / nreciprocal(theta, earg=earg, inverse.arg=FALSE, deriv)
+ } else {
+ -1/theta
+ }
+ } else {
+ switch(deriv+1, {
+ -1/theta},
+ theta^2,
+ 2*theta^3)
+ }
+}
+
+
+natural.ig <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+
+ if(is.character(theta)) {
+ string <- paste("-1/",theta, sep="")
+ if(tag)
+ string <- paste("Negative inverse:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1 / nreciprocal(theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ 1/ sqrt(-2*theta)
+ }
+ } else {
+ switch(deriv+1,
+ -1/(2*theta^2),
+ theta^3,
+ 3*theta^5)
+ }
+}
+
+
+
+
+
+rhobit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("rhobit(",theta,")", sep="") else
+ paste("log((1+", theta, ")/(1-", theta, "))", sep="")
+ if(tag)
+ string <- paste("Rhobit:", string)
+ return(string)
+ }
+
+ if(!inverse && is.list(earg) && length(earg)) {
+ bminvalue = if(length(earg$bminval)) earg$bminval else NULL
+ bmaxvalue = if(length(earg$bmaxval)) earg$bmaxval else NULL
+ if(!inverse && length(bminvalue)) theta[theta <= -1.0] <- bminvalue
+ if(!inverse && length(bmaxvalue)) theta[theta >= 1.0] <- bmaxvalue
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ junk <- exp(theta)
+ (junk-1.0) / (junk+1.0)
+ }
+ } else {
+ switch(deriv+1,{
+ log((1+theta)/(1-theta))},
+ (1 - theta^2) / 2,
+ (1 - theta^2)^2 / (4*theta))
+ }
+}
+
+
+
+fisherz <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("fisherz(",theta,")", sep="") else
+ paste("(1/2)log((1+", theta, ")/(1-", theta, "))", sep="")
+ if(tag)
+ string <- paste("Fisher's Z transformation:", string)
+ return(string)
+ }
+
+ if(!inverse && is.list(earg) && length(earg)) {
+ bminvalue = if(length(earg$bminval)) earg$bminval else NULL
+ bmaxvalue = if(length(earg$bmaxval)) earg$bmaxval else NULL
+ if(!inverse && length(bminvalue)) theta[theta <= -1.0] <- bminvalue
+ if(!inverse && length(bmaxvalue)) theta[theta >= 1.0] <- bmaxvalue
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ junk <- exp(2*theta)
+ (junk-1.0)/(junk+1.0)
+ }
+ } else {
+ switch(deriv+1,
+ 0.5 * log((1.0+theta)/(1.0-theta)),
+ 1.0 - theta^2,
+ (1.0 - theta^2)^2 / (2*theta))
+ }
+}
+
+
+
+
+fsqrt <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("fsqrt(",theta,")", sep="") else
+ paste("sqrt(2*",theta,") - sqrt(2*(1-",theta,"))", sep="")
+ if(tag)
+ string <- paste("Folded Square Root:", string)
+ return(string)
+ }
+
+ if(!inverse && is.list(earg) && length(earg)) {
+ theta[theta <= 0.0] <- earg$bval
+ theta[theta >= 1.0] <- 1.0 - earg$bval
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv=deriv)
+ } else {
+ temp <- theta * sqrt(4-theta^2) / 4
+ ans <- 0.5 - temp
+ ans[ans<0] <- 0.5 + temp[ans<0]
+ ans[ans>1] <- 0.5 + temp[ans>1]
+ ans
+ }
+ } else {
+ switch(deriv+1,
+ sqrt(2*theta) - sqrt(2*(1-theta)),
+ 1/(1/sqrt(2*theta) + 1/sqrt(2*(1-theta))),
+ -sqrt(8) / (theta^(-3/2) - (1-theta)^(-3/2)))
+ }
+}
+
+
+
+powl <- function(theta, earg=list(power=1), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+
+ if(!length(earg) || is.list(earg)) {
+ exponent = if(length(earg$power)) earg$power else 1
+ if(exponent == 0)
+ stop("use the 'loge' link")
+ } else {
+ stop("'earg' must be a list or NULL")
+ }
+
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("powl(",theta,",", as.character(exponent), ")", sep="") else
+ paste(theta, "^(", as.character(exponent), ")", sep="")
+ if(tag)
+ string <- paste("Power:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ theta^(1/exponent)
+ }
+ } else {
+ switch(deriv+1,
+ {
+ theta^exponent
+ },
+ {
+ (theta^(1-exponent)) / exponent
+ },
+ {
+ (theta^(2-exponent)) / (exponent * (exponent-1))
+ })
+ }
+}
+
+
+elogit <- function(theta, earg=list(min=0, max=1), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(!length(earg) || is.list(earg)) {
+ A = if(length(earg$min)) earg$min else 0
+ B = if(length(earg$max)) earg$max else 1
+ bminvalue = if(length(earg$bminval)) earg$bminval else NULL
+ bmaxvalue = if(length(earg$bmaxval)) earg$bmaxval else NULL
+ if(!inverse && length(bminvalue)) theta[theta <= A] <- bminvalue
+ if(!inverse && length(bmaxvalue)) theta[theta >= B] <- bmaxvalue
+ } else {
+ stop("'earg' must be a list or NULL")
+ }
+ if(is.character(theta)) {
+ string <- if(short) {
+ if(A != 0 || B != 1)
+ paste("elogit(",theta,", earg=list(min=",A,
+ ", max=",B,"))",sep="") else
+ paste("elogit(",theta,")",sep="")
+ } else
+ paste("log((",theta,"-min)/(max-",theta,"))", sep="")
+ if(tag)
+ string <- paste("Extended logit:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ junk <- care.exp(theta)
+ (A + B*junk) / (1 + junk)
+ }
+ } else {
+ switch(deriv+1, {
+ log((theta-A)/(B-theta))},
+ (theta-A) * (B - theta) / (B-A),
+ (theta-A) * (B - theta) * (B - 2 * theta + A) / (B-A)^2)
+ }
+}
+
+
+
+logit <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("logit(",theta,")", sep="") else
+ paste("log(",theta,"/(1-",theta,"))", sep="")
+ if(tag)
+ string <- paste("Logit:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg)) {
+ theta[theta <= 0.0] <- earg$bval;
+ theta[theta >= 1.0] <- 1.0 - earg$bval;
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ eta <- care.exp(theta)
+ eta / (1 + eta)
+ }
+ } else {
+ switch(deriv+1, {
+ log(theta/(1-theta))},
+ theta * (1 - theta),
+ theta * (1 - theta) * (1 - 2 * theta))
+ }
+}
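+
+# Editor's note (not upstream): the deriv argument follows the file-wide
+# convention. With inverse=FALSE, deriv=1 returns d theta / d eta written
+# as a function of theta, and deriv=2 returns d^2 theta / d eta^2;
+# inverse=TRUE with deriv > 0 returns the reciprocal.
+if(FALSE) {
+    p = 0.3
+    logit(p, deriv=1)                  # p*(1-p)
+    logit(p, deriv=2)                  # p*(1-p)*(1-2*p)
+    logit(p, deriv=1, inverse=TRUE)    # 1/(p*(1-p)) = d eta / d theta
+}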
+
+
+logc <- function(theta, earg=list(), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("logc(",theta,")", sep="") else
+ paste("log(1-",theta,")", sep="")
+ if(tag)
+ string <- paste("Log Complementary:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg)) {
+ theta[theta >= 1.0] <- earg$bval;
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ 1 - exp(theta)
+ }
+ } else {
+ switch(deriv+1,{
+ log(1-theta)},
+ -(1 - theta),
+ -(1 - theta)^2)
+ }
+}
+
+
+
+logoff <- function(theta, earg=list(offset=0), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(!length(earg) || is.list(earg)) {
+ offset = if(length(earg$offset)) earg$offset else 0
+ } else {
+ stop("'earg' must be a list or NULL")
+ }
+
+ if(!is.Numeric(offset))
+ stop("bad input for argument 'earg'")
+
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("logoff(",theta,", ",as.character(offset),")", sep="") else
+ paste("log(", as.character(offset), "+", theta, ")", sep="")
+ if(tag)
+ string <- paste("Log with offset:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv=deriv)
+ } else {
+ exp(theta) - offset
+ }
+ } else {
+ switch(deriv+1,
+ log(theta+offset),
+ theta + offset,
+ theta + offset)
+ }
+}
+
+
+if(FALSE)
+nlogoff <- function(theta, earg=0, inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ offset = earg
+ if(!is.Numeric(offset))
+ stop("bad input for argument earg")
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("nlogoff(",theta,",",as.character(offset),")", sep="") else
+ paste("log(", as.character(offset), "-", theta, ")", sep="")
+ if(tag)
+ string <- paste("Negative-log with offset:", string)
+ return(string)
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv=deriv)
+ } else {
+ offset - exp(theta)
+ }
+ } else {
+ switch(deriv+1,
+ log(-theta+offset),
+ theta - offset,
+ theta - offset)
+ }
+}
+
+
+
+cauchit <- function(theta, earg=list(bvalue= .Machine$double.eps),
+ inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("cauchit(",theta,")", sep="") else
+ paste("tan(pi*(",theta,"-0.5))", sep="")
+ if(tag)
+ string <- paste("Cauchit:", string)
+ return(string)
+ }
+ if(!inverse && is.list(earg) && length(earg)) {
+ theta[theta <= 0.0] <- earg$bval
+ theta[theta >= 1.0] <- 1.0 - earg$bval
+ }
+ if(inverse) {
+ if(deriv>0) {
+ 1/Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ 0.5 + atan(theta)/pi
+ }
+ } else {
+ switch(deriv+1, {
+ tan(pi * (theta-0.5))},
+ cos(pi * (theta-0.5))^2 / pi,
+ -sin(2 * pi * (theta-0.5)))
+ }
+}
+
+
+
+golf <- function(theta, earg=list(lambda=1), inverse=FALSE, deriv=0,
+ short=TRUE, tag=FALSE)
+{
+
+
+ cutpoint = lambda = NULL
+ if(!length(earg)) {
+ lambda = 1
+ cutpoint = NULL
+ } else if(is.list(earg)) {
+ lambda = earg$lambda
+ cutpoint = earg$cutpoint # Optional; NULL if not supplied
+ } else
+ stop("'earg' must be a list")
+ if(!is.Numeric(lambda, posit=TRUE))
+ stop('could not determine lambda, or lambda has non-positive values')
+
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("golf(",theta,")", sep="") else {
+ if(is.Numeric(cutpoint)) {
+ paste("-3*log(1-qnorm(",theta,")/(3*sqrt(lambda)))",
+ " + log(cutpoint)", sep="")
+ } else {
+ paste("-3*log(1-qnorm(",theta,")/(3*sqrt(lambda)))", sep="")
+ }
+ }
+ if(tag)
+ string <- paste("Gamma-ordinal link function:", string)
+ return(string)
+ }
+
+ thmat = cbind(theta)
+ lambda = rep(lambda, len=ncol(thmat)) # Allow recycling for lambda
+ if(is.Numeric(cutpoint)) cutpoint = rep(cutpoint, len=ncol(thmat))
+ if(length(lambda) != ncol(thmat))
+ stop(paste("'lambda' should be of length", ncol(thmat)))
+ if(ncol(thmat) > 1) {
+ answer = thmat
+ for(ii in 1:ncol(thmat))
+ answer[,ii] = Recall(theta=thmat[,ii],
+ earg=list(lambda=lambda[ii],
+ cutpoint=if(is.Numeric(cutpoint)) cutpoint[ii] else NULL),
+ inverse=inverse, deriv=deriv)
+ return(answer)
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ if(is.Numeric(cutpoint)) {
+ pnorm((1-care.exp(-(theta-log(cutpoint))/3)) * 3 * sqrt(lambda))
+ } else {
+ pnorm((1-care.exp(-theta/3)) * 3 * sqrt(lambda))
+ }
+ }
+ } else {
+ smallno = 1 * .Machine$double.eps
+ Theta = theta
+ Ql = qnorm(Theta)
+ switch(deriv+1, {
+ temp = Ql / (3*sqrt(lambda))
+ temp = pmin(temp, 1.0 - smallno) # 100 / .Machine$double.eps
+ -3*log(1-temp) + if(is.Numeric(cutpoint)) log(cutpoint) else 0},
+ (1 - Ql / (3*sqrt(lambda))) * sqrt(lambda) * dnorm(Ql),
+ { stop('cannot handle deriv=2') },
+ stop("'deriv' unmatched"))
+ }
+}
+
+
+polf <- function(theta, earg=stop("'earg' must be given"),
+ inverse=FALSE, deriv=0, short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("polf(",theta,")", sep="") else
+ paste("2*log(0.5*qnorm(",theta,") + sqrt(cutpoint+7/8))", sep="")
+ if(tag)
+ string <- paste("Poisson-ordinal link function:", string)
+ return(string)
+ }
+
+ cutpoint = NULL
+ if(is.Numeric(earg)) cutpoint = earg
+ if(is.list(earg)) cutpoint = earg$cutpoint
+ if(!is.Numeric(cutpoint))
+ stop('could not determine the cutpoint')
+
+ thmat = cbind(theta)
+ if(ncol(thmat) > 1) {
+ answer = thmat
+ cutpoint = rep(cutpoint, len=ncol(thmat)) # Reqd for the for loop
+ for(ii in 1:ncol(thmat))
+ answer[,ii] = Recall(theta=thmat[,ii], earg=cutpoint[ii],
+ inverse=inverse, deriv=deriv)
+ return(answer)
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ pnorm(2 * care.exp(theta/2) - 2 * sqrt(cutpoint + 7/8))
+ }
+ } else {
+ smallno = 1 * .Machine$double.eps
+ SMALLNO = 1 * .Machine$double.xmin
+ Theta = theta
+ Ql = qnorm(Theta)
+ switch(deriv+1, {
+ temp = 0.5 * Ql + sqrt(cutpoint + 7/8)
+ temp = pmax(temp, SMALLNO)
+ 2 * log(temp)},
+ (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql),
+ { stop('cannot handle deriv=2') },
+ stop("'deriv' unmatched"))
+ }
+}
+
+
+nbolf <- function(theta, earg=stop("'earg' must be given"),
+ inverse=FALSE, deriv=0, short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("nbolf(",theta,")", sep="") else
+ paste("2*log(sqrt(k) * sinh(qnorm(",theta,")/(2*sqrt(k)) + ",
+ "asinh(sqrt(cutpoint/k))))", sep="")
+ if(tag)
+ string <- paste("Negative binomial-ordinal link function:", string)
+ return(string)
+ }
+
+ cutpoint = kay = NULL
+ if(is.list(earg)) {
+ cutpoint = earg$cutpoint
+ kay = earg$k
+ }
+ if(!is.Numeric(cutpoint))
+ stop("could not determine the cutpoint")
+ if(!is.Numeric(kay))
+ stop("could not determine 'k'")
+
+ thmat = cbind(theta)
+ kay = rep(kay, len=ncol(thmat)) # Allow recycling for kay
+ if(length(cutpoint) != ncol(thmat))
+ stop(paste("'cutpoint' should be of length", ncol(thmat)))
+ if(ncol(thmat) > 1) {
+ answer = thmat
+ for(ii in 1:ncol(thmat))
+ answer[,ii] = Recall(theta=thmat[,ii],
+ earg=list(cutpoint=cutpoint[ii], k=kay[ii]),
+ inverse=inverse, deriv=deriv)
+ return(answer)
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+ pnorm((asinh(care.exp(theta/2)/sqrt(kay)) -
+ asinh(sqrt(cutpoint/kay))) * 2 * sqrt(kay))
+ }
+ } else {
+ smallno = 1 * .Machine$double.eps
+ SMALLNO = 1 * .Machine$double.xmin
+ Theta = theta
+ Ql = qnorm(Theta)
+ switch(deriv+1, {
+ temp = sqrt(kay) * sinh(Ql/(2*sqrt(kay)) +
+ asinh(sqrt(cutpoint/kay)))
+ temp = pmax(temp, SMALLNO)
+ 2 * log(temp)}, {
+ arg1 = (Ql/(2*sqrt(kay)) + asinh(sqrt(cutpoint/kay)))
+ sqrt(kay) * tanh(arg1) * dnorm(Ql) },
+ { stop('cannot handle deriv=2') },
+ stop("'deriv' unmatched"))
+ }
+}
+
+
+
+
+
+nbolf2 <- function(theta, earg=stop("'earg' must be given"),
+ inverse=FALSE, deriv=0, short=TRUE, tag=FALSE)
+{
+ if(is.character(theta)) {
+ string <- if(short)
+ paste("nbolf2(",theta,")", sep="") else
+ paste("3*log(<a complicated expression>)", sep="")
+ if(tag)
+ string = paste("Negative binomial-ordinal link function 2:", string)
+ return(string)
+ }
+
+ cutpoint = kay = NULL
+ if(is.list(earg)) {
+ cutpoint = earg$cutpoint
+ kay = earg$k
+ }
+ if(!is.Numeric(cutpoint))
+ stop("could not determine the cutpoint")
+ if(!is.Numeric(kay))
+ stop("could not determine 'k'")
+
+ thmat = cbind(theta)
+ kay = rep(kay, len=ncol(thmat)) # Allow recycling for kay
+ if(length(cutpoint) != ncol(thmat))
+ stop(paste("'cutpoint' should be of length", ncol(thmat)))
+ if(ncol(thmat) > 1) {
+ answer = thmat
+ for(ii in 1:ncol(thmat))
+ answer[,ii] = Recall(theta=thmat[,ii],
+ earg=list(cutpoint=cutpoint[ii], k=kay[ii]),
+ inverse=inverse, deriv=deriv)
+ return(answer)
+ }
+
+ if(inverse) {
+ if(deriv>0) {
+ 1 / Recall(theta=theta, earg=earg, inverse=FALSE, deriv)
+ } else {
+
+ a1 = -(9*cutpoint+8) / (cutpoint+1)
+ a2 = (9*kay-1) / (kay * (cutpoint+1)^(1/3))
+ a3 = 9 / (kay * (cutpoint+1)^(2/3))
+ a4 = 9 / (cutpoint+1)
+ B = care.exp(theta/3)
+ mymat = rbind(a1^2*a2^2 + 2*a1*a2^3*B + B^2*a2^4, 0,
+ -2*a1*a2*a3*B - 2*a2^2*a3*B^2 - a1^2*a3 - a2^2*a4, 0,
+ B^2 * a3^2 + a3 * a4)
+ ans = Re(t(apply(mymat, 2, polyroot)))
+ theta2 = invfun = pnorm(-ans) # pnorm(-x) = 1-pnorm(x)
+ for(ii in 1:4) {
+ theta2[,ii] = Recall(theta=theta2[,ii],
+ earg=list(cutpoint=cutpoint, k=kay),
+ inverse=FALSE, deriv=deriv)
+ }
+ rankmat = t(apply(abs(theta2 - theta), 1, rank))
+ for(ii in 2:4) {
+ if(any(index4 <- (rankmat[,ii] == 1))) {
+ invfun[index4,1] = invfun[index4,ii]
+ }
+ }
+ invfun[,1]
+ }
+ } else {
+ smallno = 1 * .Machine$double.eps
+ SMALLNO = 1 * .Machine$double.xmin
+ Theta = theta
+ Ql = qnorm(Theta)
+ a1 = -(9*cutpoint+8) / (cutpoint+1)
+ a2 = (9*kay-1) / (kay * (cutpoint+1)^(1/3))
+ a3 = 9 / (kay * (cutpoint+1)^(2/3))
+ a4 = 9 / (cutpoint+1)
+ discrim = a1^2 * a3 + a2^2 * a4 - Ql^2 * a3 * a4
+ denomin = Ql^2 * a3 - a2^2
+ numerat = (a1*a2 - Ql * sqrt(discrim))
+ argmax1 = numerat / denomin
+ switch(deriv+1, {
+ argmax2 = (a1*a2 + Ql * sqrt(discrim)) / denomin
+ temp = ifelse(argmax1 > 0, argmax1, argmax2)
+ temp = pmax(temp, SMALLNO)
+ 3 * log(temp)}, {
+ BB = (sqrt(discrim) - Ql^2 * a3 * a4 / sqrt(discrim)) / dnorm(Ql)
+ CC = 2 * Ql * a3 / dnorm(Ql)
+ dA.dtheta = (-denomin * BB - numerat * CC) / denomin^2
+ argmax1 / (3 * dA.dtheta)
+ },
+ { stop('cannot handle deriv=2') },
+ stop("'deriv' unmatched"))
+ }
+}
+
+
+
+Cut = function(y, breaks=c(-Inf, quantile(c(y), prob = (1:4)/4))) {
+ y = as.matrix(y)
+
+
+ temp = cut(y, breaks=breaks, labels=FALSE)
+ temp = c(temp) # integer vector of integers
+ if(any(is.na(temp))) stop("there are NAs")
+ answer = if(ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
+ if(ncol(y) > 1) {
+ ynames = dimnames(y)[[2]]
+ if(!length(ynames)) ynames = paste("Y", 1:ncol(y), sep="")
+ xnames = dimnames(y)[[1]]
+ if(!length(xnames)) xnames = as.character(1:nrow(y))
+ dimnames(answer) = list(xnames, ynames)
+ }
+ attr(answer, "breaks") = breaks
+ answer
+}
+
+
+checkCut = function(y) {
+ if(!is.Numeric(y, posi=TRUE, integ=TRUE))
+ stop("'y' must contain positive integers only")
+ uy = unique(y)
+ L = max(uy)
+ oklevels = 1:L
+ if(L == 1) stop("only one unique value")
+ for(ii in oklevels) {
+ if(all(ii != uy)) stop(paste("there is no", ii, "value"))
+ }
+ TRUE
+}
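+
+# Editor's sketch (not upstream code): Cut() recodes a numeric response
+# into ordinal levels 1, 2, ... (quartiles by default), and checkCut()
+# verifies that every level between 1 and the maximum is present.
+if(FALSE) {
+    set.seed(3)
+    yy = rnorm(20)
+    table(Cut(yy))               # roughly 5 observations per quartile
+    attr(Cut(yy), "breaks")
+    checkCut(Cut(yy))            # TRUE
+}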
+
+
+
+
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
new file mode 100644
index 0000000..52628f7
--- /dev/null
+++ b/R/logLik.vlm.q
@@ -0,0 +1,32 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+logLik.vlm <- function(object, ...)
+ object@criterion$loglikelihood
+
+if(!isGeneric("logLik"))
+ setGeneric("logLik", function(object, ...) standardGeneric("logLik"))
+
+setMethod("logLik", "vlm", function(object, ...)
+ logLik.vlm(object, ...))
+
+
+
+
+if(TRUE) {
+constraints.vlm <- function(object, all=TRUE, which, ...)
+ if(all) slot(object, "constraints") else
+ slot(object, "constraints")[[which]]
+
+if(!isGeneric("constraints"))
+setGeneric("constraints", function(object, ...) standardGeneric("constraints"))
+setMethod("constraints", "vlm", function(object, ...)
+ constraints.vlm(object, ...))
+
+}
+
+
+
+
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
new file mode 100644
index 0000000..cbe7f9c
--- /dev/null
+++ b/R/model.matrix.vglm.q
@@ -0,0 +1,346 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+vlabel <- function(xn, ncolBlist, M, separator=":") {
+
+ if(length(xn) != length(ncolBlist))
+ stop("length of first two arguments not equal")
+
+ n1 <- rep(xn, ncolBlist)
+ if(M==1)
+ return(n1)
+ n2 <- as.list(ncolBlist)
+ n2 <- lapply(n2, seq)
+ n2 <- unlist(n2)
+ n2 <- as.character(n2)
+ n2 <- paste(separator, n2, sep="")
+ n3 <- rep(ncolBlist, ncolBlist)
+ n2[n3==1] <- ""
+ n1n2 <- paste(n1, n2, sep="")
+ n1n2
+}
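+
+# Editor's sketch (not upstream code): vlabel() expands each term name
+# into one label per constraint-matrix column, with the ":j" suffix
+# suppressed for single-column terms.
+if(FALSE) {
+    vlabel(c("(Intercept)", "x2"), ncolBlist=c(3, 1), M=3)
+    # "(Intercept):1" "(Intercept):2" "(Intercept):3" "x2"
+}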
+
+
+lm2vlm.model.matrix <- function(x, Blist=NULL, assign.attributes=TRUE,
+ M=NULL, xij=NULL, Aarray=NULL, Aindex=NULL)
+{
+
+
+
+ if(length(Blist) != ncol(x))
+ stop("length(Blist) != ncol(x)")
+
+ if(length(xij)) {
+ if(inherits(xij, "formula"))
+ xij = list(xij)
+ if(!is.list(xij))
+ stop("xij is not a list of formulae")
+ }
+
+ if(!is.numeric(M))
+ M <- nrow(Blist[[1]])
+
+ if(length(xij)) {
+ Blist.NAed = Blist
+ atx = attr(x, "assign")
+ for(i in 1:length(xij)) {
+ form = xij[[i]]
+ if(length(form) != 3)
+ stop(paste("xij[[", i, "]] is not a formula with a response"))
+ tform = terms(form)
+ atform = attr(tform, "term.labels") # doesn't include response
+ if(length(atform) != M) {
+ stop(paste("xij[[", i, "]] does not contain", M, " terms"))
+ }
+ for(k in 1:length(atform)) {
+ for(s in atx[[(atform[[k]])]]) {
+ if(length(Blist[[s]])) {
+ Blist[[s]] = ei(k, M) # Easy for later
+ Blist.NAed[[s]] = Blist[[s]] * NA # NA'ed
+ }
+ }
+ }
+ }
+ }
+ n <- nrow(x)
+ if(all(trivial.constraints(Blist)) && !length(Aarray)) {
+ xbig <- if(M > 1) kronecker(x, diag(M)) else x
+ ncolBlist <- rep(M, ncol(x))
+ } else {
+ allB <- matrix(unlist(Blist), nrow=M)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ R <- sum(ncolBlist)
+
+ X1 <- rep(c(t(x)), rep(ncolBlist,n))
+ dim(X1) <- c(R, n)
+ BB <- kronecker(matrix(1,n,1), allB)
+ if(length(Aarray)) {
+ tmp34 = aperm(Aarray, c(1,3,2)) # c(M,n,r)
+ for(ii in 1:length(Aindex))
+ BB[,Aindex[[ii]]] = c(tmp34)
+ }
+ xbig <- kronecker(t(X1), matrix(1,M,1)) * BB
+ }
+
+ dn <- labels(x)
+ yn <- dn[[1]]
+ xn <- dn[[2]]
+ dimnames(xbig) <- list(vlabel(yn, rep(M, n), M),
+ vlabel(xn, ncolBlist, M))
+
+ if(assign.attributes) {
+
+ attr(xbig, "contrasts") <- attr(x, "contrasts")
+ attr(xbig, "factors") <- attr(x, "factors")
+ attr(xbig, "formula") <- attr(x, "formula")
+ attr(xbig, "class") <- attr(x, "class")
+ attr(xbig, "order") <- attr(x, "order")
+ attr(xbig, "term.labels") <- attr(x, "term.labels")
+
+
+ nasgn <- oasgn <- attr(x, "assign")
+ low <- 0
+ for(i in 1:length(oasgn)) {
+ len <- length(oasgn[[i]]) * ncolBlist[oasgn[[i]][1]]
+ nasgn[[i]] <- (low+1):(low+len)
+ low = low + len
+ }
+ if(low != ncol(xbig))
+ stop("something gone wrong")
+ attr(xbig, "assign") <- nasgn
+
+
+ fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
+ vasgn <- vector("list", sum(fred))
+ k <- 0
+ for(i in 1:length(oasgn)) {
+ temp <- matrix(nasgn[[i]], ncol=length(oasgn[[i]]))
+ for(j in 1:nrow(temp)) {
+ k <- k + 1
+ vasgn[[k]] <- temp[j,]
+ }
+ }
+ names(vasgn) <- vlabel(names(oasgn), fred, M)
+ attr(xbig, "vassign") <- vasgn
+
+
+ attr(xbig, "constraints") <- Blist
+ }
+
+
+ xasgn <- attr(x, "assign")
+
+ if(length(xij)) {
+ rm.col.index = NULL # Remove these columns from xbig
+ for(i in 1:length(xij)) {
+ form = xij[[i]] # deparse(form1[[3]])
+ tform = terms(form)
+ atform = attr(tform, "term.labels") # doesn't include response
+ response.name = (dimnames(attr(tform, "factors"))[[1]])[1]
+
+ ptr0 = NULL
+ for(s in 1:M)
+ if(length(nasgn[[atform[s]]])) {
+ ptr0 = s
+ break
+ }
+ if(!is.numeric(ptr0)) stop("no destination column indices")
+ dest.col.index = nasgn[[atform[ptr0]]]
+
+ if(M > 1)
+ for(k in ((1:M)[-ptr0])) {
+ from.col.index = nasgn[[atform[k]]] # May be NULL
+ if(length(from.col.index)) {
+ xbig[,dest.col.index] = xbig[,dest.col.index] +
+ xbig[,from.col.index]
+
+ rm.col.index = c(rm.col.index, from.col.index)
+
+ vasgn[[atform[k]]] = NULL # Delete it
+ }
+ }
+
+ d2 = dimnames(xbig)[[2]]
+ d2[dest.col.index] = vlabel(response.name,
+ length(dest.col.index), M=M, separator="")
+ dimnames(xbig) = list(dimnames(xbig)[[1]], d2)
+
+ ptr = (1:length(names(vasgn)))[(names(vasgn)==atform[[ptr0]])]
+ names(vasgn)[ptr] = response.name
+
+ }
+
+ if(length(rm.col.index))
+ xbig = xbig[,-rm.col.index,drop=FALSE] # Delete the columns in 1 go
+
+ if(assign.attributes) {
+ attr(xbig, "constraints") <- Blist.NAed # Not quite right
+ attr(xbig, "vassign") <- vasgn
+ attr(xbig, "assign") <- nasgn
+ attr(xbig, "xij") <- xij
+ }
+
+ }
+
+
+ xbig
+}
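+
+# Editor's sketch (not upstream code): with trivial constraints (each
+# Blist element equal to diag(M)) the big model matrix reduces to
+# kronecker(x, diag(M)), one row per (observation, linear predictor).
+if(FALSE) {
+    x = cbind("(Intercept)" = 1, x2 = c(0.5, 1.5))       # n = 2
+    Blist = list("(Intercept)" = diag(2), x2 = diag(2))  # M = 2
+    xbig = lm2vlm.model.matrix(x, Blist=Blist, M=2)
+    dim(xbig)    # 4 x 4, i.e. (n*M) rows
+}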
+
+
+model.matrixvlm = function(object, type=c("vlm","lm"), ...) {
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("vlm","lm"))[1]
+
+ x <- slot(object, "x")
+ if(!length(x)) {
+ data = model.frame(object, xlev=object@xlevels, ...)
+
+ kill.con = if(length(object@contrasts)) object@contrasts else NULL
+
+ x <- vmodel.matrix.default(object, data=data,
+ contrasts.arg = kill.con)
+ if(is.R()) {
+
+if(TRUE) {
+ attrassigndefault <- function(mmat, tt) {
+ if (!inherits(tt, "terms"))
+ stop("need terms object")
+ aa <- attr(mmat, "assign")
+ if (is.null(aa))
+ stop("argument is not really a model matrix")
+ ll <- attr(tt, "term.labels")
+ if (attr(tt, "intercept") > 0)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
+ split(order(aa), aaa)
+ }
+}
+ tt = terms(object)
+ attr(x, "assign") <- attrassigndefault(x, tt)
+ }
+ }
+
+
+
+ if(type == "lm") {
+ return(x)
+ } else {
+ M <- object@misc$M
+ Blist <- object@constraints # Is NULL if there were no constraints?
+ lm2vlm.model.matrix(x=x, Blist=Blist, xij=object@control$xij)
+ }
+}
+
+
+
+
+setMethod("model.matrix", "vlm", function(object, ...)
+ model.matrixvlm(object, ...))
+
+
+
+
+
+ if(is.R()) {
+
+model.framevlm = function(object, ...) {
+
+ dots <- list(...)
+ nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0)]
+ if(length(nargs) || !length(object@model)) {
+ fcall <- object@call
+ fcall$method <- "model.frame"
+ fcall[[1]] <- as.name("vlm")
+
+ fcall$smart <- FALSE
+ if(length(object@smart.prediction)) {
+ setup.smart("read", smart.prediction=object@smart.prediction)
+ }
+
+ fcall[names(nargs)] <- nargs
+ env <- environment(object@terms$terms) # @terms or @terms$terms ??
+ if (is.null(env))
+ env <- parent.frame()
+ ans = eval(fcall, env, parent.frame())
+
+ if(length(object@smart.prediction)) {
+ wrapup.smart()
+ }
+
+ ans
+ } else object@model
+}
+
+
+if(!isGeneric("model.frame"))
+ setGeneric("model.frame", function(formula, ...)
+ standardGeneric("model.frame"))
+
+setMethod("model.frame", "vlm", function(formula, ...)
+ model.framevlm(object=formula, ...))
+
+}
+
+
+
+
+vmodel.matrix.default = function (object, data = environment(object),
+ contrasts.arg = NULL, xlev = NULL, ...) {
+
+ t <- terms(object)
+ if (is.null(attr(data, "terms")))
+ data <- model.frame(object, data, xlev = xlev) else
+ {
+ reorder <- match(sapply(attr(t, "variables"), deparse,
+ width.cutoff = 500)[-1], names(data))
+ if (any(is.na(reorder)))
+ stop("model frame and formula mismatch in model.matrix()")
+ data <- data[, reorder, drop = FALSE]
+ }
+ int <- attr(t, "response")
+ if (length(data)) {
+ contr.funs <- as.character(getOption("contrasts"))
+ isF <- sapply(data, function(x) is.factor(x) || is.logical(x))
+ isF[int] <- FALSE
+ isOF <- sapply(data, is.ordered)
+ namD <- names(data)
+ for (nn in namD[isF]) if (is.null(attr(data[[nn]], "contrasts")))
+ contrasts(data[[nn]]) <- contr.funs[1 + isOF[nn]]
+ if (!is.null(contrasts.arg) && is.list(contrasts.arg)) {
+ if (is.null(namC <- names(contrasts.arg)))
+ stop("invalid contrasts argument")
+ for (nn in namC) {
+ if (is.na(ni <- match(nn, namD)))
+ warning(paste("Variable", nn,
+ "absent, contrast ignored")) else {
+ ca <- contrasts.arg[[nn]]
+ if (is.matrix(ca))
+ contrasts(data[[ni]], ncol(ca)) <- ca else
+ contrasts(data[[ni]]) <- contrasts.arg[[nn]]
+ }
+ }
+ }
+ } else {
+ isF <- FALSE
+ data <- list(x = rep(0, nrow(data)))
+ }
+ ans <- .Internal(model.matrix(t, data))
+ cons <- if (any(isF))
+ lapply(data[isF], function(x) attr(x, "contrasts")) else NULL
+ attr(ans, "contrasts") <- cons
+ ans
+}
+
+
+
+
+
+
+
+
diff --git a/R/mux.q b/R/mux.q
new file mode 100644
index 0000000..96db8c7
--- /dev/null
+++ b/R/mux.q
@@ -0,0 +1,349 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+mux2 <- function(cc, xmat)
+{
+
+
+ if(!is.matrix(xmat))
+ xmat <- as.matrix(xmat)
+ d <- dim(xmat)
+ n <- d[1]
+ p <- d[2]
+ if(is.matrix(cc))
+ cc <- array(cc, c(dim(cc),n))
+ d <- dim(cc)
+ M <- d[1]
+ if(d[2] != p || d[3] != n)
+ stop("dimension size inconformable")
+ ans <- rep(as.numeric(NA), n*M)
+ fred <- dotC(name="mux2", as.double(cc), as.double(t(xmat)),
+ ans=as.double(ans), as.integer(p), as.integer(n),
+ as.integer(M), NAOK=TRUE)
+ matrix(fred$ans,n,M,byrow=TRUE)
+}
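+
+# Editor's note (not upstream): for each observation i the compiled code
+# forms ans[i,] = cc[,,i] %*% xmat[i,]. A pure-R cross-check:
+if(FALSE) {
+    n = 5; M = 2; p = 3
+    cc = array(rnorm(M*p*n), c(M, p, n))
+    xmat = matrix(rnorm(n*p), n, p)
+    ans = t(sapply(1:n, function(i) cc[,,i] %*% xmat[i,]))
+    stopifnot(all.equal(ans, mux2(cc, xmat)))
+}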
+
+
+mux22 <- function(cc, xmat, M, upper=FALSE, as.matrix=FALSE)
+{
+
+ n <- ncol(cc)
+
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ dimm.value <- nrow(cc) # Usually M or M(M+1)/2
+
+ ans <- rep(as.numeric(NA), n*M)
+ fred <- dotC(name="mux22", as.double(cc), as.double(t(xmat)),
+ ans=as.double(ans), as.integer(dimm.value),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(n), as.integer(M), wk=double(M*M),
+ as.integer(as.numeric(upper)), NAOK=TRUE)
+ if(!as.matrix) fred$ans else {
+ dim(fred$ans) <- c(M, n)
+ t(fred$ans)
+ }
+}
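+
+# Note (illustrative, not part of VGAM itself): mux22() is the half-storage
+# analogue of mux2(). Each symmetric M x M matrix is stored as one column
+# of cc, of length M (diagonal only) or M*(M+1)/2, and
+# iam(NA, NA, M, both=TRUE, diag=TRUE) supplies the (row, col) position of
+# each stored element.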
+
+
+
+mux5 <- function(cc, x, M, matrix.arg=FALSE)
+{
+
+
+
+ dimx <- dim(x)
+ dimcc <- dim(cc)
+ r <- dimx[2]
+
+ if(matrix.arg) {
+ n <- dimcc[1]
+ neltscci <- ncol(cc)
+ cc <- t(cc)
+ } else {
+ n <- dimcc[3]
+ if(dimcc[1]!=dimcc[2] || dimx[1]!=dimcc[1] ||
+ (length(dimx)==3 && dimx[3]!=dimcc[3]))
+ stop('input nonconformable')
+ neltscci <- M*(M+1)/2
+ }
+
+ if(is.matrix(x))
+ x <- array(x,c(M,r,n))
+ index.M <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ index.r <- iam(NA, NA, r, both=TRUE, diag=TRUE)
+
+ size <- if(matrix.arg) dimm(r)*n else r*r*n
+ fred <- dotC(name="mux5", as.double(cc), as.double(x), ans=double(size),
+ as.integer(M), as.integer(n), as.integer(r),
+ as.integer(neltscci),
+ as.integer(dimm(r)),
+ as.integer(as.numeric(matrix.arg)),
+ double(M*M), double(r*r),
+ as.integer(index.M$row), as.integer(index.M$col),
+ as.integer(index.r$row), as.integer(index.r$col),
+ ok3=as.integer(1), NAOK=TRUE)
+ if(fred$ok3 == 0) stop("can only handle matrix.arg == 1")
+
+
+ if(matrix.arg) {
+ ans <- fred$ans
+ dim(ans) <- c(dimm(r), n)
+ t(ans)
+ } else {
+ array(fred$ans, c(r,r,n))
+ }
+}
+
+
+mux55 <- function(evects, evals, M)
+{
+
+ d <- dim(evects)
+ n <- ncol(evals)
+ if(d[1]!=M || d[2]!=M || d[3]!=n || nrow(evals)!=M || ncol(evals)!=n)
+ stop("input nonconformable")
+ MM12 <- M*(M+1)/2 # The answer is returned in half-storage
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+
+ fred <- dotC(name="mux55", as.double(evects), as.double(evals),
+ ans=double(MM12 * n),
+ double(M*M), double(M*M),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(M), as.integer(n), NAOK=TRUE)
+ dim(fred$ans) <- c(MM12, n)
+ fred$ans
+}
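+
+# Note (illustrative, not part of VGAM itself): mux55() appears to rebuild,
+# for each i, the matrix evects[,,i] %*% diag(evals[,i]) %*% t(evects[,,i])
+# and return it in half-storage, one column of length M*(M+1)/2 per matrix;
+# vchol.greenstadt() below relies on exactly this.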
+
+
+mux7 <- function(cc, x)
+{
+
+ dimx <- dim(x)
+ dimcc <- dim(cc)
+ if(dimx[1]!=dimcc[2] || (length(dimx)==3 && dimx[3]!=dimcc[3]))
+ stop('input nonconformable')
+ M <- dimcc[1]
+ qq <- dimcc[2]
+ n <- dimcc[3]
+ r <- dimx[2]
+ if(is.matrix(x))
+ x <- array(x,c(qq,r,n))
+
+ ans <- array(NA, c(M,r,n))
+ fred <- dotC(name="mux7", as.double(cc), as.double(x), ans=as.double(ans),
+ as.integer(M), as.integer(qq), as.integer(n),
+ as.integer(r), NAOK=TRUE)
+ array(fred$ans,c(M,r,n))
+}
+
+
+
+mux9 <- function(cc, xmat)
+{
+
+ if(is.vector(xmat))
+ xmat <- cbind(xmat)
+ dimxmat <- dim(xmat)
+ dimcc <- dim(cc)
+
+ if(dimcc[1]!=dimcc[2] || dimxmat[1]!=dimcc[3] || dimxmat[2]!=dimcc[1])
+ stop('input nonconformable')
+ M <- dimcc[1]
+ n <- dimcc[3]
+
+ ans <- matrix(as.numeric(NA),n,M)
+ fred <- dotC(name="mux9", as.double(cc), as.double(xmat), ans=as.double(ans),
+ as.integer(M), as.integer(n), NAOK=TRUE)
+ matrix(fred$ans,n,M)
+}
+
+
+mux11 <- function(cc, xmat)
+{
+
+
+ dcc <- dim(cc)
+ d <- dim(xmat)
+ M <- dcc[1]
+ R <- d[2]
+ n <- dcc[3]
+ if(M!=dcc[2] || d[1]!=n*M)
+ stop("input inconformable")
+
+ Xmat <- array(c(t(xmat)), c(R,M,n))
+ Xmat <- aperm(Xmat, c(2,1,3)) # Xmat becomes M x R x n
+ mat <- mux7(cc, Xmat) # mat is M x R x n
+ mat <- aperm(mat, c(2,1,3)) # mat becomes R x M x n
+ mat <- matrix(c(mat), n*M, R, byrow=TRUE)
+ mat
+}
+
+
+
+mux111 <- function(cc, xmat, M, upper=TRUE)
+{
+
+
+ R <- ncol(xmat)
+ n <- nrow(xmat) / M
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ dimm.value <- nrow(cc) # M or M(M+1)/2
+
+ fred <- dotC(name="mux111", as.double(cc), b=as.double(t(xmat)), as.integer(M),
+ as.integer(R), as.integer(n), wk=double(M*M),
+ wk2=double(M*R), as.integer(index$row),
+ as.integer(index$col), as.integer(dimm.value),
+ as.integer(as.numeric(upper)), NAOK=TRUE)
+
+ ans <- fred$b
+ dim(ans) <- c(R, nrow(xmat))
+ d <- dimnames(xmat)
+ dimnames(ans) <- list(d[[2]], d[[1]])
+ t(ans)
+}
+
+
+
+mux15 <- function(cc, xmat)
+{
+
+ n <- nrow(xmat)
+ M <- ncol(xmat)
+ if(nrow(cc) != M || ncol(cc) != M)
+ stop("input inconformable")
+ if(max(abs(t(cc)-cc))>0.000001)
+ stop("cc not symmetric")
+
+ ans <- rep(as.numeric(NA),n*M*M)
+ fred <- dotC(name="mux15", as.double(cc), as.double(t(xmat)),
+ ans=as.double(ans), as.integer(M),
+ as.integer(n), NAOK=TRUE)
+ array(fred$ans,c(M,M,n))
+}
+
+
+
+
+
+vforsub <- function(cc, b, M, n)
+{
+
+
+
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ dimm.value <- nrow(cc) # M or M(M+1)/2
+
+
+ fred <- dotC(name="vforsub", as.double(cc), b=as.double(t(b)),
+ as.integer(M), as.integer(n), wk=double(M*M),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(dimm.value), NAOK=TRUE)
+
+ dim(fred$b) <- c(M, n)
+ fred$b
+}
+
+
+
+
+vbacksub <- function(cc, b, M, n)
+{
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ dimm.value <- nrow(cc)
+ if(nrow(b)!=M || ncol(b)!=n)
+ stop("dimension size inconformable")
+
+ fred <- dotC(name="vbacksub", as.double(cc), b=as.double(b),
+ as.integer(M), as.integer(n), wk=double(M*M),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(dimm.value), NAOK=TRUE)
+
+ if(M==1) fred$b else {
+ dim(fred$b) <- c(M,n)
+ t(fred$b)
+ }
+}
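+
+# Illustrative sketch, not part of VGAM itself: vforsub() and vbacksub()
+# appear to be the forward- and back-substitution steps for solving
+# U'U x = b, where U holds the half-stored Cholesky factors from vchol().
+# Here wz (an n x dimm matrix of half-stored weights) and bmat (n x M
+# right-hand sides) are hypothetical.
+if(FALSE) {
+ U <- vchol(wz, M=M, n=n, silent=TRUE) # dimm x n factors
+ zedd <- vforsub(U, b=bmat, M=M, n=n) # M x n; solves U' zedd = b
+ xsol <- vbacksub(U, b=zedd, M=M, n=n) # n x M; solves U xsol = zedd
+}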
+
+
+vchol <- function(cc, M, n, silent=FALSE)
+{
+
+
+
+ index <- iam(NA, NA, M, both=TRUE, diag=TRUE)
+ cc <- t(cc)
+ MM <- nrow(cc) # cc is big enough to hold its Cholesky decomposition.
+
+ fred <- dotC(name="vchol", cc=as.double(cc), as.integer(M),
+ as.integer(n), ok=integer(n),
+ wk=double(M*M), as.integer(index$row),
+ as.integer(index$col),
+ as.integer(MM),
+ NAOK=TRUE)
+
+ failed <- fred$ok != 1
+ if((correction.needed <- any(failed))) {
+ index <- (1:n)[failed]
+ if(!silent) {
+ if(length(index) < 11)
+ warning(paste("weight matri", ifelse(length(index)>1, "ces ","x "),
+ paste(index, collapse=", "), " not positive-definite", sep=""))
+ }
+ }
+
+ ans <- fred$cc
+ dim(ans) <- c(MM, n)
+
+ if(correction.needed) {
+ temp <- cc[,index,drop=FALSE]
+ tmp777 <- vchol.greenstadt(temp, M=M, silent=silent)
+
+
+ if(length(index)==n) {
+ ans=tmp777[1:nrow(ans),,drop=FALSE] # was tmp777 prior to 7/3/03
+ } else {
+
+
+ ans[,index] <- tmp777 # restored 16/10/03
+ }
+ }
+ dim(ans) <- c(MM, n) # Make sure
+
+ ans
+}
+
+
+
+vchol.greenstadt <- function(cc, M, silent=FALSE)
+{
+
+
+
+
+ MM <- dim(cc)[1]
+ n <- dim(cc)[2]
+
+ if(!silent)
+ cat(paste("Applying Greenstadt modification to", n, "matrices\n"))
+
+ temp <- veigen(cc, M=M) # , mat=TRUE)
+ dim(temp$vectors) <- c(M,M,n) # Make sure (when M=1) for mux5
+ dim(temp$values) <- c(M,n) # Make sure (when M=1) for mux5
+
+ zero <- temp$values==0
+ neg <- temp$values<0
+ pos <- temp$values>0
+
+ temp$values <- abs(temp$values)
+
+ small.value <- quantile(temp$values[!zero], prob=0.15)
+ temp$values[zero] <- small.value
+
+ temp3 <- mux55(temp$vectors, temp$values, M=M) # , matrix.arg=TRUE)
+ ans <- vchol(t(temp3), M=M, n=n, silent=silent) # , matrix.arg=TRUE)
+ ans
+}
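+
+# Note (illustrative, not part of VGAM itself): the Greenstadt modification
+# above eigendecomposes each offending weight matrix, replaces the
+# eigenvalues by their absolute values (zero eigenvalues by a small
+# positive quantile of the rest), reassembles the matrices with mux55(),
+# and then re-attempts the Cholesky factorization via vchol().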
+
+
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
new file mode 100644
index 0000000..4ba388f
--- /dev/null
+++ b/R/plot.vglm.q
@@ -0,0 +1,936 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+if(!exists("is.R")) is.R <- function()
+ exists("version") && !is.null(version$language) && version$language=="R"
+
+
+
+plotvgam <- function(x, newdata=NULL, y=NULL, residuals=NULL, rugplot=TRUE,
+ se= FALSE, scale=0,
+ raw= TRUE, offset.arg=0, deriv.arg=0, overlay= FALSE,
+ type.residuals=c("deviance","working","pearson","response"),
+ plot.arg= TRUE, which.term=NULL, which.cf=NULL,
+ control=plotvgam.control(...),
+ ...)
+{
+
+ missing.control = missing(control)
+
+ na.act = x@na.action
+ x@na.action = list() # Don't want NAs returned from predict() or resid()
+
+ missing.type.residuals = missing(type.residuals)
+ if(mode(type.residuals) != "character" && mode(type.residuals) != "name")
+ type.residuals <- as.character(substitute(type.residuals))
+ if(!missing.type.residuals)
+ type.residuals <- match.arg(type.residuals,
+ c("deviance","working","pearson","response"))[1]
+
+
+ if(!is.numeric(deriv.arg) || deriv.arg<0 ||
+ deriv.arg!=round(deriv.arg) || length(deriv.arg)>1)
+ stop("bad input for the deriv argument")
+
+ if(se && deriv.arg>0) {
+ warning("standard errors not available with derivatives. Setting se=FALSE")
+ se = FALSE
+ }
+
+ preplot.object <- x@preplot
+ if(!length(preplot.object)) {
+ if(is.R()) {
+ preplot.object <- preplotvgam(x, newdata=newdata,
+ raw=raw, deriv=deriv.arg, se=se)
+ } else {
+ preplot.object <- preplotvgam(x, raw=raw, deriv=deriv.arg, se=se)
+ }
+ }
+
+ x@preplot = preplot.object
+
+
+ if(!is.null(residuals) && length(residuals) == 1) {
+ if(residuals) {
+ if(missing.type.residuals) {
+ for(rtype in type.residuals)
+ if(!is.null(residuals <- resid(x, type=rtype))) break
+ } else {
+ residuals = resid(x, type=type.residuals) # Get the prespecified type
+ if(!length(residuals))
+ warning("residuals are NULL. Ignoring residuals=T")
+ }
+ } else {
+ residuals <- NULL
+ }
+ }
+
+ if(!missing.control) {
+ control = c(plotvgam.control(.include.dots= FALSE, ...),
+ control,
+ plotvgam.control(...)) # identical under R and S-PLUS
+ }
+
+ x@post$plotvgam.control = control # Add it to the object
+
+ if(plot.arg)
+ plotpreplotvgam(preplot.object, residuals=residuals,
+ rugplot=rugplot, scale=scale, se=se,
+ offset.arg=offset.arg, deriv.arg=deriv.arg,
+ overlay=overlay,
+ which.term=which.term, which.cf=which.cf,
+ control=control)
+
+ x@na.action = na.act # Restore its original value
+ invisible(x)
+}
+
+
+
+
+ylim.scale <- function(ylim, scale=0) {
+ if(length(ylim) != 2 || ylim[2] < ylim[1])
+ stop("error in ylim")
+ try <- ylim[2] - ylim[1]
+ if(try > scale) ylim else
+ c(ylim[1]+ylim[2]-scale, ylim[1]+ylim[2]+scale) / 2
+}
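+
+# Illustrative example, not part of VGAM itself: ylim.scale() widens a
+# y-range about its midpoint to a minimum length of scale, e.g.
+# ylim.scale(c(1, 2), scale=4) returns c(-0.5, 3.5).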
+
+
+
+preplotvgam = function(object, newdata=NULL,
+ terms=if(is.R()) labels(object) else v.labels.lm(object),
+ raw= TRUE, deriv.arg=deriv.arg, se= FALSE)
+{
+ Terms <- terms(object) # 11/8/03; object@terms$terms
+ aa <- attributes(Terms)
+
+ if(!is.R()) Call <- object@call
+
+ all.terms <- labels(Terms)
+ xvars <- as.vector(Terms)
+
+ if(!is.R()) {
+ names(xvars) <- all.terms
+ terms <- match.arg(terms, all.terms)
+ }
+
+ Interactions <- aa$order > 1
+ if(any(Interactions)) {
+ if(is.R())
+ stop("can't handle interactions")
+ all.terms <- all.terms[!Interactions]
+ TM <- match(terms, all.terms, 0)
+ if(!all(TM)) {
+ terms <- terms[TM > 0]
+ warning("No terms saved for \"a:b\" style interaction terms")
+ }
+ }
+
+ if(!is.R()) {
+ xvars <- xvars[terms]
+ xnames <- as.list(terms)
+ names(xnames) <- terms
+ modes <- sapply(xvars, mode)
+ for(term in terms[modes != "name"])
+ {
+ evars <- all.names(xvars[term], functions= FALSE, unique= TRUE)
+ if(!length(evars))
+ next
+ xnames[[term]] <- evars
+ evars <- parse(text=evars)
+ if(length(evars) == 1)
+ evars <- evars[[1]]
+ else {
+ evars <- c(as.name("list"), evars)
+ mode(evars) <- "call"
+ }
+ xvars[[term]] <- evars
+ }
+
+ act.vars <- as.character(xvars)
+
+ xvars <- c(as.name("list"), xvars)
+ mode(xvars) <- "call"
+ if(length(newdata))
+ xvars <- eval(xvars, newdata) else
+ {
+ if(!is.null(Call$subset) | !is.null(Call$na.action) |
+ !is.null(options("na.action")[[1]]))
+ {
+ Rownames <- names(fitted(object))
+ if(!(Rl <- length(Rownames)))
+ Rownames <- dimnames(fitted(object))[[1]]
+ if(!(Rl <- length(Rownames)))
+ stop(paste("need to have names for fitted.values",
+ "when call has a subset or na.action argument"))
+ Mcall <- c(as.name("model.frame"), list(formula=
+ terms.inner(parse(text=unlist(xnames))),
+ subset=Rownames, na.action=function(x) x))
+ mode(Mcall) <- "call"
+ Mcall$data <- Call$data
+ xvars <- eval(xvars, eval(Mcall))
+ } else {
+ ecall <- substitute(eval(expression(xvars)))
+ ecall$local <- Call$data
+ xvars <- eval(ecall)
+ }
+ }
+ }
+
+ if(!length(newdata)) {
+ pred <- predict(object, type="terms",
+ raw=raw, se.fit=se, deriv.arg=deriv.arg)
+ } else {
+ pred <- predict(object, newdata, type="terms",
+ raw=raw, se.fit=se, deriv.arg=deriv.arg)
+ }
+
+ fits <- pred$fit
+ se.fit <- pred$se.fit
+ if(is.null(fits))
+ fits <- pred
+ fred <- attr(fits, "vterm.assign") # NULL for M==1
+
+ if(is.R()) {
+ xnames <- vector("list", length(fred))
+ names(xnames) <- names(fred)
+ }
+
+ gamplot <- xnames
+
+ if(is.R()) {
+ s.x = if(any(slotNames(object)=="s.xargument")) object@s.xargument else
+ NULL
+ n.s.x = names(s.x)
+ }
+
+ loop.var = if(is.R()) names(fred) else terms
+ for(term in loop.var)
+ {
+ if(is.R()) {
+ useterm <- term
+ if(length(n.s.x) && any(n.s.x == useterm))
+ useterm <- s.x[useterm]
+ innerx <- parse(text=useterm)
+ innerx <- all.vars(innerx)
+ if(length(innerx) == 0)
+ warning(paste("couldn't extract variable from", useterm, "\n"))
+ if(length(innerx) > 1) {
+ warning(paste("using the first of \"", innerx,
+ "\" terms\n", sep=""))
+ innerx <- innerx[1]
+ }
+ }
+
+ if(!is.R()) {
+ .VGAM.x <- xvars[[term]]
+ } else {
+ .VGAM.x <- if(length(newdata)) newdata[[innerx]] else {
+ if(( is.R() && object@misc$dataname != "list") ||
+ (!is.R() && object@misc$dataname != "sys.parent")) {
+ mytext <- paste(object@misc$dataname,
+ "[['", innerx, "']]", sep="")
+ } else {
+ mytext <- innerx
+ }
+ getx <- parse(text=mytext)
+ .VGAM.ans = if(exists(x=mytext, envir = .GlobalEnv))
+ eval(getx, envir = .GlobalEnv) else eval(getx)
+ .VGAM.ans
+ }
+ }
+
+ if(is.R()) {
+ class(.VGAM.x)=unique(c(class(.VGAM.x),data.class(unclass(.VGAM.x))))
+ }
+
+ TT <- list(x = .VGAM.x,
+ y = fits[, if(is.null(fred)) term else fred[[term]]],
+ se.y = if(is.null(se.fit)) NULL else
+ se.fit[, if(is.null(fred)) term else fred[[term]]],
+ xlab = if(is.R()) innerx else xnames[[term]],
+ ylab = term)
+ class(TT) <- "preplotvgam"
+ gamplot[[term]] <- TT
+ }
+ if(!is.R())
+ class(gamplot) <- "preplotvgam" # Commented out 8/6/02
+
+ invisible(gamplot)
+}
+
+
+if(!is.R())
+v.labels.lm <- function(object, ...)
+{
+ TL <- terms(object) # 11/8/03; object@terms$terms
+ if(!is.null(TL)) {
+ TL <- attr(TL, "term.labels")
+ TA <- object@assign
+ if(!is.null(TA)) {
+ TA <- names(TA)
+ TL <- TL[match(TA, TL, 0.)]
+ }
+ }
+ TL
+}
+
+
+plotvlm <- function(object, residuals=NULL, rugplot= FALSE, ...)
+{
+ stop("sorry, this function hasn't been written yet")
+}
+
+
+plotvglm <- function(x, residuals=NULL, smooths= FALSE,
+ rugplot= FALSE, id.n= FALSE, ...)
+{
+ stop("this function hasn't been written yet") # zz
+
+ M <- x@misc$M
+ true.mu <- x@misc$true.mu
+ response <- as.matrix(x@y)
+ if(is.null(true.mu))
+ true.mu <- TRUE
+
+ Residuals <- resid(x, type="deviance")
+ if(!is.null(residuals))
+ {
+ if(length(residuals) == 1 && residuals)
+ residuals <- Residuals else
+ Residuals <- residuals
+ }
+
+ if(ncol(response)==1 && true.mu && !is.null(Residuals))
+ invisible(NextMethod("plot")) else
+ invisible(x)
+}
+
+
+
+if(!is.R()) jitter <- function(x, factor=1)
+{
+
+ z <- diff(range(x[!is.na(x)]))
+ if(all(z==0))
+ return(x)
+ z <- factor * (z/50)
+ x + runif(length(x), - z, z)
+}
+
+
+
+plotpreplotvgam <- function(x, y=NULL, residuals=NULL,
+ rugplot= TRUE, se= FALSE, scale=0,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.term=NULL, which.cf=NULL,
+ control=NULL)
+{
+ listof <- inherits(x[[1]], "preplotvgam")
+ if(listof) {
+ TT <- names(x)
+ if(is.null(which.term))
+ which.term = TT # Plot them all
+ plot.no = 0
+ for(i in TT) {
+ plot.no = plot.no + 1
+ if((is.character(which.term) && any(which.term==i)) ||
+ (is.numeric(which.term) && any(which.term==plot.no)))
+ plotpreplotvgam(x[[i]], y=NULL,
+ residuals, rugplot, se, scale,
+ offset.arg=offset.arg,
+ deriv.arg=deriv.arg, overlay=overlay,
+ which.cf=which.cf,
+ control=control)
+ }
+ } else {
+ dummy <- function(residuals=NULL, rugplot= TRUE, se= FALSE, scale=0,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.cf=NULL, control=plotvgam.control(...))
+ c(list(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
+ offset.arg=offset.arg, deriv.arg=deriv.arg,
+ overlay=overlay,
+ which.cf=which.cf),
+ control)
+
+ d <- dummy(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
+ offset.arg=offset.arg, deriv.arg=deriv.arg,
+ overlay=overlay,
+ which.cf=which.cf,
+ control=control)
+
+ uniq.comps <- unique(c(names(x), names(d)))
+ Call <- c(as.name("vplot"), c(d, x)[uniq.comps])
+
+ mode(Call) <- "call"
+ invisible(eval(Call))
+ }
+}
+
+
+vplot.default <- function(x, y, se.y=NULL, xlab="", ylab="",
+ residuals=NULL, rugplot= FALSE,
+ scale=0, se= FALSE,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.cf=NULL, ...) {
+ switch(data.class(x)[1],
+ logical=vplot.factor(factor(x), y, se.y, xlab, ylab, residuals,
+ rugplot, scale, se,
+ offset.arg=offset.arg, overlay=overlay, ...),
+ if(is.numeric(x)) {
+ vplot.numeric(as.vector(x), y, se.y, xlab, ylab,
+ residuals, rugplot, scale, se,
+ offset.arg=offset.arg, overlay=overlay, ...)
+ } else {
+ warning(paste("The \"x\" component of \"", ylab,
+ "\" has class \"",
+ paste(class(x), collapse="\", \""),
+ "\"; no vplot() methods available", sep=""))
+ }
+ )
+}
+
+
+
+vplot.list <- function(x, y, se.y=NULL, xlab, ylab,
+ residuals=NULL, rugplot= FALSE, scale=0, se= FALSE,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.cf=NULL, ...)
+{
+
+ if(is.numeric(x[[1]])) {
+ vplot.numeric(x[[1]], y, se.y, xlab, ylab,
+ residuals, rugplot, scale, se,
+ offset.arg=offset.arg, deriv.arg=deriv.arg,
+ overlay=overlay, ...)
+ } else
+ stop("this function hasn't been written yet")
+}
+
+
+plotvgam.control = function(
+ which.cf=NULL,
+ xlim=NULL, ylim=NULL,
+ llty=par()$lty,
+ slty=if(is.R()) "dashed" else 3,
+ pcex=par()$cex,
+ pch=par()$pch,
+ pcol=par()$col,
+ lcol=par()$col,
+ rcol=par()$col,
+ scol=par()$col,
+ llwd=par()$lwd,
+ slwd=par()$lwd,
+ add.arg= FALSE,
+ one.at.a.time= FALSE,
+ .include.dots= TRUE,
+ ...) {
+
+
+ ans =
+ list(which.cf=which.cf,
+ xlim=xlim, ylim=ylim,
+ llty=llty, slty=slty,
+ pcex=pcex, pch=pch,
+ pcol=pcol, lcol=lcol, rcol=rcol, scol=scol,
+ llwd=llwd, slwd=slwd,
+ add.arg=add.arg,
+ one.at.a.time=one.at.a.time)
+
+ if(.include.dots) {
+ c(list(...), ans)
+ } else {
+ default.vals = plotvgam.control()
+ return.list = list()
+ for(i in names(default.vals)) {
+ replace.val =
+ if(is.R())
+ !((length(ans[[i]]) == length(default.vals[[i]])) &&
+ (length(default.vals[[i]]) > 0) &&
+ (is.logical(all.equal(ans[[i]], default.vals[[i]]))) &&
+ all.equal(ans[[i]], default.vals[[i]])) else
+ !((length(ans[[i]]) == length(default.vals[[i]])) &&
+ (length(default.vals[[i]]) > 0) &&
+ all(ans[[i]] == default.vals[[i]]))
+
+ if(FALSE && replace.val) {
+ }
+ if(replace.val)
+ return.list[[i]] = ans[[i]]
+ }
+ if(length(return.list)) {
+ names(return.list) = names(return.list)
+ return.list
+ } else NULL
+ }
+}
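+
+# Note (illustrative, not part of VGAM itself): with .include.dots=FALSE
+# the function returns only those settings that differ from its own
+# defaults; plotvgam() exploits this so that values passed explicitly via
+# ... take precedence over entries of a user-supplied control list.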
+
+
+vplot.numeric <- function(x, y, se.y=NULL, xlab, ylab,
+ residuals=NULL, rugplot= FALSE, se= FALSE, scale=0,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.cf=NULL,
+ xlim=NULL, ylim=NULL,
+
+ llty=par()$lty,
+ slty=if(is.R()) "dashed" else 3,
+ pcex=par()$cex,
+ pch=par()$pch,
+ pcol=par()$col,
+ lcol=par()$col,
+ rcol=par()$col,
+ scol=par()$col,
+ llwd=par()$lwd,
+ slwd=par()$lwd,
+ add.arg= FALSE,
+ one.at.a.time= FALSE,
+ separator = ":",
+
+ ...)
+{
+
+
+
+ ylim0 <- ylim
+
+ if(length(y)/length(x) != round(length(y)/length(x)))
+ stop("length of x and y do not seem to match")
+ y <- as.matrix(y)
+ if(!length(which.cf))
+ which.cf = 1:ncol(y) # Added 7/8/04
+
+ if(!is.null(se.y))
+ se.y <- as.matrix(se.y)
+ if(!is.null(se.y) && any(is.na(se.y)))
+ se.y <- NULL
+
+ if(!is.null(residuals)) {
+ residuals <- as.matrix(residuals)
+ if(ncol(residuals) != ncol(y)) {
+ warning("ncol(residuals) != ncol(y) so residuals are not plotted")
+ residuals <- NULL
+ }
+ }
+
+ offset.arg <- matrix(offset.arg, nrow(y), ncol(y), byrow= TRUE)
+ y <- y + offset.arg
+
+ ylab <- add.hookey(ylab, deriv.arg)
+
+ ux <- unique(sort(x))
+ o <- match(ux, x)
+ uy <- y[o,,drop= FALSE]
+ xlim <- range(xlim, ux)
+ ylim <- range(ylim, uy[,which.cf], na.rm= TRUE)
+ if(rugplot) {
+ jx <- jitter(x[!is.na(x)])
+ xlim <- range(c(xlim, jx))
+ }
+
+ if(se && !is.null(se.y)) {
+ se.upper <- uy[,which.cf] + 2 * se.y[o,which.cf,drop= FALSE]
+ se.lower <- uy[,which.cf] - 2 * se.y[o,which.cf,drop= FALSE]
+ ylim <- range(c(ylim, se.upper, se.lower))
+ }
+
+ if(!is.null(residuals)) {
+ if(length(residuals) == length(y)) {
+ residuals <- as.matrix(y + residuals)
+ ylim <- range(c(ylim, residuals[,which.cf]), na.rm= TRUE)
+ } else {
+ residuals <- NULL
+ warning(paste("Residuals do not match x in \"", ylab,
+ "\" preplot object", sep=""))
+ }
+ }
+
+
+ all.missingy <- all(is.na(y))
+
+ if(all.missingy)
+ return()
+
+ ylim <- ylim.scale(ylim, scale)
+
+ if(overlay) {
+ if(!length(which.cf)) which.cf = 1:ncol(uy) # Added 7/8/04
+ if(!add.arg) {
+ matplot(ux, uy[,which.cf], type="n",
+ xlim=xlim, ylim=ylim,
+ xlab=xlab, ylab=ylab, ...) # same call under R and S-PLUS
+ }
+ matlines(ux, uy[,which.cf],
+ lwd=llwd, col=lcol, lty=llty)
+ if(!is.null(residuals))
+ if(ncol(y)==1) {
+ points(x, residuals, pch=pch, col=pcol, cex=pcex)
+ } else {
+ matpoints(x, residuals[,which.cf],
+ pch=pch, col=pcol, cex=pcex) # add.arg=TRUE,
+ }
+ if(rugplot)
+ if(is.R()) rug(jx, col=rcol) else rug(jx)
+ if(se && !is.null(se.y)) {
+ matlines(ux, se.upper[,which.cf], lty= slty, lwd=slwd, col=scol)
+ matlines(ux, se.lower[,which.cf], lty= slty, lwd=slwd, col=scol)
+ }
+ } else {
+ YLAB <- ylab
+
+ pcex = rep(pcex, len=ncol(uy))
+ pch = rep(pch , len=ncol(uy))
+ pcol = rep(pcol, len=ncol(uy))
+ lcol = rep(lcol, len=ncol(uy))
+ llty = rep(llty, len=ncol(uy))
+ llwd = rep(llwd, len=ncol(uy))
+ slty = rep(slty, len=ncol(uy))
+ rcol = rep(rcol, len=ncol(uy))
+ scol = rep(scol, len=ncol(uy))
+ slwd = rep(slwd, len=ncol(uy))
+
+ for(i in 1:ncol(uy))
+ if(!length(which.cf) ||
+ (length(which.cf) && any(which.cf==i))) {
+
+ ylim <- range(ylim0, uy[,i], na.rm= TRUE)
+ if(se && !is.null(se.y))
+ ylim <- range(ylim0, se.lower[,i], se.upper[,i], na.rm= TRUE)
+ if(!is.null(residuals))
+ ylim <- range(c(ylim, residuals[,i]), na.rm= TRUE)
+ ylim <- ylim.scale(ylim, scale)
+ if(ncol(uy)>1 && length(separator))
+ YLAB <- paste(ylab, separator, i, sep="")
+ if(!add.arg) {
+ if(one.at.a.time) {
+ readline("Hit return for the next plot ")
+ }
+ plot(ux, uy[,i], type="n",
+ xlim=xlim, ylim=ylim,
+ xlab=xlab, ylab=YLAB, ...) # same call under R and S-PLUS
+ }
+ lines(ux, uy[,i],
+ lwd=llwd[i], col=lcol[i], lty=llty[i])
+ if(!is.null(residuals))
+ points(x, residuals[,i], pch=pch[i], col=pcol[i], cex=pcex[i])
+ if(rugplot)
+ if(is.R()) rug(jx, col=rcol[i]) else rug(jx)
+
+ if(se && !is.null(se.y)) {
+ lines(ux, se.upper[,i], lty=slty[i], lwd=slwd[i], col=scol[i])
+ lines(ux, se.lower[,i], lty=slty[i], lwd=slwd[i], col=scol[i])
+ }
+ }
+ }
+}
+
+
+
+vplot.matrix <- function(x, y, se.y=NULL, xlab, ylab,
+ residuals=NULL, rugplot= FALSE, scale=0, se= FALSE,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.cf=NULL, ...)
+{
+ stop("you shouldn't ever call this function!")
+}
+
+
+add.hookey <- function(ch, deriv.arg=0) {
+
+ if(!is.numeric(deriv.arg) || deriv.arg<0 ||
+ deriv.arg!=round(deriv.arg) || length(deriv.arg)>1)
+ stop("bad input for the deriv argument")
+
+ if(deriv.arg==0)
+ return(ch)
+
+ hookey <- switch(deriv.arg, "'", "''", "'''", "''''",
+ "'''''", stop("too high a derivative"))
+ nc <- nchar(ch)
+ sub <- substring(ch, 1:nc, 1:nc)
+ if(nc >= 2 && sub[1]=="s" && sub[2]=="(") {
+ paste("s", hookey, substring(ch, 2, nc), sep="", coll="")
+ } else {
+ paste(ch, hookey, sep="", collapse="")
+ }
+}
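+
+# Illustrative examples, not part of VGAM itself: add.hookey() appends
+# prime marks to a label to flag differentiation, keeping s() labels
+# readable, e.g. add.hookey("s(x2)", deriv.arg=1) gives "s'(x2)" and
+# add.hookey("x2", deriv.arg=2) gives "x2''".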
+
+
+
+vplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
+ residuals=NULL, rugplot= FALSE, scale=0,
+ se= FALSE, xlim=NULL, ylim=NULL,
+ offset.arg=0, deriv.arg=0, overlay= FALSE,
+ which.cf=NULL, ...)
+{
+ if(deriv.arg>0)
+ return(NULL)
+
+ if(length(y)/length(x) != round(length(y)/length(x)))
+ stop("length of x and y do not seem to match")
+ y <- as.matrix(y)
+
+ if(!is.null(se.y))
+ se.y <- as.matrix(se.y)
+ if(!is.null(se.y) && any(is.na(se.y)))
+ se.y <- NULL
+
+ if(!is.null(residuals)) {
+ residuals <- as.matrix(residuals)
+ if(ncol(residuals) != ncol(y)) {
+ warning("ncol(residuals) != ncol(y) so residuals are not plotted")
+ residuals <- NULL
+ }
+ }
+ if(overlay) {
+ vvplot.factor(x, y,
+ se.y=if(is.null(se.y)) NULL else se.y,
+ xlab=xlab, ylab=ylab,
+ residuals=residuals,
+ rugplot=rugplot, scale=scale,
+ se=se, xlim=xlim, ylim=ylim, ...)
+ } else {
+ for(i in 1:ncol(y)) {
+ ylab <- rep(ylab, len=ncol(y))
+ if(ncol(y)>1)
+ ylab <- dimnames(y)[[2]]
+ vvplot.factor(x, y[,i,drop= FALSE],
+ se.y=if(is.null(se.y)) NULL else se.y[,i,drop= FALSE],
+ xlab=xlab, ylab=ylab[i],
+ residuals= if(is.null(residuals))
+ NULL else residuals[,i,drop= FALSE],
+ rugplot=rugplot, scale=scale,
+ se=se, xlim=xlim, ylim=ylim, ...)
+
+ }
+ }
+ invisible(NULL)
+}
+
+
+
+
+
+vvplot.factor <- function(x, y, se.y=NULL, xlab, ylab,
+ residuals=NULL, rugplot= FALSE, scale=0,
+ se= FALSE, xlim=NULL, ylim=NULL,
+ ...)
+{
+
+ M <- ncol(y)
+ nn <- as.numeric(table(x))
+ codex <- as.numeric(x)
+ ucodex <- seq(nn)[nn > 0]
+ o <- match(ucodex, codex, 0)
+
+ uy <- y[o,,drop= FALSE]
+ ylim <- range(ylim, uy)
+ xlim <- range(c(0, sum(nn), xlim))
+ rightx <- cumsum(nn)
+ leftx <- c(0, rightx[ - length(nn)])
+ ux <- (leftx + rightx)/2
+ delta <- (rightx - leftx)/8
+
+ jx <- runif(length(codex), (ux - delta)[codex], (ux + delta)[codex])
+ nnajx <- jx[!is.na(jx)]
+
+ if(rugplot)
+ xlim <- range(c(xlim, nnajx))
+ if(se && !is.null(se.y)) {
+ se.upper <- uy + 2 * se.y[o,,drop= FALSE]
+ se.lower <- uy - 2 * se.y[o,,drop= FALSE]
+ ylim <- range(c(ylim, se.upper, se.lower))
+ }
+ if(!is.null(residuals)) {
+ if(length(residuals) == length(y)) {
+ residuals <- y + residuals
+ ylim <- range(c(ylim, residuals))
+ } else {
+ residuals <- NULL
+ warning(paste("Residuals do not match x in \"", ylab,
+ "\" preplot object", sep=""))
+ }
+ }
+ ylim <- ylim.scale(ylim, scale)
+ Levels <- levels(x)
+ if(!all(nn)) {
+ keep <- nn > 0
+ nn <- nn[keep]
+ ux <- ux[keep]
+ delta <- delta[keep]
+ leftx <- leftx[keep]
+ rightx <- rightx[keep]
+ Levels <- Levels[keep]
+ }
+
+
+ about <- function(ux, M, Delta=1/M) {
+ if(M==1) return(cbind(ux))
+ ans <- matrix(as.numeric(NA), length(ux), M)
+ grid <- seq(-Delta, Delta, len=M)
+ for(i in 1:M) {
+ ans[,i] <- ux + grid[i]
+ }
+ ans
+ }
+
+ uxx <- about(ux, M, Delta=min(delta))
+ xlim <- range(c(xlim, uxx))
+
+ if(is.R()) {
+ matplot(ux, uy, ylim=ylim, xlim=xlim, xlab="", type="n",
+ ylab=ylab, axes= FALSE, frame.plot=TRUE, ...)
+ mtext(xlab, 1, 2, adj=0.5)
+ axis(side=2)
+
+ lpos <- par("mar")[3]
+ mtext(Levels, side=3, line=lpos/2, at=ux, adj=0.5, srt=45)
+ } else {
+ matplot(ux, uy, ylim=ylim, xlim=xlim, xlab="", type="n",
+ ylab=ylab, xaxt="c", ...) # xaxt="c", xaxt="n",
+ mtext(xlab, 1, 2, adj=0.5)
+ axis(side=3, at=ux, labels=Levels, srt=45, ticks= FALSE, adj=0)
+ }
+
+ for(i in 1:M)
+ segments(uxx[,i] - 1.0 * delta, uy[,i],
+ uxx[,i] + 1.0 * delta, uy[,i])
+ if(!is.null(residuals)) {
+ for(i in 1:M) {
+ jux <- uxx[,i]
+ jux <- jux[codex]
+ jux <- jux + runif(length(jux), -0.7*min(delta), 0.7*min(delta))
+ if(M==1) points(jux, residuals[,i]) else
+ points(jux, residuals[,i], pch=as.character(i))
+ }
+ }
+ if(rugplot)
+ rug(nnajx)
+ if(se) {
+ for(i in 1:M) {
+ segments(uxx[,i]+0.5*delta, se.upper[,i],
+ uxx[,i]-0.5*delta, se.upper[,i])
+ segments(uxx[,i]+0.5*delta, se.lower[,i],
+ uxx[,i]-0.5*delta, se.lower[,i])
+ segments(uxx[,i], se.lower[,i], uxx[,i], se.upper[,i], lty=2)
+ }
+ }
+ invisible(diff(ylim))
+}
+
+
+if(!isGeneric("vplot"))
+setGeneric("vplot", function(x, ...) standardGeneric("vplot"))
+setMethod("vplot", "factor", function(x, ...)
+ vplot.factor(x, ...))
+setMethod("vplot", "list", function(x, ...)
+ vplot.list(x, ...))
+setMethod("vplot", "matrix", function(x, ...)
+ vplot.matrix(x, ...))
+setMethod("vplot", "numeric", function(x, ...)
+ vplot.numeric(x, ...))
+
+
+
+setMethod("plot", "vlm",
+ function(x, y, ...) {
+ if(!missing(y)) stop("can't process the \"y\" argument")
+ invisible(plotvlm(x, y, ...))})
+setMethod("plot", "vglm",
+ function(x, y, ...) {
+ if(!missing(y)) stop("can't process the \"y\" argument")
+ invisible(plotvglm(x, y, ...))})
+setMethod("plot", "vgam",
+ function(x, y, ...) {
+ if(!missing(y)) stop("can't process the \"y\" argument")
+ invisible(plotvgam(x, ...))})
+
+
+
+
+if(FALSE)
+vmerge.list = function(list1, list2) {
+
+
+
+
+ if(!is.list(list1) || !is.list(list2))
+ stop("list1 and list2 must be lists")
+
+ n1 = names(list1)
+ n2 = names(list2)
+ un12 = unique(c(n1, n2))
+ ans = vector("list", length(un12))
+ names(ans) = un12
+ for(i in un12) {
+ ans[[i]] = if(length(list1[[i]])) list1[[i]] else list2[[i]]
+ }
+ ans
+}
+
+
+
+plotqrrvglm = function(object,
+ rtype = c("pearson", "response", "deviance", "working"),
+ ask = FALSE,
+ main = paste(Rtype, "residuals vs latent variable(s)"),
+ xlab="Latent Variable",
+ ITolerances = object@control$EqualTolerances,
+ ...) {
+ M = object@misc$M
+ n = object@misc$n
+ Rank = object@control$Rank
+ Coef.object = Coef(object, ITolerances = ITolerances)
+ rtype <- match.arg(rtype, c("pearson", "response", "deviance", "working"))[1]
+ res = resid(object, type=rtype)
+
+ my.ylab = if(length(object@misc$ynames)) object@misc$ynames else
+ rep(" ", len=M)
+ Rtype = switch(rtype, pearson="Pearson", response="Response",
+ deviance="Deviance", working="Working")
+
+ done = 0
+ for(r in 1:Rank)
+ for(i in 1:M) {
+ plot(Coef.object@lv[,r], res[,i],
+ xlab=paste(xlab, if(Rank==1) "" else r, sep=""),
+ ylab=my.ylab[i],
+ main = main, ...)
+ done = done + 1
+ if(done >= prod(par()$mfrow) && ask && done != Rank*M) {
+ done = 0
+ readline("Hit return for the next plot: ")
+ }
+ }
+ object
+}
+
+setMethod("plot", "qrrvglm", function(x, y, ...)
+ invisible(plotqrrvglm(object=x, ...)))
+
+
+
+
+
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
new file mode 100644
index 0000000..69e9432
--- /dev/null
+++ b/R/predict.vgam.q
@@ -0,0 +1,356 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+predict.vgam <- function(object, newdata=NULL,
+ type=c("link", "response", "terms"),
+ se.fit=FALSE, deriv.arg=0, terms.arg=NULL,
+ raw=FALSE,
+ all=TRUE, offset=0,
+ untransform = FALSE,
+ dispersion=NULL, ...)
+{
+ if(missing(newdata)) {
+ newdata <- NULL
+ } else {
+ newdata <- as.data.frame(newdata)
+ }
+ no.newdata = length(newdata)==0
+
+ na.act = object@na.action
+ object@na.action = list()
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("link", "response", "terms"))[1]
+
+
+ if(untransform && (type!="link" || se.fit || deriv.arg != 0 || offset != 0))
+ stop(paste("argument \"untransform\"=TRUE only if type=\"link\",",
+ "se.fit=FALSE, deriv=0"))
+
+ if(raw && type!="terms")
+ stop("raw=TRUE only works when type=\"terms\"")
+
+ if(!is.numeric(deriv.arg) || deriv.arg<0 ||
+ deriv.arg!=round(deriv.arg) || length(deriv.arg)>1)
+ stop("bad input for the deriv argument")
+
+ if(deriv.arg>0 && type!="terms")
+ stop("deriv>0 can only be specified if type=\"terms\"")
+
+ if(deriv.arg != 0 && !(type!="response" && !se.fit))
+ stop(paste("deriv= only works with type!=\"response\"and se.fit=FALSE"))
+
+ if(se.fit && length(newdata))
+ stop("can't specify se.fit=TRUE when there is newdata")
+
+
+ tt <- terms(object) # 11/8/03; object@terms$terms
+
+ ttf <- attr(tt, "factors")
+ tto <- attr(tt, "order")
+ intercept <- attr(tt, "intercept")
+ if(!intercept)
+ stop("an intercept is assumed")
+
+ M <- object@misc$M
+ Blist <- object@constraints
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if(intercept)
+ ncolBlist <- ncolBlist[-1]
+ if(raw) {
+ Blist <- canonical.Blist(Blist)
+ object@constraints <- Blist
+ }
+
+ if(!length(newdata)) {
+ if(type=="link") {
+ if(se.fit) {
+ stop("cannot handle this option (se.fit=TRUE) currently") # zz
+ } else {
+ if(length(na.act)) {
+ answer = napredict(na.act[[1]], object@predictors)
+ } else {
+ answer = object@predictors
+ }
+ if(untransform) return(untransformVGAM(object, answer)) else
+ return(answer)
+ }
+ } else
+ if(type=="response") {
+ if(se.fit) {
+ stop("cannot handle this option (se.fit=TRUE) currently") # zz
+ } else {
+ if(length(na.act)) {
+ return(napredict(na.act[[1]], object@fitted.values))
+ } else {
+ return(object@fitted.values)
+ }
+ }
+ }
+
+ predictor <- predict.vlm(object,
+ type="terms",
+ se.fit=se.fit,
+ terms.arg=terms.arg,
+ raw=raw,
+ all=all, offset=offset,
+ dispersion=dispersion, ...) # deriv.arg=deriv.arg,
+
+ newdata <- model.matrixvlm(object, type="lm")
+
+
+ } else {
+
+ temp.type <- if(type=="link") "response" else type
+
+
+ predictor <- predict.vlm(object, newdata,
+ type=temp.type,
+ se.fit=se.fit,
+ terms.arg=terms.arg,
+ raw=raw,
+ all=all, offset=offset,
+ dispersion=dispersion, ...) # deriv.arg=deriv.arg,
+ }
+
+
+ if(deriv.arg>0)
+ if(se.fit) {
+ predictor$fitted.values <- predictor$fitted.values * 0
+ predictor$se.fit <- predictor$se.fit * NA
+ } else {
+ predictor <- predictor * 0
+ }
+
+
+ if(length(s.xargument <- object@s.xargument)) {
+
+
+
+
+ dnames2 <- dimnames(newdata)[[2]]
+ index1 <- match(s.xargument, dnames2, nomatch=FALSE)
+ index2 <- match(names(s.xargument), dnames2, nomatch=FALSE)
+ index <- index1 | index2
+ if(!length(index) || any(!index))
+ stop("required variables not found in newdata")
+
+
+
+
+ if(is.null(tmp6 <- attr(if(se.fit) predictor$fitted.values else
+ predictor, "vterm.assign"))) {
+
+ Blist <- subconstraints(object@misc$orig.assign,
+ object@constraints)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if(intercept)
+ ncolBlist <- ncolBlist[-1]
+
+ cs <- if(raw) cumsum(c(1, ncolBlist)) else
+ cumsum(c(1, M + 0*ncolBlist))
+ tmp6 <- vector("list", length(ncolBlist))
+ for(i in 1:length(tmp6))
+ tmp6[[i]] <- cs[i]:(cs[i+1]-1)
+ names(tmp6) <- names(ncolBlist)
+ }
+
+ n.s.xargument <- names(s.xargument) # e.g., c("s(x)", "s(x2)")
+ for(i in n.s.xargument) {
+
+ fred <- s.xargument[i]
+ if(!any(dimnames(newdata)[[2]] == fred))
+ fred <- i
+
+ xx <- newdata[,fred] # [,s.xargument[i]] # [,nindex[i]]
+ ox <- order(xx)
+
+ raw.mat <- predictvsmooth.spline.fit(
+ object@Bspline[[i]],
+ x=xx,
+ deriv=deriv.arg)$y
+
+
+ eta.mat <- if(raw) raw.mat else
+ raw.mat %*% t(Blist[[i]])
+
+ if(type=="terms") {
+ ii <- tmp6[[i]]
+ if(se.fit) {
+ predictor$fitted.values[,ii] =
+ predictor$fitted.values[,ii] + eta.mat
+
+ TS <- predictor$sigma^2
+
+ temp.var <- if(raw) {
+ iii <- object@misc$varassign
+ iii <- iii[[i]]
+ object@var[,iii,drop=FALSE]
+ } else
+ stop("can't handle se's with raw=FALSE")
+
+ predictor$se.fit[,ii] <- (predictor$se.fit[,ii]^2 +
+ TS * temp.var)^0.5
+ } else {
+ predictor[,ii] <- predictor[,ii] + eta.mat
+ }
+ } else {
+ if(se.fit) {
+ predictor$fitted.values <- predictor$fitted.values + eta.mat
+
+ TS <- 1 # out$residual.scale^2
+ TS <- predictor$sigma^2
+
+ TT <- ncol(object@var)
+ predictor$se.fit <- sqrt(predictor$se.fit^2 + TS *
+ object@var %*% rep(1, TT))
+
+
+ } else {
+ predictor <- predictor + eta.mat
+ }
+ }
+ }
+ }
+
+ if(type=="link") {
+ if(no.newdata && length(na.act)) {
+ return(napredict(na.act[[1]], predictor))
+ } else {
+ return(predictor)
+ }
+ } else
+ if(type=="response") {
+ fv <- object@family@inverse(if(se.fit) predictor$fitted.values else
+ predictor, object@extra)
+ if(is.matrix(fv) && is.matrix(object@fitted.values))
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ if(is.matrix(fv) && ncol(fv)==1)
+ fv <- c(fv)
+ if(no.newdata && length(na.act)) {
+ fv = napredict(na.act[[1]], fv) # same for both se.fit cases
+ }
+ if(se.fit) {
+ return(list(fit=fv, se.fit=fv*NA))
+ } else {
+ return(fv)
+ }
+ } else {
+ if(deriv.arg >= 1)
+ if(se.fit) {
+ attr(predictor$fitted.values, "constant") <- NULL
+ } else {
+ attr(predictor, "constant") <- NULL
+ }
+
+ if(deriv.arg >= 1) {
+ v = attr(if(se.fit) predictor$fitted.values else
+ predictor, "vterm.assign")
+ is.lin <- is.linear.term(names(v))
+ coefmat <- coef(object, matrix=TRUE)
+ ord <- 0
+ for(i in names(v)) {
+ ord <- ord + 1
+ index <- v[[i]]
+ lindex <- length(index)
+ if(is.lin[i]) {
+ if(tto[ord]>1 || (length(ttf) && ttf[i,i])) {
+ if(se.fit) {
+ predictor$fitted.values[,index] = NA
+ } else {
+ predictor[,index] <- NA
+ }
+ } else {
+ ans <- coefmat[i, 1:lindex]
+ if(se.fit) {
+ predictor$fitted.values[,index] = if(deriv.arg==1)
+ matrix(ans, ncol=lindex, byrow=TRUE) else 0
+ } else {
+ predictor[,index] <- if(deriv.arg==1)
+ matrix(ans, ncol=lindex, byrow=TRUE) else 0
+ }
+ }
+ } else
+ if(length(s.xargument) && any(n.s.xargument == i)) {
+ ans <- coefmat[i, 1:lindex]
+ if(se.fit) {
+ predictor$fitted.values[,index] =
+ predictor$fitted.values[,index] +
+ (if(deriv.arg==1)
+ matrix(ans, nrow=nrow(predictor$fitted.values),
+ ncol=lindex, byrow=TRUE) else 0)
+ } else {
+ predictor[,index] <- predictor[,index] +
+ (if(deriv.arg==1)
+ matrix(ans, nrow=nrow(predictor),
+ ncol=lindex, byrow=TRUE) else 0)
+ }
+ } else {
+ cat("Derivatives of term ", i, "are unknown\n")
+ if(se.fit) {
+ predictor$fitted.values[,index] <- NA
+ } else {
+ predictor[,index] <- NA
+ }
+ }
+ }
+ }
+
+ if(no.newdata && length(na.act)) {
+ if(se.fit) {
+ predictor$fitted.values = napredict(na.act[[1]],
+ predictor$fitted.values)
+ predictor$se.fit = napredict(na.act[[1]], predictor$se.fit)
+ } else {
+ predictor = napredict(na.act[[1]], predictor)
+ }
+ }
+
+ if(se.fit) {
+ attr(predictor$fitted.values, "derivative") <- deriv.arg
+ } else {
+ attr(predictor, "derivative") <- deriv.arg
+ }
+
+ return(predictor)
+ }
+}
+
+
+ setMethod("predict", "vgam",
+ function(object, ...)
+ predict.vgam(object, ...))
+
+
+
+varassign <- function(constraints, n.s.xargument) {
+
+ if(!length(n.s.xargument))
+ stop("length(n.s.xargument) must be > 0")
+
+ ans <- vector("list", length(n.s.xargument))
+
+ ncolBlist <- unlist(lapply(constraints, ncol))
+
+ names(ans) <- n.s.xargument
+ ptr <- 1
+ for(i in n.s.xargument) {
+ temp <- ncolBlist[[i]]
+ ans[[i]] <- ptr:(ptr+temp-1)
+ ptr <- ptr + temp
+ }
+ ans
+}
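+
+# Illustrative example, not part of VGAM itself: varassign() maps each s()
+# term to the columns it occupies, the widths being ncol() of the matching
+# constraint matrices; e.g. with constraint matrices of 2 and 1 columns it
+# returns list("s(x)"=1:2, "s(x2)"=3) (term names hypothetical).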
+
+
+
+
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
new file mode 100644
index 0000000..2847cf7
--- /dev/null
+++ b/R/predict.vglm.q
@@ -0,0 +1,230 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+untransformVGAM = function(object, pred) {
+ M = object@misc$M
+ Links = object@misc$link
+ if(length(Links) != M && length(Links) != 1)
+ stop("cannot obtain the link functions to untransform the object")
+ upred = pred
+ earg = object at misc$earg
+ for(ii in 1:M) {
+ TTheta = pred[,ii] # Transformed theta
+ newcall = paste(Links[ii], "(theta=TTheta, earg=earg, inverse=TRUE)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ Theta = eval(newcall) # Theta, the untransformed parameter
+ upred[,ii] = Theta
+ }
+ dmn2 = if(length(names(object@misc$link))) names(object@misc$link) else {
+ if(length(object@misc$parameters)) object@misc$parameters else NULL}
+ dimnames(upred) = list(dimnames(upred)[[1]], dmn2)
+ upred
+}
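+
+# Note (illustrative, not part of VGAM itself): for each linear predictor
+# the loop above builds and evaluates a call such as
+# loge(theta=TTheta, earg=earg, inverse=TRUE)
+# ("loge" standing for whatever Links[ii] is), so every column of pred is
+# mapped back to the original parameter scale by its inverse link.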
+
+
+
+
+predict.vglm <- function(object,
+ newdata=NULL,
+ type=c("link", "response", "terms"),
+ se.fit=FALSE,
+ deriv=0,
+ dispersion=NULL,
+ untransform=FALSE,
+ extra=object@extra, ...)
+{
+
+ na.act = object@na.action
+ object@na.action = list()
+
+ mextra <- missing(extra)
+ if(mextra) {
+ }
+
+ if(deriv!=0)
+ stop("deriv must be 0 for predict.vglm")
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("link", "response", "terms"))[1]
+
+ if(untransform && (type!="link" || se.fit || deriv != 0))
+ stop(paste("argument \"untransform\"=TRUE only if type=\"link\",",
+ "se.fit=FALSE, deriv=0"))
+
+
+ pred =
+ if(se.fit)
+ switch(type,
+ response = {
+ warning(paste("type=\"response\" and se.fit=TRUE not valid",
+ "together; setting se.fit=FALSE"))
+ se.fit <- FALSE
+ predictor <- predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv,
+ dispersion=dispersion, ...)
+ fv <- object@family@inverse(predictor, extra)
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ fv
+ },
+ link = {
+ type <- "response"
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ },
+ terms={
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ }) else {
+ if(is.null(newdata)) {
+ switch(type,
+ link=object@predictors,
+ response=object@fitted.values,
+ terms={
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ })
+ } else {
+ if(!(length(object@offset)==1 && object@offset==0))
+ warning("zero offset used")
+ switch(type,
+ response={
+ predictor <- predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv,
+ dispersion=dispersion, ...)
+ M <- object@misc$M
+
+
+
+ fv <- object@family@inverse(predictor, extra)
+ if(M > 1 && is.matrix(fv)) {
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ } else {
+ }
+ fv
+ },
+ link = {
+ type <- "response"
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ },
+ terms={
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ }
+ )
+ }
+ }
+
+ if(!length(newdata) && length(na.act)) {
+ if(se.fit) {
+ pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit = napredict(na.act[[1]], pred$se.fit)
+ } else {
+ pred = napredict(na.act[[1]], pred)
+ }
+ }
+
+ if(untransform) untransformVGAM(object, pred) else pred
+}
+
+
+setMethod("predict", "vglm", function(object, ...)
+ predict.vglm(object, ...))
+
+
+
+
+predict.rrvglm = function(object,
+ newdata=NULL,
+ type=c("link", "response", "terms"),
+ se.fit=FALSE,
+ deriv=0,
+ dispersion=NULL,
+ extra=object@extra, ...)
+{
+
+ if(se.fit) {
+ stop("11/8/03; predict.rrvglm(..., se.fit=TRUE) not complete yet")
+ pred =
+ switch(type,
+ response = {
+ warning(paste("type=\"response\" and se.fit=TRUE not valid",
+ "together; setting se.fit=FALSE"))
+ se.fit <- FALSE
+ predictor <- predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv,
+ dispersion=dispersion, ...)
+ fv <- object@family@inverse(predictor, extra)
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ fv
+ },
+ link = {
+ type <- "response"
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ },
+ terms={
+ predict.vlm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv, dispersion=dispersion, ...)
+ }
+ )
+ } else {
+ return(predict.vglm(object, newdata=newdata,
+ type=type, se.fit=se.fit,
+ deriv=deriv,
+ dispersion=dispersion, ...))
+ }
+
+ if(!length(newdata) && length(na.act)) {
+ if(se.fit) {
+ pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit = napredict(na.act[[1]], pred$se.fit)
+ } else {
+ pred = napredict(na.act[[1]], pred)
+ }
+ }
+
+ pred
+}
+
+
+setMethod("predict", "rrvglm", function(object, ...)
+ predict.rrvglm(object, ...))
+
+
+
+if(FALSE) {
+predict.rrvlm = function(object,
+ newdata=NULL,
+ type=c("link", "response", "terms"),
+ se.fit=FALSE,
+ deriv=0,
+ dispersion=NULL,
+ extra=object@extra, ...)
+{
+ stop("this function hasn't been written yet")
+}
+
+
+setMethod("predict", "rrvlm", function(object, ...)
+ predict.rrvlm(object, ...))
+}
+
+
+
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
new file mode 100644
index 0000000..ffa17e4
--- /dev/null
+++ b/R/predict.vlm.q
@@ -0,0 +1,410 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+if(!exists("is.R")) is.R <- function()
+ exists("version") && !is.null(version$language) && version$language=="R"
+
+
+if(FALSE) vmodel.matrix.lm <- function(object, ...) {
+ x <- slot(object, "x")
+ if(!length(x)) {
+ data <- model.framevlm(object, xlev = object@xlevels, ...)
+
+
+ x <- if(is.R()) {
+ kill.con <- if(length(object@contrasts)) object@contrasts else
+ NULL
+ vmodel.matrix.default(object, data = data,
+ contrasts = kill.con)
+ } else
+ model.matrix.default(terms(object), data = data,
+ contrasts = object@contrasts)
+ }
+ x
+}
+
+
+if(!is.R()) {
+if(FALSE) vmodel.frame.lm <- function(formula, data = NULL,
+ na.action = NULL, ...) {
+ m <- formula@model
+ if(!is.null(m))
+ return(m)
+ oc <- formula@call
+ oc$method <- "model.frame"
+ oc[[1.]] <- as.name("lm")
+ if(length(data)) {
+ oc$data <- substitute(data)
+ eval(oc, sys.parent())
+ }
+ else eval(oc, list())
+ }
+}
+
+
+if(is.R()) {
+if(FALSE) vmodel.frame.lm = function (formula, data, na.action, ...) {
+ if (is.null(formula@model)) {
+ fcall <- formula@call
+ fcall$method <- "model.frame"
+ fcall[[1]] <- as.name("lm")
+ env <- environment(fcall$formula)
+ if (is.null(env))
+ env <- parent.frame()
+ eval(fcall, env)
+ } else formula@model
+ }
+}
+
+
+predict.vlm <- function(object, newdata=NULL, type=c("response","terms"),
+ se.fit = FALSE, scale = NULL,
+ terms.arg=NULL,
+ raw=FALSE,
+ dispersion = NULL, ...)
+{
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("response","terms"))[1]
+
+ na.act = object@na.action
+ object@na.action = list()
+
+ if(raw && type != "terms")
+ stop("sorry, raw=TRUE only works when type=\"terms\"")
+
+
+ if(!length(newdata) && type=="response" && !se.fit &&
+ length(object@fitted.values)) {
+ if(length(na.act)) {
+ return(napredict(na.act[[1]], object@fitted.values))
+ } else {
+ return(object@fitted.values)
+ }
+ }
+
+
+ tt <- terms(object) # 11/8/03; object@terms$terms
+ if(!length(newdata)) {
+ offset <- object@offset
+
+ if(FALSE) {
+
+ X <- model.matrixvlm(object, type="lm")
+
+
+
+ if(is.R() && !length(object@x)) {
+ attrassigndefault <- function(mmat, tt) {
+ if (!inherits(tt, "terms"))
+ stop("need terms object")
+ aa <- attr(mmat, "assign")
+ if (is.null(aa))
+ stop("argument is not really a model matrix")
+ ll <- attr(tt, "term.labels")
+ if (attr(tt, "intercept") > 0)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
+ split(order(aa), aaa)
+ }
+ attrassignlm <- function(object, ...)
+ attrassigndefault(model.matrix(object, type="lm"),
+ terms(object))
+
+ attr(X, "assign") <- attrassignlm(X, tt)
+ }
+ } else {
+ X <- model.matrix(object, type="lm")
+ }
+ } else {
+
+ if(is.smart(object) && length(object@smart.prediction)) {
+ setup.smart("read", smart.prediction=object@smart.prediction)
+
+ }
+
+
+
+ X = model.matrix(delete.response(tt), newdata,
+ contrasts=if(length(object@contrasts)) object@contrasts else NULL,
+ xlev = object@xlevels)
+
+
+ if(is.R() && object@misc$intercept.only && nrow(X)!=nrow(newdata)) {
+ as.save = attr(X, "assign")
+ X = X[rep(1, nrow(newdata)),,drop=FALSE] # =matrix(1,nrow(newdata),1)
+ dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
+ attr(X, "assign") = as.save # Restored
+ }
+
+ offset <- if (!is.null(off.num<-attr(tt,"offset")))
+ eval(attr(tt,"variables")[[off.num+1]], newdata)
+ else if (!is.null(object@offset))
+ eval(object@call$offset, newdata)
+
+ if(is.smart(object) && length(object@smart.prediction)) {
+ wrapup.smart()
+ }
+
+ if(is.R())
+ attr(X, "assign") <- attrassigndefault(X, tt)
+
+ }
+
+
+ hasintercept <- attr(tt, "intercept")
+
+ dx1 <- dimnames(X)[[1]]
+ M <- object@misc$M
+ Blist <- object@constraints
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if(hasintercept)
+ ncolBlist <- ncolBlist[-1]
+
+ xbar <- NULL
+ if(type == "terms" && hasintercept) {
+ if(is.null(newdata)) {
+ xbar <- apply(X, 2, mean)
+ X <- sweep(X, 2, xbar)
+ } else {
+ xbar <- apply(model.matrixvlm(object, type="lm"), 2, mean)
+ xbar <- apply(X, 2, mean)
+ X <- sweep(X, 2, xbar)
+ }
+ nac <- is.na(object@coefficients)
+ if(any(nac)) {
+ X <- X[, !nac, drop=FALSE]
+ xbar <- xbar[!nac]
+ }
+ }
+
+ if(!is.null(newdata) && !is.data.frame(newdata))
+ newdata <- as.data.frame(newdata)
+
+ nn <- if(!is.null(newdata)) nrow(newdata) else object@misc$n
+ if(raw) {
+ Blist <- canonical.Blist(Blist)
+ object@constraints <- Blist
+ }
+
+
+ X <- lm2vlm.model.matrix(X, Blist=Blist, M=M, xij=object@control$xij)
+ attr(X, "constant") <- xbar
+
+ coefs <- coef(object)
+ vasgn <- attr(X, "vassign")
+
+
+ if(type == "terms") {
+ nv <- names(vasgn)
+ if(hasintercept)
+ nv <- nv[-(1:ncol(object@constraints[["(Intercept)"]]))]
+ terms.arg <- if(is.null(terms.arg)) nv else terms.arg
+
+ index <- if(is.R()) charmatch(terms.arg, nv) else
+ match.arg(terms.arg, nv)
+ if(all(index==0)) {
+ warning("no match found; returning all terms")
+ index <- 1:length(nv)
+ }
+ vasgn <- if(is.R()) vasgn[nv[index]] else {
+ ans <- vector("list", length(index))
+ for(loop in 1:length(index))
+ ans[[loop]] <- vasgn[[index[loop]]]
+ names(ans) <- index
+ ans
+ }
+ }
+
+ if(any(is.na(object@coefficients)))
+ stop("can't handle NA's in object@coefficients")
+
+ dname2 <- object@misc$predictors.names
+ if(se.fit) {
+ class(object) <- "vlm" # coerce
+ fit.summary <- summaryvlm(object, dispersion=dispersion)
+ sigma <- if(!is.null(fit.summary@sigma)) fit.summary@sigma else
+ sqrt(deviance(object) / object@df.residual) # was @rss
+ pred <- Build.terms.vlm(X, coefs,
+ cov=sigma^2 * fit.summary@cov.unscaled,
+ vasgn,
+ collapse=type!="terms", M=M,
+ dimname=list(dx1, dname2),
+ coefmat=coef(object, matrix=TRUE))
+ pred$df <- object@df.residual
+ pred$sigma <- sigma
+ } else
+ pred <- Build.terms.vlm(X, coefs, cov=NULL, assign = vasgn,
+ collapse=type!="terms", M=M,
+ dimname=list(dx1, dname2),
+ coefmat=coef(object, matrix=TRUE))
+ constant <- attr(pred, "constant")
+
+
+ if(length(offset) && any(offset != 0))
+ if(se.fit) {
+ pred$fitted.values <- pred$fitted.values + offset
+ } else {
+ pred <- pred + offset
+ }
+
+ if(type == "terms") {
+ Blist <- subconstraints(object@misc$orig.assign, object@constraints)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if(hasintercept)
+ ncolBlist <- ncolBlist[-1]
+
+ cs <- cumsum(c(1, ncolBlist)) # Like a pointer
+ for(i in 1:(length(cs)-1))
+ if(cs[i+1]-cs[i]>1)
+ for(k in (cs[i]+1):(cs[i+1]-1))
+ if(se.fit) {
+ pred$fitted.values[,cs[i]]=pred$fitted.values[,cs[i]] +
+ pred$fitted.values[,k]
+ pred$se.fit[,cs[i]] = pred$se.fit[,cs[i]] +
+ pred$se.fit[,k]
+ } else {
+ pred[,cs[i]] <- pred[,cs[i]] + pred[,k]
+ }
+
+
+ if(se.fit) {
+ pred$fitted.values=pred$fitted.values[,cs[-length(cs)],drop=FALSE]
+ pred$se.fit <- pred$se.fit[, cs[-length(cs)], drop=FALSE]
+ } else {
+ pred <- pred[, cs[-length(cs)], drop=FALSE]
+ }
+
+ pp <- if(se.fit) ncol(pred$fitted.values) else ncol(pred)
+ if(se.fit) {
+ dimnames(pred$fitted.values) <- dimnames(pred$se.fit) <- NULL
+ dim(pred$fitted.values) <- dim(pred$se.fit) <- c(M, nn, pp)
+ pred$fitted.values <- aperm(pred$fitted.values, c(2,1,3))
+ pred$se.fit <- aperm(pred$se.fit, c(2,1,3))
+ dim(pred$fitted.values) <- dim(pred$se.fit) <- c(nn, M*pp)
+ } else {
+ dimnames(pred) <- NULL # Saves a warning
+ dim(pred) <- c(M, nn, pp)
+ pred <- aperm(pred, c(2,1,3))
+ dim(pred) <- c(nn, M*pp)
+ }
+
+ if(raw) {
+ kindex <- NULL
+ for(i in 1:pp)
+ kindex <- c(kindex, (i-1)*M + (1:ncolBlist[i]))
+ if(se.fit) {
+ pred$fitted.values <- pred$fitted.values[,kindex,drop=FALSE]
+ pred$se.fit <- pred$se.fit[,kindex,drop=FALSE]
+ } else {
+ pred <- pred[,kindex,drop=FALSE]
+ }
+ }
+
+ temp <- if(raw) ncolBlist else rep(M, length(ncolBlist))
+ dd <- vlabel(names(ncolBlist), temp, M)
+ if(se.fit) {
+ dimnames(pred$fitted.values) <-
+ dimnames(pred$se.fit) <- list(if(length(newdata))
+ dimnames(newdata)[[1]] else dx1, dd)
+ } else {
+ dimnames(pred) <- list(if(length(newdata))
+ dimnames(newdata)[[1]] else dx1, dd)
+ }
+
+ if(!length(newdata) && length(na.act)) {
+ if(se.fit) {
+ pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit = napredict(na.act[[1]], pred$se.fit)
+ } else {
+ pred = napredict(na.act[[1]], pred)
+ }
+ }
+
+ if(!raw) {
+ cs <- cumsum(c(1, M + 0*ncolBlist))
+ }
+ fred <- vector("list", length(ncolBlist))
+ for(i in 1:length(fred))
+ fred[[i]] <- cs[i]:(cs[i+1]-1)
+ names(fred) <- names(ncolBlist)
+ if(se.fit) {
+ attr(pred$fitted.values, "vterm.assign") <- fred
+ attr(pred$se.fit, "vterm.assign") <- fred
+ } else {
+ attr(pred, "vterm.assign") <- fred
+ }
+ }
+
+ if(!is.null(xbar))
+ if(se.fit) {
+ attr(pred$fitted.values, "constant") <- constant
+ } else {
+ attr(pred, "constant") <- constant
+ }
+
+ pred
+}
+
+
+subconstraints <- function(assign, constraints) {
+
+
+ ans <- vector("list", length(assign))
+ if(!length(assign) || !length(constraints))
+ stop("assign and/or constraints is empty")
+ for(i in 1:length(assign))
+ ans[[i]] <- constraints[[assign[[i]][1]]]
+ names(ans) <- names(assign)
+ ans
+}
+
+
+is.linear.term <- function(ch) {
+ lch <- length(ch)
+ ans <- rep(FALSE, len=lch)
+ for(i in 1:lch) {
+ nc <- nchar(ch[i])
+ x <- substring(ch[i], 1:nc, 1:nc)
+ ans[i] <- all(x!="(" & x!="+" & x!="-" & x!="/" & x!="*" &
+ x!="^")
+ }
+ names(ans) <- ch
+ ans
+}
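+
+# Illustrative example, not part of VGAM itself: is.linear.term() flags
+# term labels that are plain variable names, e.g.
+# is.linear.term(c("x2", "s(x2)", "I(x^2)")) # TRUE FALSE FALSE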
+
+
+
+
+canonical.Blist <- function(Blist) {
+ for(i in 1:length(Blist)) {
+ temp <- Blist[[i]] * 0
+ r <- ncol(temp)
+ temp[cbind(1:r,1:r)] <- 1
+ Blist[[i]] <- temp
+ }
+ Blist
+}
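+
+# Note (illustrative, not part of VGAM itself): canonical.Blist() replaces
+# each M x r constraint matrix by the first r columns of the M x M
+# identity matrix (1s at positions (1,1),...,(r,r), 0s elsewhere); this is
+# the "raw" parameterization used by predict.vlm() when raw=TRUE.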
+
+
+
+
+ setMethod("predict", "vlm",
+ function(object, ...)
+ predict.vlm(object, ...))
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/print.summary.others.q b/R/print.summary.others.q
new file mode 100644
index 0000000..bb20d72
--- /dev/null
+++ b/R/print.summary.others.q
@@ -0,0 +1,33 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+printsummary.lms <- function(x, digits = NULL, quote = TRUE, prefix = "")
+{
+
+ printsummary.vglm(x, digits = digits, quote = quote, prefix = prefix)
+
+ cat("\nLMS method!!!\n")
+
+ cat("\nfirst 4 rows of $lin are\n")
+ print.matrix(x$testing)
+
+ invisible(NULL)
+}
+
+
+
+printsummary.rc.exponential <- function(x, digits = NULL, quote = TRUE, prefix = "")
+{
+
+ printsummary.vglm(x, digits = digits, quote = quote, prefix = prefix)
+
+
+ cat("\nNumber of censored observations: ", x$num.censored, "\n")
+
+ invisible(NULL)
+}
+
diff --git a/R/print.vglm.q b/R/print.vglm.q
new file mode 100644
index 0000000..5f77ee0
--- /dev/null
+++ b/R/print.vglm.q
@@ -0,0 +1,95 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+print.vglm <- function(x, ...)
+{
+ if(!is.null(cl <- x@call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x@coefficients
+ if(any(nas <- is.na(coef))) {
+ if(is.null(names(coef)))
+ names(coef) <- paste("b", 1:length(coef), sep = "")
+ cat("\nCoefficients: (", sum(nas),
+ " not defined because of singularities)\n", sep = "")
+ } else
+ cat("\nCoefficients:\n")
+ print.default(coef, ...)
+
+ rank <- x@rank
+ if(!length(rank))
+ rank <- sum(!nas)
+ nobs <- if(length(x@df.total)) x@df.total else length(x@residuals)
+ rdf <- x@df.residual
+ if(!length(rdf))
+ rdf <- nobs - rank
+ cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
+
+ if(length(deviance(x)))
+ cat("Residual Deviance:", format(deviance(x)), "\n")
+ llx = logLik(x)
+ if(length(llx))
+ cat("Log-likelihood:", format(llx), "\n")
+
+ if(length(x@criterion)) {
+ ncrit <- names(x@criterion)
+ for(i in ncrit)
+ if(i!="loglikelihood" && i!="deviance")
+ cat(paste(i, ":", sep=""), format(x@criterion[[i]]), "\n")
+ }
+
+ invisible(x)
+}
+
+
+print.vgam <- function(x, digits=2, ...)
+{
+
+ if(!is.null(cl <- x@call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x@coefficients
+ nas <- is.na(coef)
+
+ rank <- x@rank
+ if(is.null(rank))
+ rank <- sum(!nas)
+ nobs <- if(length(x@df.total)) x@df.total else length(x@residuals)
+ rdf <- x@df.residual
+ if(is.null(rdf))
+ rdf <- nobs - rank
+ cat("\nDegrees of Freedom:", nobs, "Total;",
+ format(round(rdf, digits=digits)), "Residual\n")
+
+ if(length(deviance(x)))
+ cat("Residual Deviance:", format(deviance(x)), "\n")
+ llx = logLik(x)
+ if(length(llx))
+ cat("Log-likelihood:", format(llx), "\n")
+
+ criterion <- attr(terms(x), "criterion") # 11/8/03; x@terms$terms,
+ if(!is.null(criterion) && criterion!="coefficients")
+ cat(paste(criterion, ":", sep=""), format(x[[criterion]]),
+ "\n")
+
+ invisible(x)
+}
+
+
+
+
+setMethod("print", "vlm", function(x, ...) print.vlm(x, ...))
+setMethod("print", "vglm", function(x, ...) print.vglm(x, ...))
+setMethod("print", "vgam", function(x, ...) print.vgam(x, ...))
+
+
+ setMethod("show", "vlm", function(object) print.vlm(object))
+ setMethod("show", "vglm", function(object) print.vglm(object))
+ setMethod("show", "vgam", function(object) print.vgam(object))
+
+
diff --git a/R/print.vlm.q b/R/print.vlm.q
new file mode 100644
index 0000000..38d6218
--- /dev/null
+++ b/R/print.vlm.q
@@ -0,0 +1,47 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+if(!is.R()) {
+setMethod("show", "vlm",
+ function(object)
+ print.vlm(object))
+}
+
+setMethod("print", "vlm",
+ function(x, ...)
+ print.vlm(x, ...))
+
+print.vlm <- function(x, ...)
+{
+ if(!is.null(cl <- x@call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x@coefficients
+ cat("\nCoefficients:\n")
+ print(coef, ...)
+
+ rank <- x@rank
+ if(is.null(rank))
+ rank <- sum(!is.na(coef))
+ n <- x@misc$n
+ M <- x@misc$M
+ nobs <- if(length(x@df.total)) x@df.total else n*M
+ rdf <- x@df.residual
+ if(is.null(rdf))
+ rdf <- (n - rank) * M
+ cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
+
+ if(length(deviance(x)) && is.finite(deviance(x)))
+ cat("Deviance:", format(deviance(x)), "\n")
+ if(length(x@rss) && is.finite(x@rss))
+ cat("Residual Sum of Squares:", format(x@rss), "\n")
+
+ invisible(x)
+}
+
+
+
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
new file mode 100644
index 0000000..a78553b
--- /dev/null
+++ b/R/qrrvglm.control.q
@@ -0,0 +1,125 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+qrrvglm.control = function(Rank=1,
+ Bestof = if(length(Cinit)) 1 else 10,
+ checkwz=TRUE,
+ Cinit = NULL,
+ Crow1positive=TRUE,
+ epsilon = 1.0e-06,
+ EqualTolerances = ITolerances,
+ Etamat.colmax = 10,
+ FastAlgorithm = TRUE,
+ GradientFunction=TRUE,
+ Hstep = 0.001,
+ isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+ iKvector = 0.1,
+ iShape = 0.1,
+ ITolerances = TRUE,
+ maxitl = 40,
+ method.init = 1,
+ Maxit.optim = 250,
+ MUXfactor = rep(7, length=Rank),
+ Norrr = ~ 1,
+ optim.maxit = 20,
+ Parscale = if(ITolerances) 0.001 else 1.0,
+ SD.Cinit = 0.02,
+ SmallNo = 5.0e-13,
+ trace = TRUE,
+ Use.Init.Poisson.QO=TRUE,
+ wzepsilon = .Machine$double.eps^0.75,
+ ...)
+{
+
+
+
+ if(!is.Numeric(iShape, posit=TRUE)) stop("bad input for \"iShape\"")
+ if(!is.Numeric(iKvector, posit=TRUE)) stop("bad input for \"iKvector\"")
+ if(!is.Numeric(isdlv, posit=TRUE)) stop("bad input for \"isdlv\"")
+ if(any(isdlv < 0.2 | isdlv > 10))
+ stop("isdlv values must lie between 0.2 and 10")
+ if(length(isdlv) > 1 && any(diff(isdlv) > 0))
+ stop("successive isdlv values must not increase")
+ if(!is.Numeric(epsilon, posit=TRUE, allow=1))
+ stop("bad input for \"epsilon\"")
+ if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
+ stop("bad input for \"Etamat.colmax\"")
+ if(!is.Numeric(Hstep, posit=TRUE, allow=1))
+ stop("bad input for \"Hstep\"")
+ if(!is.Numeric(maxitl, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"maxitl\"")
+ if(!is.Numeric(method.init, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"method.init\"")
+ if(!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"Maxit.optim\"")
+ if(!is.Numeric(MUXfactor, posit=TRUE))
+ stop("bad input for \"MUXfactor\"")
+ if(any(MUXfactor < 1 | MUXfactor > 10))
+ stop("MUXfactor values must lie between 1 and 10")
+ if(!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"optim.maxit\"")
+ if(!is.Numeric(Rank, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"Rank\"")
+ if(!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
+ stop("bad input for \"SD.Cinit\"")
+ if(ITolerances && !EqualTolerances)
+ stop("EqualTolerances must be TRUE if ITolerances is TRUE")
+ if(!is.Numeric(Bestof, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"Bestof\"")
+
+
+ FastAlgorithm = as.logical(FastAlgorithm)[1]
+ if(!FastAlgorithm)
+ stop("FastAlgorithm=TRUE is now required")
+
+ if((SmallNo < .Machine$double.eps) ||
+ (SmallNo > .0001)) stop("SmallNo is out of range")
+ if(any(Parscale <= 0))
+ stop("Parscale must contain positive numbers only")
+
+ if(!is.logical(checkwz) || length(checkwz) != 1)
+ stop("bad input for \"checkwz\"")
+ if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ stop("bad input for \"wzepsilon\"")
+
+ ans = list(
+ Bestof = Bestof,
+ checkwz=checkwz,
+ Cinit = Cinit,
+ Crow1positive=as.logical(rep(Crow1positive, len=Rank)),
+ ConstrainedQO = TRUE, # A constant, not a control parameter
+ Corner = FALSE, # Needed for valt.1iter()
+ Dzero = NULL,
+ epsilon = epsilon,
+ EqualTolerances = EqualTolerances,
+ Etamat.colmax = Etamat.colmax,
+ FastAlgorithm = FastAlgorithm,
+ GradientFunction = GradientFunction,
+ Hstep = Hstep,
+ isdlv = rep(isdlv, len=Rank),
+ iKvector = as.numeric(iKvector),
+ iShape = as.numeric(iShape),
+ ITolerances = ITolerances,
+ maxitl = maxitl,
+ method.init = method.init,
+ Maxit.optim = Maxit.optim,
+ min.criterion = TRUE, # needed for calibrate
+ MUXfactor = rep(MUXfactor, length=Rank),
+ Norrr = Norrr,
+ optim.maxit = optim.maxit,
+ OptimizeWrtC = TRUE,
+ Parscale = Parscale,
+ Quadratic = TRUE,
+ Rank = Rank,
+ save.weight = FALSE,
+ SD.Cinit = SD.Cinit,
+ SmallNo = SmallNo,
+ Structural.zero = NULL,
+ Svd.arg = TRUE, Alpha=0.5, Uncor = TRUE,
+ trace = trace,
+ Use.Init.Poisson.QO=as.logical(Use.Init.Poisson.QO)[1],
+ wzepsilon = wzepsilon)
+ ans
+}
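+# Explanatory note (editorial, not upstream text): ITolerances=TRUE fits an
+# equal-tolerances QRR-VGLM, which is why the check above requires
+# EqualTolerances to be TRUE as well; Parscale defaults to a smaller value
+# in that case so that optim() works on comparably scaled C coefficients.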
+
diff --git a/R/qtplot.q b/R/qtplot.q
new file mode 100644
index 0000000..f396671
--- /dev/null
+++ b/R/qtplot.q
@@ -0,0 +1,764 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+qtplot.lms.bcn <- function(percentiles=c(25,50,75),
+ eta=NULL, yoffset=0)
+{
+
+ lp = length(percentiles)
+ answer <- matrix(as.numeric(NA), nrow(eta), lp, dimnames=list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep="")))
+ for(i in 1:lp) {
+ answer[,i] <- eta[,2] * (1+eta[,1] * eta[,3] *
+ qnorm(percentiles[i]/100))^(1/eta[,1])
+ }
+ answer
+}
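+# The Box-Cox-normal (LMS) quantile formula used above: with the columns of
+# eta being (lambda, mu, sigma), the 100p percentile is
+# Q(p) = mu * (1 + lambda * sigma * qnorm(p))^(1/lambda).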
+
+qtplot.lms.bcg <- function(percentiles=c(25,50,75),
+ eta=NULL, yoffset=0)
+{
+
+ cc <- percentiles
+ lp = length(percentiles)
+ answer <- matrix(as.numeric(NA), nrow(eta), lp, dimnames=list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep="")))
+ lambda <- eta[,1]
+ sigma <- eta[,3]
+ shape <- 1 / (lambda * sigma)^2
+ for(i in 1:lp) {
+ ccc <- rep(cc[i]/100, len=nrow(eta))
+ ccc <- ifelse(lambda>0, ccc, 1-ccc)
+ answer[,i] <- eta[,2] * (qgamma(ccc, sh=shape)/shape)^(1/lambda)
+ }
+ answer
+}
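+# Box-Cox-gamma analogue: with shape = 1/(lambda*sigma)^2, and the tail
+# probability flipped to 1-p when lambda < 0, the percentile is
+# Q(p) = mu * (qgamma(p, shape)/shape)^(1/lambda).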
+
+qtplot.lms.yjn <- function(percentiles=c(25,50,75),
+ eta=NULL, yoffset=0)
+{
+
+ cc <- percentiles
+ lp = length(percentiles)
+ answer <- matrix(as.numeric(NA), nrow(eta), lp, dimnames=list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep="")))
+ lambda <- eta[,1]
+ mu <- eta[,2]
+ sigma <- eta[,3] # Link function already taken care of above
+ for(i in 1:lp) {
+ ccc <- mu + sigma * qnorm(cc[i]/100)
+ answer[,i] <- yeo.johnson(ccc, lambda, inverse= TRUE) - yoffset
+ }
+ answer
+}
+
+qtplot.default <- function(object, ...) {
+
+ warning("no methods function. Returning the object")
+ invisible(object)
+}
+
+
+
+"qtplot.vglm" <- function(object, Attach= TRUE, ...) {
+
+ LL <- length(object@family@vfamily)
+ newcall = paste("qtplot.", object@family@vfamily[LL],
+ "(object, ...)", sep="")
+ newcall = parse(text=newcall)[[1]]
+
+ if(Attach) {
+ object@post$qtplot = eval(newcall)
+ invisible(object)
+ } else
+ eval(newcall)
+}
+
+
+qtplot.lmscreg <- function(object,
+ newdata=NULL,
+ percentiles=object@misc$percentiles,
+ plot.it= TRUE, ...) {
+
+ same <- length(percentiles) == length(object@misc$percentiles) &&
+ all(percentiles==object@misc$percentiles)
+
+ lp <- length(percentiles)
+ if(same) {
+ fitted.values <- if(!length(newdata)) object@fitted.values else {
+ predict(object, newdata=newdata, type="response")
+ }
+ fitted.values <- as.matrix(fitted.values)
+ } else {
+ if(!is.numeric(percentiles))
+ stop("\"percentiles\" must be specified")
+
+ eta <- if(length(newdata)) predict(object, newdata=newdata, type="link") else
+ object@predictors
+ eta <- eta2theta(eta, object@misc$link) # Now lambda, mu, sigma
+
+ newcall = paste("qtplot.", object@family@vfamily[1],
+ "(percentiles=percentiles, eta=eta, yoffset=object@misc$yoffset)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ fitted.values = as.matrix( eval(newcall) )
+ dimnames(fitted.values) <- list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep=""))
+ }
+
+ if(plot.it) {
+ plotqtplot.lmscreg(fit=fitted.values, obj=object,
+ newdata=newdata,
+ lp=lp,
+ percentiles=percentiles, ...)
+ }
+
+ list(fitted.values = fitted.values, percentiles = percentiles)
+}
+
+
+
+plotqtplot.lmscreg <- function(fitted.values, object,
+ newdata=NULL,
+ percentiles=object@misc$percentiles,
+ lp=NULL,
+ add.arg=FALSE,
+ y=if(length(newdata)) FALSE else TRUE, spline.fit=FALSE,
+ label=TRUE,
+ size.label=0.06,
+ xlab=NULL, ylab="",
+ pch=par()$pch, pcex=par()$cex, pcol.arg=par()$col,
+ xlim=NULL, ylim=NULL,
+ llty.arg=par()$lty,
+ lcol.arg=par()$col, llwd.arg=par()$lwd,
+ tcol.arg=par()$col,
+ tadj=1, ...)
+{
+
+
+
+ if(!length(newdata)) {
+ X <- model.matrixvlm(object, type="lm")
+ if(is.matrix(X) && length(object@y) && ncol(X)==2 &&
+ dimnames(X)[[2]][1]=="(Intercept)")
+ {
+ xx <- X[,2]
+ if(is.null(xlab)) {
+ xlab <- if(object@misc$nonparametric)
+ as.vector(slot(object, "s.xargument")) else
+ names(object@assign)[2]
+ }
+
+ if(!add.arg) {
+ if(!is.numeric(xlim))
+ xlim <- if(label)
+ c(min(xx), max(xx)+size.label*diff(range(xx))) else
+ c(min(xx), max(xx))
+ fred <- cbind(object@y, fitted.values)
+ if(!is.numeric(ylim))
+ ylim <- c(min(fred), max(fred))
+ matplot(x=xx, y=fred,
+ xlab=xlab, ylab=ylab, type="n",
+ xlim=xlim, ylim=ylim, ...)
+ }
+
+ if(y && length(object@y))
+ matpoints(x=xx, y=object@y, pch=pch, cex=pcex,
+ col=pcol.arg)
+ } else {
+ warning(paste("there is not a single covariate.",
+ "Returning the object."))
+ return(fitted.values)
+ }
+ } else {
+
+ firstterm = attr(terms(object), "term.labels")[1]
+
+ if(object@misc$nonparametric &&
+ length(object@s.xargument[firstterm]))
+ firstterm <- object@s.xargument[firstterm]
+
+ xx <- newdata[[firstterm]]
+ if(!is.numeric(xx))
+ stop("couldn't extract the 'primary' variable from newdata")
+
+ if(!add.arg) {
+ if(is.null(xlab))
+ xlab <- firstterm
+ if(!is.numeric(xlim))
+ xlim <- if(label)
+ c(min(xx), max(xx)+size.label*diff(range(xx))) else
+ c(min(xx), max(xx))
+ if(!is.numeric(ylim))
+ ylim <- c(min(fitted.values), max(fitted.values))
+ matplot(x=xx, y=fitted.values,
+ xlab=xlab, ylab=ylab, type="n",
+ xlim=xlim, ylim=ylim, col=pcol.arg)
+ }
+ if(y && length(object@y))
+ matpoints(x=xx, y=object@y, pch=pch, cex=pcex,
+ col=pcol.arg)
+
+ }
+
+ tcol.arg = rep(tcol.arg, length=lp)
+ lcol.arg = rep(lcol.arg, length=lp)
+ llwd.arg = rep(llwd.arg, length=lp)
+ llty.arg = rep(llty.arg, length=lp)
+ for(i in 1:lp) {
+ temp <- cbind(xx, fitted.values[,i])
+ temp <- temp[sort.list(temp[,1]),]
+ index <- !duplicated(temp[,1])
+ if(spline.fit) {
+ lines(spline(temp[index,1], temp[index,2]),
+ lty=llty.arg[i], col=lcol.arg[i], err=-1, lwd=llwd.arg[i])
+ } else {
+ lines(temp[index,1], temp[index,2],
+ lty=llty.arg[i], col=lcol.arg[i], err=-1, lwd=llwd.arg[i])
+ }
+ if(label)
+ text(par()$usr[2], temp[nrow(temp),2],
+ paste( percentiles[i], "%", sep=""),
+ adj=tadj, col=tcol.arg[i], err=-1)
+ }
+
+ invisible(fitted.values)
+}
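+# Illustrative call, a sketch only ('fit' is a hypothetical lms.bcn model,
+# e.g. something like fit <- vgam(y ~ s(x), lms.bcn(zero=1), data=mydat)):
+# fit <- qtplot(fit, percentiles=c(5, 50, 95), lcol.arg=1:3)
+# fit@post$qtplot$fitted.values # the plotted percentile curves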
+
+
+if(TRUE) {
+ if(!isGeneric("qtplot"))
+ setGeneric("qtplot", function(object, ...) standardGeneric("qtplot"))
+
+
+ setMethod("qtplot", signature(object="vglm"),
+ function(object, ...)
+ invisible(qtplot.vglm(object, ...)))
+ setMethod("qtplot", signature(object="vgam"),
+ function(object, ...)
+ invisible(qtplot.vglm(object, ...)))
+}
+
+
+
+"qtplot.vextremes" <- function(object, ...) {
+
+
+ newcall = paste("qtplot.", object at family@vfamily[1],
+ "(object=object, ... )", sep="")
+ newcall = parse(text=newcall)[[1]]
+ eval(newcall)
+}
+
+
+qtplot.egumbel <-
+qtplot.gumbel <-
+ function(object, plot.it=TRUE, y.arg=TRUE, spline.fit=FALSE, label=TRUE,
+ R=object@misc$R,
+ percentiles=object@misc$percentiles,
+ add.arg=FALSE,
+ mpv=object@misc$mpv,
+ xlab=NULL, ylab="", main="",
+ pch=par()$pch, pcol.arg=par()$col,
+ llty.arg=par()$lty, lcol.arg=par()$col, llwd.arg=par()$lwd,
+ tcol.arg=par()$col, tadj=1, ...)
+{
+ if(!is.logical(mpv) || length(mpv) != 1)
+ stop("bad input for \"mpv\"")
+ if(!length(percentiles) ||
+ (!is.Numeric(percentiles, posit=TRUE) || max(percentiles) >= 100))
+ stop("bad input for \"percentiles\"")
+
+
+
+ eta <- predict(object)
+
+
+ if(is.Numeric(R))
+ R <- rep(R, length=nrow(eta))
+
+ if(!is.Numeric(percentiles))
+ stop("the \"percentiles\" argument needs to be assigned a value")
+
+
+ extra = object@extra
+ extra$mpv = mpv # Overwrite if necessary
+ extra$R = R
+ extra$percentiles = percentiles
+ fitted.values = object@family@inverse(eta=eta, extra=extra)
+
+ answer = list(fitted.values = fitted.values,
+ percentiles = percentiles)
+
+ if(!plot.it)
+ return(answer)
+
+
+
+ lp = length(percentiles) # Does not include mpv
+ tcol.arg = rep(tcol.arg, length=lp+mpv)
+ lcol.arg = rep(lcol.arg, length=lp+mpv)
+ llwd.arg = rep(llwd.arg, length=lp+mpv)
+ llty.arg = rep(llty.arg, length=lp+mpv)
+
+ X <- model.matrixvlm(object, type="lm")
+ if(is.matrix(X) && length(object@y) && ncol(X)==2 &&
+ dimnames(X)[[2]][1]=="(Intercept)")
+ {
+ xx <- X[,2]
+ if(!length(xlab))
+ xlab <- if(object@misc$nonparametric &&
+ length(object@s.xargument))
+ object@s.xargument else names(object@assign)[2]
+
+ if(!add.arg)
+ matplot(x=xx, y=cbind(object@y, fitted.values), main=main,
+ xlab=xlab, ylab=ylab, type="n", ...)
+
+ if(y.arg) {
+ matpoints(x=xx, y=object@y, pch=pch, col=pcol.arg)
+ }
+ } else {
+ warning("there is not a single covariate.")
+ return(answer)
+ }
+
+ for(i in 1:(lp+mpv))
+ {
+ temp <- cbind(xx, fitted.values[,i])
+ temp <- temp[sort.list(temp[,1]),]
+ index <- !duplicated(temp[,1])
+ if(spline.fit) {
+ lines(spline(temp[index,1], temp[index,2]),
+ lty=llty.arg[i], col=lcol.arg[i], lwd=llwd.arg[i])
+ } else {
+ lines(temp[index,1], temp[index,2],
+ lty=llty.arg[i], col=lcol.arg[i], lwd=llwd.arg[i])
+ }
+ if(label) {
+ mylabel = (dimnames(answer$fitted)[[2]])[i]
+ text(par()$usr[2], temp[nrow(temp),2],
+ mylabel, adj=tadj, col=tcol.arg[i], err=-1)
+ }
+ }
+
+ invisible(answer)
+}
+
+
+
+
+
+deplot.lms.bcn <- function(object,
+ newdata,
+ y.arg,
+ eta0)
+{
+ if(!any(object at family@vfamily == "lms.bcn"))
+ warning("I think you've called the wrong function")
+
+ Zvec <- ((y.arg/eta0[,2])^(eta0[,1]) -1) / (eta0[,1] * eta0[,3])
+ dZ.dy <- ((y.arg/eta0[,2])^(eta0[,1]-1)) / (eta0[,2] * eta0[,3])
+ yvec <- dnorm(Zvec) * abs(dZ.dy)
+
+ list(newdata=newdata, y=y.arg, density=yvec)
+}
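+# Change-of-variables background for the deplot.lms.* functions: if Z(y) is
+# the transformed response with known density f_Z, the fitted density of Y
+# is f_Z(Z(y)) * |dZ/dy| (dnorm here and for lms.yjn; dgamma for lms.bcg).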
+
+
+
+deplot.lms.bcg <- function(object,
+ newdata,
+ y.arg,
+ eta0)
+{
+ if(!any(object@family@vfamily == "lms.bcg"))
+ warning("I think you've called the wrong function")
+
+ Zvec <- (y.arg/eta0[,2])^(eta0[,1]) # different from lms.bcn
+ dZ.dy <- ((y.arg/eta0[,2])^(eta0[,1]-1)) * eta0[,1] / eta0[,2]
+ lambda <- eta0[,1]
+ sigma <- eta0[,3]
+ shape <- 1 / (lambda * sigma)^2
+ yvec <- dgamma(Zvec, shape=shape, rate=shape) * abs(dZ.dy)
+
+ list(newdata=newdata, y=y.arg, density=yvec)
+}
+
+
+
+deplot.lms.yjn <- function(object,
+ newdata,
+ y.arg,
+ eta0)
+{
+
+ if(!any(object@family@vfamily == "lms.yjn"))
+ warning("I think you've called the wrong function")
+
+ lambda <- eta0[,1]
+ Zvec <- (yeo.johnson(y.arg+object@misc$yoffset, lambda=eta0[,1]) -
+ eta0[,2]) / eta0[,3]
+ dZ.dy <- dyj.dy(y.arg+object@misc$yoffset, lambda=eta0[,1]) / eta0[,3]
+ yvec <- dnorm(Zvec) * abs(dZ.dy)
+
+ list(newdata=newdata, y=y.arg, density=yvec)
+}
+
+
+deplot.default <- function(object, ...) {
+
+ warning("no methods function. Returning the object")
+ invisible(object)
+}
+
+
+
+
+"deplot.vglm" <- function(object, Attach= TRUE, ...) {
+ LL <- length(object@family@vfamily)
+ newcall = paste("deplot.", object@family@vfamily[LL],
+ "(object, ...)", sep="")
+ newcall = parse(text=newcall)[[1]]
+
+ if(Attach) {
+ object@post$deplot = eval(newcall)
+ invisible(object)
+ } else
+ eval(newcall)
+}
+
+
+
+"deplot.lmscreg" <- function(object,
+ newdata=NULL,
+ x0,
+ y.arg, plot.it= TRUE, ...) {
+
+
+ if(!length(newdata)) {
+ newdata <- data.frame(x0=x0)
+ var1name <- attr(terms(object), "term.labels")[1]
+ names(newdata) <- var1name
+
+ ii <- if(object@misc$nonparametric)
+ slot(object, "s.xargument") else NULL
+ if(length(ii) && any(logic.vec <-
+ names(slot(object, "s.xargument"))==var1name))
+ names(newdata) <- ii[logic.vec] # should be the first one
+ }
+
+ eta0 = if(length(newdata)) predict(object, newdata) else predict(object)
+ eta0 <- eta2theta(eta0, object@misc$link) # lambda, mu, sigma
+
+ newcall = paste("deplot.", object@family@vfamily[1],
+ "(object, newdata, y.arg=y.arg, eta0=eta0)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ answer = eval(newcall)
+
+ if(plot.it)
+ plotdeplot.lmscreg(answer, y.arg=y.arg, ...)
+
+ invisible(answer)
+}
+
+
+
+plotdeplot.lmscreg <- function(answer,
+ y.arg,
+ add.arg= FALSE,
+ xlab="", ylab="density",
+ xlim=NULL, ylim=NULL,
+ llty.arg=par()$lty, col.arg=par()$col,
+ llwd.arg=par()$lwd, ...)
+{
+
+ yvec <- answer$density
+ xx <- y.arg
+
+ if(!add.arg) {
+ if(!is.numeric(xlim))
+ xlim <- c(min(xx), max(xx))
+ if(!is.numeric(ylim))
+ ylim <- c(min(yvec), max(yvec))
+ matplot(x=xx, y=yvec,
+ xlab=xlab, ylab=ylab, type="n",
+ xlim=xlim, ylim=ylim, ...)
+ }
+
+ temp <- cbind(xx, yvec)
+ temp <- temp[sort.list(temp[,1]),]
+ index <- !duplicated(temp[,1])
+ lines(temp[index,1], temp[index,2],
+ lty=llty.arg, col=col.arg, err=-1, lwd=llwd.arg)
+
+ invisible(answer)
+}
+
+
+
+
+if(TRUE) {
+
+ if(!isGeneric("deplot"))
+ setGeneric("deplot", function(object, ...) standardGeneric("deplot"))
+
+ setMethod("deplot", signature(object="vglm"),
+ function(object, ...)
+ invisible(deplot.vglm(object, ...)))
+ setMethod("deplot", signature(object="vgam"),
+ function(object, ...)
+ invisible(deplot.vglm(object, ...)))
+}
+
+
+
+
+if(TRUE) {
+
+ if(!isGeneric("cdf"))
+ setGeneric("cdf", function(object, ...) standardGeneric("cdf"))
+
+ setMethod("cdf", signature(object="vglm"),
+ function(object, ...)
+ cdf.vglm(object, ...))
+
+ setMethod("cdf", signature(object="vgam"),
+ function(object, ...)
+ cdf.vglm(object, ...))
+}
+
+
+"cdf.vglm" <- function(object, newdata=NULL, Attach= FALSE, ...) {
+ LL <- length(object@family@vfamily)
+ newcall = paste("cdf.", object@family@vfamily[LL],
+ "(object, newdata, ...)", sep="")
+ newcall = parse(text=newcall)[[1]]
+
+ if(Attach) {
+ object@post$cdf = eval(newcall)
+ object
+ } else
+ eval(newcall)
+}
+
+
+
+"cdf.lmscreg" <- function(object,
+ newdata=NULL, ...) {
+
+
+
+ if(!length(newdata))
+ return(object@post$cdf)
+
+ eta0 = if(length(newdata)) predict(object, newdata) else predict(object)
+ eta0 <- eta2theta(eta0, link=object@misc$link) # lambda, mu, sigma
+
+ y = vgety(object, newdata) # Includes yoffset
+
+ newcall = paste("cdf.", object@family@vfamily[1],
+ "(y, eta0, ... )", sep="")
+ newcall = parse(text=newcall)[[1]]
+ eval(newcall)
+}
+
+
+
+cdf.lms.bcn <- function(y, eta0)
+{
+ Zvec <- ((y/eta0[,2])^(eta0[,1]) -1) / (eta0[,1] * eta0[,3])
+ ind0 <- abs(eta0[,1]) < 1e-5 # Singularity at lambda == 0
+ Zvec[ind0] <- (log(y/eta0[,2]) / eta0[,3])[ind0] # the lambda -> 0 limit
+ ans = c(pnorm(Zvec))
+ names(ans) = dimnames(eta0)[[1]]
+ ans
+}
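+# cdf.lms.bcn evaluates pnorm(Z) with Z = ((y/mu)^lambda - 1)/(lambda*sigma);
+# near lambda = 0 the 0/0 form is replaced by its limit log(y/mu)/sigma.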
+
+
+cdf.lms.bcg <- function(y, eta0)
+{
+ shape = 1 / (eta0[,1] * eta0[,3])^2
+ Gvec = shape * (y/eta0[,2])^(eta0[,1])
+ ans = c(pgamma(Gvec, sh=shape))
+ ans[eta0[,1] < 0] = 1 - ans[eta0[,1] < 0] # flip tail for negative lambda
+ names(ans) = dimnames(eta0)[[1]]
+ ans
+}
+
+
+cdf.lms.yjn <- function(y, eta0)
+{
+
+
+ Zvec = (yeo.johnson(y, eta0[,1]) - eta0[,2])/eta0[,3]
+ ans = c(pnorm(Zvec))
+ names(ans) = dimnames(eta0)[[1]]
+ ans
+}
+
+
+vgety = function(object, newdata=NULL) {
+
+ y = if(length(newdata)) {
+ yname = dimnames(attr(terms(object@terms),"factors"))[[1]][1]
+ newdata[[yname]]
+ } else {
+ object@y
+ }
+ if(length(object@misc$yoffset))
+ y = y + object@misc$yoffset
+ y
+}
+
+
+
+
+
+
+"rlplot.vglm" <- function(object, Attach= TRUE, ...) {
+
+ LL <- length(object@family@vfamily)
+ newcall = paste("rlplot.", object@family@vfamily[LL],
+ "(object, ...)", sep="")
+ newcall = parse(text=newcall)[[1]]
+
+ if(Attach) {
+ object@post$rlplot = eval(newcall)
+ invisible(object)
+ } else
+ eval(newcall)
+}
+
+
+
+
+
+"rlplot.vextremes" <- function(object, ...) {
+
+
+ newcall = paste("rlplot.", object at family@vfamily[1],
+ "(object=object, ... )", sep="")
+ newcall = parse(text=newcall)[[1]]
+ eval(newcall)
+}
+
+
+rlplot.egev <-
+rlplot.gev <-
+ function(object, plot.it=TRUE,
+ probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
+ add.arg=FALSE,
+ xlab="Return Period",ylab="Return Level", main="Return Level Plot",
+ pch=par()$pch, pcol.arg=par()$col, pcex=par()$cex,
+ llty.arg=par()$lty, lcol.arg=par()$col, llwd.arg=par()$lwd,
+ slty.arg=par()$lty, scol.arg=par()$col, slwd.arg=par()$lwd,
+ ylim=NULL,
+ Log = TRUE,
+ CI = TRUE,
+ epsilon = 1.0e-05,
+ ...)
+{
+ if(!is.Numeric(epsilon, allow=1) || abs(epsilon) > 0.10)
+ stop("bad input for \"epsilon\"")
+ if(!is.Numeric(probability, posit=TRUE) || max(probability) >= 1 ||
+ length(probability) < 5)
+ stop("bad input for \"probability\"")
+ if(!is.logical(Log) || length(Log) != 1)
+ stop("bad input for argument \"Log\"")
+ if(!is.logical(CI) || length(CI) != 1)
+ stop("bad input for argument \"CI\"")
+ if(!object@misc$intercept.only)
+ stop("object must be an intercept-only fit, i.e., of the form y ~ 1")
+
+ extra2 = object@extra
+ extra2$percentiles = 100 * probability # Overwrite
+ zp = object@family@inverse(eta=predict(object)[1:2,], extra=extra2)[1,]
+ yp = -log(probability)
+ ydata = sort(object@y[,1])
+ n = object@misc$n
+ if(Log) {
+ if(!add.arg)
+ plot(log(1/yp), zp, log="", type="n",
+ ylim=if(length(ylim)) ylim else
+ c(min(c(ydata, zp)), max(c(ydata, zp))),
+ xlab=xlab, ylab=ylab, main=main, ...)
+ points(log(-1/log((1:n)/(n+1))), ydata, col=pcol.arg, pch=pch, cex=pcex)
+ lines(log(1/yp), zp,
+ lwd=llwd.arg, col=lcol.arg, lty=llty.arg)
+ } else {
+ if(!add.arg)
+ plot(1/yp, zp, log="x", type="n",
+ ylim=if(length(ylim)) ylim else
+ c(min(c(ydata, zp)), max(c(ydata, zp))),
+ xlab=xlab, ylab=ylab, main=main, ...)
+ points(-1/log((1:n)/(n+1)), ydata, col=pcol.arg, pch=pch, cex=pcex)
+ lines(1/yp, zp, lwd=llwd.arg, col=lcol.arg, lty=llty.arg)
+ }
+
+ if(CI) {
+ zpp = cbind(zp, zp, zp) # lp x 3
+ eta = predict(object)
+ Links = object@misc$link
+ earg = object@misc$earg
+ M = object@misc$M
+ for(ii in 1:M) {
+ TTheta = eta[,ii]
+ use.earg = earg[[ii]]
+ newcall = paste(Links[ii],
+ "(theta=TTheta, earg=use.earg, inverse=TRUE)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ uteta = eval(newcall) # Theta, the untransformed parameter
+ uteta = uteta + epsilon # perturb it
+ newcall = paste(Links[ii],
+ "(theta=uteta, earg=use.earg)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ teta = eval(newcall) # The transformed parameter
+ peta = eta
+ peta[,ii] = teta
+ zpp[,ii] = object@family@inverse(eta=peta, extra=extra2)[1,]
+ zpp[,ii] = (zpp[,ii] - zp) / epsilon # On the transformed scale
+ }
+ VCOV = vcov(object, untransform=TRUE)
+ v = numeric(nrow(zpp))
+ for(ii in 1:nrow(zpp))
+ v[ii] = t(as.matrix(zpp[ii,])) %*% VCOV %*% as.matrix(zpp[ii,])
+ if(Log) {
+ lines(log(1/yp), zp - 1.96 * sqrt(v),
+ lwd=slwd.arg, col=scol.arg, lty=slty.arg)
+ lines(log(1/yp), zp + 1.96 * sqrt(v),
+ lwd=slwd.arg, col=scol.arg, lty=slty.arg)
+ } else {
+ lines(1/yp, zp - 1.96 * sqrt(v),
+ lwd=slwd.arg, col=scol.arg, lty=slty.arg)
+ lines(1/yp, zp + 1.96 * sqrt(v),
+ lwd=slwd.arg, col=scol.arg, lty=slty.arg)
+ }
+ }
+ answer = list(yp = yp,
+ zp = zp)
+ if(CI) {
+ answer$lower = zp - 1.96 * sqrt(v)
+ answer$upper = zp + 1.96 * sqrt(v)
+ }
+ invisible(answer)
+}
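+# Editorial note on the return-level plot: zp holds the fitted quantiles at
+# the given probabilities and yp = -log(probability), so the observed order
+# statistics fall near the curve when the GEV fits well. The CI band is a
+# delta-method one: each linear predictor is perturbed by 'epsilon' on the
+# untransformed scale to get numerical derivatives of zp, whence
+# v = d' VCOV d and the band is zp +/- 1.96*sqrt(v).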
+
+if(!isGeneric("rlplot"))
+ setGeneric("rlplot", function(object, ...) standardGeneric("rlplot"))
+
+setMethod("rlplot", "vglm", function(object, ...)
+ rlplot.vglm(object, ...))
+
+
+
+
+
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
new file mode 100644
index 0000000..a14e3a3
--- /dev/null
+++ b/R/residuals.vlm.q
@@ -0,0 +1,212 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+residualsvlm <- function(object,
+ type = c("response", "deviance", "pearson", "working"))
+{
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("response", "deviance", "pearson", "working"))[1]
+
+ na.act = object@na.action
+ object@na.action = list()
+
+ pooled.weight <- object@misc$pooled.weight
+ if(is.null(pooled.weight))
+ pooled.weight <- FALSE
+
+ answer =
+ switch(type,
+ working = if(pooled.weight) NULL else object@residuals,
+ pearson = {
+ if(pooled.weight) return(NULL)
+ n <- object@misc$n
+ M <- object@misc$M
+ wz <- weights(object, type="w") # $weights
+ if(!length(wz))
+ wz <- if(M==1) rep(1, n) else matrix(1, n, M)
+
+ if(M==1) {
+ if(any(wz < 0))
+ warning(paste("some weights are negative.",
+ "Their residual will be assigned NA"))
+ ans <- sqrt(c(wz)) * c(object@residuals)
+ names(ans) <- names(object@residuals)
+ ans
+ } else {
+ wz.sqrt <- matrix.power(wz, M=M, power=0.5, fast=TRUE)
+ ans <- mux22(wz.sqrt, object@residuals, M=M, upper=FALSE)
+ dim(ans) <- c(M, n)
+ ans <- t(ans)
+ dimnames(ans) <- dimnames(object@residuals) # n x M
+ ans
+ }
+ },
+ deviance = {
+ M <- object at misc$M
+ if(M > 1)
+ return(NULL)
+ ans <- residualsvlm(object, type = "pearson")
+ ans
+ },
+ response = object@residuals
+ )
+
+ if(length(answer) && length(na.act)) {
+ napredict(na.act[[1]], answer)
+ } else {
+ answer
+ }
+}
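+# The Pearson residuals above are W^{1/2} r: for M > 1, matrix.power()
+# forms the square root of each observation's M x M working-weight matrix
+# and mux22() multiplies it into that observation's working residuals.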
+
+
+
+residualsvglm <- function(object,
+ type = c("working", "pearson", "response", "deviance", "ldot"),
+ matrix.arg=TRUE)
+{
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type,
+ c("working", "pearson", "response", "deviance", "ldot"))[1]
+
+ na.act = object@na.action
+ object@na.action = list()
+
+ pooled.weight <- object@misc$pooled.weight
+ if(is.null(pooled.weight))
+ pooled.weight <- FALSE
+
+ answer =
+ switch(type,
+ working = if(pooled.weight) NULL else object@residuals,
+ pearson = {
+ if(pooled.weight) return(NULL)
+
+ n <- object@misc$n
+ M <- object@misc$M
+ wz <- weights(object, type="w") # $weights
+
+ if(M==1) {
+ if(any(wz < 0))
+ warning(paste("some weights are negative.",
+ "Their residual will be assigned NA"))
+ ans <- sqrt(c(wz)) * c(object@residuals)
+ names(ans) <- names(object@residuals)
+ ans
+ } else {
+ wz.sqrt <- matrix.power(wz, M=M, power=0.5, fast=TRUE)
+ ans <- mux22(wz.sqrt, object@residuals, M=M, upper=FALSE)
+ dim(ans) <- c(M,n)
+ ans <- t(ans)
+ dimnames(ans) <- dimnames(object@residuals) # n x M
+ ans
+ }
+ },
+ deviance = {
+ n <- object@misc$n
+
+ y <- as.matrix(object@y) # zz as.matrix
+ mu <- object@fitted.values
+
+
+ w <- object@prior.weights
+ if(!length(w))
+ w <- rep(1, n)
+ eta <- object@predictors
+
+ dev.fn <- object@family@deviance # May not 'exist' for that model
+ if(( is.R() && length(body(dev.fn)) > 0) ||
+ (!is.R() && length(args(dev.fn)) > 1)) {
+ extra <- object@extra
+ ans <- dev.fn(mu=mu,y=y,w=w,residuals=TRUE,eta=eta,extra)
+ if(length(ans)) {
+ lob <- labels(object@residuals)
+ if(is.list(lob)) {
+ if(is.matrix(ans)) dimnames(ans) <- lob else
+ names(ans) <- lob[[1]]
+ } else
+ names(ans) <- lob
+ }
+ ans
+ } else NULL
+ },
+ ldot = {
+ n <- object@misc$n
+ y <- as.matrix(object@y) # zz as.matrix
+ mu <- object@fitted
+ w <- object@prior.weights
+ if(is.null(w))
+ w <- rep(1, n)
+ eta <- object@predictors
+ if(!is.null(ll.fn <- object@family@loglikelihood)) {
+ extra <- object@extra
+ ans <- ll.fn(mu=mu,y=y,w=w,residuals=TRUE,eta=eta,extra)
+ if(!is.null(ans)) {
+ ans <- c(ans) # ldot residuals can only be a vector
+ names(ans) <- labels(object@residuals)
+ }
+ ans
+ } else NULL
+ },
+ response = {
+ y <- object@y
+
+ mu <- fitted(object) # zz object@fitted
+
+ true.mu <- object@misc$true.mu
+ if(is.null(true.mu))
+ true.mu <- TRUE
+
+ ans <- if(true.mu) y - mu else NULL
+
+
+ if(!matrix.arg && length(ans)) {
+ if(ncol(ans) == 1) {
+ names.ans = dimnames(ans)[[1]]
+ ans = c(ans)
+ names(ans) = names.ans
+ ans
+ } else {
+ warning("ncol(ans) is not 1")
+ ans
+ }
+ } else ans
+ })
+
+ if(length(answer) && length(na.act)) {
+ napredict(na.act[[1]], answer)
+ } else {
+ answer
+ }
+}
+
+ setMethod("residuals", "vlm",
+ function(object, ...)
+ residualsvlm(object, ...))
+ setMethod("residuals", "vglm",
+ function(object, ...)
+ residualsvglm(object, ...))
+ setMethod("residuals", "vgam",
+ function(object, ...)
+ residualsvglm(object, ...))
+
+ setMethod("resid", "vlm",
+ function(object, ...)
+ residualsvlm(object, ...))
+ setMethod("resid", "vglm",
+ function(object, ...)
+ residualsvglm(object, ...))
+ setMethod("resid", "vgam",
+ function(object, ...)
+ residualsvglm(object, ...))
+
+
+
+
+
diff --git a/R/rrvglm.R b/R/rrvglm.R
new file mode 100644
index 0000000..a1b3297
--- /dev/null
+++ b/R/rrvglm.R
@@ -0,0 +1,195 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+rrvglm <- function(formula,
+ family, data=list(),
+ weights=NULL, subset=NULL, na.action=na.fail,
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ control=rrvglm.control(...),
+ offset=NULL,
+ method="rrvglm.fit",
+ model=FALSE, x.arg=TRUE, y.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=NULL,
+ qr.arg=FALSE, smart=TRUE, ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "rrvglm"
+
+ ocall <- match.call()
+
+ if(smart)
+ setup.smart("write")
+
+ mt <- terms(formula, data = data)
+ if(missing(data))
+ data <- environment(formula)
+
+ mf <- match.call(expand=FALSE)
+ mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
+ mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
+ mf$coefstart <- mf$etastart <- mf$... <- NULL
+ mf$smart <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ if(method == "model.frame")
+ return(mf)
+ na.act <- attr(mf, "na.action")
+
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ y <- model.response(mf, "numeric") # model.extract(mf, "response")
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+ w <- model.weights(mf)
+ if(!length(w))
+ w <- rep(1, nrow(mf))
+ else if(ncol(as.matrix(w))==1 && any(w < 0))
+ stop("negative weights not allowed")
+
+ if(is.character(family))
+ family <- get(family)
+ if(is.function(family))
+ family <- family()
+ if(!inherits(family, "vglmff")) {
+ stop(paste("family=", family, "is not a VGAM family function"))
+ }
+
+ eval(vcontrol.expression)
+
+ if(!is.null(family@first))
+ eval(family@first)
+
+ # 10/12/04: testing for an empty (function) slot not elegant:
+ if(control$Quadratic && control$FastAlgorithm &&
+ length(as.list(family@deviance)) <= 1)
+ stop(paste("The fast algorithm requires the family",
+ "function to have a deviance slot"))
+
+
+ rrvglm.fitter <- get(method)
+
+ fit <- rrvglm.fitter(x=x, y=y, w=w, offset=offset,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ family=family,
+ control=control,
+ constraints=constraints,
+ criterion=control$criterion,
+ extra=extra,
+ qr.arg = qr.arg,
+ Terms=mt, function.name=function.name, ...)
+
+ if(control$Bestof > 1) {
+ deviance.Bestof = rep(fit$crit.list$deviance, len= control$Bestof)
+ for(tries in 2:control$Bestof) {
+ if(control$trace && (control$Bestof>1))
+ cat(paste("\n========================= Fitting model", tries,
+ "=========================\n\n"))
+ it <- rrvglm.fitter(x=x, y=y, w=w, offset=offset,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ family=family,
+ control=control,
+ constraints=constraints,
+ criterion=control$criterion,
+ extra=extra,
+ qr.arg = qr.arg,
+ Terms=mt, function.name=function.name, ...)
+ deviance.Bestof[tries] = it$crit.list$deviance
+ if(min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+ fit = it
+ }
+ fit$misc$deviance.Bestof = deviance.Bestof
+ }
+
+ fit$misc$dataname <- dataname
+
+ if(smart) {
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ }
+
+ answer <-
+ new(if(control$Quadratic) "qrrvglm" else "rrvglm",
+ "assign" = attr(x, "assign"),
+ "call" = ocall,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "criterion" = fit$crit.list,
+ "df.residual" = fit$df.residual,
+ "df.total" = fit$df.total,
+ "dispersion" = 1,
+ "effects" = fit$effects,
+ "family" = fit$family,
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "R" = fit$R,
+ "rank" = fit$rank,
+ "residuals" = as.matrix(fit$residuals),
+ "rss" = fit$rss,
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=mt))
+
+ if(!smart) answer@smart.prediction <- list(smart.arg=FALSE)
+
+ if(qr.arg) {
+ class(fit$qr) = "list"
+ slot(answer, "qr") = fit$qr
+ }
+ if(length(attr(x, "contrasts")))
+ slot(answer, "contrasts") = attr(x, "contrasts")
+ if(length(fit$fitted.values))
+ slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
+ slot(answer, "na.action") = if(length(na.act)) list(na.act) else list()
+ if(length(offset))
+ slot(answer, "offset") = as.matrix(offset)
+ if(length(fit$weights))
+ slot(answer, "weights") = as.matrix(fit$weights)
+ if(x.arg)
+ slot(answer, "x") = fit$x # The 'small' design matrix
+ if(length(xlev))
+ slot(answer, "xlevels") = xlev
+ if(y.arg)
+ slot(answer, "y") = as.matrix(fit$y)
+
+
+ slot(answer, "control") = fit$control
+ slot(answer, "extra") = if(length(fit$extra)) {
+ if(is.list(fit$extra)) fit$extra else {
+ warning("\"extra\" is not a list, therefore placing \"extra\" into a list")
+ list(fit$extra)
+ }
+ } else list() # R-1.5.0
+
+ slot(answer, "iter") = fit$iter
+ fit$predictors = as.matrix(fit$predictors) # Must be a matrix
+ dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
+ fit$misc$predictors.names)
+ slot(answer, "predictors") = fit$predictors
+ if(length(fit$prior.weights))
+ slot(answer, "prior.weights") = fit$prior.weights
+
+
+
+
+
+ answer
+}
+attr(rrvglm, "smart") <- TRUE
+
+
+
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
new file mode 100644
index 0000000..aa7441d
--- /dev/null
+++ b/R/rrvglm.control.q
@@ -0,0 +1,147 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+rrvglm.control = function(Rank=1,
+ Algorithm=c("alternating", "derivative"),
+ Corner=TRUE,
+ Uncor=FALSE,
+ Wmat=NULL,
+ Svd.arg=FALSE,
+ Index.corner=if(length(Structural.zero))
+ ((1:1000)[-Structural.zero])[1:Rank] else 1:Rank,
+ Alpha=0.5,
+ Bestof = 1,
+ Cinit=NULL,
+ Etamat.colmax = 10,
+ SD.Cinit = 0.02,
+ Structural.zero = NULL,
+ Norrr = ~ 1,
+ trace = FALSE,
+ Use.Init.Poisson.QO=FALSE,
+ checkwz=TRUE,
+ wzepsilon = .Machine$double.eps^0.75,
+ ...)
+{
+
+
+
+
+ if(mode(Algorithm) != "character" && mode(Algorithm) != "name")
+ Algorithm <- as.character(substitute(Algorithm))
+ Algorithm <- match.arg(Algorithm, c("alternating", "derivative"))[1]
+
+ if(Svd.arg) Corner = FALSE
+
+ if(!is.Numeric(Rank, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"Rank\"")
+ if(!is.Numeric(Alpha, posit=TRUE, allow=1) || Alpha > 1)
+ stop("bad input for \"Alpha\"")
+ if(!is.Numeric(Bestof, posit=TRUE, allow=1, integer=TRUE))
+ stop("bad input for \"Bestof\"")
+ if(!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
+ stop("bad input for \"SD.Cinit\"")
+ if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
+ stop("bad input for \"Etamat.colmax\"")
+
+ if(length(Structural.zero) && (any(round(Structural.zero) != Structural.zero)
+ || any(Structural.zero<1)))
+ stop("bad input for the argument \"Structural.zero\"")
+
+
+ Quadratic = FALSE
+ if(!Quadratic && Algorithm == "derivative" && !Corner) {
+ dd = "derivative algorithm only supports corner constraints"
+ if(length(Wmat) || Uncor || Svd.arg)
+ stop(dd)
+ warning(dd)
+ Corner = TRUE
+ }
+ if(Quadratic && Algorithm != "derivative")
+ stop("Quadratic model can only be fitted using the derivative algorithm")
+
+ if(Corner && (Svd.arg || Uncor || length(Wmat)))
+ stop("can't have Corner=TRUE and either Svd=TRUE or Uncor=TRUE or Wmat")
+
+ if(Corner && length(intersect(Structural.zero, Index.corner)))
+ stop("can't have Structural.zero and Index.corner having common values")
+
+ if(length(Index.corner) != Rank)
+ stop("length(Index.corner) != Rank")
+
+ if(!is.logical(checkwz) || length(checkwz) != 1)
+ stop("bad input for \"checkwz\"")
+ if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ stop("bad input for \"wzepsilon\"")
+
+ ans =
+ c(vglm.control(trace = trace, ...),
+ switch(Algorithm,
+ "alternating" = valt.control(...),
+ "derivative" = if(is.R()) rrvglm.optim.control(...) else
+ nlminbcontrol(...)),
+ list(Rank=Rank,
+ Algorithm=Algorithm,
+ Alpha=Alpha,
+ Bestof = Bestof,
+ Cinit=Cinit,
+ Index.corner=Index.corner,
+ Norrr=Norrr,
+ Corner=Corner, Uncor=Uncor, Wmat=Wmat,
+ OptimizeWrtC = TRUE, # OptimizeWrtC,
+ Quadratic = FALSE, # A constant now, here.
+ SD.Cinit = SD.Cinit,
+ Etamat.colmax = Etamat.colmax,
+ Structural.zero = Structural.zero,
+ Svd.arg=Svd.arg,
+ Use.Init.Poisson.QO=Use.Init.Poisson.QO),
+ checkwz=checkwz,
+ wzepsilon = wzepsilon,
+ if(Quadratic) qrrvglm.control(Rank=Rank, ...) else NULL)
+
+ if(Quadratic && ans$ITolerances) {
+ ans$Svd.arg = FALSE
+ ans$Uncor = FALSE
+ ans$Corner = FALSE
+ }
+
+ ans$half.stepsizing = FALSE # Turn it off
+ ans
+}
+
+
+setClass("summary.rrvglm",
+ representation("rrvglm",
+ coef3="matrix",
+ cov.unscaled="matrix",
+ correlation="matrix",
+ df="numeric",
+ pearson.resid="matrix",
+ sigma="numeric"))
+
+setMethod("summary", "rrvglm",
+ function(object, ...)
+ summary.rrvglm(object, ...))
+
+
+printsummary.rrvglm <- function(x, digits=NULL, quote= TRUE, prefix="")
+{
+
+
+ printsummary.vglm(x, digits = digits, quote = quote, prefix = prefix)
+
+
+ invisible(x)
+}
+
+
+setMethod("print", "summary.rrvglm",
+ function(x, ...)
+ printsummary.rrvglm(x=x, ...))
+
+ setMethod("show", "summary.rrvglm",
+ function(object)
+ printsummary.rrvglm(x=object))
+
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
new file mode 100644
index 0000000..e57f45a
--- /dev/null
+++ b/R/rrvglm.fit.q
@@ -0,0 +1,631 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ offset=0, family,
+ control=rrvglm.control(...),
+ criterion="coefficients",
+ qr.arg=FALSE,
+ constraints=NULL,
+ extra=NULL,
+ Terms=Terms, function.name="rrvglm", ...)
+{
+ post = list()
+ check.rank = TRUE # !control$Quadratic
+ nonparametric <- FALSE
+ epsilon <- control$epsilon
+ maxit <- control$maxit
+ backchat <- control$backchat && !control$Quadratic # rrr;
+ save.weight <- control$save.weight
+ trace <- control$trace
+ orig.stepsize <- control$stepsize
+ minimize.criterion <- control$min.criterion
+
+
+ n <- dim(x)[1]
+
+ new.s.call <- expression({
+ if(c.list$one.more)
+ {
+ fv <- if(backchat) {
+ if(M>1) matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
+ } else c.list$fit
+ new.coeffs <- c.list$coeff
+
+ if(length(family@middle))
+ eval(family@middle)
+
+ eta <- fv + offset
+
+ mu <- family@inverse(eta, extra)
+
+ if(length(family@middle2))
+ eval(family@middle2)
+
+ old.crit <- new.crit
+ new.crit <-
+ switch(criterion,
+ coefficients=new.coeffs,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+
+
+
+ if(trace && orig.stepsize==1) {
+ cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
+ " linear loop ", iter, ": ", criterion, "= ")
+ uuuu =
+ switch(criterion,
+ coefficients= if(is.R())
+ format(new.crit, dig=round(2-log10(epsilon))) else
+ format(round(new.crit, round(2-log10(epsilon)))),
+ format(round(new.crit, 4)))
+ switch(criterion,
+ coefficients={if(length(new.crit) > 2) cat("\n");
+ cat(uuuu, fill=TRUE, sep=", ")},
+ cat(uuuu, fill=TRUE, sep=", "))
+ }
+
+ {
+ take.half.step <- (control$half.stepsizing && length(old.coeffs)) &&
+ !control$Quadratic &&
+ ((orig.stepsize!=1) ||
+ (criterion!="coefficients" &&
+ (if(minimize.criterion) new.crit > old.crit else
+ new.crit < old.crit)))
+ if(take.half.step) {
+ stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+ new.coeffs.save <- new.coeffs
+ if(trace)
+ cat("Taking a modified step")
+ repeat {
+ if(trace) {
+ cat(".")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ stepsize <- stepsize / 2
+ if(too.small <- stepsize < 0.001)
+ break
+ new.coeffs <- (1-stepsize)*old.coeffs +
+ stepsize*new.coeffs.save
+
+ if(length(family@middle))
+ eval(family@middle)
+
+ fv <- xbig.save %*% new.coeffs
+ if(M > 1)
+ fv <- matrix(fv, n, M, byrow=TRUE)
+
+ eta <- fv + offset
+
+ mu <- family@inverse(eta, extra)
+
+ if(length(family@middle2))
+ eval(family@middle2)
+
+
+ new.crit <-
+ switch(criterion,
+ coefficients=new.coeffs,
+ tfun(mu=mu,y=y,w=w,res=FALSE,eta=eta,extra))
+
+ if((criterion=="coefficients") ||
+ ( minimize.criterion && new.crit < old.crit) ||
+ (!minimize.criterion && new.crit > old.crit))
+ break
+ }
+
+ if(trace)
+ cat("\n")
+ if(too.small) {
+ warning(paste("iterations terminated because",
+ "half-step sizes are very small"))
+ one.more <- FALSE
+ } else {
+ if(trace) {
+ cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
+ " linear loop ", iter, ": ", criterion, "= ")
+ uuuu =
+ switch(criterion,
+ coefficients=if(is.R())
+ format(new.crit, dig=round(2-log10(epsilon))) else
+ format(round(new.crit, round(2-log10(epsilon)))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients={if(length(new.crit) > 2) cat("\n");
+ cat(uuuu, fill=TRUE, sep=", ")},
+ cat(uuuu, fill=TRUE, sep=", "))
+ }
+
+ one.more <- eval(control$convergence)
+ }
+ } else {
+ one.more <- eval(control$convergence)
+ }
+ }
+ if(exists("flush.console"))
+ flush.console()
+
+ if(one.more) {
+ iter <- iter + 1
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+
+
+ wz = matrix(wz, nrow=n) # zz 3/10/05
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z = eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
+
+ rrr.expression = paste("rrr", control$Algorithm,
+ "expression", sep=".")
+ rrr.expression = get(rrr.expression)
+ eval(rrr.expression)
+
+ c.list$z <- z # contains \bI_{Rank} \bnu
+ c.list$U <- U
+ if(copyxbig) c.list$xbig <- xbig.save
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+ old.coeffs <- new.coeffs
+ }
+ c.list
+ }) # end of new.s.call
+
+
+
+
+
+ copyxbig <- FALSE # May be overwritten in @initialize
+ stepsize <- orig.stepsize
+ old.coeffs <- coefstart
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
+
+
+ n.save <- n
+
+
+
+ Rank <- control$Rank
+ rrcontrol <- control #
+
+ if(length(family@initialize))
+ eval(family@initialize) # Initialize mu and M (and optionally w)
+ n <- n.save
+
+
+ eval(rrr.init.expression)
+
+ if(length(etastart)) {
+ eta <- etastart
+ mu <- if(length(mustart)) mustart else family@inverse(eta, extra)
+ } else {
+ if(length(mustart))
+ mu <- mustart
+ eta <- family@link(mu, extra)
+ }
+
+ M <- if(is.matrix(eta)) ncol(eta) else 1
+
+ if(is.character(rrcontrol$Dzero)) {
+ index = match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
+ if(any(is.na(index)))
+ stop("Dzero argument didn't fully match y-names")
+ if(length(index) == M)
+ stop(paste("all linear predictors are linear in the",
+ "latent variable(s); so set Quadratic=FALSE"))
+ rrcontrol$Dzero = control$Dzero = index
+ }
+
+
+
+
+
+ if(length(family@constraints))
+ eval(family@constraints)
+
+
+ special.matrix = matrix(-34956.125, M, M) # An unlikely used matrix
+ just.testing <- cm.vgam(special.matrix, x, rrcontrol$Norrr, constraints)
+ findex = trivial.constraints(just.testing, special.matrix)
+ tc1 = trivial.constraints(constraints)
+
+ if(!control$Quadratic && sum(!tc1)) {
+ for(ii in names(tc1))
+ if(!tc1[ii] && !any(ii == names(findex)[findex==1]))
+ warning(paste("\"", ii, "\"", " is a non-trivial constraint",
+ " that will be overwritten by reduced-rank regression", sep=""))
+ }
+
+ if(all(findex))
+ stop("use vglm(), not rrvglm()!")
+ colx1.index = names.colx1.index = NULL
+ dx2 = dimnames(x)[[2]]
+ if(sum(findex)) {
+ asx = attr(x, "assign")
+ for(ii in names(findex))
+ if(findex[ii]) {
+ names.colx1.index = c(names.colx1.index, dx2[asx[[ii]]])
+ colx1.index = c(colx1.index, asx[[ii]])
+ }
+ names(colx1.index) = names.colx1.index
+ }
+ rrcontrol$colx1.index=control$colx1.index=colx1.index #Save it on the object
+ colx2.index = 1:ncol(x)
+ names(colx2.index) = dx2
+ colx2.index = colx2.index[-colx1.index]
+ p1 = length(colx1.index); p2 = length(colx2.index)
+ rrcontrol$colx2.index=control$colx2.index=colx2.index #Save it on the object
+ Index.corner = control$Index.corner
+
+
+
+
+ Amat <- if(length(rrcontrol$Ainit)) rrcontrol$Ainit else
+ matrix(rnorm(M * Rank, sd=rrcontrol$SD.Cinit), M, Rank)
+ Cmat <- if(length(rrcontrol$Cinit)) rrcontrol$Cinit else {
+ if(!rrcontrol$Use.Init.Poisson.QO) {
+ matrix(rnorm(p2 * Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
+ } else
+ .Init.Poisson.QO(ymat=as.matrix(y),
+ X1=x[,colx1.index,drop=FALSE],
+ X2=x[,colx2.index,drop=FALSE],
+ Rank=rrcontrol$Rank, trace=rrcontrol$trace,
+ max.ncol.etamat = rrcontrol$Etamat.colmax,
+ Crow1positive=rrcontrol$Crow1positive,
+ isdlv=rrcontrol$isdlv)
+ }
+
+ if(modelno==3)
+ Amat[c(FALSE,TRUE),] <- 0 # Intercept only for log(k)
+
+
+ if(control$Corner)
+ Amat[control$Index.corner,] = diag(Rank)
+ if(length(control$Structural.zero))
+ Amat[control$Structural.zero,] = 0
+
+ rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
+ rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+
+ Blist <- process.constraints(constraints, x, M)
+
+ nice31 = control$Quadratic && (!control$EqualTol || control$ITolerances) &&
+ all(trivial.constraints(Blist))
+
+ Blist = Blist.save = replace.constraints(Blist, Amat, colx2.index)
+
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ dimB <- sum(ncolBlist)
+
+
+ xbig.save <- if(control$Quadratic) {
+ tmp500 = lm2qrrvlm.model.matrix(x=x, Blist=Blist,
+ C=Cmat, control=control)
+ xsmall.qrr = tmp500$new.lv.model.matrix
+ B.list = tmp500$constraints # Doesn't change or contain \bI_{Rank} \bnu
+ if(modelno==3 && FALSE) {
+ B.list[[1]] = (B.list[[1]])[,c(TRUE,FALSE),drop=FALSE] # Amat
+ B.list[[2]] = (B.list[[2]])[,c(TRUE,FALSE),drop=FALSE] # D
+ }
+
+ lv.mat = tmp500$lv.mat
+ if(length(tmp500$offset)) {
+ offset = tmp500$offset
+ }
+ lm2vlm.model.matrix(xsmall.qrr, B.list, xij=control$xij)
+ } else {
+ lv.mat = x[,colx2.index,drop=FALSE] %*% Cmat
+ lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ }
+
+
+
+
+ if(length(coefstart)) {
+ eta <- if(ncol(xbig.save)>1) xbig.save %*% coefstart +
+ offset else xbig.save * coefstart + offset
+ eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
+
+
+ mu <- family@inverse(eta, extra)
+ }
+
+ if(criterion != "coefficients") {
+ tfun <- slot(family, criterion) # family[[criterion]]
+ }
+
+ iter <- 1
+ new.crit <- switch(criterion,
+ coefficients=1,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+ old.crit <- if(minimize.criterion) 10*new.crit+10 else -10*new.crit-10
+
+
+ deriv.mu <- eval(family@deriv)
+
+ wz <- eval(family@weight)
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+
+ c.list <- list(z=as.double(z), fit=as.double(t(eta)), one.more=TRUE,
+ coeff=as.double(rep(1,ncol(xbig.save))), U=as.double(U),
+ copyxbig=copyxbig,
+ xbig=if(copyxbig) as.double(xbig.save) else double(3))
+
+
+
+ dxbig <- as.integer(dim(xbig.save))
+ n.big <- dxbig[[1]]
+ p.big <- dxbig[[2]]
+
+ if(n.big < p.big)
+ stop(paste(p.big, "parameters but only", n.big, "observations"))
+
+ if(backchat) {
+ nframe <- sys.nframe()
+ dotC(name="v_init_call", as.integer(nframe), new.s.call)
+ }
+
+
+ if(backchat) {
+ if(control$Quadratic)
+ stop("29/11/02; can't handle zedd")
+ index.vglm <- iam(NA, NA, M, both=TRUE)
+ tfit <- dotFortran(name="vglmf", as.double(xbig.save), n.big, p.big,
+ backchat=as.integer(TRUE),
+ as.integer(n), as.double(z), # not zedd
+ coefficients=double(p.big),
+ predictors=double(n.big), effects=double(n.big),
+ qr=as.double(xbig.save), qraux=double(p.big),
+ rank=as.integer(0), pivot=as.integer(seq(p.big)),
+ work=double(max(n.big, 2 * p.big)),
+ wkmm=double(M*M*5 + M*p.big),
+ as.double(U), as.integer(M),
+ dimu=as.integer(if(is.matrix(U)) nrow(U) else 1),
+ dimm=as.integer(if(is.matrix(wz)) ncol(wz) else 1),
+ as.integer(index.vglm$row), as.integer(index.vglm$col),
+ copyxbig=as.integer(copyxbig),
+ rss=double(1))
+ } else {
+ bf.call = expression(vlm.wfit(xbig.save, zedd,
+ Blist=if(control$Quadratic) B.list else Blist,
+ ncolx=ncol(x), U=U,
+ Eta.range = control$Eta.range,
+ matrix.out=if(control$Quadratic) FALSE else TRUE, XBIG=TRUE,
+ qr=qr.arg, xij=control$xij))
+
+ while(c.list$one.more) {
+ if(control$Quadratic) {
+ zedd = as.matrix(z)
+ if(control$Corner)
+ zedd[,Index.corner] = zedd[,Index.corner] - lv.mat
+ } else {
+ zedd = z
+ }
+
+ if(!nice31)
+ tfit <- eval(bf.call) # tfit$fitted.values is n x M
+
+ if(!control$Quadratic) {
+ Cmat = tfit$mat.coef[colx2.index,,drop=FALSE] %*%
+ Amat %*% solve(t(Amat) %*% Amat)
+ rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
+ rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+ }
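+ # (Editorial note: the Cmat update above solves B2 ~ C %*% t(A) for C in
+ # least squares, i.e. C = B2 %*% A %*% solve(t(A) %*% A), where B2 holds
+ # the estimated coefficients of the x2 columns for the current A.)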
+
+ if(!nice31) c.list$coeff <- tfit$coefficients
+
+ if(control$Quadratic) {
+ if(control$Corner)
+ tfit$fitted.values[,Index.corner] =
+ tfit$fitted.values[,Index.corner] + lv.mat
+ }
+
+ if(!nice31)
+ tfit$predictors = tfit$fitted.values # Doesn't contain the offset
+ if(!nice31) c.list$fit = tfit$fitted.values
+ c.list <- eval(new.s.call)
+ NULL
+ }
+ }
+
+ if(maxit>1 && iter>=maxit)
+ warning(paste("convergence not obtained in", maxit, "iterations."))
+
+
+ dn.big <- labels(xbig.save)
+ xn.big <- dn.big[[2]]
+ yn.big <- dn.big[[1]]
+
+ if(length(family@fini))
+ eval(family@fini)
+
+ if(M>1 && !nice31)
+ tfit$predictors <- matrix(tfit$predictors, n, M, byrow=backchat)
+
+ asgn <- attr(xbig.save, "assign")
+ if(nice31) {
+ coefs <- rep(0, len=length(xn.big)) # zz
+ rank <- p.big # zz 3/10/05
+ } else {
+ coefs <- tfit$coefficients
+ names(coefs) <- xn.big
+ rank <- tfit$rank
+ }
+
+ cnames <- xn.big
+
+ if(check.rank && rank < p.big)
+ stop("rrvglm only handles full-rank models (currently)")
+
+ if(nice31) {
+ R <- matrix(as.numeric(NA), 5, 5) # zz 3/10/05
+ } else {
+ R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
+ if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
+ tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
+ }
+ R[lower.tri(R)] <- 0
+ attributes(R) <- if(is.R()) list(dim=c(p.big, p.big),
+ dimnames=list(cnames, cnames), rank=rank) else
+ list(dim=c(p.big, p.big),
+ dimnames=list(cnames, cnames), rank=rank, class="upper")
+ }
+
+ if(nice31) {
+ effects <- rep(0, len=77) # zz 3/10/05
+ } else {
+ effects <- tfit$effects
+ neff <- rep("", n.big)
+ neff[seq(p.big)] <- cnames
+ names(effects) <- neff
+
+ dim(tfit$predictors) <- c(n, M)
+ }
+ dn <- labels(x)
+ yn <- dn[[1]]
+ xn <- dn[[2]]
+
+ if(nice31) {
+ residuals <- z - fv # zz - offset ?? # not sure 3/10/05
+ if(M==1) {
+ residuals <- as.vector(residuals)
+ names(residuals) <- yn
+ } else {
+ dimnames(residuals) <- list(yn, predictors.names)
+ }
+ } else {
+ residuals <- z - tfit$predictors # zz - offset ??
+ if(M==1) {
+ tfit$predictors <- as.vector(tfit$predictors)
+ residuals <- as.vector(residuals)
+ names(residuals) <- names(tfit$predictors) <- yn
+ } else {
+ dimnames(residuals) <- dimnames(tfit$predictors) <- list(yn, predictors.names)
+ }
+ }
+
+ if(is.matrix(mu)) {
+ if(length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ if(length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
+ } else {
+ names(mu) <- names(fv)
+ }
+
+
+
+ df.residual <- n.big - rank - (if(control$Quadratic) Rank*p2 else 0)
+ fit <- list(assign=asgn,
+ coefficients=coefs,
+ constraints=if(control$Quadratic) B.list else Blist,
+ df.residual=df.residual,
+ df.total=n*M,
+ effects=effects,
+ fitted.values=mu,
+ offset=offset,
+ rank=rank,
+ residuals=residuals,
+ R=R,
+ terms=Terms) # terms: This used to be done in vglm()
+
+ if(qr.arg && !nice31) {
+ fit$qr <- if(is.R())
+ fit$qr <- tfit$qr else {
+ if(backchat) tfit[c("qr", "rank", "pivot", "qraux")] else
+ tfit$qr
+ }
+ dimnames(fit$qr$qr) <- dn.big
+ }
+
+ if(M==1) {
+ wz <- as.vector(wz) # Convert wz into a vector
+ } # else
+ fit$weights <- if(save.weight) wz else NULL
+
+
+ misc <- list(
+ colnames.x = xn,
+ colnames.xbig = xn.big,
+ criterion = criterion,
+ function.name = function.name,
+ intercept.only=intercept.only,
+ predictors.names = predictors.names,
+ M = M,
+ n = n,
+ nonparametric = nonparametric,
+ n.big = n.big,
+ orig.assign = attr(x, "assign"),
+ p = ncol(x),
+ p.big = p.big,
+ ynames = dimnames(y)[[2]])
+
+ if(one.more)
+ misc$rrr.expression = rrr.expression #
+
+
+ crit.list <- list()
+ if(criterion != "coefficients")
+ crit.list[[criterion]] <- fit[[criterion]] <- new.crit
+
+ for(i in names(.min.criterion.VGAM)) {
+ if(i != criterion &&
+ any(slotNames(family) == i) &&
+ (( is.R() && length(body(slot(family, i)))) ||
+ ((!is.R() && length(slot(family, i)) > 1)))) {
+ fit[[i]] <- crit.list[[i]] <-
+ (slot(family, i))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
+ }
+ }
+
+
+
+
+
+ if(w[1] != 1 || any(w != w[1]))
+ fit$prior.weights <- w
+
+ if(length(family@last))
+ eval(family@last)
+
+
+ structure(c(fit, list(predictors=if(nice31) matrix(eta,n,M) else tfit$predictors,
+ contrasts=attr(x, "contrasts"),
+ control=control,
+ crit.list=crit.list,
+ extra=extra,
+ family=family,
+ iter=iter,
+ misc=misc,
+ post = post,
+ rss=if(nice31) 000 else tfit$rss,
+ x=x,
+ y=y)),
+ vclass=family@vfamily)
+}
+
+
diff --git a/R/s.q b/R/s.q
new file mode 100644
index 0000000..4d1d2f1
--- /dev/null
+++ b/R/s.q
@@ -0,0 +1,38 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+s <- function(x, df=4, spar=0, ...)
+{
+
+ xs <- substitute(x)
+ ans <- as.character(xs)
+ if(length(ans) > 1)
+ stop("x argument must be of length one")
+
+ call <- deparse(sys.call())
+
+ if(ncol(as.matrix(x)) > 1)
+ stop("x must be a vector")
+ if(!is.null(levels(x))) {
+ x <- if(inherits(x, "ordered")) codes(x) else
+ stop("unordered factors cannot be used as smoothing variables")
+ }
+ attr(x, "spar") <- spar
+ attr(x, "df") <- df
+ attr(x, "call") <- call
+ attr(x, "class") <- "smooth"
+ attr(x, "s.xargument") <- ans # Needed for prediction and constraints
+
+
+ a <- is.na(x)
+ if(any(a))
+ attr(x, "NAs") <- seq(along = x)[a]
+
+ x
+}
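+# Usage sketch (illustrative): s() is meant to appear inside a vgam()
+# formula, e.g. vgam(y ~ s(x, df=3), fam, data=mydat); it only tags x with
+# df/spar/call attributes, and the smoothing itself is done by the
+# backfitting routines (see s.vam() below).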
+
+
+
diff --git a/R/s.vam.q b/R/s.vam.q
new file mode 100644
index 0000000..cde20d3
--- /dev/null
+++ b/R/s.vam.q
@@ -0,0 +1,245 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+s.vam <- function(x, z, wz, s, which, smooth.frame, bf.maxit=10,
+ bf.epsilon=0.001, trace=FALSE, se.fit=TRUE,
+ xbig.save, Blist, ncolBlist, M, qbig, U,
+ backchat=if(is.R()) FALSE else TRUE,
+ all.knots=FALSE, nk=NULL,
+ sf.only=FALSE)
+{
+ nwhich <- names(which)
+
+
+ dxbig <- as.integer(dim(xbig.save))
+ pbig <- dxbig[2]
+
+
+ if(!length(smooth.frame$first)) {
+ data <- smooth.frame[, nwhich, drop=FALSE]
+ smooth.frame <- vgam.match(data, all.knots=all.knots, nk=nk)
+ smooth.frame$first <- FALSE # No longer first for next time
+
+ dx <- as.integer(dim(x))
+ smooth.frame$n <- dx[1]
+ smooth.frame$p <- dx[2]
+ attr(data, "class") <- NULL
+
+ spar <- lapply(data, attr, "spar")
+ df <- lapply(data, attr, "df")
+ s.xargument <- lapply(data, attr, "s.xargument")
+
+ for(k in 1:length(nwhich)) {
+ i <- nwhich[k]
+
+ temp <- spar[[i]]
+ if(!is.numeric(temp) || any(temp<0))
+ stop("spar cannot be negative or non-numeric")
+ if(length(temp) > ncolBlist[i])
+ warning(paste("only the first", ncolBlist[i],
+ "values of spar used for variable \"",
+ s.xargument, "\""))
+ spar[[i]] <- rep(temp, length=ncolBlist[i]) # recycle
+
+ temp <- df[[i]]
+ if(!is.numeric(temp) || any(temp<1))
+ stop("df is non-numeric or less than 1")
+ if(length(temp) > ncolBlist[i])
+ warning(paste("only the first", ncolBlist[i],
+ "values of df used for variable \"",
+ s.xargument, "\""))
+ df[[i]] <- rep(temp, length=ncolBlist[i]) # recycle
+ if(max(temp) > smooth.frame$nef[k]-1)
+ stop(paste("df value too high for variable \"",
+ s.xargument, "\""))
+
+ if(any(spar[[i]]!=0) && any(df[[i]]!=4))
+ stop("can't specify both spar and df")
+ }
+
+ spar <- unlist(spar)
+ df <- unlist(df)
+ smooth.frame$spar <- spar # original
+ smooth.frame$df <- df # original
+
+ if(sum(smooth.frame$df[smooth.frame$spar==0]) + pbig >
+ smooth.frame$n * sum(ncolBlist[nwhich]))
+ stop("too many parameters/dof for data on hand")
+
+ xn.big <- labels(xbig.save)[[2]]
+ asgn <- attr(xbig.save, "assign")
+ aa <- NULL
+ for(i in nwhich) {
+ aa <- c(aa, xn.big[asgn[[i]]])
+ }
+ smooth.frame$ndfspar <- aa # Stored here
+ smooth.frame$xn.big <- xn.big # Stored here
+ smooth.frame$s.xargument <- s.xargument # Stored here
+
+ smooth.frame$smap=as.vector(cumsum(
+ c(1,ncolBlist[nwhich]))[1:length(nwhich)])
+
+ smooth.frame$try.spar <- spar
+ smooth.frame$prev.dof <- df
+
+
+ smooth.frame$bindex <- as.integer(cumsum(c(1,
+ smooth.frame$nknots*ncolBlist[nwhich])))
+ smooth.frame$kindex = as.integer(cumsum(c(1, 4+smooth.frame$nknots)))
+ }
+ if(sf.only)
+ return(smooth.frame)
+
+ ldk <- 4 * max(ncolBlist[nwhich]) # was M; # Prior to 11/7/02
+ ldk <- 3 * max(ncolBlist[nwhich]) + 1 # 11/7/02
+
+
+ which <- unlist(which)
+ p <- smooth.frame$p
+ n <- smooth.frame$n
+ dimw <- if(is.matrix(wz)) ncol(wz) else 1
+
+ dimu <- if(is.matrix(U)) nrow(U) else 1
+
+ index <- iam(NA, NA, M, both=TRUE)
+
+ nBlist <- names(Blist)
+ for(i in length(nBlist):1) {
+ if(!any(nBlist[i] == nwhich))
+ Blist[[i]] <- NULL
+ }
+ trivc <- trivial.constraints(Blist)
+
+ ncbvec <- ncolBlist[nwhich]
+ ncolb <- max(ncbvec)
+
+ pmax.mwk <- rep(dimw, length(trivc))
+ pmax.mwk <- pmax(ncbvec*(ncbvec+1)/2, dimw)
+
+ size.twk <- max((4+4*smooth.frame$nef)*ncbvec + dimu*smooth.frame$nef)
+
+ size.twk <- max(size.twk, M*smooth.frame$n)
+
+ fit <- dotFortran(name="vbfa",
+ as.integer(backchat), n = as.integer(n), M = as.integer(M),
+ npetc = as.integer(c(n, p, length(which), se.fit, 0,
+ bf.maxit, 0, M, n*M, pbig,
+ qbig, dimw, dimu, ier=0, ldk=ldk)),
+ as.double(x),
+ y = as.double(z), w = as.double(wz),
+ spar = as.double(smooth.frame$try.spar),
+ df = as.double(smooth.frame$df),
+ as.integer(smooth.frame$o),as.integer(smooth.frame$nef),as.integer(which),
+ etal = double(M*n), smooth = as.double(s), eta = double(M*n),
+ s0 = double((2*M)*(2*M)*2),
+ beta = double(pbig), var = if(se.fit) as.double(s) else double(1),
+ as.double(bf.epsilon),
+ qr = as.double(xbig.save), qraux = double(pbig),
+ qpivot = as.integer(1:pbig),
+ xbig = if(backchat) as.double(xbig.save) else double(1),
+ U = as.double(U),
+ as.double(unlist(Blist)),
+ as.integer(ncbvec), as.integer(smooth.frame$smap),
+ rcind = integer(M*(M+1)), trivc = as.integer(trivc),
+ work1 = double(3*qbig + (9+2*4+max(smooth.frame$nknots))*
+ max(smooth.frame$nknots)),
+ wk2 = double(n*M*3),
+ wkmm = double(M*M*16 + M*pbig),
+ work3 = double(max(max(2 * smooth.frame$nef * ncbvec^2),
+ max(smooth.frame$nknots * ncbvec * (4*ncbvec+1)))),
+ sgdub = double(max(smooth.frame$nknots) * max(4,ncolb)),
+ bmb = double(M*M),
+ lev = double(max(smooth.frame$nef * ncbvec)),
+ mwk = double(max(smooth.frame$nef * (1 + 2*M + pmax.mwk)) ),
+ twk = double(size.twk),
+ bcoefficients = double(sum(smooth.frame$nknots*ncbvec)),
+ knots = as.double(unlist(smooth.frame$knots)),
+ resss = double(1),
+ bindex = as.integer(smooth.frame$bindex),
+ nknots = as.integer(smooth.frame$nknots),
+ itwk = integer(2*M),
+ kindex = as.integer(smooth.frame$kindex))
+
+ dim(fit$qr) = dim(xbig.save)
+ dimnames(fit$qr) = dimnames(xbig.save)
+ dim(fit$y) = dim(z)
+ dimnames(fit$y) = dimnames(z)
+ dim(fit$smooth) = dim(s)
+ dimnames(fit$smooth) = dimnames(s) # Needed for vgam.nlchisq
+ if(se.fit) {
+ dim(fit$var) = dim(s)
+ dimnames(fit$var) = dimnames(s)
+ }
+
+
+
+
+
+
+ if(fit$npetc[14] != 0)
+ stop("something went wrong in the Fortran subroutine vbfa()")
+
+ fit$eta <- if(M>1) matrix(fit$eta,n,M,byrow=TRUE) else c(fit$eta)
+
+ nit <- fit$npetc[5]
+ qrank <- fit$npetc[7]
+
+
+ smooth.frame$try.spar <- fit$spar
+ change <- abs(smooth.frame$prev.dof-fit$df)/fit$df > 0.05 &
+ smooth.frame$spar==0
+ smooth.frame$try.spar[change] <- 0 # For next time
+ smooth.frame$prev.dof <- fit$df
+
+ if((nit == bf.maxit) & bf.maxit > 1)
+ warning(paste("s.vam convergence not obtained in", bf.maxit,
+ "iterations"))
+
+ R <- fit$qr[1:pbig, 1:pbig]
+ R[lower.tri(R)] <- 0
+
+
+
+ Bspline <- vector("list", length(nwhich))
+ names(Bspline) <- nwhich
+ for(i in 1:length(nwhich)) {
+ ans = fit$bcoeff[(smooth.frame$bindex[i]):(smooth.frame$bindex[i+1]-1)]
+ ans = matrix(ans, ncol=ncolBlist[nwhich[i]])
+ Bspline[[i]] = new("vsmooth.spline.fit",
+ "Bcoefficients" = ans,
+ "xmax" = smooth.frame$xmax[i],
+ "xmin" = smooth.frame$xmin[i],
+ "knots" = as.vector(smooth.frame$knots[[i]]))
+ }
+
+
+ rl <- list(
+ Bspline = Bspline,
+ coefficients = fit$beta,
+ df.residual = n*M - qrank - sum(fit$df - 1),
+ fitted.values = fit$eta,
+ nl.df = fit$df - 1,
+ qr = list(qr=fit$qr, rank=qrank, qraux=fit$qraux, pivot=fit$qpivot),
+ R = R,
+ rank = qrank,
+ residuals = fit$y - fit$eta,
+ rss = fit$resss,
+ smooth = fit$smooth,
+ spar = fit$spar,
+ s.xargument = unlist(smooth.frame$s.xargument))
+
+
+ names(rl$coefficients) <- smooth.frame$xn.big
+ names(rl$spar) <- smooth.frame$ndfspar
+ names(rl$nl.df) <- smooth.frame$ndfspar
+
+ if(se.fit)
+ rl <- c(rl, list(var=fit$var))
+ c(list(smooth.frame=smooth.frame), rl)
+}
+
+
diff --git a/R/smart.R b/R/smart.R
new file mode 100644
index 0000000..895fb1a
--- /dev/null
+++ b/R/smart.R
@@ -0,0 +1,1017 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+
+
+lm <-
+function (formula, data, subset, weights, na.action, method = "qr",
+ model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE,
+ contrasts = NULL, offset, smart=TRUE, ...)
+{
+ ret.x <- x
+ ret.y <- y
+
+ if(smart) setup.smart("write")
+
+ cl <- match.call()
+ mf <- match.call(expand.dots = FALSE)
+ m <- match(c("formula", "data", "subset", "weights", "na.action",
+ "offset"), names(mf), 0)
+ mf <- mf[c(1, m)]
+ mf$drop.unused.levels <- TRUE
+ mf$smart <- NULL
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ if (method == "model.frame")
+ return(mf) else if (method != "qr")
+ warning(gettextf("method = '%s' is not supported. Using 'qr'",
+ method), domain = NA)
+ mt <- attr(mf, "terms")
+ y <- model.response(mf, "numeric")
+ w <- as.vector(model.weights(mf))
+ if (!is.null(w) && !is.numeric(w))
+ stop("'weights' must be a numeric vector")
+ offset <- as.vector(model.offset(mf))
+ if (!is.null(offset)) {
+ if (length(offset) == 1)
+ offset <- rep(offset, NROW(y)) else if (length(offset) != NROW(y))
+ stop(gettextf("number of offsets is %d, should equal %d (number of observations)",
+ length(offset), NROW(y)), domain = NA)
+ }
+ if (is.empty.model(mt)) {
+ x <- NULL
+ z <- list(coefficients = if (is.matrix(y)) matrix(, 0,
+ 3) else numeric(0), residuals = y, fitted.values = 0 *
+ y, weights = w, rank = 0, df.residual = if (is.matrix(y)) nrow(y) else length(y))
+ if (!is.null(offset))
+ z$fitted.values <- offset
+ } else {
+ x <- model.matrix(mt, mf, contrasts)
+ z <- if (is.null(w))
+ lm.fit(x, y, offset = offset, singular.ok = singular.ok,
+ ...) else
+ lm.wfit(x, y, w, offset = offset, singular.ok = singular.ok,
+ ...)
+ }
+ class(z) <- c(if (is.matrix(y)) "mlm", "lm")
+ z$na.action <- attr(mf, "na.action")
+ z$offset <- offset
+ z$contrasts <- attr(x, "contrasts")
+ z$xlevels <- .getXlevels(mt, mf)
+ z$call <- cl
+ z$terms <- mt
+ if (model)
+ z$model <- mf
+ if (ret.x)
+ z$x <- x
+ if (ret.y)
+ z$y <- y
+ if (!qr)
+ z$qr <- NULL
+
+ if(smart) {
+ z$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ } else
+ z$smart.prediction <- list(smart.arg=FALSE)
+
+ z
+}
+attr(lm, "smart") <- TRUE
+
+
+predict.lm <-
+function (object, newdata, se.fit = FALSE, scale = NULL, df = Inf,
+ interval = c("none", "confidence", "prediction"), level = 0.95,
+ type = c("response", "terms"), terms = NULL, na.action = na.pass,
+ pred.var = res.var/weights, weights = 1, ...)
+{
+ # Smart prediction: restore the parameters recorded at fitting time
+ if(is.smart(object) && length(object$smart.prediction)) {
+ setup.smart("read", smart.prediction=object$smart.prediction)
+ }
+
+ tt <- terms(object)
+ if (missing(newdata) || is.null(newdata)) {
+ mm <- X <- model.matrix(object)
+ mmDone <- TRUE
+ offset <- object$offset
+ } else {
+ Terms <- delete.response(tt)
+ m <- model.frame(Terms, newdata, na.action = na.action,
+ xlev = object$xlevels)
+ if (!is.null(cl <- attr(Terms, "dataClasses")))
+ .checkMFClasses(cl, m)
+ X <- model.matrix(Terms, m, contrasts = object$contrasts)
+ offset <- if (!is.null(off.num <- attr(tt, "offset")))
+ eval(attr(tt, "variables")[[off.num + 1]], newdata) else
+ if (!is.null(object$offset))
+ eval(object$call$offset, newdata)
+ mmDone <- FALSE
+ }
+ n <- length(object$residuals)
+ p <- object$rank
+ p1 <- seq_len(p)
+ piv <- object$qr$pivot[p1]
+ if (p < ncol(X) && !(missing(newdata) || is.null(newdata)))
+ warning("prediction from a rank-deficient fit may be misleading")
+ beta <- object$coefficients
+ predictor <- drop(X[, piv, drop = FALSE] %*% beta[piv])
+ if (!is.null(offset))
+ predictor <- predictor + offset
+ interval <- match.arg(interval)
+ if (interval == "prediction") {
+ if (missing(newdata))
+ warning("Predictions on current data refer to _future_ responses\n")
+ if (missing(newdata) && missing(weights)) {
+ w <- weights.default(object)
+ if (!is.null(w)) {
+ weights <- w
+ warning("Assuming prediction variance inversely proportional to weights used for fitting\n")
+ }
+ }
+ if (!missing(newdata) && missing(weights) && !is.null(object$weights) &&
+ missing(pred.var))
+ warning("Assuming constant prediction variance even though model fit is weighted\n")
+ if (inherits(weights, "formula")) {
+ if (length(weights) != 2)
+ stop("'weights' as formula should be one-sided")
+ d <- if (missing(newdata) || is.null(newdata))
+ model.frame(object) else newdata
+ weights <- eval(weights[[2]], d, environment(weights))
+ }
+ }
+ type <- match.arg(type)
+ if (se.fit || interval != "none") {
+ res.var <- if (is.null(scale)) {
+ r <- object$residuals
+ w <- object$weights
+ rss <- sum(if (is.null(w))
+ r^2 else r^2 * w)
+ df <- n - p
+ rss/df
+ } else scale^2
+ if (type != "terms") {
+ if (p > 0) {
+ XRinv <- if (missing(newdata) && is.null(w))
+ qr.Q(object$qr)[, p1, drop = FALSE] else
+ X[, piv] %*% qr.solve(qr.R(object$qr)[p1,
+ p1])
+ ip <- drop(XRinv^2 %*% rep(res.var, p))
+ } else ip <- rep(0, n)
+ }
+ }
+ if (type == "terms") {
+ if (!mmDone) {
+ mm <- model.matrix(object)
+ mmDone <- TRUE
+ }
+ aa <- attr(mm, "assign")
+ ll <- attr(tt, "term.labels")
+ hasintercept <- attr(tt, "intercept") > 0
+ if (hasintercept)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
+ asgn <- split(order(aa), aaa)
+ if (hasintercept) {
+ asgn$"(Intercept)" <- NULL
+ if (!mmDone) {
+ mm <- model.matrix(object)
+ mmDone <- TRUE
+ }
+ avx <- colMeans(mm)
+ termsconst <- sum(avx[piv] * beta[piv])
+ }
+ nterms <- length(asgn)
+ if (nterms > 0) {
+ predictor <- matrix(ncol = nterms, nrow = NROW(X))
+ dimnames(predictor) <- list(rownames(X), names(asgn))
+ if (se.fit || interval != "none") {
+ ip <- matrix(ncol = nterms, nrow = NROW(X))
+ dimnames(ip) <- list(rownames(X), names(asgn))
+ Rinv <- qr.solve(qr.R(object$qr)[p1, p1])
+ }
+ if (hasintercept)
+ X <- sweep(X, 2, avx)
+ unpiv <- rep.int(0, NCOL(X))
+ unpiv[piv] <- p1
+ for (i in seq(1, nterms, length = nterms)) {
+ iipiv <- asgn[[i]]
+ ii <- unpiv[iipiv]
+ iipiv[ii == 0] <- 0
+ predictor[, i] <- if (any(iipiv > 0))
+ X[, iipiv, drop = FALSE] %*% beta[iipiv] else 0
+ if (se.fit || interval != "none")
+ ip[, i] <- if (any(iipiv > 0))
+ as.matrix(X[, iipiv, drop = FALSE] %*% Rinv[ii,
+ , drop = FALSE])^2 %*% rep.int(res.var,
+ p) else 0
+ }
+ if (!is.null(terms)) {
+ predictor <- predictor[, terms, drop = FALSE]
+ if (se.fit)
+ ip <- ip[, terms, drop = FALSE]
+ }
+ } else {
+ predictor <- ip <- matrix(0, n, 0)
+ }
+ attr(predictor, "constant") <- if (hasintercept)
+ termsconst else 0
+ }
+ if (interval != "none") {
+ tfrac <- qt((1 - level)/2, df)
+ hwid <- tfrac * switch(interval, confidence = sqrt(ip),
+ prediction = sqrt(ip + pred.var))
+ if (type != "terms") {
+ predictor <- cbind(predictor, predictor + hwid %o%
+ c(1, -1))
+ colnames(predictor) <- c("fit", "lwr", "upr")
+ } else {
+ lwr <- predictor + hwid
+ upr <- predictor - hwid
+ }
+ }
+ if (se.fit || interval != "none")
+ se <- sqrt(ip)
+ if (missing(newdata) && !is.null(na.act <- object$na.action)) {
+ predictor <- napredict(na.act, predictor)
+ if (se.fit)
+ se <- napredict(na.act, se)
+ }
+
+ if(is.smart(object) && length(object$smart.prediction)) {
+ wrapup.smart()
+ }
+
+ if (type == "terms" && interval != "none") {
+ if (missing(newdata) && !is.null(na.act)) {
+ lwr <- napredict(na.act, lwr)
+ upr <- napredict(na.act, upr)
+ }
+ list(fit = predictor, se.fit = se, lwr = lwr, upr = upr,
+ df = df, residual.scale = sqrt(res.var))
+ } else if (se.fit)
+ list(fit = predictor, se.fit = se, df = df,
+ residual.scale = sqrt(res.var)) else predictor
+}
+attr(predict.lm, "smart") <- TRUE
+
+
+predict.glm <-
+function (object, newdata = NULL, type = c("link", "response",
+ "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL,
+ na.action = na.pass, ...)
+{
+ # Smart prediction: restore the parameters recorded at fitting time
+ if(is.smart(object) && length(object$smart.prediction)) {
+ setup.smart("read", smart.prediction=object$smart.prediction)
+ }
+
+ type <- match.arg(type)
+ na.act <- object$na.action
+ object$na.action <- NULL
+ if (!se.fit) {
+ if (missing(newdata)) {
+ pred <- switch(type, link = object$linear.predictors,
+ response = object$fitted, terms = predict.lm(object,
+ se.fit = se.fit, scale = 1, type = "terms",
+ terms = terms))
+ if (!is.null(na.act))
+ pred <- napredict(na.act, pred)
+ } else {
+ pred <- predict.lm(object, newdata, se.fit, scale = 1,
+ type = ifelse(type == "link", "response", type),
+ terms = terms, na.action = na.action)
+ switch(type, response = {
+ pred <- family(object)$linkinv(pred)
+ }, link = , terms = )
+ }
+ } else {
+ if (inherits(object, "survreg"))
+ dispersion <- 1
+ if (is.null(dispersion) || dispersion == 0)
+ dispersion <- summary(object, dispersion = dispersion)$dispersion
+ residual.scale <- as.vector(sqrt(dispersion))
+ pred <- predict.lm(object, newdata, se.fit, scale = residual.scale,
+ type = ifelse(type == "link", "response", type),
+ terms = terms, na.action = na.action)
+ fit <- pred$fit
+ se.fit <- pred$se.fit
+ switch(type, response = {
+ se.fit <- se.fit * abs(family(object)$mu.eta(fit))
+ fit <- family(object)$linkinv(fit)
+ }, link = , terms = )
+ if (missing(newdata) && !is.null(na.act)) {
+ fit <- napredict(na.act, fit)
+ se.fit <- napredict(na.act, se.fit)
+ }
+ pred <- list(fit = fit, se.fit = se.fit, residual.scale = residual.scale)
+ }
+ if(is.smart(object) && length(object$smart.prediction)) {
+ wrapup.smart()
+ }
+ pred
+}
+attr(predict.glm, "smart") <- TRUE
+
+
+predict.mlm <-
+function (object, newdata, se.fit = FALSE, na.action = na.pass,
+ ...)
+{
+ # Smart prediction: restore the parameters recorded at fitting time
+ if(is.smart(object) && length(object$smart.prediction)) {
+ setup.smart("read", smart.prediction=object$smart.prediction)
+ }
+
+ if (missing(newdata))
+ return(object$fitted)
+ if (se.fit)
+ stop("the 'se.fit' argument is not yet implemented for \"mlm\" objects")
+ if (missing(newdata)) {
+ X <- model.matrix(object)
+ offset <- object$offset
+ } else {
+ tt <- terms(object)
+ Terms <- delete.response(tt)
+ m <- model.frame(Terms, newdata, na.action = na.action,
+ xlev = object$xlevels)
+ if (!is.null(cl <- attr(Terms, "dataClasses")))
+ .checkMFClasses(cl, m)
+ X <- model.matrix(Terms, m, contrasts = object$contrasts)
+ offset <- if (!is.null(off.num <- attr(tt, "offset")))
+ eval(attr(tt, "variables")[[off.num + 1]], newdata) else
+ if (!is.null(object$offset))
+ eval(object$call$offset, newdata)
+ }
+ piv <- object$qr$pivot[seq(object$rank)]
+ pred <- X[, piv, drop = FALSE] %*% object$coefficients[piv,
+ ]
+ if (!is.null(offset))
+ pred <- pred + offset
+
+ if(is.smart(object) && length(object$smart.prediction)) {
+ wrapup.smart()
+ }
+
+ if (inherits(object, "mlm")) pred else pred[, 1]
+}
+attr(predict.mlm, "smart") <- TRUE
+
+
+
+glm <-
+function (formula, family = gaussian, data, weights, subset,
+ na.action, start = NULL, etastart, mustart, offset, control = glm.control(...),
+ model = TRUE, method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL,
+ smart = TRUE, ...)
+{
+ call <- match.call()
+ if (is.character(family))
+ family <- get(family, mode = "function", envir = parent.frame())
+ if (is.function(family))
+ family <- family()
+ if (is.null(family$family)) {
+ print(family)
+ stop("'family' not recognized")
+ }
+ if (missing(data))
+ data <- environment(formula)
+
+ if(smart) setup.smart("write")
+
+ mf <- match.call(expand.dots = FALSE)
+ m <- match(c("formula", "data", "subset", "weights", "na.action",
+ "etastart", "mustart", "offset"), names(mf), 0)
+ mf <- mf[c(1, m)]
+ mf$smart <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ switch(method, model.frame = return(mf), glm.fit = 1, stop("invalid 'method': ",
+ method))
+ mt <- attr(mf, "terms")
+ Y <- model.response(mf, "any")
+ if (length(dim(Y)) == 1) {
+ nm <- rownames(Y)
+ dim(Y) <- NULL
+ if (!is.null(nm))
+ names(Y) <- nm
+ }
+ X <- if (!is.empty.model(mt))
+ model.matrix(mt, mf, contrasts) else matrix(, NROW(Y), 0)
+ weights <- as.vector(model.weights(mf))
+ if (!is.null(weights) && !is.numeric(weights))
+ stop("'weights' must be a numeric vector")
+ offset <- as.vector(model.offset(mf))
+ if (!is.null(weights) && any(weights < 0))
+ stop("negative weights not allowed")
+ if (!is.null(offset)) {
+ if (length(offset) == 1)
+ offset <- rep(offset, NROW(Y)) else
+ if (length(offset) != NROW(Y))
+ stop(gettextf(
+ "number of offsets is %d should equal %d (number of observations)",
+ length(offset), NROW(Y)), domain = NA)
+ }
+ mustart <- model.extract(mf, "mustart")
+ etastart <- model.extract(mf, "etastart")
+ fit <- glm.fit(x = X, y = Y, weights = weights, start = start,
+ etastart = etastart, mustart = mustart, offset = offset,
+ family = family, control = control, intercept = attr(mt,
+ "intercept") > 0)
+ if (length(offset) && attr(mt, "intercept") > 0) {
+ fit$null.deviance <- glm.fit(x = X[, "(Intercept)", drop = FALSE],
+ y = Y, weights = weights, offset = offset, family = family,
+ control = control, intercept = TRUE)$deviance
+ }
+ if (model)
+ fit$model <- mf
+ fit$na.action <- attr(mf, "na.action")
+ if (x)
+ fit$x <- X
+ if (!y)
+ fit$y <- NULL
+ fit <- c(fit, list(call = call, formula = formula, terms = mt,
+ data = data, offset = offset, control = control, method = method,
+ contrasts = attr(X, "contrasts"), xlevels = .getXlevels(mt,
+ mf)))
+ class(fit) <- c("glm", "lm")
+
+ if(smart) {
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ } else
+ fit$smart.prediction <- list(smart.arg=FALSE)
+
+ fit
+}
+attr(glm, "smart") <- TRUE
+
+
+
+
+
+smartpredenv = new.env()
+
+
+smart.mode.is <- function(mode.arg=NULL) {
+ if(!length(mode.arg)) {
+ if(exists(".smart.prediction", env=smartpredenv)) {
+ get(".smart.prediction.mode", env=smartpredenv)
+ } else {
+ "neutral"
+ }
+ } else {
+ if(mode.arg != "neutral" && mode.arg != "read" && mode.arg != "write")
+stop("argument \"mode.arg\" must be one of \"neutral\", \"read\" or \"write\"")
+ if(exists(".smart.prediction", env=smartpredenv)) {
+ get(".smart.prediction.mode", env=smartpredenv)==mode.arg
+ } else {
+ mode.arg=="neutral"
+ }
+ }
+}
+
+
+setup.smart <- function(mode.arg, smart.prediction=NULL, max.smart=30) {
+ actual <- if(mode.arg=="write") vector("list", max.smart) else
+ if(mode.arg=="read") smart.prediction else
+ stop("value of mode.arg unrecognized")
+
+ wrapup.smart() # Make sure no stale state is left over
+
+ if(length(actual)) {
+ # Double check that smart.prediction is not trivial (in "read" mode)
+ # If it is trivial then ignore it. This saves testing whether
+ # length(object$smart.prediction) > 0 in the predict methods function
+
+
+ assign(".smart.prediction", actual, envir = smartpredenv)
+ assign(".smart.prediction.counter", 0, envir = smartpredenv)
+ assign(".smart.prediction.mode", mode.arg, envir = smartpredenv)
+ assign(".max.smart", max.smart, envir = smartpredenv)
+ assign(".smart.prediction", actual, envir = smartpredenv)
+ }
+}
+
+wrapup.smart <- function() {
+ if(exists(".smart.prediction", envir = smartpredenv))
+ rm(".smart.prediction", envir = smartpredenv)
+ if(exists(".smart.prediction.counter", envir = smartpredenv))
+ rm(".smart.prediction.counter", envir = smartpredenv)
+ if(exists(".smart.prediction.mode", envir = smartpredenv))
+ rm(".smart.prediction.mode", envir = smartpredenv)
+ if(exists(".max.smart", envir = smartpredenv))
+ rm(".max.smart", envir = smartpredenv)
+}
+
+
+get.smart.prediction <- function() {
+
+ smart.prediction.counter <- get(".smart.prediction.counter",
+ envir = smartpredenv)
+ max.smart <- get(".max.smart", envir = smartpredenv)
+
+ if(smart.prediction.counter > 0) {
+ # Save this on the object for smart prediction later
+ smart.prediction <- get(".smart.prediction", envir = smartpredenv)
+ if(max.smart >= (smart.prediction.counter+1))
+ for(i in max.smart:(smart.prediction.counter+1))
+ smart.prediction[[i]] <- NULL
+ smart.prediction
+ } else
+ NULL
+}
+
+
+put.smart <- function(smart) {
+
+ # Appends one list of information to .smart.prediction in smartpredenv,
+ # enlarging the list if necessary. Does not return a success indicator.
+
+
+ # Record the info in the smartpredenv environment
+ max.smart <- get(".max.smart", envir = smartpredenv)
+ smart.prediction.counter <- get(".smart.prediction.counter",
+ envir = smartpredenv)
+ smart.prediction <- get(".smart.prediction", envir = smartpredenv)
+ smart.prediction.counter <- smart.prediction.counter + 1
+
+ if(smart.prediction.counter > max.smart) {
+ # if list is too small, make it larger
+ max.smart <- max.smart + (inc.smart <- 10) # can change inc.smart
+ smart.prediction <- c(smart.prediction, vector("list", inc.smart))
+ assign(".max.smart", max.smart, envir = smartpredenv)
+ }
+
+ smart.prediction[[smart.prediction.counter]] <- smart
+ assign(".smart.prediction", smart.prediction, envir = smartpredenv)
+ assign(".smart.prediction.counter", smart.prediction.counter,
+ envir = smartpredenv)
+}
+
+
+get.smart <- function() {
+ # Returns one list component of information
+ smart.prediction <- get(".smart.prediction", envir = smartpredenv)
+ smart.prediction.counter <- get(".smart.prediction.counter",
+ envir = smartpredenv)
+ smart.prediction.counter <- smart.prediction.counter + 1
+ assign(".smart.prediction.counter", smart.prediction.counter,
+ envir = smartpredenv)
+ smart <- smart.prediction[[smart.prediction.counter]]
+ smart
+}
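+
+# A minimal sketch of the write/read life cycle that the functions above
+# implement (illustrative values only, not run at source time).
+if(FALSE) {
+ setup.smart("write") # fitting phase
+ put.smart(list(center = 3)) # a smart function stores its parameters
+ sp <- get.smart.prediction() # harvested onto the fitted object
+ wrapup.smart()
+ setup.smart("read", smart.prediction = sp) # prediction phase
+ get.smart()$center # 3; components are replayed in their original order
+ wrapup.smart()
+}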
+
+smart.expression <- expression({
+
+ # This expression only works if the first argument of the smart
+ # function is "x", e.g., smartfun(x, ...)
+ # Nb. .smart.match.call is the name of the smart function.
+
+ smart <- get.smart()
+ assign(".smart.prediction.mode", "neutral", envir = smartpredenv)
+
+ .smart.match.call = as.character(smart$match.call)
+ smart$match.call = NULL # Kill it off for the do.call
+
+ ans.smart <- do.call(.smart.match.call[1], c(list(x=x), smart))
+ assign(".smart.prediction.mode", "read", envir = smartpredenv)
+
+ ans.smart
+})
+
+
+
+is.smart <- function(object) {
+ if(is.function(object)) {
+ if(is.logical(a <- attr(object, "smart"))) a else FALSE
+ } else {
+ if(length(slotNames(object))) {
+ if(length(object at smart.prediction) == 1 &&
+ is.logical(object at smart.prediction$smart.arg))
+ object at smart.prediction$smart.arg else
+ any(slotNames(object) == "smart.prediction")
+ } else {
+ if(length(object$smart.prediction) == 1 &&
+ is.logical(object$smart.prediction$smart.arg))
+ object$smart.prediction$smart.arg else
+ any(names(object) == "smart.prediction")
+ }
+ }
+}
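+
+# Quick checks (not run at source time): a function is smart if it carries
+# the "smart" attribute; a fitted object is smart if it carries a
+# smart.prediction component or slot.
+if(FALSE) {
+ is.smart(bs) # TRUE, via attr(bs, "smart") below
+ is.smart(sqrt) # FALSE, no "smart" attribute
+}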
+
+
+
+
+library(splines)
+
+
+
+bs <-
+function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
+ Boundary.knots = range(x))
+{
+ x <- x # Evaluate x
+ if(smart.mode.is("read")) {
+ return(eval(smart.expression))
+ }
+
+ nx <- names(x)
+ x <- as.vector(x)
+ nax <- is.na(x)
+ if (nas <- any(nax))
+ x <- x[!nax]
+ if (!missing(Boundary.knots)) {
+ Boundary.knots <- sort(Boundary.knots)
+ outside <- (ol <- x < Boundary.knots[1]) | (or <- x >
+ Boundary.knots[2])
+ } else outside <- FALSE
+ ord <- 1 + (degree <- as.integer(degree))
+ if (ord <= 1)
+ stop("'degree' must be integer >= 1")
+ if (!missing(df) && missing(knots)) {
+ nIknots <- df - ord + (1 - intercept)
+ if (nIknots < 0) {
+ nIknots <- 0
+ warning("'df' was too small; have used ", ord -
+ (1 - intercept))
+ }
+ knots <- if (nIknots > 0) {
+ knots <- seq(from = 0, to = 1, length = nIknots +
+ 2)[-c(1, nIknots + 2)]
+ stats::quantile(x[!outside], knots)
+ }
+ }
+ Aknots <- sort(c(rep(Boundary.knots, ord), knots))
+ if (any(outside)) {
+ warning(
+"some 'x' values beyond boundary knots may cause ill-conditioned bases")
+ derivs <- 0:degree
+ scalef <- gamma(1:ord)
+ basis <- array(0, c(length(x), length(Aknots) - degree -
+ 1))
+ if (any(ol)) {
+ k.pivot <- Boundary.knots[1]
+ xl <- cbind(1, outer(x[ol] - k.pivot, 1:degree, "^"))
+ tt <- spline.des(Aknots, rep(k.pivot, ord), ord,
+ derivs)$design
+ basis[ol, ] <- xl %*% (tt/scalef)
+ }
+ if (any(or)) {
+ k.pivot <- Boundary.knots[2]
+ xr <- cbind(1, outer(x[or] - k.pivot, 1:degree, "^"))
+ tt <- spline.des(Aknots, rep(k.pivot, ord), ord,
+ derivs)$design
+ basis[or, ] <- xr %*% (tt/scalef)
+ }
+ if (any(inside <- !outside))
+ basis[inside, ] <- spline.des(Aknots, x[inside],
+ ord)$design
+ } else basis <- spline.des(Aknots, x, ord)$design
+ if (!intercept)
+ basis <- basis[, -1, drop = FALSE]
+ n.col <- ncol(basis)
+ if (nas) {
+ nmat <- matrix(NA, length(nax), n.col)
+ nmat[!nax, ] <- basis
+ basis <- nmat
+ }
+ dimnames(basis) <- list(nx, 1:n.col)
+ a <- list(degree = degree, knots = if (is.null(knots)) numeric(0) else knots,
+ Boundary.knots = Boundary.knots, intercept = intercept)
+ attributes(basis) <- c(attributes(basis), a)
+ class(basis) <- c("bs", "basis")
+
+ if(smart.mode.is("write"))
+ put.smart(list(df=df,
+ knots=knots,
+ degree=degree,
+ intercept=intercept,
+ Boundary.knots=Boundary.knots,
+ match.call=match.call()))
+
+ basis
+}
+attr(bs, "smart") <- TRUE
+
+ns <-
+function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(x))
+{
+ x <- x # Evaluate x
+ if(smart.mode.is("read")) {
+ return(eval(smart.expression))
+ }
+
+ nx <- names(x)
+ x <- as.vector(x)
+ nax <- is.na(x)
+ if (nas <- any(nax))
+ x <- x[!nax]
+ if (!missing(Boundary.knots)) {
+ Boundary.knots <- sort(Boundary.knots)
+ outside <- (ol <- x < Boundary.knots[1]) | (or <- x >
+ Boundary.knots[2])
+ } else outside <- FALSE
+ if (!missing(df) && missing(knots)) {
+ nIknots <- df - 1 - intercept
+ if (nIknots < 0) {
+ nIknots <- 0
+ warning("'df' was too small; have used ", 1 + intercept)
+ }
+ knots <- if (nIknots > 0) {
+ knots <- seq(0, 1, length = nIknots + 2)[-c(1, nIknots +
+ 2)]
+ stats::quantile(x[!outside], knots)
+ }
+ } else nIknots <- length(knots)
+ Aknots <- sort(c(rep(Boundary.knots, 4), knots))
+ if (any(outside)) {
+ basis <- array(0, c(length(x), nIknots + 4))
+ if (any(ol)) {
+ k.pivot <- Boundary.knots[1]
+ xl <- cbind(1, x[ol] - k.pivot)
+ tt <- spline.des(Aknots, rep(k.pivot, 2), 4, c(0,
+ 1))$design
+ basis[ol, ] <- xl %*% tt
+ }
+ if (any(or)) {
+ k.pivot <- Boundary.knots[2]
+ xr <- cbind(1, x[or] - k.pivot)
+ tt <- spline.des(Aknots, rep(k.pivot, 2), 4, c(0,
+ 1))$design
+ basis[or, ] <- xr %*% tt
+ }
+ if (any(inside <- !outside))
+ basis[inside, ] <- spline.des(Aknots, x[inside],
+ 4)$design
+ } else basis <- spline.des(Aknots, x, 4)$design
+ const <- spline.des(Aknots, Boundary.knots, 4, c(2, 2))$design
+ if (!intercept) {
+ const <- const[, -1, drop = FALSE]
+ basis <- basis[, -1, drop = FALSE]
+ }
+ qr.const <- qr(t(const))
+ basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1:2),
+ drop = FALSE])
+ n.col <- ncol(basis)
+ if (nas) {
+ nmat <- matrix(NA, length(nax), n.col)
+ nmat[!nax, ] <- basis
+ basis <- nmat
+ }
+ dimnames(basis) <- list(nx, 1:n.col)
+ a <- list(degree = 3, knots = if (is.null(knots)) numeric(0) else knots,
+ Boundary.knots = Boundary.knots, intercept = intercept)
+ attributes(basis) <- c(attributes(basis), a)
+ class(basis) <- c("ns", "basis")
+
+ if(smart.mode.is("write"))
+ put.smart(list(df=df,
+ knots=knots,
+ intercept=intercept,
+ Boundary.knots=Boundary.knots,
+ match.call=match.call()))
+
+ basis
+}
+attr(ns, "smart") <- TRUE
+
+
+
+
+poly <-
+function (x, ..., degree = 1, coefs = NULL, raw = FALSE)
+{
+ x <- x # Evaluate x
+ if(!raw && smart.mode.is("read")) {
+ smart <- get.smart()
+ degree <- smart$degree
+ coefs <- smart$coefs
+ raw <- smart$raw
+ }
+
+ dots <- list(...)
+ if (nd <- length(dots)) {
+ if (nd == 1 && length(dots[[1]]) == 1)
+ degree <- dots[[1]] else
+ return(polym(x, ..., degree = degree, raw = raw))
+ }
+ if (is.matrix(x)) {
+ m <- unclass(as.data.frame(cbind(x, ...)))
+ return(do.call("polym", c(m, degree = degree, raw = raw)))
+ }
+ if (degree < 1)
+ stop("'degree' must be at least 1")
+
+ # At prediction time the number of points may be less than the degree
+ if(smart.mode.is("write") || smart.mode.is("neutral"))
+ if (degree >= length(x))
+ stop("degree must be less than number of points")
+
+ if (any(is.na(x)))
+ stop("missing values are not allowed in 'poly'")
+ n <- degree + 1
+ if (raw) {
+ if (degree >= length(x))
+ stop("'degree' must be less than number of points")
+ Z <- outer(x, 1:degree, "^")
+ colnames(Z) <- 1:degree
+ attr(Z, "degree") <- 1:degree
+ class(Z) <- c("poly", "matrix")
+ return(Z)
+ }
+ if (is.null(coefs)) {
+ if (degree >= length(x))
+ stop("'degree' must be less than number of points")
+ xbar <- mean(x)
+ x <- x - xbar
+ X <- outer(x, seq_len(n) - 1, "^")
+ QR <- qr(X)
+ z <- QR$qr
+ z <- z * (row(z) == col(z))
+ raw <- qr.qy(QR, z)
+ norm2 <- colSums(raw^2)
+ alpha <- (colSums(x * raw^2)/norm2 + xbar)[1:degree]
+ Z <- raw/rep(sqrt(norm2), each = length(x))
+ colnames(Z) <- 1:n - 1
+ Z <- Z[, -1, drop = FALSE]
+ attr(Z, "degree") <- 1:degree
+ attr(Z, "coefs") <- list(alpha = alpha, norm2 = c(1,
+ norm2))
+ class(Z) <- c("poly", "matrix")
+ } else {
+ alpha <- coefs$alpha
+ norm2 <- coefs$norm2
+ Z <- matrix(, length(x), n)
+ Z[, 1] <- 1
+ Z[, 2] <- x - alpha[1]
+ if (degree > 1)
+ for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) *
+ Z[, i] - (norm2[i + 1]/norm2[i]) * Z[, i - 1]
+ Z <- Z/rep(sqrt(norm2[-1]), each = length(x))
+ colnames(Z) <- 0:degree
+ Z <- Z[, -1, drop = FALSE]
+ attr(Z, "degree") <- 1:degree
+ attr(Z, "coefs") <- list(alpha = alpha, norm2 = norm2)
+ class(Z) <- c("poly", "matrix")
+ }
+
+ if(smart.mode.is("write"))
+ put.smart(list(degree=degree, coefs=attr(Z, "coefs"),
+ raw=FALSE, # raw is changed above
+ match.call=match.call()))
+
+ Z
+}
+attr(poly, "smart") <- TRUE
+
+
+scale.default <-
+function (x, center = TRUE, scale = TRUE)
+{
+ x <- as.matrix(x)
+
+ if(smart.mode.is("read")) {
+ return(eval(smart.expression))
+ }
+
+ nc <- ncol(x)
+ if (is.logical(center)) {
+ if (center) {
+ center <- colMeans(x, na.rm = TRUE)
+ x <- sweep(x, 2, center)
+ }
+ } else if (is.numeric(center) && (length(center) == nc))
+ x <- sweep(x, 2, center) else
+ stop("length of 'center' must equal the number of columns of 'x'")
+ if (is.logical(scale)) {
+ if (scale) {
+ f <- function(v) {
+ v <- v[!is.na(v)]
+ sqrt(sum(v^2)/max(1, length(v) - 1))
+ }
+ scale <- apply(x, 2, f)
+ x <- sweep(x, 2, scale, "/")
+ }
+ } else if (is.numeric(scale) && length(scale) == nc)
+ x <- sweep(x, 2, scale, "/") else
+ stop("length of 'scale' must equal the number of columns of 'x'")
+ if (is.numeric(center))
+ attr(x, "scaled:center") <- center
+ if (is.numeric(scale))
+ attr(x, "scaled:scale") <- scale
+
+ if(smart.mode.is("write")) {
+ put.smart(list(center=center, scale=scale,
+ match.call=match.call()))
+ }
+
+ x
+}
+attr(scale.default, "smart") <- TRUE
+
+
+attr(scale, "smart") <- TRUE
+
+
+
+
+
+"my1" <- function(x, minx=min(x)) {
+
+ x <- x # Evaluate x
+
+ if(smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else
+ if(smart.mode.is("write"))
+ put.smart(list(minx=minx))
+
+ (x-minx)^2
+}
+attr(my1, "smart") <- TRUE
+
+
+
+
+"my2" <- function(x, minx=min(x)) {
+
+ x <- x # Evaluate x
+
+ if(smart.mode.is("read")) {
+ return(eval(smart.expression))
+ } else
+ if(smart.mode.is("write"))
+ put.smart(list(minx=minx, match.call=match.call()))
+
+ (x-minx)^2
+}
+
+attr(my2, "smart") <- TRUE
+
+
+
+
+"stdze1" <- function(x, center=TRUE, scale=TRUE) {
+
+ x <- x # Evaluate x
+
+ if(!is.vector(x))
+ stop("x must be a vector")
+
+ if(smart.mode.is("read")) {
+ smart <- get.smart()
+ return((x-smart$center)/smart$scale)
+ }
+
+ if(is.logical(center))
+ center <- if(center) mean(x) else 0
+ if(is.logical(scale))
+ scale <- if(scale) sqrt(var(x)) else 1
+
+ if(smart.mode.is("write"))
+ put.smart(list(center=center,
+ scale=scale))
+ # Normal use
+ (x-center)/scale
+}
+attr(stdze1, "smart") <- TRUE
+
+"stdze2" <- function(x, center=TRUE, scale=TRUE) {
+
+ x <- x # Evaluate x
+
+ if(!is.vector(x))
+ stop("x must be a vector")
+
+ if(smart.mode.is("read")) {
+ return(eval(smart.expression))
+ }
+
+ if(is.logical(center))
+ center <- if(center) mean(x) else 0
+ if(is.logical(scale))
+ scale <- if(scale) sqrt(var(x)) else 1
+
+ if(smart.mode.is("write"))
+ put.smart(list(center=center,
+ scale=scale,
+ match.call=match.call()))
+
+ (x-center)/scale
+}
+attr(stdze2, "smart") <- TRUE
+
+
+
+
diff --git a/R/step.vglm.q b/R/step.vglm.q
new file mode 100644
index 0000000..8059425
--- /dev/null
+++ b/R/step.vglm.q
@@ -0,0 +1,17 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+step.vglm <- function(fit, ...)
+{
+ cat("Sorry, this function has not been written yet. Returning a NULL.\n")
+ NULL
+}
+
+
+
+
+
+
+
+
diff --git a/R/summary.others.q b/R/summary.others.q
new file mode 100644
index 0000000..30f933d
--- /dev/null
+++ b/R/summary.others.q
@@ -0,0 +1,35 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+summary.lms <- function(object, ...)
+{
+ ans <- NextMethod("summary")
+
+ ans$testing <- object$lin[1:4,]
+
+ class(ans) <- c("summary.lms", class(ans))
+
+
+ ans
+}
+
+
+
+summary.rc.exponential <- function(object, ...)
+{
+
+ ans <- NextMethod("summary")
+
+ ans$num.censored <- attr(object$terms, "num.censored")
+
+ class(ans) <- c("summary.rc.exponential", class(ans))
+
+
+ ans
+}
+
+
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
new file mode 100644
index 0000000..00a36b0
--- /dev/null
+++ b/R/summary.vgam.q
@@ -0,0 +1,245 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+summaryvgam <- function(object, dispersion=NULL, digits=options()$digits-2)
+{
+
+ if(length(dispersion) && dispersion == 0 &&
+ length(object at family@summary.dispersion) &&
+ !object at family@summary.dispersion) {
+ stop(paste("Can't use the general VGLM formula (based on a residual",
+ "sum of squares) for computing the dispersion parameter."))
+ }
+
+ newobject <- object
+ class(newobject) <- "vglm"
+ stuff <- summaryvglm(newobject, dispersion=dispersion)
+ rdf <- stuff@df[2] <- object@df.residual # NA
+
+ M <- object@misc$M
+ n.big <- object@misc$n.big
+ rank <- if(is.null(object@qr$rank)) length(object@coefficients) else
+ object@qr$rank
+
+
+
+
+
+
+
+
+ useF <- object@misc$useF
+ if(is.null(useF))
+ useF <- FALSE
+
+ df <- unlist(lapply(object@misc$new.assign, length))
+ nldf <- object@nl.df
+
+ if(length(df)) {
+ aod <- as.matrix(round(df, 1))
+ dimnames(aod) <- list(names(df), "Df")
+ if(!is.null(object@nl.chisq)) {
+ aod <- cbind(aod, NA, NA, NA)
+ nl.chisq <- object@nl.chisq / object@dispersion
+
+ special = abs(nldf) < 0.1 # This was the quick fix in s.vam()
+ nldf[special] = 1 # Give it a plausible value for pchisq & pf
+
+ snames <- names(nldf)
+ aod[snames, 2] <- round(nldf, 1)
+ aod[snames, 3] <- if(useF) nl.chisq/nldf else nl.chisq
+ aod[snames, 4] <- if(useF) 1-pf(nl.chisq/nldf, nldf, rdf) else
+ 1-pchisq(nl.chisq, nldf)
+
+ if(any(special)) {
+ aod[snames[special], 2:4] = NA
+ }
+
+ rnames <- c("Df", "Npar Df", "Npar Chisq", "P(Chi)")
+ if(useF)
+ rnames[3:4] <- c("Npar F", "Pr(F)")
+ dimnames(aod) <- list(names(df), rnames)
+ heading <- if(useF)
+ "\nDF for Terms and Approximate F-values for Nonparametric Effects\n"
+ else
+ "\nDF for Terms and Approximate Chi-squares for Nonparametric Effects\n"
+ } else heading <- "DF for Terms\n\n"
+ aod <- as.vanova(data.frame(aod, check.names=FALSE), heading)
+
+ if(is.R()) class(aod) = "data.frame"
+ }
+ else aod <- if(is.R()) data.frame() else NULL
+
+ answer <-
+ new("summary.vgam",
+ object,
+ call=stuff@call,
+ cov.unscaled=stuff@cov.unscaled,
+ correlation=stuff@correlation,
+ df=stuff@df,
+ sigma=stuff@sigma)
+
+ slot(answer, "coefficients") = stuff@coefficients # Replace
+ if(is.numeric(stuff@dispersion))
+ slot(answer, "dispersion") = stuff@dispersion
+
+ presid = residuals(object, type="pearson")
+ if(length(presid))
+ answer@pearson.resid = as.matrix(presid)
+
+ slot(answer, "anova") = aod
+
+ answer
+}
+
+
+
+
+printsummary.vgam <- function(x, quote=TRUE, prefix="", digits=options()$digits-2)
+{
+
+ M <- x@misc$M
+
+
+ cat("\nCall:\n")
+ dput(x@call)
+
+ presid <- x@pearson.resid
+ rdf <- x@df[2]
+ if(FALSE && !is.null(presid) && all(!is.na(presid)))
+ {
+ cat("\nPearson Residuals:\n")
+ if(rdf/M > 5) {
+ rq <- apply(as.matrix(presid), 2, quantile) # 5 x M
+ dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
+ x@misc$predictors.names)
+ print(t(rq), digits=digits)
+ } else
+ if(rdf > 0) {
+ print(presid, digits=digits)
+ }
+ }
+
+ cat("\nNumber of linear predictors: ", M, "\n")
+
+ if(!is.null(x@misc$predictors.names))
+ if(M==1)
+ cat("\nName of linear predictor:",
+ paste(x@misc$predictors.names, collapse=", "), "\n") else if(M<=5)
+ cat("\nNames of linear predictors:",
+ paste(x@misc$predictors.names, collapse=", "), "\n")
+
+ prose <- ""
+ if(length(x@dispersion)) {
+ if(is.logical(x@misc$estimated.dispersion) &&
+ x@misc$estimated.dispersion)
+ prose <- "(Estimated) " else {
+
+ if(is.numeric(x@misc$default.dispersion) &&
+ x@dispersion==x@misc$default.dispersion)
+ prose <- "(Default) "
+
+ if(is.numeric(x@misc$default.dispersion) &&
+ x@dispersion!=x@misc$default.dispersion)
+ prose <- "(Pre-specified) "
+ }
+ cat(paste("\n", prose, "Dispersion Parameter for ",
+ x@family@vfamily[1],
+ " family: ", format(round(x@dispersion, digits)), "\n", sep=""))
+ }
+
+ if(length(deviance(x)))
+ cat("\nResidual Deviance: ", format(round(deviance(x), digits)),
+ "on", format(round(rdf, 3)), "degrees of freedom\n")
+ if(length(logLik(x)))
+ cat("\nLog-likelihood:", format(round(logLik(x), digits)),
+ "on", format(round(rdf, 3)), "degrees of freedom\n")
+
+ if(length(x@criterion)) {
+ ncrit <- names(x@criterion)
+ for(i in ncrit)
+ if(i!="loglikelihood" && i!="deviance")
+ cat(paste(i, ":", sep=""), format(x@criterion[[i]]), "\n")
+ }
+
+
+ cat("\nNumber of Iterations: ", x@iter, "\n")
+
+ if(length(x@anova)) {
+ print.vanova(x@anova, dig=digits) # ".vanova" for Splus6
+ }
+
+ invisible(NULL)
+}
+
+
+
+
+ setMethod("summary", "vgam",
+ function(object, ...)
+ summaryvgam(object, ...))
+
+ setMethod("print", "summary.vgam",
+ function(x, ...)
+ printsummary.vgam(x, ...))
+
+
+ setMethod("show", "summary.vgam",
+ function(object)
+ printsummary.vgam(object))
+
+
+
+
+
+print.vanova <- function(x, digits=.Options$digits, ...)
+{
+ rrr <- row.names(x)
+ heading <- attr(x, "heading")
+ if(!is.null(heading))
+ cat(heading, sep="\n")
+ attr(x, "heading") <- NULL
+ for(i in 1:length(x)) {
+ xx <- x[[i]]
+ xna <- is.na(xx)
+ xx <- format(zapsmall(xx, digits))
+ xx[xna] <- ""
+ x[[i]] <- xx
+ }
+ if(is.R()) {
+ invisible(NextMethod("print"))
+ } else {
+ print.data.frame(as.data.frame(x, row.names=rrr))
+ invisible(x)
+ }
+}
+
+as.vanova <- function(x, heading)
+{
+ if(!is.data.frame(x))
+ stop("x must be a data frame")
+ rrr <- row.names(x)
+ attr(x, "heading") <- heading
+ if(is.R()) {
+ class(x) <- c("vanova", class(x))
+ } else {
+ x <- as.data.frame(x, row.names=rrr)
+ }
+ x
+}
+
+
+if(!is.R()) {
+
+setMethod("print", "vanova",
+ function(x, ...)
+ print.vanova(x, ...))
+
+}
+
+
+
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
new file mode 100644
index 0000000..deeefee
--- /dev/null
+++ b/R/summary.vglm.q
@@ -0,0 +1,249 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+yformat = function(x, digits=options()$digits) {
+ format(ifelse(abs(x)<0.001, signif(x, digits), round(x, digits)))
+}
+
+
+
+
+summaryvglm <- function(object, correlation=FALSE, dispersion=NULL, digits=NULL)
+{
+
+
+
+ if(length(dispersion) && dispersion == 0 &&
+ length(object at family@summary.dispersion) &&
+ !object at family@summary.dispersion) {
+ stop(paste("Can't use the general VGLM formula (based on a residual",
+ "sum of squares) for computing the dispersion parameter."))
+ }
+
+ stuff <- summaryvlm(as(object, "vlm"),
+ correlation=correlation,
+ dispersion=dispersion)
+
+
+
+ answer <-
+ new("summary.vglm",
+ object,
+ coef3=stuff@coef3,
+ cov.unscaled=stuff@cov.unscaled,
+ correlation=stuff@correlation,
+ df=stuff@df,
+ sigma=stuff@sigma)
+
+ presid = resid(object, type="pearson")
+ if(length(presid))
+ answer@pearson.resid = as.matrix(presid)
+
+ slot(answer, "misc") = stuff@misc # Replace
+
+ if(is.numeric(stuff@dispersion))
+ slot(answer, "dispersion") = stuff@dispersion
+
+ answer
+}
+
+
+
+
+
+
+setMethod("logLik", "summary.vglm", function(object, ...)
+ logLik.vlm(object, ...))
+
+
+printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "")
+{
+
+ M <- x@misc$M
+ coef <- x@coef3 # coefficients (3-column matrix)
+ correl <- x@correlation
+
+ digits <- if(is.null(digits)) options()$digits - 2 else digits
+
+ cat("\nCall:\n")
+ dput(x@call)
+
+ presid <- x@pearson.resid
+ rdf <- x@df[2]
+ if(length(presid) && all(!is.na(presid)) && is.finite(rdf))
+ {
+ cat("\nPearson Residuals:\n")
+ if(rdf/M > 5)
+ {
+ rq <- apply(as.matrix(presid), 2, quantile) # 5 x M
+ dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
+ x@misc$predictors.names)
+ print(t(rq), digits = digits)
+ } else
+ if(rdf > 0) {
+ print(presid, digits = digits)
+ }
+ }
+
+ cat("\nCoefficients:\n")
+ print.default(coef, digits = digits)
+
+ cat("\nNumber of linear predictors: ", M, "\n")
+
+ if(!is.null(x@misc$predictors.names))
+ if(M==1)
+ cat("\nName of linear predictor:",
+ paste(x@misc$predictors.names, collapse=", "), "\n") else if(M<=5)
+ cat("\nNames of linear predictors:",
+ paste(x@misc$predictors.names, collapse=", "), fill=TRUE)
+
+ prose <- ""
+ if(length(x@dispersion)) {
+ if(is.logical(x@misc$estimated.dispersion) &&
+ x@misc$estimated.dispersion)
+ prose <- "(Estimated) " else {
+
+ if(is.numeric(x@misc$default.dispersion) &&
+ x@dispersion==x@misc$default.dispersion)
+ prose <- "(Default) "
+
+ if(is.numeric(x@misc$default.dispersion) &&
+ x@dispersion!=x@misc$default.dispersion)
+ prose <- "(Pre-specified) "
+ }
+ cat(paste("\n", prose, "Dispersion Parameter for ",
+ x@family@vfamily[1],
+ " family: ", yformat(x@dispersion, digits), "\n", sep=""))
+ }
+
+ if(length(deviance(x))) {
+ cat("\nResidual Deviance:", yformat(deviance(x), digits))
+ if(is.finite(rdf))
+ cat(" on", round(rdf, digits), "degrees of freedom\n") else
+ cat("\n")
+ }
+ if(length(logLik(x))) {
+ cat("\nLog-likelihood:", yformat(logLik(x), digits))
+ if(is.finite(rdf))
+ cat(" on", round(rdf, digits), "degrees of freedom\n") else
+ cat("\n")
+ }
+
+ if(length(x@criterion)) {
+ ncrit <- names(x@criterion)
+ for(i in ncrit)
+ if(i!="loglikelihood" && i!="deviance")
+ cat(paste(i, ":", sep=""), yformat(x@criterion[[i]], digits),
+ "\n")
+ }
+
+
+ cat("\nNumber of Iterations:", format(trunc(x@iter)), "\n")
+
+ if(!is.null(correl))
+ {
+ p.big <- dim(correl)[2]
+ if(p.big > 1)
+ {
+ cat("\nCorrelation of Coefficients:\n")
+ ll <- lower.tri(correl)
+ correl[ll] <- format(round(correl[ll], digits))
+ correl[!ll] <- ""
+ print(correl[-1, -p.big, drop = FALSE], quote = FALSE, digits =
+ digits)
+ }
+ }
+ invisible(NULL)
+}
+
+
+
+ setMethod("summary", "vglm",
+ function(object, ...)
+ summaryvglm(object, ...))
+
+ setMethod("print", "summary.vglm",
+ function(x, ...)
+ invisible(printsummary.vglm(x, ...)))
+
+ setMethod("show", "summary.vglm",
+ function(object)
+ invisible(printsummary.vglm(object)))
+
+
+
+
+
+
+
+vcovdefault <- function(object, ...) {
+ if(is.null(object@vcov))
+ stop("no default")
+ object@vcov
+}
+
+vcovvlm <- function(object, dispersion=NULL, untransform=FALSE) {
+ so <- summaryvlm(object, corr=FALSE, dispersion=dispersion)
+ d = if(any(slotNames(so) == "dispersion") &&
+ is.Numeric(so@dispersion)) so@dispersion else 1
+ answer = d * so@cov.unscaled
+
+ if(!untransform) return(answer)
+
+ if(!is.logical(object@misc$intercept.only))
+ stop(paste("cannot determine whether the object is",
+ "an intercept-only fit, i.e., y ~ 1 is the response"))
+ if(!object@misc$intercept.only)
+ stop("object must be an intercept-only fit, i.e., y ~ 1 is the response")
+
+ M = object@misc$M
+ Links = object@misc$link
+ if(length(Links) != M && length(Links) != 1)
+ stop("cannot obtain the link functions to untransform the object")
+
+
+ tvector = numeric(M)
+ etavector = predict(object)[1,] # Contains transformed parameters
+ earg = object@misc$earg # This could be NULL
+ if(!is.null(earg) && M > 1 && (!is.list(earg) || length(earg) != M))
+ stop(paste("the earg component of object@misc should be of length ",
+ M, sep=""))
+ for(ii in 1:M) {
+ TTheta = etavector[ii] # Transformed theta
+ use.earg = if(M == 1 || is.null(earg)) earg else earg[[ii]]
+ if(is.list(use.earg) && !length(use.earg))
+ use.earg = NULL
+ newcall = paste(Links[ii],
+ "(theta=TTheta, earg=use.earg, inverse=TRUE)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ Theta = eval(newcall) # Theta, the untransformed parameter
+ newcall = paste(Links[ii],
+ "(theta=Theta, earg=use.earg, deriv=1)", sep="")
+ newcall = parse(text=newcall)[[1]]
+ tvector[ii] = eval(newcall)
+ }
+ tvector = abs(tvector)
+ answer = (cbind(tvector) %*% rbind(tvector)) * answer
+ if(length(dmn2 <- names(object@misc$link)) == M)
+ dimnames(answer) = list(dmn2, dmn2)
+ answer
+}
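+
+# A minimal sketch (hypothetical data, not run at source time): for an
+# intercept-only fit, untransform=TRUE applies the delta method, scaling the
+# covariance on the eta scale by d(theta)/d(eta) from each link's deriv=1 call.
+if(FALSE) {
+ fit <- vglm(y ~ 1, family = rayleigh, data = dat)
+ vcovvlm(fit) # covariance on the eta (link) scale
+ vcovvlm(fit, untransform = TRUE) # covariance on the parameter scale
+}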
+
+setMethod("vcov", "vlm",
+ function(object, ...)
+ vcovvlm(object, ...))
+
+setMethod("vcov", "vglm",
+ function(object, ...)
+ vcovvlm(object, ...))
+
+
+
+
+
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
new file mode 100644
index 0000000..6c36eb1
--- /dev/null
+++ b/R/summary.vlm.q
@@ -0,0 +1,193 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+summaryvlm <- function(object, correlation=FALSE, dispersion=NULL)
+{
+
+
+ if(is.logical(object at misc$BFGS) && object at misc$BFGS)
+ warning(paste("the estimated variance-covariance matrix is",
+ "usually inaccurate as the working weight matrices are a",
+ "crude BFGS quasi-Newton approximation"))
+
+ M <- object@misc$M
+ n <- object@misc$n
+ n.big <- object@misc$n.big
+ p.big <- object@misc$p.big # May be NULL for CQO objects
+
+ coef <- object@coefficients
+ cnames <- names(coef)
+ presid = residualsvlm(object, type="pearson") # NULL if pooled.weight
+
+ if(any(is.na(coef))) {
+ warning(paste("Some NAs in the coefficients---no summary",
+ " provided; returning object\n"))
+ return(object)
+ }
+ rdf <- object@df.residual
+
+ if(!length(dispersion)) {
+ if(is.numeric(object@misc$dispersion)) {
+ dispersion <- object@misc$dispersion
+ if(all(dispersion==0))
+ stop("dispersion shouldn't be zero here!")
+ } else {
+ dispersion <- 1
+ object@misc$estimated.dispersion <- FALSE
+ }
+ } else if(dispersion==0) {
+ dispersion <- if(!length(object@rss)) {
+ stop("object@rss is empty")
+ } else {
+ object@rss / object@df.residual
+ }
+ object@misc$estimated.dispersion <- TRUE
+ } else {
+ if(is.numeric(object@misc$dispersion) &&
+ object@misc$dispersion != dispersion)
+ warning("overriding the value of object@misc$dispersion")
+ object@misc$estimated.dispersion <- FALSE
+ }
+ sigma <- dispersion^0.5 # Can be a vector
+
+ if(is.Numeric(p.big)) {
+ R <- object@R
+
+ if(p.big < max(dim(R)))
+ stop("R is rank deficient")
+
+ rinv = diag(p.big)
+ rinv = backsolve(R, rinv)
+ rowlen = drop(((rinv^2) %*% rep(1, p.big))^0.5)
+ covun = rinv %*% t(rinv)
+ dimnames(covun) <- list(cnames, cnames)
+ }
+ coef <- matrix(rep(coef, 3), ncol=3)
+ dimnames(coef) <- list(cnames, c("Value", "Std. Error", "t value"))
+ if(length(sigma) == 1 && is.Numeric(p.big)) {
+ coef[, 2] <- rowlen %o% sigma # Fails here when sigma is a vector
+ coef[, 3] <- coef[, 1]/coef[, 2]
+ } else {
+ coef[,1] = coef[,2] = coef[,3] = NA
+ }
+ if(correlation) {
+ correl <- covun * outer(1/rowlen, 1/rowlen)
+ dimnames(correl) <- list(cnames, cnames)
+ } else {
+ correl <- matrix(0, 0, 0) # was NULL, but now a special matrix
+ }
+
+
+
+
+ answer <-
+ new("summary.vlm",
+ object,
+ coef3=coef,
+ correlation=correl,
+ df=c(p.big, rdf),
+ sigma=sigma)
+
+ if(is.Numeric(p.big)) answer@cov.unscaled = covun
+ answer@dispersion = dispersion # Overwrite this
+
+ if(length(presid))
+ answer@pearson.resid = as.matrix(presid)
+
+
+ answer
+}
+
+
+
+
+printsummary.vlm <- function(x, digits=NULL, quote=TRUE, prefix="")
+{
+
+
+ M <- x@misc$M
+ coef3 <- x@coef3 # coefficients (3-column matrix)
+ correl <- x@correlation
+
+ if(is.null(digits)) {
+ digits <- options()$digits
+ } else {
+ old.digits <- options(digits = digits)
+ on.exit(options(old.digits))
+ }
+
+ cat("\nCall:\n")
+ dput(x@call)
+
+ presid <- x@pearson.resid
+ rdf <- x@df[2]
+ if(length(presid) && all(!is.na(presid)))
+ {
+ cat("\nPearson residuals:\n")
+ if(rdf/M > 5) {
+ rq <- apply(as.matrix(presid), 2, quantile) # 5 x M
+ dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
+ x@misc$predictors.names)
+ print(t(rq), digits=digits)
+ } else
+ if(rdf > 0) {
+ print(presid, digits=digits)
+ }
+ }
+
+ if(!all(is.na(coef3))) {
+ cat("\nCoefficients:\n")
+ print(coef3, digits = digits)
+ }
+
+ cat("\nNumber of responses: ", M, "\n")
+
+ if(length(x@misc$predictors.names))
+ if(M==1) {
+ cat("\nName of response:",
+ paste(x@misc$predictors.names, collapse=", "), "\n")
+ } else {
+ uuu = x@misc$predictors.names
+ cat("\nNames of responses:\n")
+ cat(uuu, fill=TRUE, sep=", ")
+ }
+
+ if(!is.null(x@rss))
+ cat("\nResidual Sum of Squares:", format(round(x@rss, digits)),
+ "on", round(rdf, digits), "degrees of freedom\n")
+
+ if(length(correl)) {
+ p.big <- dim(correl)[2]
+ if(p.big > 1) {
+ cat("\nCorrelation of Coefficients:\n")
+ ll <- lower.tri(correl)
+ correl[ll] <- format(round(correl[ll], digits))
+ correl[!ll] <- ""
+ print(correl[-1, -p.big, drop=FALSE], quote=FALSE, digits=digits)
+ }
+ }
+ invisible(NULL)
+}
+
+
+ setMethod("summary", "vlm",
+ function(object, ...)
+ summaryvlm(object, ...))
+
+ setMethod("print", "summary.vlm",
+ function(x, ...)
+ invisible(printsummary.vlm(x, ...)))
+
+
+ setMethod("show", "summary.vlm",
+ function(object)
+ invisible(printsummary.vlm(object)))
+
+
+
diff --git a/R/uqo.R b/R/uqo.R
new file mode 100644
index 0000000..3bd62fc
--- /dev/null
+++ b/R/uqo.R
@@ -0,0 +1,881 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+uqo.control = function(Rank=1,
+ Bestof = if(length(lvstart) && !jitter.sitescores) 1 else 10,
+ CA1 = FALSE,
+ Crow1positive = TRUE,
+ epsilon = 1.0e-07,
+ EqualTolerances = ITolerances,
+ Etamat.colmax = 10,
+ GradientFunction=TRUE,
+ Hstep = 0.001,
+ isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+ ITolerances = FALSE,
+ lvstart = NULL,
+ jitter.sitescores = FALSE,
+ maxitl = 40,
+ Maxit.optim = 250,
+ MUXfactor = rep(3, length=Rank),
+ optim.maxit = 20,
+ nRmax = 250,
+ SD.sitescores = 1.0,
+ SmallNo = 5.0e-13,
+ trace = TRUE,
+ Use.Init.Poisson.QO=TRUE,
+ ...)
+{
+
+ Kinit = 0.001
+ if(!is.Numeric(MUXfactor, posit=TRUE))
+ stop("bad input for \"MUXfactor\"")
+ if(any(MUXfactor < 1 | MUXfactor > 10))
+ stop("MUXfactor values must lie between 1 and 10")
+ if(!is.Numeric(isdlv, posit=TRUE)) stop("bad input for \"isdlv\"")
+ if(any(isdlv < 0.2 | isdlv > 10))
+ stop("isdlv values must lie between 0.2 and 10")
+ if(length(isdlv) > 1 && any(diff(isdlv) > 0))
+ stop("successive isdlv values must not increase")
+ if(!is.Numeric(Rank, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"Rank\"")
+ if(!is.Numeric(Bestof, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"Bestof\"")
+ if(!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
+ stop("bad input for \"Etamat.colmax\"")
+ if(!is.Numeric(maxitl, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"maxitl\"")
+ if(!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE, allow=1))
+ stop("Bad input for \"Maxit.optim\"")
+ if(!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"optim.maxit\"")
+ if(!is.Numeric(nRmax, allow=1, integ=TRUE, posit=TRUE))
+ stop("Bad input for \"nRmax\"")
+ if(!is.Numeric(Hstep, allow=1, posit=TRUE))
+ stop("Bad input for \"Hstep\"")
+ if(!is.Numeric(epsilon, allow=1, posit=TRUE))
+ stop("Bad input for \"epsilon\"")
+ if(!is.Numeric(SmallNo, allow=1, posit=TRUE))
+ stop("Bad input for \"SmallNo\"")
+
+ if((SmallNo < .Machine$double.eps) || (SmallNo > .0001))
+ stop("SmallNo is out of range")
+
+ if(Use.Init.Poisson.QO && CA1)
+ stop("cannot have both Use.Init.Poisson.QO=TRUE and CA1=TRUE")
+
+ ans = list(
+ Bestof = Bestof,
+ CA1 = CA1,
+ ConstrainedQO = FALSE, # A constant, not a control parameter
+ Corner = FALSE, # Needed for valt.1iter()
+ Crow1positive=as.logical(rep(Crow1positive, len=Rank)),
+ epsilon = epsilon,
+ EqualTolerances = as.logical(EqualTolerances)[1],
+ Etamat.colmax = Etamat.colmax,
+ FastAlgorithm = TRUE, # A constant, not a control parameter
+ GradientFunction = GradientFunction,
+ Hstep = Hstep,
+ isdlv = rep(isdlv, len=Rank),
+ ITolerances = as.logical(ITolerances)[1],
+ lvstart = lvstart,
+ jitter.sitescores = as.logical(jitter.sitescores),
+ Kinit = Kinit,
+ maxitl= maxitl,
+ Maxit.optim = Maxit.optim,
+ MUXfactor = rep(MUXfactor, length=Rank),
+ nRmax = nRmax,
+ optim.maxit = optim.maxit,
+ OptimizeWrtC = FALSE,
+ Quadratic = TRUE,
+ Rank = Rank,
+ SD.sitescores = SD.sitescores,
+ SmallNo = SmallNo,
+ trace = as.logical(trace),
+ Use.Init.Poisson.QO=as.logical(Use.Init.Poisson.QO)[1])
+ ans
+}
+
+
+
+
+uqo <- function(formula,
+ family, data=list(),
+ weights=NULL, subset=NULL, na.action=na.fail,
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ control=uqo.control(...),
+ offset=NULL,
+ method="uqo.fit",
+ model=FALSE, x.arg=TRUE, y.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=NULL,
+ qr.arg=FALSE, ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "uqo"
+
+ ocall <- match.call()
+
+ mt <- terms(formula, data = data)
+ if(missing(data))
+ data <- environment(formula)
+
+ mf <- match.call(expand=FALSE)
+ mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
+ mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
+ mf$coefstart <- mf$etastart <- mf$... <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ if(method == "model.frame")
+ return(mf)
+ na.act <- attr(mf, "na.action")
+
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ y <- model.response(mf, "numeric") # model.extract(mf, "response")
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+ w <- model.weights(mf)
+ if(!length(w))
+ w <- rep(1, nrow(mf))
+ else if(ncol(as.matrix(w))==1 && any(w < 0))
+ stop("negative weights not allowed")
+
+ if(is.character(family))
+ family <- get(family)
+ if(is.function(family))
+ family <- family()
+ if(!inherits(family, "vglmff")) {
+ stop(paste("family=", family, "is not a VGAM family function"))
+ }
+
+ if(!is.null(family@first))
+ eval(family@first)
+
+ uqo.fitter <- get(method)
+ if(ncol(x) != 1 || dimnames(x)[[2]] != "(Intercept)")
+ stop("uqo()'s formula must have ~ 1 on the RHS")
+
+ if(control$FastAlgorithm &&
+ length(as.list(family@deviance)) <= 1)
+ stop(paste("The fast algorithm requires the family",
+ "function to have a deviance slot"))
+ deviance.Bestof = rep(as.numeric(NA), len=control$Bestof)
+ for(tries in 1:control$Bestof) {
+ if(control$trace && (control$Bestof>1))
+ cat(paste("\n========================= Fitting model", tries,
+ "=========================\n"))
+ it <- uqo.fitter(x=x, y=y, w=w, offset=offset,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ family=family, control=control,
+ constraints=constraints, extra=extra,
+ qr.arg = qr.arg, Terms=mt, function.name=function.name,
+ ca1 = control$CA1 && tries==1, ...)
+ deviance.Bestof[tries] = it$crit.list$deviance
+ if(tries==1||min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+ fit = it
+ }
+ fit$misc$deviance.Bestof = deviance.Bestof
+ fit$misc$criterion = "deviance" # Needed for calibrate; 21/1/05
+
+ fit$misc$dataname <- dataname
+
+ answer <-
+ new("uqo",
+ "call" = ocall,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "control" = fit$control,
+ "criterion" = fit$crit.list,
+ "lv" = fit$sitescores,
+ "family" = fit$family,
+ "fitted.values"= as.matrix(fit$fitted.values),
+ "iter" = fit$iter,
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "na.action" = if(length(na.act)) list(na.act) else list(),
+ "predictors" = as.matrix(fit$predictors))
+
+ answer@control$min.criterion = TRUE # Needed for calibrate; 21/1/05
+
+ if(length(fit$weights))
+ slot(answer, "weights") = as.matrix(fit$weights)
+ if(x.arg)
+ slot(answer, "x") = x
+ if(y.arg)
+ slot(answer, "y") = as.matrix(fit$y)
+ slot(answer, "extra") = if(length(fit$extra)) {
+ if(is.list(fit$extra)) fit$extra else {
+ warning(paste("\"extra\" is not a list, therefore",
+ "placing \"extra\" into a list"))
+ list(fit$extra)
+ }
+ } else list() # R-1.5.0
+ if(length(fit$prior.weights))
+ slot(answer, "prior.weights") = fit$prior.weights
+
+ answer
+}
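+
+# A minimal sketch of a call (assumes the package is attached; 'ymat' is
+# hypothetical simulated count data, not a dataset from this package):
+if(FALSE) {
+ set.seed(123)
+ ymat = matrix(rpois(30*5, lambda=4), nrow=30, ncol=5)
+ fit1 = uqo(ymat ~ 1, family=poissonff, Rank=1, Bestof=2, trace=FALSE)
+ deviance(fit1)
+ Coef(fit1) # site scores, optima, tolerances, etc.
+}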
+
+
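+# calluqof() is the objective function handed to optim() by uqo.fit(): given
+# a vector of site scores it calls the compiled CQO/UQO code and returns the
+# resulting deviance. callduqof() below is the companion gradient function.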
+calluqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
+ Control,
+ n, M, maxMr5, othint, othdbl, bnumat, Hstep=NA, alldump) {
+ control = Control
+ Rank = control$Rank
+ itol = othint[14]
+ inited = if(is.R()) {
+ as.numeric(existsinVGAMenv("etamat", prefix=".VGAM.UQO."))
+ } else 0
+ othint[5] = inited # Replacement
+ usethiseta = if(inited==1)
+ getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
+ usethisbeta = double(othint[13])
+ pstar = othint[3]
+ nstar = if(nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
+
+ sitescores = matrix(sitescores, ncol=Rank)
+ sitescores = scale(sitescores, center = TRUE, scale = FALSE)
+ if(itol) {
+ numat = matrix(sitescores, ncol=Rank)
+ if(Rank > 1) {
+ evnu = eigen(var(numat))
+ numat = numat %*% evnu$vector
+ }
+
+
+
+ sdnumat = apply(numat, 2, sd) # column standard deviations
+ for(lookat in 1:Rank)
+ if(sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
+ muxer = control$isdlv[lookat] * control$MUXfactor[lookat] /
+ sdnumat[lookat]
+ numat[,lookat] = numat[,lookat] * muxer
+ if(control$trace) {
+ }
+ }
+ } else {
+ numat = matrix(sitescores, ncol=Rank)
+ evnu = eigen(var(numat))
+ temp7 = if(Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ evnu$vector %*% evnu$value^(-0.5)
+ numat = numat %*% temp7
+ }
+
+ ans1 <-
+ dotFortran(name = if(nice31) "cqo1f" else "cqo2f",
+ numat=as.double(numat), as.double(ymat),
+ as.double(xmat),
+ as.double(wvec), etamat=as.double(usethiseta),
+ moff=double(if(itol) n else 1),
+ fv=double(NOS*n), z=double(n*M), wz=double(n*M),
+ U=double(M*n), bnumat=as.double(bnumat),
+ qr=double(nstar*pstar), qraux=double(pstar), qpivot=integer(pstar),
+ as.integer(n), as.integer(M), NOS=as.integer(NOS),
+ as.integer(nstar), dimu=as.integer(M),
+ errcode=integer(1), othint=as.integer(othint),
+ rowind=integer(maxMr5), colind=integer(maxMr5),
+ deviance=double(1), beta=as.double(usethisbeta),
+ twk=double(if(nice31) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
+ othdbl=as.double(othdbl))
+
+ if(ans1$errcode == 0) {
+ assign2VGAMenv(c("etamat","numat"), ans1, prefix=".VGAM.UQO.")
+ if(alldump) {
+ ans1$fv = matrix(ans1$fv,n,M,byrow=TRUE,dimnames=dimnames(ymat))
+ assign2VGAMenv(c("beta","fv"), ans1, prefix=".VGAM.UQO.")
+ assign2VGAMenv(c("z","U"), ans1, prefix=".VGAM.UQO.")
+ }
+ } else {
+ cat("warning in calluqof: error code =", ans1$errcode, "\n")
+ rmfromVGAMenv(c("etamat"), prefix=".VGAM.UQO.")
+ }
+ ans1$deviance
+}
+
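+# callduqof() mirrors calluqof() but returns the derivative of the deviance
+# with respect to the site scores (computed in Fortran with step size Hstep),
+# for use as the gr argument of optim().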
+callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
+ Control,
+ n, M, maxMr5, othint, othdbl, bnumat, Hstep, alldump) {
+ control = Control
+ itol = othint[14]
+ inited = if(is.R()) {
+ if(exists(".VGAM.UQO.etamat", envir = VGAMenv)) 1 else 0
+ } else 0 # 0 means fortran initializes the etamat
+ othint[5] = inited # Replacement
+ usethiseta = if(inited==1)
+ getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
+ usethisbeta = double(othint[13])
+ pstar = othint[3]
+ nstar = if(nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
+ Rank = othint[1]
+
+ sitescores = matrix(sitescores, ncol=Rank)
+ sitescores = scale(sitescores, center = TRUE, scale = FALSE)
+ if(itol) {
+ numat = matrix(sitescores, ncol=Rank)
+ if(Rank > 1) {
+ evnu = eigen(var(numat))
+ numat = numat %*% evnu$vector
+ }
+
+ sdnumat = apply(numat, 2, sd) # column standard deviations
+ for(lookat in 1:Rank)
+ if(sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
+ muxer = control$isdlv[lookat] * control$MUXfactor[lookat] /
+ sdnumat[lookat]
+ numat[,lookat] = numat[,lookat] * muxer
+ if(control$trace) {
+ }
+ }
+ } else {
+ numat = matrix(sitescores, ncol=Rank)
+ evnu = eigen(var(numat))
+ temp7 = if(Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ evnu$vector %*% evnu$value^(-0.5)
+ numat = numat %*% temp7
+ }
+
+
+
+ ans1 <-
+ dotFortran(name = "duqof", numat=as.double(numat), as.double(ymat),
+ as.double(xmat),
+ as.double(wvec), etamat=as.double(usethiseta),
+ moff=double(if(itol) n else 1),
+ fv=double(NOS*n), z=double(n*M), wz=double(n*M),
+ U=double(M*n), bnumat=as.double(bnumat),
+ qr=double(nstar*pstar), qraux=double(pstar), qpivot=integer(pstar),
+ as.integer(n), as.integer(M), NOS=as.integer(NOS),
+ as.integer(nstar), dimu=as.integer(M),
+ errcode=integer(1), othint=as.integer(othint),
+ rowind=integer(maxMr5), colind=integer(maxMr5),
+ deviance=double(1), beta=as.double(usethisbeta),
+ twk=double(if(nice31) nstar*3 else M*n*2), wkmm=double(M*(M+pstar)),
+ othdbl=as.double(othdbl),
+ onumat=as.double(numat),
+ deriv=double(n*Rank), hstep=as.double(Hstep),
+ betasave=usethisbeta)
+
+ if(ans1$errcode == 0) {
+ assign2VGAMenv(c("etamat"), ans1, prefix=".VGAM.UQO.")
+ } else {
+ cat("warning in callduqof: error code =", ans1$errcode, "\n")
+ rmfromVGAMenv(c("etamat"), prefix=".VGAM.UQO.")
+ }
+ ans1$deriv
+}
+
+
+
+
+uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ offset=0, family, control=uqo.control(...),
+ qr.arg=FALSE, constraints=NULL, extra=NULL,
+ Terms=Terms, function.name="uqo", ca1=TRUE, ...)
+{
+ if(!all(offset == 0)) stop("uqo.fit() cannot handle offsets")
+ nonparametric <- FALSE
+ epsilon <- control$epsilon
+ optim.maxit <- control$optim.maxit
+ save.weight <- control$save.weight
+ trace <- control$trace
+ orig.stepsize <- control$stepsize
+
+
+ n <- dim(x)[1]
+
+
+ copyxbig <- FALSE # May be overwritten in @initialize
+ stepsize <- orig.stepsize
+ old.coeffs <- coefstart
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
+
+ n.save <- n
+
+
+ Rank <- control$Rank
+ rrcontrol <- control #
+
+ backchat = FALSE
+
+ if(length(family@initialize))
+ eval(family@initialize) # Initialize mu and M (and optionally w)
+ n <- n.save
+
+
+ eval(rrr.init.expression)
+
+ if(length(etastart)) {
+ eta <- etastart
+ mu <- if(length(mustart)) mustart else family@inverse(eta, extra)
+ } else {
+ if(length(mustart))
+ mu <- mustart
+ eta <- family@link(mu, extra)
+ }
+
+ M <- if(is.matrix(eta)) ncol(eta) else 1
+
+ if(is.character(rrcontrol$Dzero)) {
+ index = match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
+ if(any(is.na(index)))
+ stop("Dzero argument didn't fully match y-names")
+ if(length(index) == M)
+ stop("all linear predictors are linear in the latent variable(s)")
+ rrcontrol$Dzero = control$Dzero = index
+ }
+
+
+
+ if(length(family@constraints))
+ eval(family@constraints)
+
+
+ colx1.index = 1:ncol(x)
+ names(colx1.index) = names.colx1.index = dimnames(x)[[2]]
+
+ rrcontrol$colx1.index=control$colx1.index=colx1.index #Save it on the object
+ colx2.index = NULL
+ p1 = length(colx1.index); p2 = 0
+ rrcontrol$colx2.index=control$colx2.index=colx2.index #Save it on the object
+ rrcontrol$Quadratic = control$Quadratic = TRUE
+
+
+
+ sitescores <- if(length(rrcontrol$lvstart)) {
+ matrix(rrcontrol$lvstart, n, Rank)
+ } else {
+ if(rrcontrol$Use.Init.Poisson.QO) {
+ .Init.Poisson.QO(ymat=as.matrix(y),
+ X1=x, X2=NULL,
+ Rank=rrcontrol$Rank, trace=rrcontrol$trace,
+ max.ncol.etamat = rrcontrol$Etamat.colmax,
+ Crow1positive=rrcontrol$Crow1positive,
+ isdlv=rrcontrol$isdlv,
+ constwt= any(family@vfamily[1] ==
+ c("negbinomial","gamma2","gaussianff")),
+ takelog= any(family@vfamily[1] != c("gaussianff")))
+ } else if(ca1) {
+ if(Rank == 1) .VGAM.UQO.CA(y)[,1:Rank] else {
+ temp = .VGAM.UQO.CA(y)[,1:Rank]
+ temp %*% solve(chol(var(temp)))
+ }
+ } else {
+ matrix((runif(n*Rank)-0.5)*rrcontrol$SD.sitescores,n,Rank)
+ }
+ }
+ if(rrcontrol$jitter.sitescores)
+ sitescores <- jitteruqo(sitescores)
+
+
+ Blist <- process.constraints(constraints, x, M)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ dimB <- sum(ncolBlist)
+
+
+ modelno = switch(family@vfamily[1], "poissonff"=2,
+ "binomialff"=1, "quasipoissonff"=0, "quasibinomialff"=0,
+ "negbinomial"=0,
+ "gamma2"=5,
+ 0) # stop("can't fit this model using fast algorithm")
+ if(!modelno) stop("the family function does not work with uqo()")
+ if(modelno == 1) modelno = get("modelno", envir = VGAMenv)
+ rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.UQO.")
+
+ cqofastok = if(is.R()) (exists("CQO.FastAlgorithm", envir = VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAMenv)) else
+ (exists("CQO.FastAlgorithm", inherits=TRUE) && CQO.FastAlgorithm)
+ if(!cqofastok)
+ stop("can't fit this model using fast algorithm")
+
+ nice31 = (!control$EqualTolerances || control$ITolerances) && control$Quadratic &&
+ all(trivial.constraints(Blist))
+
+
+ xbig.save1 <- if(nice31) {
+ NULL
+ } else {
+ lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ }
+
+ NOS = ifelse(modelno==3 || modelno==5, M/2, M)
+ p1star = if(nice31) p1*ifelse(modelno==3 || modelno==5,2,1) else ncol(xbig.save1)
+ p2star = if(nice31)
+ ifelse(control$ITolerances, Rank, Rank+0.5*Rank*(Rank+1)) else
+ (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTolerances,1,NOS))
+
+ pstar = p1star + p2star
+ nstar = if(nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ maxMr = max(M, Rank)
+ maxMr5 = maxMr*(maxMr+1)/2
+ lenbeta = pstar * ifelse(nice31, NOS, 1)
+
+ othint = c(Rank, control$EqualTolerances, pstar, dimw=1, inited=290, # other ints
+ modelno, maxitl=control$maxitl, actnits=0, twice=0, p1star,
+ p2star, nice31, lenbeta, control$ITolerances, control$trace,
+ p1, p2, control$method.init)
+ othdbl = c(small=control$SmallNo, fseps=control$epsilon,
+ .Machine$double.eps,
+ kinit=rep(control$Kinit, len=NOS),
+ shapeinit=rep(control$shapeinit, len=NOS))
+ bnumat = if(nice31) matrix(0,nstar,pstar) else
+ cbind(matrix(0,nstar,p2star), xbig.save1)
+
+ rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance", "fv",
+ "cmatrix", "ocmatrix"), prefix=".VGAM.UQO.")
+
+
+ for(iter in 1:optim.maxit) {
+ if(control$trace)
+ cat("\nIteration", iter, "\n")
+ conjgrad <- optim(par=sitescores, fn=calluqof,
+ gr=if(control$GradientFunction) callduqof else NULL,
+ method=if(n*Rank>control$nRmax) "CG" else "BFGS",
+ control=list(fnscale=1, trace=as.integer(control$trace),
+ maxit=control$Maxit.optim),
+ etamat=eta, ymat=y, wvec=w, modelno=modelno,
+ Control=rrcontrol,
+ nice31=nice31, xmat = x,
+ n=n, M=M, maxMr5=maxMr5, othint=othint, othdbl=othdbl,
+ bnumat=bnumat, Hstep=control$Hstep, alldump=FALSE)
+
+ sitescores = getfromVGAMenv("numat", prefix = ".VGAM.UQO.")
+ dim(sitescores) = c(n, Rank)
+ sitescores = scale(sitescores, center = TRUE, scale = FALSE)
+ sitescores = crow1C(sitescores, rrcontrol$Crow1positive)
+ dimnames(sitescores) = list(dimnames(y)[[1]], if(Rank==1) "lv" else
+ paste("lv", 1:Rank, sep=""))
+
+ if(converged <- (conjgrad$convergence == 0)) break
+ }
+
+ if(!converged && optim.maxit>1)
+ warning(paste("convergence not obtained"))
+
+
+ temp9 =
+ calluqof(sitescores, etamat=eta, ymat=y, wvec=w, modelno=modelno,
+ nice31=nice31, xmat = x,
+ Control=rrcontrol,
+ n=n, M=M, maxMr5=maxMr5, othint=othint, othdbl=othdbl,
+ bnumat=bnumat, Hstep=NA, alldump=TRUE)
+
+ coefs = getfromVGAMenv("beta", prefix = ".VGAM.UQO.")
+ VGAM.fv = getfromVGAMenv("fv", prefix = ".VGAM.UQO.")
+ etamat = getfromVGAMenv("etamat", prefix = ".VGAM.UQO.")
+ dim(etamat) = c(M,n)
+ etamat = t(etamat)
+ wresids = getfromVGAMenv("z", prefix = ".VGAM.UQO.") - etamat
+ dim(wresids) = c(n,M)
+
+
+ if(!intercept.only)
+ stop("can only handle intercept.only==TRUE currently")
+ if(nice31) {
+ coefs = c(t(matrix(coefs, ncol=M))) # Get into right order
+ coefs = matrix(coefs, nrow=M)
+ Amat = coefs[,1:Rank,drop=FALSE]
+ if(rrcontrol$ITolerances) {
+ B1 = coefs[,-(1:Rank),drop=FALSE]
+ Dmat = matrix(0, M, Rank*(Rank+1)/2)
+ Dmat[,1:Rank] = -0.5
+ } else {
+ Dmat = coefs[,(Rank+1):(Rank + Rank*(Rank+1)/2),drop=FALSE]
+ B1 = coefs[,(1+(Rank + Rank*(Rank+1)/2)):ncol(coefs),drop=FALSE]
+ }
+ } else {
+ Amat = t(matrix(coefs[1:(Rank*M)], Rank, M))
+ cptr1 = (Rank*M)
+ Dmat = coefs[(cptr1+1):(cptr1+Rank*(Rank+1)/2)]
+ Dmat = matrix(Dmat, M, Rank*(Rank+1)/2, byrow=TRUE)
+ cptr1 = (Rank*M) + Rank*(Rank+1)/2
+ B1 = coefs[(cptr1+1):length(coefs)]
+ }
+
+ lv.names = if(Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ lp.names = predictors.names
+ if(!length(lp.names)) lp.names = NULL
+ extra$Amat = matrix(Amat, M, Rank, dimnames = list(lp.names, lv.names))
+ extra$B1 = matrix(B1, ncol=M, dimnames =
+ list(names.colx1.index, predictors.names))
+ extra$Dmat = matrix(Dmat, M, Rank*(Rank+1)/2)
+ extra$Cmat = NULL # This is UQO!!
+
+ VGAM.etamat = getfromVGAMenv("etamat", prefix = ".VGAM.UQO.")
+ VGAM.etamat = matrix(VGAM.etamat, n, M, byrow=TRUE,
+ dimnames = list(dimnames(y)[[1]], predictors.names))
+
+ coefficients = c(coefs) # Make a vector because of class "numeric"
+
+ rmfromVGAMenv(c("etamat", "beta", "fv"), prefix=".VGAM.UQO.")
+
+ if(length(family@fini))
+ eval(family@fini)
+
+ misc <- list(function.name = function.name,
+ intercept.only=intercept.only,
+ predictors.names = predictors.names,
+ modelno = modelno,
+ M = M, n = n,
+ nstar = nstar, nice31 = nice31,
+ p = ncol(x),
+ pstar = pstar, p1star = p1star, p2star = p2star,
+ ynames = dimnames(y)[[2]])
+
+ crit.list <- list(deviance=conjgrad$value)
+
+
+ structure(c(list(
+ coefficients = coefficients,
+ constraints = Blist,
+ sitescores = sitescores,
+ crit.list = crit.list,
+ control=control,
+ extra=extra,
+ family=family,
+ fitted.values=VGAM.fv,
+ iter=iter,
+ misc=misc,
+ predictors=VGAM.etamat,
+ prior.weights = w,
+ x=x,
+ y=y)),
+ vclass=family@vfamily)
+}
+
+
+
+
+printuqo <- function(x, ...)
+{
+ if(!is.null(cl <- x@call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ cat("\n")
+ cat(x@misc$n, "sites and", x@misc$M, "responses/species\n")
+ cat("Rank", x@control$Rank)
+ cat(",", ifelse(x@control$EqualTolerances, "equal-tolerances",
+ "unequal-tolerances"), "\n")
+
+ if(length(deviance(x)))
+ cat("\nResidual Deviance:", format(deviance(x)), "\n")
+
+ invisible(x)
+}
+
+
+setMethod("print", "uqo", function(x, ...) printuqo(x, ...))
+
+ setMethod("show", "uqo", function(object) printuqo(object))
+
+
+
+deviance.uqo <- function(object, ...)
+ object@criterion$deviance
+
+setMethod("deviance", "uqo", function(object, ...)
+ deviance.uqo(object, ...))
+
+
+setMethod("coefficients", "uqo", function(object, ...)
+ Coef.qrrvglm(object, ...))
+setMethod("coef", "uqo", function(object, ...)
+ Coef.qrrvglm(object, ...))
+setMethod("Coef", "uqo", function(object, ...)
+ Coef.qrrvglm(object, ...))
+
+
+
+
+setMethod("show", "Coef.uqo", function(object)
+ printCoef.qrrvglm(object, C = FALSE))
+setMethod("print", "Coef.uqo", function(x, ...)
+ printCoef.qrrvglm(x, ...))
+
+
+
+
+residualsuqo <- function(object,
+ type = c("deviance", "pearson", "working", "response"),
+ matrix.arg= TRUE) {
+
+ if(mode(type) != "character" && mode(type) != "name")
+ type = as.character(substitute(type))
+ type = match.arg(type, c("deviance", "pearson", "working", "response"))[1]
+
+ switch(type,
+ response = object@y - fitted(object),
+ stop("this type of residual hasn't been implemented yet")
+ )
+}
+
+setMethod("resid", "uqo", function(object, ...)
+ residualsuqo(object, ...))
+setMethod("residuals", "uqo", function(object, ...)
+ residualsuqo(object, ...))
+
+fitted.values.uqo <- function(object, ...)
+ object@fitted.values
+
+setMethod("fitted", "uqo", function(object, ...)
+ fitted.values.uqo(object, ...))
+setMethod("fitted.values", "uqo", function(object, ...)
+ fitted.values.uqo(object, ...))
+
+
+
+predict.uqo <- function(object, newdata = NULL, ...) {
+ if(length(newdata) > 0)
+ stop("can't handle newdata argument yet")
+ object@predictors
+}
+
+setMethod("predict", "uqo", function(object, ...)
+ predict.uqo(object, ...))
+
+
+setMethod("persp", "uqo", function(x, ...)
+ persp.qrrvglm(x, ...))
+
+setMethod("trplot", "uqo", function(object, ...)
+ trplot.qrrvglm(object, check.ok=FALSE, ...))
+
+
+setMethod("plot", "uqo", function(x, y, ...)
+ invisible(plotqrrvglm(object=x, ...)))
+
+
+setMethod("lvplot", "uqo", function(object, ...)
+ invisible(lvplot.qrrvglm(object, C=FALSE, check.ok=FALSE, ...)))
+
+
+
+.VGAM.UQO.CA = function(Y) {
+ Y = as.matrix(Y) / sum(Y)
+ rowsum = c(Y %*% rep(1, len=ncol(Y)))
+ colsum = c(t(Y) %*% rep(1, len=nrow(Y)))
+ rc = outer(rowsum, colsum)
+ Ybar = (Y - rc) / sqrt(rc)
+ Q = qr(Ybar)
+ if (Q$rank > 0) {
+ temp = svd(Ybar)
+ colnames(temp$u) = paste("CA", 1:length(temp$d), sep = "")
+ rownames(temp$u) = dimnames(Y)[[1]]
+ sweep(as.matrix(temp$u[,1:Q$rank, drop=FALSE]),
+ 1, 1/sqrt(rowsum), "*")
+ } else stop("Null rank")
+}
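+
+# A small sketch of the helper above (hypothetical toy data): the leading
+# correspondence-analysis axes supply starting site scores for uqo.fit().
+if(FALSE) {
+ toy = matrix(rpois(20*4, lambda=3) + 1, nrow=20, ncol=4)
+ head(.VGAM.UQO.CA(toy)) # one column per CA axis
+}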
+
+
+
+if(FALSE) {
+ scores.uqo <- function (x, type = c("sites", "species"), ...) {
+ if(mode(type) != "character" && mode(type) != "name")
+ type = as.character(substitute(type))
+ type = match.arg(type, c("sites", "species"))[1]
+
+ switch(type,
+ sites=if(any(slotNames(x)=="lv")) x@lv else Coef(x)@lv,
+ species=if(any(slotNames(x)=="Optimum")) x@Optimum else Coef(x)@Optimum
+ )
+ }
+
+ setMethod("scores", "uqo", function(x, ...) scores.uqo(x, ...))
+}
+
+jitteruqo = function(mat) {
+ mat * ifelse(runif(length(mat)) < 0.5, -1, 1)
+}
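+# jitteruqo() perturbs starting site scores by randomly flipping the sign of
+# each element, e.g. jitteruqo(matrix(1, 2, 2)) is a 2 x 2 matrix of +/- 1s.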
+
+setMethod("Opt", "uqo", function(object, ...) Opt.qrrvglm(object, ...))
+setMethod("Max", "uqo", function(object, ...) Max.qrrvglm(object, ...))
+setMethod("lv", "uqo", function(object, ...) lv.qrrvglm(object, ...))
+
+
+
+if(!isGeneric("calibrate"))
+ setGeneric("calibrate", function(object, ...) standardGeneric("calibrate"))
+setMethod("calibrate", "uqo", function(object, ...)
+ calibrate.qrrvglm(object, ...))
+
+
+
+summary.uqo = function(object, ...) {
+ answer = Coef(object, ...)
+ class(answer) = "summary.uqo"
+ answer@call = object@call
+ answer@misc = object@misc
+ answer
+}
+
+printsummary.uqo = function(x, ...) {
+
+ cat("\nCall:\n")
+ dput(x@call)
+
+ printCoef.qrrvglm(x, ...)
+
+ cat("\nNumber of responses/species: ", x at NOS, "\n")
+
+ if(length(x@misc$dispersion) == 1)
+ cat("\nDispersion parameter(s): ", x@misc$dispersion, "\n")
+ invisible(x)
+}
+
+setClass("summary.uqo", representation("Coef.uqo",
+ "misc" = "list",
+ "call" = "call"))
+
+setMethod("summary", "uqo", function(object, ...)
+ summary.uqo(object, ...))
+
+setMethod("print", "summary.uqo",
+ function(x, ...)
+ invisible(printsummary.uqo(x, ...)))
+
+setMethod("show", "summary.uqo",
+ function(object)
+ invisible(printsummary.uqo(object)))
+
+
+
+Tol.uqo = function(object, ...) {
+ Coef(object, ...)@Tolerance
+}
+
+Tol.Coef.uqo = function(object, ...) {
+ if(length(list(...))) warning("Too late! Ignoring the extra arguments")
+ Coef(object, ...)@Tolerance
+}
+
+if(!isGeneric("Tol"))
+ setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
+setMethod("Tol", "uqo", function(object, ...) Tol.uqo(object, ...))
+setMethod("Tol", "Coef.uqo", function(object, ...) Tol.Coef.uqo(object, ...))
+
+
+
+
+
diff --git a/R/vgam.R b/R/vgam.R
new file mode 100644
index 0000000..946c63f
--- /dev/null
+++ b/R/vgam.R
@@ -0,0 +1,281 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+vgam <- function(formula,
+ family,
+ data=list(),
+ weights=NULL,
+ subset=NULL,
+ na.action=na.fail,
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ control=vgam.control(...),
+ offset=NULL,
+ method="vgam.fit",
+ model=FALSE, x.arg=TRUE, y.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=list(),
+ qr.arg=FALSE, smart=TRUE,
+ ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "vgam"
+
+ ocall <- match.call()
+
+ mf <- match.call(expand=FALSE)
+
+ if(smart)
+ setup.smart("write")
+
+
+
+
+ mt <- terms(formula, "s", data = data)
+
+ if(missing(data))
+ data <- environment(formula)
+
+ mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
+ mf$control <- mf$etastart <- mf$mustart <- mf$coefstart <-
+ mf$qr.arg <- mf$contrasts <- mf$constraints <- mf$smart <-
+ mf$extra <- mf$... <- NULL
+ mf[[1]] <- as.name("model.frame")
+ mf2 <- mf
+
+
+ mf <- eval(mf, parent.frame())
+
+ if(!missing(subset)) {
+ mf2$subset <- NULL
+ mf2 <- eval(mf2, parent.frame()) # mf2 is the full data frame.
+ spars2 = lapply(mf2, attr, "spar")
+ dfs2 = lapply(mf2, attr, "df")
+ sx2 = lapply(mf2, attr, "s.xargument")
+ for(i in 1:length(mf)) {
+ if(length(sx2[[i]])) {
+ attr(mf[[i]], "spar") = spars2[[i]]
+ attr(mf[[i]], "dfs2") = dfs2[[i]]
+ attr(mf[[i]], "s.xargument") = sx2[[i]]
+ }
+ }
+ rm(mf2)
+ }
+
+ if(method == "model.frame")
+ return(mf)
+
+ na.act <- attr(mf, "na.action")
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ na.message <- attr(mf, "na.message")
+
+ y <- model.response(mf, "numeric") # model.extract(m, "response")
+ if(!is.empty.model(mt))
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt)
+
+ w <- model.weights(mf)
+ if(!length(w))
+ w <- rep(1, nrow(mf))
+ else if(ncol(as.matrix(w))==1 && any(w < 0))
+ stop("negative weights not allowed")
+
+
+
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+
+ if (is.character(family))
+ family <- get(family)
+ if (is.function(family))
+ family <- family()
+ if(!inherits(family, "vglmff")) {
+ stop(paste("family=", family, "is not a VGAM family function"))
+ }
+
+ eval(vcontrol.expression)
+
+ n <- dim(x)[1]
+
+ if(FALSE && is.R()) {
+ family@inverse <- eval(family@inverse)
+ family@link <- eval(family@link)
+
+ for(i in names(.min.criterion.VGAM))
+ if(length(family[[i]])) family[[i]] <- eval(family[[i]])
+ }
+
+ if(length(slot(family, "first")))
+ eval(slot(family, "first"))
+
+ if(method != "vgam.fit")
+ stop("method must be \"model.frame\" or \"vgam.fit\"")
+
+ # --------------------------------------------------------------
+
+ aa <- attributes(mt)
+ smoothers <- aa$specials
+
+
+
+ nonparametric <- length(smoothers$s) > 0 # zz; different
+ if(nonparametric) {
+
+ ff <- apply(aa$factors[smoothers[["s"]],,drop=FALSE], 2, any)
+ smoothers[["s"]] <- if(any(ff)) seq(along=ff)[aa$order==1 & ff] else
+ NULL
+
+ smooth.labels <- aa$term.labels[unlist(smoothers)]
+ } else
+ function.name = "vglm" # This is effectively so
+
+
+
+ fit <- vgam.fit(x=x, y=y, w=w, mf=mf,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ offset=offset, family=family, control=control,
+ criterion=control$criterion,
+ constraints=constraints, extra=extra, qr.arg=qr.arg,
+ Terms=mt,
+ nonparametric=nonparametric, smooth.labels=smooth.labels,
+ function.name=function.name, ...)
+
+
+ if(any(fit$nl.df < 0)) {
+ fit$nl.df[fit$nl.df < 0] = 0
+ }
+
+ # --------------------------------------------------------------
+
+ if(!is.null(fit[["smooth.frame"]])) {
+ fit <- fit[-1] # Strip off smooth.frame
+ } else {
+ }
+
+ fit$smooth <- NULL # Not needed
+
+ fit$call <- ocall
+ if(model)
+ fit$model <- mf
+ if(!x.arg)
+ fit$x <- NULL
+ if(!y.arg)
+ fit$y <- NULL
+
+ if(nonparametric)
+ fit$misc$smooth.labels <- smooth.labels
+
+
+ fit$misc$dataname <- dataname
+
+ attr(fit, "na.message") <- na.message
+
+ if(smart)
+ fit$smart.prediction <- get.smart.prediction()
+
+
+ answer <-
+ new("vgam",
+ "assign" = attr(x, "assign"),
+ "call" = fit$call,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "criterion" = fit$crit.list,
+ "df.residual" = fit$df.residual,
+ "dispersion" = 1,
+ "family" = fit$family,
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "rank" = fit$rank,
+ "residuals" = as.matrix(fit$residuals),
+ "rss" = fit$rss,
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=fit$terms))
+
+ if(!smart) answer@smart.prediction <- list(smart.arg=FALSE)
+
+ if(qr.arg) {
+ class(fit$qr) = "list"
+ slot(answer, "qr") = fit$qr
+ }
+ if(length(attr(x, "contrasts")))
+ slot(answer, "contrasts") = attr(x, "contrasts")
+ if(length(fit$fitted.values))
+ slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
+ slot(answer, "na.action") = if(length(na.act)) list(na.act) else list()
+ if(length(offset))
+ slot(answer, "offset") = as.matrix(offset)
+ if(length(fit$weights))
+ slot(answer, "weights") = as.matrix(fit$weights)
+ if(x.arg)
+ slot(answer, "x") = x # The 'small' design matrix
+ if(length(xlev))
+ slot(answer, "xlevels") = xlev
+ if(y.arg)
+ slot(answer, "y") = as.matrix(fit$y)
+
+
+
+ slot(answer, "control") = fit$control
+
+ if(length(fit$extra)) {
+ slot(answer, "extra") = fit$extra
+ }
+ slot(answer, "iter") = fit$iter
+ slot(answer, "post") = fit$post
+
+ fit$predictors = as.matrix(fit$predictors) # Must be a matrix
+ dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
+ fit$misc$predictors.names)
+ slot(answer, "predictors") = fit$predictors
+ if(length(fit$prior.weights))
+ slot(answer, "prior.weights") = fit$prior.weights
+
+
+ if(nonparametric) {
+ slot(answer, "Bspline") = fit$Bspline
+ slot(answer, "nl.chisq") = fit$nl.chisq
+ slot(answer, "nl.df") = fit$nl.df
+ slot(answer, "spar") = fit$spar
+ slot(answer, "s.xargument") = fit$s.xargument
+ if(length(fit$var)) {
+ slot(answer, "var") = fit$var
+ }
+ if(length(fit$R)) {
+ slot(answer, "R") = fit$R # is null if totally parametric model
+ } else
+ warning("the \"R\" slot is NULL. Best to run vglm()") # zz;
+
+ }
+ if(length(fit$effects))
+ slot(answer, "effects") = fit$effects
+
+
+ answer
+}
+attr(vgam, "smart") <- TRUE
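+
+# A minimal sketch of a vgam() call (assumes the package is attached; 'mydat'
+# is hypothetical simulated data, not a dataset from this package):
+if(FALSE) {
+ set.seed(1)
+ mydat = data.frame(x = runif(100))
+ mydat$y = rpois(100, lambda = exp(1 + sin(2 * mydat$x)))
+ fit2 = vgam(y ~ s(x, df=3), family=poissonff, data=mydat)
+ summary(fit2)
+}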
+
+
+
+
+care.exp <- function(x, thresh = -log(.Machine$double.eps))
+{
+ x[x > thresh] <- thresh
+ x[x < (-thresh)] <- -thresh
+ exp(x)
+}
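+
+# care.exp() clips its argument to +/- log(.Machine$double.eps) before
+# exponentiating, so the result can never overflow to Inf or underflow to 0:
+if(FALSE) {
+ care.exp(c(-1000, 0, 1000)) # approximately 2.2e-16, 1, 4.5e+15
+}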
+
+
+
+
+
diff --git a/R/vgam.control.q b/R/vgam.control.q
new file mode 100644
index 0000000..02074df
--- /dev/null
+++ b/R/vgam.control.q
@@ -0,0 +1,129 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+vgam.control <- function(all.knots=FALSE,
+ backchat=if(is.R()) FALSE else TRUE,
+ bf.epsilon=1e-7,
+ bf.maxit=30,
+ checkwz=TRUE,
+ criterion = names(.min.criterion.VGAM),
+ epsilon=1e-7,
+ maxit=30,
+ na.action=na.fail,
+ nk=NULL,
+ save.weight=FALSE,
+ se.fit=TRUE,
+ trace=FALSE,
+ wzepsilon = .Machine$double.eps^0.75,
+ xij=NULL, ...)
+{
+
+
+
+
+
+ if(mode(criterion) != "character" && mode(criterion) != "name")
+ criterion <- as.character(substitute(criterion))
+ criterion <- pmatch(criterion[1], names(.min.criterion.VGAM), nomatch=1)
+ criterion <- names(.min.criterion.VGAM)[criterion]
+
+ if(!is.logical(checkwz) || length(checkwz) != 1)
+ stop("bad input for \"checkwz\"")
+ if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ stop("bad input for \"wzepsilon\"")
+
+ if(length(all.knots) > 1)
+ warning("all.knots should be of length 1; using first value only")
+ if(!is.Numeric(bf.epsilon, allow=1, posit=TRUE)) {
+ warning("bad input for \"bf.epsilon\"; using 0.00001 instead")
+ bf.epsilon <- 0.00001
+ }
+ if(!is.Numeric(bf.maxit, allow=1, posit=TRUE, integ=TRUE)) {
+ warning("bad input for \"bf.maxit\"; using 20 instead")
+ bf.maxit <- 20
+ }
+ if(!is.Numeric(epsilon, allow=1, posit=TRUE)) {
+ warning("bad input for \"epsilon\"; using 0.0001 instead")
+ epsilon <- 0.0001
+ }
+ if(!is.Numeric(maxit, allow=1, posit=TRUE, integ=TRUE)) {
+ warning("bad input for \"maxit\"; using 20 instead")
+ maxit <- 20
+ }
+
+ convergence <- expression({
+ switch(criterion,
+ coefficients=if(iter==1) iter<maxit else (iter<maxit &&
+ max(abs(new.coeffs - old.coeffs)/(abs(old.coeffs)+epsilon)) > epsilon),
+ abs(old.crit-new.crit)/(abs(old.crit)+epsilon) > epsilon && iter<maxit)
+ })
+
+ list(all.knots=as.logical(all.knots)[1],
+ backchat=as.logical(backchat)[1],
+ bf.epsilon=bf.epsilon,
+ bf.maxit=bf.maxit,
+ checkwz=checkwz,
+ convergence=convergence,
+ criterion=criterion,
+ epsilon=epsilon,
+ maxit=maxit,
+ nk=nk,
+ min.criterion = .min.criterion.VGAM,
+ save.weight=as.logical(save.weight)[1],
+ se.fit=as.logical(se.fit)[1],
+ trace=as.logical(trace)[1],
+ wzepsilon = wzepsilon,
+ xij=xij)
+}
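+
+# A minimal sketch (illustrative values, not recommendations):
+if(FALSE) {
+ vctrl = vgam.control(bf.epsilon=1e-8, maxit=50, trace=TRUE)
+ vctrl$criterion # "deviance", the first name of .min.criterion.VGAM
+}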
+
+
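+# vgam.nlchisq() computes, for each column of fitted smooths, an approximate
+# chi-squared statistic for the nonlinear part of the term: the weighted
+# smooth is projected off via qr.resid() and the residual sum of squares is
+# accumulated on the working scale.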
+vgam.nlchisq <- function(qr, resid, wz, s, deriv, U, smooth.labels,
+ assign, M, n, constraints)
+{
+
+ attr(qr, "class") = "qr"
+ class(qr) <- "qr"
+
+ if(!is.matrix(s)) s <- as.matrix(s)
+ if(!is.matrix(wz)) wz <- as.matrix(wz)
+ if(!is.matrix(deriv)) deriv <- as.matrix(deriv)
+ if(!is.matrix(resid)) resid <- as.matrix(resid)
+
+ trivc <- trivial.constraints(constraints)
+
+ ans <- rep(as.numeric(NA), length=ncol(s))
+ Uderiv <- vbacksub(U, t(deriv), M=M, n=n) # \bU_i^{-1} \biu_i
+ ptr <- 0
+ for(i in 1:length(smooth.labels)) {
+ cmat <- constraints[[ smooth.labels[i] ]]
+ index <- (ptr+1):(ptr+ncol(cmat))
+
+ for(j in index) {
+ yy <- t(cmat[,j-ptr,drop=FALSE])
+ yy <- kronecker(s[,j,drop=FALSE], yy) # n x M
+ Us <- mux22(U, yy, M=M, upper=TRUE, as.matrix=TRUE) # n * M
+
+ Uss <- matrix(c(t(Us)), nrow=n*M, ncol=1)
+
+ Rsw <- qr.resid(qr, Uss)
+
+ vRsw <- matrix(Rsw, nrow=n, ncol=M, byrow=TRUE)
+ newans <- vbacksub(U, t(vRsw), M=M, n=n)
+
+ ans[j] <- sum(vRsw^2 + 2 * newans * deriv)
+
+ }
+ ptr <- ptr + ncol(cmat)
+ }
+
+ names(ans) <- dimnames(s)[[2]]
+ ans
+}
+
+
+
+
+
+
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
new file mode 100644
index 0000000..9c6b7ff
--- /dev/null
+++ b/R/vgam.fit.q
@@ -0,0 +1,446 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+vgam.fit <- function(x, y, w, mf,
+ etastart, mustart, coefstart,
+ offset, family, control, criterion="coefficients",
+ constraints=NULL, extra, qr.arg,
+ Terms,
+ nonparametric, smooth.labels,
+ function.name="vgam", ...)
+{
+ post = list()
+ check.Rank <- TRUE # Set this to false for family functions vppr() etc.
+ epsilon <- control$epsilon
+ maxit <- control$maxit
+ save.weight <- control$save.weight
+ trace <- control$trace
+
+ bf.maxit <- control$bf.maxit
+ bf.epsilon <- control$bf.epsilon
+ se.fit <- control$se.fit
+ minimize.criterion <- control$min.criterion
+
+ n <- dim(x)[1]
+
+
+
+
+ # --------------------------------------------------------------
+ new.s.call <- expression({
+ if(c.list$one.more)
+ {
+ fv <- if(backchat && M>1 && nonparametric)
+ matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
+ new.coeffs <- c.list$coeff
+
+ if(length(family@middle))
+ eval(family@middle)
+
+ eta <- fv + offset
+ mu <- family@inverse(eta, extra)
+
+ if(length(family@middle2))
+ eval(family@middle2)
+
+ old.crit <- new.crit
+
+ new.crit <-
+ switch(criterion,
+ coefficients=new.coeffs,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+ if(trace) {
+ cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
+
+ uuuu = switch(criterion,
+ coefficients=if(is.R())
+ format(new.crit, dig=round(2-log10(epsilon))) else
+ format(round(new.crit, round(2-log10(epsilon)))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients={if(length(new.crit) > 2) cat("\n");
+ cat(uuuu, fill=TRUE, sep=", ")},
+ cat(uuuu, fill=TRUE, sep=", "))
+ }
+
+ one.more <- eval(control$convergence)
+
+ if(exists("flush.console"))
+ flush.console()
+
+ if(!is.finite(one.more) || !is.logical(one.more)) one.more = FALSE
+ if(one.more)
+ {
+ iter <- iter + 1
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+
+ c.list$z <- z
+ c.list$wz <- wz
+ c.list$U <- U
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+ old.coeffs <- new.coeffs
+ }
+ c.list
+ })
+
+
+
+
+
+ backchat <- control$backchat # if(is.R()) FALSE else TRUE
+ old.coeffs <- coefstart
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in $initialize
+
+ n.save <- dim(x)[1]
+ eval(family@initialize)
+ n <- n.save
+
+ if(length(etastart)) {
+ eta <- etastart
+ mu <- if(length(mustart)) mustart else family@inverse(eta, extra)
+ } else {
+ if(length(mustart))
+ mu <- mustart
+ eta <- family@link(mu, extra)
+ }
+
+ M <- if(is.matrix(eta)) ncol(eta) else 1
+
+
+ if(length(family@constraints))
+ eval(family@constraints)
+ Blist <- process.constraints(constraints, x, M)
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ dimB <- sum(ncolBlist)
+
+
+ if(nonparametric) {
+
+
+
+ smooth.frame <- mf # zz
+ assignx <- attr(x, "assign")
+ which <- assignx[smooth.labels]
+
+ bf <- "s.vam"
+ bf.call <- parse(text=paste(
+ "s.vam(x, z, wz, tfit$smooth, which, tfit$smooth.frame,",
+ "bf.maxit, bf.epsilon, trace, se=se.fit, xbig.save, ",
+ "Blist, ncolBlist, M, qbig, U, backchat, ",
+ "all.knots=control$all.knots, nk=control$nk)",
+ sep=""))[[1]]
+
+ qbig <- sum(ncolBlist[smooth.labels]) # Number of component funs
+ s <- matrix(0, n, qbig)
+ dy <- if(is.matrix(y)) dimnames(y)[[1]] else names(y)
+ d2 <- if(is.null(predictors.names))
+ paste("(Additive predictor ",1:M,")", sep="") else
+ predictors.names
+ dimnames(s) <- list(dy, vlabel(smooth.labels,
+ ncolBlist[smooth.labels], M))
+
+ tfit <- list(smooth=s, smooth.frame=smooth.frame)
+ } else {
+ bf.call <- if(is.R()) expression(vlm.wfit(xbig.save, z, Blist=NULL,
+ U=U, matrix.out=FALSE, XBIG=TRUE, qr=qr.arg, xij=NULL)) else
+ expression(vlm.wfit(xbig.save, z, Blist=NULL,
+ U=U, matrix.out=FALSE, XBIG=TRUE, singular.ok=TRUE, qr=qr.arg, xij=NULL))
+ bf <- "vlm.wfit"
+ }
+
+ xbig.save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+
+
+ if(length(coefstart)) {
+ eta <- if(ncol(xbig.save)>1) xbig.save %*% coefstart +
+ offset else xbig.save * coefstart + offset
+ eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
+ mu <- family at inverse(eta, extra)
+ }
+
+
+ if(criterion != "coefficients") {
+ tfun <- slot(family, criterion) # Needed for R, so have to follow suit
+ }
+
+ iter <- 1
+ new.crit <- switch(criterion,
+ coefficients=1,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+ old.crit <- if(minimize.criterion) 10*new.crit+10 else -10*new.crit-10
+
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+
+ c.list <- list(wz=as.double(wz), z=as.double(z),
+ fit=as.double(t(eta)),
+ one.more=TRUE, U=as.double(U),
+ coeff=as.double(rep(1,ncol(xbig.save))))
+
+
+ dxbig <- as.integer(dim(xbig.save))
+ n.big <- dxbig[[1]]
+ p.big <- dxbig[[2]]
+ if(n.big < p.big)
+ stop(paste(p.big, "parameters but only", n.big,
+ "observations"))
+
+ if(backchat) {
+ nframe <- sys.nframe()
+ dotC(name="v_init_call", as.integer(nframe), new.s.call)
+ }
+
+ while(c.list$one.more) {
+ tfit <- eval(bf.call) # fit$smooth.frame is new
+
+ c.list$coeff <- tfit$coefficients
+
+ tfit$predictors <- tfit$fitted.values + offset
+
+ c.list$fit <- tfit$fitted.values
+ c.list <- eval(new.s.call)
+ NULL
+ }
+
+ if(maxit>1 && iter>=maxit)
+ warning(paste("convergence not obtained in ", maxit, " iterations."))
+
+
+ dn.big <- labels(xbig.save)
+ xn.big <- dn.big[[2]]
+ yn.big <- dn.big[[1]]
+
+ if(length(family@fini))
+ eval(family@fini)
+
+ coefs <- tfit$coefficients
+ asgn <- attr(xbig.save, "assign") # 29/11/01 was x
+
+ names(coefs) <- xn.big
+ cnames <- xn.big
+
+ if(!is.null(tfit$rank)) {
+ rank <- tfit$rank
+ if(rank < ncol(x))
+ stop("rank < ncol(x) is bad")
+ } else rank <- ncol(x) # zz 8/12/01 I think rank is all wrong
+
+
+
+ dn <- labels(x)
+ yn <- dn[[1]]
+ xn <- dn[[2]]
+
+
+
+ if(is.matrix(mu)) {
+ if(length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ } else
+ if(length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
+ } else {
+ names(mu) <- names(fv)
+ }
+
+ tfit$fitted.values <- NULL # Have to kill it off 3/12/01
+ fit <- structure(c(tfit, list(
+ assign=asgn,
+ constraints=Blist,
+ control=control,
+ fitted.values=mu,
+ formula=as.vector(attr(Terms, "formula")),
+ iter=iter,
+ offset=offset,
+ rank=rank,
+ terms=Terms)))
+
+ df.residual <- n.big - rank
+ if(backchat) {
+ fit$coefficients <- coefs
+ fit$df.residual <- df.residual
+ }
+
+ if(!se.fit) {
+ fit$var <- NULL
+ }
+
+ if(M==1) {
+ wz <- as.vector(wz) # Convert wz into a vector
+ } # else
+ fit$weights <- if(save.weight) wz else NULL
+
+
+
+ if(M==1) {
+ fit$predictors <- as.vector(fit$predictors)
+ fit$residuals <- as.vector(fit$residuals)
+ names(fit$residuals) <- names(fit$predictors) <- yn
+ } else
+ dimnames(fit$residuals) <- dimnames(fit$predictors) <-
+ list(yn, predictors.names)
+
+ NewBlist <- process.constraints(constraints, x, M, by.col=FALSE)
+
+ misc <- list(
+ colnames.x = xn,
+ colnames.xbig = xn.big,
+ criterion = criterion,
+ function.name = function.name,
+ intercept.only=intercept.only,
+ predictors.names = predictors.names,
+ M = M,
+ n = n,
+ new.assign = new.assign(x, NewBlist),
+ nonparametric = nonparametric,
+ n.big = n.big,
+ orig.assign = attr(x, "assign"),
+ p = ncol(x),
+ p.big = p.big,
+ ynames = dimnames(y)[[2]])
+
+
+ if(criterion != "coefficients")
+ fit[[criterion]] <- new.crit
+
+
+
+ if(se.fit && length(fit$s.xargument)) {
+ misc$varassign <-
+ varassign(Blist, names(fit$s.xargument)) # zz or constraints?
+ }
+
+
+
+ if(nonparametric) {
+ misc$smooth.labels <- smooth.labels
+ }
+
+
+ crit.list <- list()
+ if(criterion != "coefficients")
+ crit.list[[criterion]] <- fit[[criterion]] <- new.crit
+ for(i in names(.min.criterion.VGAM)) {
+ if(i != criterion &&
+ any(slotNames(family) == i) &&
+ (( is.R() && length(body(slot(family, i)))) ||
+ ((!is.R() && length(slot(family, i)) > 1)))) {
+ fit[[i]] <- crit.list[[i]] <-
+ (slot(family, i))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
+ }
+ }
+
+
+
+
+
+
+
+
+
+ if(w[1] != 1 || any(w != w[1]))
+ fit$prior.weights <- w
+
+ if(length(family@last))
+ eval(family@last)
+
+
+ if(!is.null(fit$smooth)) {
+ fit$nl.chisq <- vgam.nlchisq(fit$qr, fit$resid, wz=wz,
+ s=fit$smooth, deriv=deriv.mu, U=U,
+ smooth.labels, attr(x, "assign"),
+ M=M, n=n, constraints=Blist)
+ }
+
+
+ if(!qr.arg) {
+ fit$qr <- NULL
+ }
+
+
+
+
+ fit$misc = NULL # 8/6/02; It's necessary to kill it as it exists in vgam
+ structure(c(fit, list(
+ contrasts=attr(x, "contrasts"),
+ control=control,
+ crit.list=crit.list,
+ extra=extra,
+ family=family,
+ iter=iter,
+ misc=misc,
+ post=post,
+ x=x,
+ y=y)),
+ vclass=family@vfamily)
+}
+
+
+
+
+
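+# new.assign() rebuilds the "assign" attribute for the big (VLM) model
+# matrix: each term's columns are replicated according to the number of
+# columns of its constraint matrix, and labelled with vlabel().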
+new.assign <- function(X, Blist)
+{
+
+ M <- nrow(Blist[[1]])
+ dn <- labels(X)
+ xn <- dn[[2]]
+
+ asgn <- attr(X, "assign")
+ nasgn <- names(asgn)
+ lasgn <- unlist(lapply(asgn, length))
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ names(ncolBlist) <- NULL # This is necessary for below to work
+
+ temp2 <- vlabel(nasgn, ncolBlist, M)
+ L <- length(temp2)
+ newasgn <- vector("list", L)
+
+ k <- 0
+ low <- 1
+ for(i in 1:length(asgn)) {
+ len <- low:(low+ncolBlist[i]*lasgn[i]-1)
+ temp <- matrix(len, ncolBlist[i], lasgn[i])
+ for(m in 1:ncolBlist[i])
+ newasgn[[k+m]] <- temp[m,]
+ low <- low + ncolBlist[i]*lasgn[i]
+ k <- k + ncolBlist[i]
+ }
+
+ names(newasgn) <- temp2
+ newasgn
+}
+
diff --git a/R/vgam.match.q b/R/vgam.match.q
new file mode 100644
index 0000000..4fca0b0
--- /dev/null
+++ b/R/vgam.match.q
@@ -0,0 +1,85 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
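+# vgam.match() prepares a smoothing variable for the backfitting code: it
+# returns the match of the data against its sorted unique values, the number
+# of effective (unique) points, and a valid B-spline knot sequence on the
+# variable rescaled to [0, 1].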
+vgam.match <- function(x, all.knots=FALSE, nk=NULL) {
+
+ if(is.list(x)) {
+ nvar <- length(x)
+ if(length(nk))
+ nk = rep(nk, length=nvar)
+ temp <- vgam.match(x[[1]], all.knots=all.knots, nk=nk[1])
+
+ o <- matrix(temp$o, length(temp$o), nvar)
+ nef <- rep(temp$nef, nvar)
+ xmin <- rep(temp$xmin, nvar)
+ xmax <- rep(temp$xmax, nvar)
+ nknots <- rep(temp$nknots, nvar)
+ knots <- vector("list", nvar)
+ knots[[1]] <- temp$knots
+
+ if(nvar > 1)
+ for(i in 2:nvar) {
+ temp = vgam.match(x[[i]], all.knots=all.knots, nk=nk[i])
+ o[, i] <- temp$o
+ nef[i] <- temp$nef
+ nknots[i] <- temp$nknots
+ knots[[i]] <- temp$knots
+ xmin[i] <- temp$xmin
+ xmax[i] <- temp$xmax
+ }
+ names(nknots) <- names(knots) <-
+ names(nef) <- names(xmin) <- names(xmax) <- names(x)
+ dimnames(o) <- list(NULL, names(x))
+
+ return(list(o=o, nef=nef, nknots=nknots, knots=knots,
+ xmin=xmin, xmax=xmax))
+ }
+
+ if(!is.null(attributes(x)$NAs) || any(is.na(x)))
+ stop("can't smooth on variables with NAs")
+
+ sx <- unique(sort(as.vector(x))) # "as.vector()" strips off attributes
+ o <- match(x, sx) # as.integer(match(x, sx)) # sx[o]==x
+ nef <- length(sx) # as.integer(length(sx))
+
+ if(nef < 7)
+ stop("smoothing variables must have at least 7 unique values")
+
+ xmin <- sx[1] # Don't use rounded value
+ xmax <- sx[nef]
+ xbar <- (sx - xmin) / (xmax - xmin)
+
+ noround = TRUE # Improvement 3/8/02
+ if(all.knots) {
+ if(noround) {
+ knot = valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3)))
+ } else {
+ knot <- c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3))
+ }
+ if(length(nk)) warning("overriding nk by all.knots=TRUE")
+ nk <- length(knot) - 4 # No longer: nef + 2
+ } else {
+ chosen = length(nk)
+ if(chosen && (nk > nef+2 || nk <= 5))
+ stop("bad value for nk")
+ if(!chosen) nk = 0
+ knot.list <- dotFortran(name="vknotl2", as.double(xbar), as.integer(nef),
+ knot=double(nef+6), k=as.integer(nk+4),
+ chosen=as.integer(chosen))
+ if(noround) {
+ knot = valid.vknotl2(knot.list$knot[1:(knot.list$k)])
+ knot.list$k = length(knot)
+ } else {
+ knot <- knot.list$knot[1:(knot.list$k)]
+ }
+ nk <- knot.list$k - 4
+ }
+ if(nk <= 5) stop("not enough distinct knots found")
+
+ return(list(o=o, nef=nef, nknots=nk, knots=knot, xmin=xmin, xmax=xmax))
+}
+
+
+
diff --git a/R/vglm.R b/R/vglm.R
new file mode 100644
index 0000000..6984465
--- /dev/null
+++ b/R/vglm.R
@@ -0,0 +1,169 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+vglm <- function(formula,
+ family, data=list(),
+ weights=NULL, subset=NULL, na.action=na.fail,
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ control=vglm.control(...),
+ offset=NULL,
+ method="vglm.fit",
+ model=FALSE, x.arg=TRUE, y.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=list(),
+ qr.arg=FALSE, smart=TRUE, ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "vglm"
+
+
+ ocall <- match.call()
+
+ if(smart)
+ setup.smart("write")
+
+ mt <- terms(formula, data = data)
+ if(missing(data))
+ data <- environment(formula)
+
+ mf <- match.call(expand=FALSE)
+ mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
+ mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
+ mf$coefstart <- mf$etastart <- mf$... <- NULL
+ mf$smart <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+
+ mf <- eval(mf, parent.frame())
+
+ if(method == "model.frame")
+ return(mf)
+ na.act <- attr(mf, "na.action")
+
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ y <- model.response(mf, "numeric") # model.extract(mf, "response")
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+ w <- model.weights(mf)
+ if(!length(w))
+ w <- rep(1, nrow(mf))
+ else if(ncol(as.matrix(w))==1 && any(w < 0))
+ stop("negative weights not allowed")
+
+ if(is.character(family))
+ family <- get(family)
+ if(is.function(family))
+ family <- family()
+ if(!inherits(family, "vglmff")) {
+ stop(paste("family=", family, "is not a VGAM family function"))
+ }
+
+ eval(vcontrol.expression)
+
+ if(length(slot(family, "first")))
+ eval(slot(family, "first"))
+
+
+ vglm.fitter <- get(method)
+
+ fit <- vglm.fitter(x=x, y=y, w=w, offset=offset,
+ etastart=etastart, mustart=mustart, coefstart=coefstart,
+ family=family,
+ control=control,
+ constraints=constraints,
+ criterion=control$criterion,
+ extra=extra,
+ qr.arg = qr.arg,
+ Terms=mt, function.name=function.name, ...)
+
+ fit$misc$dataname <- dataname
+
+ if(smart) {
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ }
+
+ answer <-
+ new(Class="vglm",
+ "assign" = attr(x, "assign"),
+ "call" = ocall,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "criterion" = fit$crit.list,
+ "df.residual" = fit$df.residual,
+ "df.total" = fit$df.total,
+ "dispersion" = 1,
+ "effects" = fit$effects,
+ "family" = fit$family,
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "R" = fit$R,
+ "rank" = fit$rank,
+ "residuals" = as.matrix(fit$residuals),
+ "rss" = fit$rss,
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=mt))
+
+ if(!smart) answer@smart.prediction <- list(smart.arg=FALSE)
+
+ if(qr.arg) {
+ class(fit$qr) = "list"
+ slot(answer, "qr") = fit$qr
+ }
+ if(length(attr(x, "contrasts")))
+ slot(answer, "contrasts") = attr(x, "contrasts")
+ if(length(fit$fitted.values))
+ slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
+ slot(answer, "na.action") = if(length(na.act)) list(na.act) else list()
+ if(length(offset))
+ slot(answer, "offset") = as.matrix(offset)
+
+ if(length(fit$weights))
+ slot(answer, "weights") = as.matrix(fit$weights)
+
+ if(x.arg)
+ slot(answer, "x") = fit$x # The 'small' design matrix
+
+ if(length(xlev))
+ slot(answer, "xlevels") = xlev
+ if(y.arg)
+ slot(answer, "y") = as.matrix(fit$y)
+
+
+ slot(answer, "control") = fit$control
+ slot(answer, "extra") = if(length(fit$extra)) {
+ if(is.list(fit$extra)) fit$extra else {
+ warning("\"extra\" is not a list, therefore placing \"extra\" into a list")
+ list(fit$extra)
+ }
+ } else list() # R-1.5.0
+ slot(answer, "iter") = fit$iter
+ slot(answer, "post") = fit$post
+ fit$predictors = as.matrix(fit$predictors) # Must be a matrix
+ dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
+ fit$misc$predictors.names)
+ slot(answer, "predictors") = fit$predictors
+ if(length(fit$prior.weights))
+ slot(answer, "prior.weights") = fit$prior.weights
+
+
+ answer
+}
+attr(vglm, "smart") <- TRUE
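+
+# A minimal sketch of a vglm() call (assumes the package is attached; 'bdat'
+# is hypothetical simulated data, not a dataset from this package):
+if(FALSE) {
+ set.seed(1)
+ bdat = data.frame(x = rnorm(200))
+ bdat$y = rbinom(200, size=1, prob=plogis(-1 + 2 * bdat$x))
+ fit3 = vglm(y ~ x, family=binomialff, data=bdat)
+ coef(fit3, matrix=TRUE)
+}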
+
+
+
diff --git a/R/vglm.control.q b/R/vglm.control.q
new file mode 100644
index 0000000..1a5731d
--- /dev/null
+++ b/R/vglm.control.q
@@ -0,0 +1,150 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+.min.criterion.VGAM <- c("deviance"=TRUE, "loglikelihood"=FALSE, "AIC"=TRUE,
+ "Likelihood"=FALSE, "rss"=TRUE, "coefficients"=TRUE)
+
+
+
+vlm.control <- function(save.weight=TRUE, tol=1e-7, method="qr",
+ checkwz=TRUE, wzepsilon = .Machine$double.eps^0.75,
+ ...)
+{
+ if(tol <= 0) {
+ warning("tol not positive; using 1e-7 instead")
+ tol <- 1e-7
+ }
+ if(!is.logical(checkwz) || length(checkwz) != 1)
+ stop("bad input for \"checkwz\"")
+ if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ stop("bad input for \"wzepsilon\"")
+
+ list(save.weight=save.weight, tol=tol, method=method,
+ checkwz=checkwz,
+ wzepsilon = wzepsilon)
+}
+
+
+vglm.control <- function(backchat=if(is.R()) FALSE else TRUE,
+ checkwz=TRUE,
+ criterion = names(.min.criterion.VGAM),
+ epsilon=1e-7,
+ half.stepsizing=TRUE,
+ maxit=30,
+ stepsize=1,
+ save.weight=FALSE,
+ trace=FALSE,
+ wzepsilon = .Machine$double.eps^0.75,
+ xij=NULL, ...)
+{
+
+
+
+ if(mode(criterion) != "character" && mode(criterion) != "name")
+ criterion <- as.character(substitute(criterion))
+ criterion <- pmatch(criterion[1], names(.min.criterion.VGAM), nomatch=1)
+ criterion <- names(.min.criterion.VGAM)[criterion]
+
+
+
+ if(!is.logical(checkwz) || length(checkwz) != 1)
+ stop("bad input for \"checkwz\"")
+ if(!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ stop("bad input for \"wzepsilon\"")
+
+ convergence <- expression({
+
+
+ switch(criterion,
+ coefficients=if(iter==1) iter<maxit else (iter<maxit &&
+ max(abs(new.crit - old.crit)/(abs(old.crit)+epsilon)) > epsilon),
+ abs(old.crit-new.crit)/(abs(old.crit)+epsilon) > epsilon && iter<maxit)
+ })
+
+ if(!is.Numeric(epsilon, allow=1, posit=TRUE)) {
+ warning("bad input for \"epsilon\"; using 0.00001 instead")
+ epsilon <- 0.00001
+ }
+ if(!is.Numeric(maxit, allow=1, posit=TRUE, integ=TRUE)) {
+ warning("bad input for \"maxit\"; using 20 instead")
+ maxit <- 20
+ }
+ if(!is.Numeric(stepsize, allow=1, posit=TRUE)) {
+ warning("bad input for \"stepsize\"; using 1 instead")
+ stepsize <- 1
+ }
+
+ list(backchat=as.logical(backchat)[1],
+ checkwz=checkwz,
+ convergence=convergence,
+ criterion=criterion,
+ epsilon=epsilon,
+ half.stepsizing=as.logical(half.stepsizing)[1],
+ maxit=maxit,
+ min.criterion = .min.criterion.VGAM,
+ save.weight=as.logical(save.weight)[1],
+ stepsize=stepsize,
+ trace=as.logical(trace)[1],
+ wzepsilon = wzepsilon,
+ xij=xij)
+}
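+
+# A minimal sketch (illustrative values, not recommendations):
+if(FALSE) {
+ cctrl = vglm.control(epsilon=1e-9, maxit=50, trace=TRUE)
+ cctrl$half.stepsizing # TRUE by default
+}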
+
+
+
+
+vcontrol.expression <- expression({
+
+ control <- control # First one, e.g., vgam.control(...)
+ mylist <- family@vfamily
+ for(i in length(mylist):1) {
+ for(ii in 1:2) {
+ temp <- paste(if(ii==1) "" else paste(function.name, ".", sep=""),
+ mylist[i], ".control", sep="")
+ tempexists = if(is.R()) exists(temp, envir = VGAMenv) else
+ exists(temp, inherit=TRUE)
+ if(tempexists) {
+ temp <- get(temp)
+ temp <- temp(...)
+ for(k in names(temp))
+ control[[k]] <- temp[[k]]
+ }
+ }
+ }
+
+
+ orig.criterion = control$criterion
+ if(control$criterion != "coefficients") {
+ try.crit = c(names(.min.criterion.VGAM), "coefficients")
+ for(i in try.crit) {
+ if(any(slotNames(family) == i) &&
+ (( is.R() && length(body(slot(family, i)))) ||
+ ((!is.R() && length(slot(family, i)) > 1)))) {
+ control$criterion <- i
+ break
+ } else
+ control$criterion <- "coefficients"
+ }
+ }
+ control$min.criterion <- control$min.criterion[control$criterion]
+
+
+
+
+
+ for(ii in 1:2) {
+ temp <- paste(if(ii==1) "" else paste(function.name, ".", sep=""),
+ family@vfamily[1],
+ ".", control$criterion, ".control", sep="")
+ if(exists(temp, inherit=T)) {
+ temp <- get(temp)
+ temp <- temp(...)
+ for(k in names(temp))
+ control[[k]] <- temp[[k]]
+ }
+ }
+
+})
+
+
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
new file mode 100644
index 0000000..e88382c
--- /dev/null
+++ b/R/vglm.fit.q
@@ -0,0 +1,462 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
+ etastart=NULL, mustart=NULL, coefstart=NULL,
+ offset=0, family,
+ control=vglm.control(),
+ criterion="coefficients",
+ qr.arg=FALSE,
+ constraints=NULL,
+ extra=NULL,
+ Terms=Terms, function.name="vglm", ...)
+{
+ post = list()
+ check.rank <- TRUE # Set this to false for family functions vppr() etc.
+ nonparametric <- FALSE
+ epsilon <- control$epsilon
+ maxit <- control$maxit
+ backchat <- control$backchat # FALSE
+ save.weight <- control$save.weight
+ trace <- control$trace
+ orig.stepsize <- control$stepsize
+ minimize.criterion <- control$min.criterion
+
+
+
+ n <- dim(x)[1]
+
+ new.s.call <- expression({
+ if(c.list$one.more)
+ {
+ fv <- if(backchat) {
+ if(M>1) matrix(c.list$fit,n,M,byrow=TRUE) else c.list$fit
+ } else c.list$fit
+ new.coeffs <- c.list$coeff
+
+ if(length(slot(family, "middle")))
+ eval(slot(family, "middle"))
+
+ eta <- fv + offset
+ mu <- slot(family, "inverse")(eta, extra)
+
+ if(length(slot(family, "middle2")))
+ eval(slot(family, "middle2"))
+
+ old.crit <- new.crit
+ new.crit <-
+ switch(criterion,
+ coefficients=new.coeffs,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+
+
+ if(trace && orig.stepsize==1) {
+ cat("VGLM linear loop ", iter, ": ", criterion, "= ")
+ uuuu =
+ switch(criterion,
+ coefficients=if(is.R())
+ format(new.crit, dig=round(2-log10(epsilon))) else
+ format(round(new.crit, round(2-log10(epsilon)))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients={if(length(new.crit) > 2) cat("\n");
+ cat(uuuu, fill=TRUE, sep=", ")},
+ cat(uuuu, fill=TRUE, sep=", "))
+ }
+
+
+ {
+ take.half.step=(control$half.stepsizing && length(old.coeffs))&&
+ ((orig.stepsize!=1) ||
+ (criterion!="coefficients" &&
+ (if(minimize.criterion) new.crit > old.crit else
+ new.crit < old.crit)))
+ if(take.half.step)
+ {
+ stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+ new.coeffs.save <- new.coeffs
+ if(trace)
+ cat("Taking a modified step")
+ repeat {
+ if(trace) {
+ cat(".")
+ if(exists("flush.console"))
+ flush.console()
+ }
+ stepsize <- stepsize / 2
+ if(too.small <- stepsize < 0.001)
+ break
+ new.coeffs <- (1-stepsize)*old.coeffs +
+ stepsize*new.coeffs.save
+
+ if(length(slot(family, "middle")))
+ eval(slot(family, "middle"))
+
+ fv <- xbig.save %*% new.coeffs
+ if(M > 1)
+ fv <- matrix(fv, n, M, byrow=TRUE)
+
+ eta <- fv + offset
+ mu <- slot(family, "inverse")(eta, extra)
+
+ if(length(slot(family, "middle2")))
+ eval(slot(family, "middle2"))
+
+
+ new.crit <-
+ switch(criterion,
+ coefficients=new.coeffs,
+ tfun(mu=mu,y=y,w=w,res=FALSE,eta=eta,extra))
+
+ if((criterion=="coefficients") ||
+ ( minimize.criterion && new.crit < old.crit) ||
+ (!minimize.criterion && new.crit > old.crit))
+ break
+ }
+
+ if(trace)
+ cat("\n")
+ if(too.small)
+ {
+ warning(paste("iterations terminated because",
+ "half-step sizes are very small"))
+ one.more <- FALSE
+ } else
+ {
+ if(trace) {
+ cat("VGLM linear loop ", iter, ": ", criterion, "= ")
+
+ uuuu =
+ switch(criterion,
+ coefficients=if(is.R())
+ format(new.crit, dig=round(2-log10(epsilon))) else
+ format(round(new.crit, round(2-log10(epsilon)))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients={if(length(new.crit) > 2) cat("\n");
+ cat(uuuu, fill=TRUE, sep=", ")},
+ cat(uuuu, fill=TRUE, sep=", "))
+ }
+
+ one.more <- eval(control$convergence)
+ }
+ } else {
+ one.more <- eval(control$convergence)
+ }
+ }
+ if(exists("flush.console"))
+ flush.console()
+
+ if(!is.logical(one.more)) one.more = FALSE
+ if(one.more)
+ {
+ iter <- iter + 1
+ deriv.mu <- eval(slot(family, "deriv"))
+ wz <- eval(slot(family, "weight"))
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+
+ c.list$z <- z
+ c.list$U <- U
+ if(copyxbig) c.list$xbig <- xbig.save
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+ old.coeffs <- new.coeffs
+ }
+ c.list
+ })
+
+
+
+
+
+ copyxbig <- FALSE # May be overwritten in @initialize
+ stepsize <- orig.stepsize
+ old.coeffs <- coefstart
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
+
+ n.save <- n
+
+
+ if(length(slot(family, "initialize")))
+ eval(slot(family, "initialize")) # Initialize mu and M (and optionally w)
+ n <- n.save
+
+
+ if(length(etastart)) {
+ eta <- etastart
+ mu <- if(length(mustart)) mustart else slot(family, "inverse")(eta, extra)
+ } else {
+ if(length(mustart))
+ mu <- mustart
+ eta <- slot(family, "link")(mu, extra)
+ }
+
+
+ M <- if(is.matrix(eta)) ncol(eta) else 1
+
+
+
+ if(length(slot(family, "constraints")))
+ eval(slot(family, "constraints"))
+
+ Blist <- process.constraints(constraints, x, M)
+
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ dimB <- sum(ncolBlist)
+
+
+ xbig.save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+
+
+ if(length(coefstart)) {
+ eta <- if(ncol(xbig.save)>1) xbig.save %*% coefstart +
+ offset else xbig.save * coefstart + offset
+ eta <- if(M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
+ mu <- slot(family, "inverse")(eta, extra)
+ }
+
+
+ if(criterion != "coefficients") {
+ tfun <- slot(family, criterion) # family[[criterion]]
+ }
+
+ iter <- 1
+ new.crit <- switch(criterion,
+ coefficients=1,
+ tfun(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra))
+ old.crit <- if(minimize.criterion) 10*new.crit+10 else -10*new.crit-10
+
+ deriv.mu <- eval(slot(family, "deriv"))
+ wz <- eval(slot(family, "weight"))
+ if(control$checkwz)
+ wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+
+ c.list <- list(z=as.double(z), fit=as.double(t(eta)), one.more=TRUE,
+ coeff=as.double(rep(1,ncol(xbig.save))), U=as.double(U),
+ copyxbig=copyxbig,
+ xbig=if(copyxbig) as.double(xbig.save) else double(3))
+
+
+ dxbig <- as.integer(dim(xbig.save))
+ n.big <- dxbig[[1]]
+ p.big <- dxbig[[2]]
+
+ if(n.big < p.big)
+ stop(paste(p.big, "parameters but only", n.big, "observations"))
+
+ if(backchat) {
+ nframe <- sys.nframe()
+ dotC(name="v_init_call", as.integer(nframe), new.s.call)
+ }
+
+
+ if(backchat) {
+ index.vglm <- iam(NA, NA, M, both=TRUE)
+ tfit <- dotFortran(name="vglmf", as.double(xbig.save), n.big, p.big,
+ backchat=as.integer(T),
+ as.integer(n), as.double(z),
+ coefficients=double(p.big),
+ predictors=double(n.big), effects=double(n.big),
+ qr=as.double(xbig.save), qraux=double(p.big),
+ rank=as.integer(0), pivot=as.integer(seq(p.big)),
+ work=double(max(n.big, 2 * p.big)),
+ wkmm=double(M*M*5 + M*p.big),
+ as.double(U), as.integer(M),
+ dimu=as.integer(if(is.matrix(U)) nrow(U) else 1),
+ dimm=as.integer(if(is.matrix(wz)) ncol(wz) else 1),
+ as.integer(index.vglm$row), as.integer(index.vglm$col),
+ copyxbig=as.integer(copyxbig),
+ rss=double(1))
+ } else {
+
+ bf.call <- expression(vlm.wfit(xbig.save, z, Blist=NULL,
+ U=U, matrix.out=FALSE, XBIG=TRUE, qr=qr.arg, xij=NULL)) # same call under R and S-PLUS
+
+ while(c.list$one.more) {
+ tfit <- eval(bf.call) # fit$smooth.frame is new
+
+ c.list$coeff <- tfit$coefficients
+
+ tfit$predictors <- tfit$fitted.values # zz + offset??
+
+ c.list$fit <- tfit$fitted.values
+ c.list <- eval(new.s.call)
+ NULL
+ }
+ }
+
+ if(maxit>1 && iter>=maxit)
+ warning(paste("convergence not obtained in", maxit, "iterations."))
+
+
+
+ dn.big <- labels(xbig.save)
+ xn.big <- dn.big[[2]]
+ yn.big <- dn.big[[1]]
+
+ if(length(slot(family, "fini")))
+ eval(slot(family, "fini"))
+
+ if(M>1)
+ tfit$predictors <- matrix(tfit$predictors, n, M,
+ byrow=backchat)
+
+ coefs <- tfit$coefficients
+ asgn <- attr(xbig.save, "assign")
+
+ names(coefs) <- xn.big
+
+ rank <- tfit$rank
+ cnames <- xn.big
+
+ if(check.rank && rank < p.big)
+ stop("vglm only handles full-rank models (currently)")
+
+ R <- if(is.R()) tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE] else {
+ if(backchat) tfit$qr[1:p.big, 1:p.big, drop=FALSE] else
+ tfit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
+ }
+ R[lower.tri(R)] <- 0
+ attributes(R) <- if(is.R()) list(dim=c(p.big, p.big),
+ dimnames=list(cnames, cnames), rank=rank) else
+ list(dim=c(p.big, p.big),
+ dimnames=list(cnames, cnames), rank=rank, class="upper")
+
+ effects <- tfit$effects
+ neff <- rep("", n.big)
+ neff[seq(p.big)] <- cnames
+ names(effects) <- neff
+
+ dim(tfit$predictors) <- c(n, M)
+ dn <- labels(x)
+ yn <- dn[[1]]
+ xn <- dn[[2]]
+
+
+ residuals <- z - tfit$predictors # zz - offset ??
+ if(M==1) {
+ tfit$predictors <- as.vector(tfit$predictors)
+ residuals <- as.vector(residuals)
+ names(residuals) <- names(tfit$predictors) <- yn
+ } else {
+ dimnames(residuals) <- dimnames(tfit$predictors) <- list(yn, predictors.names)
+ }
+
+ if(is.matrix(mu)) {
+ if(length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ if(length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
+ } else {
+ names(mu) <- names(fv)
+ }
+
+
+ df.residual <- n.big - rank
+ fit <- list(assign=asgn,
+ coefficients=coefs,
+ constraints=Blist,
+ df.residual=df.residual,
+ df.total=n*M,
+ effects=effects,
+ fitted.values=mu,
+ offset=offset,
+ rank=rank,
+ residuals=residuals,
+ R=R,
+ terms=Terms) # terms: This used to be done in vglm()
+
+ if(qr.arg) {
+ fit$qr <- if(is.R()) {
+ tfit$qr
+ } else {
+ if(backchat) tfit[c("qr", "rank", "pivot", "qraux")] else tfit$qr
+ }
+ dimnames(fit$qr$qr) <- dn.big
+ }
+
+ if(M==1) {
+ wz <- as.vector(wz) # Convert wz into a vector
+ } # else
+ fit$weights <- if(save.weight) wz else NULL
+
+
+ misc <- list(
+ colnames.x = xn,
+ colnames.xbig = xn.big,
+ criterion = criterion,
+ function.name = function.name,
+ intercept.only=intercept.only,
+ predictors.names = predictors.names,
+ M = M,
+ n = n,
+ nonparametric = nonparametric,
+ n.big = n.big,
+ orig.assign = attr(x, "assign"),
+ p = ncol(x),
+ p.big = p.big,
+ ynames = dimnames(y)[[2]])
+
+
+ crit.list <- list()
+ if(criterion != "coefficients")
+ crit.list[[criterion]] <- fit[[criterion]] <- new.crit
+
+ for(i in names(.min.criterion.VGAM)) {
+ if(i != criterion &&
+ any(slotNames(family) == i) &&
+ (( is.R() && length(body(slot(family, i)))) ||
+ ((!is.R() && length(slot(family, i)) > 1)))) {
+ fit[[i]] <- crit.list[[i]] <-
+ (slot(family, i))(mu=mu, y=y, w=w, res=FALSE, eta=eta, extra)
+ }
+ }
+
+
+
+
+ if(w[1] != 1 || any(w != w[1]))
+ fit$prior.weights <- w
+
+ if(length(slot(family, "last")))
+ eval(slot(family, "last"))
+
+ structure(c(fit, list(predictors=tfit$predictors,
+ contrasts=attr(x, "contrasts"),
+ control=control,
+ crit.list=crit.list,
+ extra=extra,
+ family=family,
+ iter=iter,
+ misc=misc,
+ post=post,
+ rss=tfit$rss,
+ x=x,
+ y=y)),
+ vclass=slot(family, "vfamily"))
+}
+
diff --git a/R/vlm.R b/R/vlm.R
new file mode 100644
index 0000000..c722d51
--- /dev/null
+++ b/R/vlm.R
@@ -0,0 +1,190 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+vlm <- function(formula,
+ data=list(),
+ weights=NULL, subset, na.action,
+ prior.weights=NULL,
+ control=vlm.control(...),
+ method="qr",
+ model=FALSE, x.arg=FALSE, y.arg=TRUE, qr.arg=TRUE,
+ contrasts=NULL,
+ constraints=NULL,
+ extra=NULL, offset=NULL,
+ smart=TRUE, ...)
+{
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "vlm"
+
+ ocall <- match.call()
+
+ if(smart)
+ setup.smart("write")
+
+ mt <- terms(formula, data = data) # attr(m, "terms")
+ if (missing(data))
+ data <- sys.frame(sys.parent())
+
+ mf <- match.call(expand=FALSE)
+ mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
+ mf$contrasts <- mf$constraints <- mf$extra <-
+ mf$qr.arg <- mf$smart <- mf$... <- NULL
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ if(method == "model.frame")
+ return(mf)
+ if(method != "qr")
+ stop("only method=\"qr\" is implemented")
+
+ na.act <- attr(mf, "na.action")
+
+ xvars <- as.character(attr(mt, "variables"))[-1]
+ if ((yvar <- attr(mt, "response")) > 0)
+ xvars <- xvars[-yvar]
+ xlev <- if (length(xvars) > 0) {
+ xlev <- lapply(mf[xvars], levels)
+ xlev[!sapply(xlev, is.null)]
+ }
+
+ y <- model.response(mf, "numeric") # model.extract(mf, "response")
+ x <- model.matrix(mt, mf, contrasts)
+ attr(x, "assign") <- attrassigndefault(x, mt) # So as to make it like Splus
+ offset <- model.offset(mf)
+ if(is.null(offset))
+ offset <- 0 # yyy ???
+ if(length(offset) && any(offset!=0))
+ stop("offsets are redundant for (vector) linear models")
+ wz <- model.weights(mf)
+
+ y = as.matrix(y)
+ M <- ncol(as.matrix(y))
+ n <- nrow(x)
+ dy <- dimnames(y)
+ dy1 <- if(length(dy[[1]])) dy[[1]] else dimnames(mf)[[1]]
+ dy2 <- if(length(dy[[2]])) dy[[2]] else paste("Y", 1:M, sep="")
+ dimnames(y) <- list(dy1, dy2)
+ predictors.names = dy2
+
+ if(!length(prior.weights)) {
+ prior.weights = rep(1, len=n)
+ names(prior.weights) = dy1
+ }
+ if(any(prior.weights <= 0))
+ stop("only positive weights allowed")
+ if(!length(wz)) {
+ wz <- matrix(prior.weights, n, M)
+ identity.wts <- TRUE
+ } else {
+ identity.wts <- FALSE
+ temp = ncol(as.matrix(wz))
+ if(temp < M || temp > M*(M+1)/2)
+ stop(paste("input w must have at between", M, "and",
+ M*(M+1)/2, "columns"))
+ wz <- prior.weights * wz
+ }
+
+ control = control
+ Blist <- process.constraints(constraints, x, M)
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+
+ fit <- vlm.wfit(x=x, z=y, Blist=Blist, wz=wz, U=NULL,
+ matrix.out=FALSE, XBIG=FALSE, rss=TRUE, qr=qr.arg,
+ x.ret=TRUE, offset = offset)
+
+ p.big <- fit$rank
+ fit$R <- fit$qr$qr[1:p.big, 1:p.big, drop=FALSE]
+ fit$R[lower.tri(fit$R)] <- 0
+
+
+
+
+ fit$constraints <- Blist
+
+ dn.big <- labels(fit$xbig)
+ xn.big <- dn.big[[2]]
+ dn <- labels(x)
+ xn <- dn[[2]]
+ dxbig <- as.integer(dim(fit$xbig))
+ n.big <- dxbig[[1]]
+ p.big <- dxbig[[2]]
+
+ misc <- list(
+ colnames.x = xn,
+ colnames.xbig = xn.big,
+ function.name = function.name,
+ intercept.only=intercept.only,
+ predictors.names = predictors.names,
+ M = M,
+ n = nrow(x),
+ n.big = n.big,
+ orig.assign = attr(x, "assign"),
+ p = ncol(x),
+ p.big = p.big,
+ ynames = dimnames(y)[[2]])
+
+ fit$misc <- misc
+
+ fit$misc$dataname <- dataname
+
+
+
+ if(smart) {
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
+ }
+
+ answer <-
+ new("vlm",
+ "assign" = attr(x, "assign"),
+ "call" = ocall,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "control" = control,
+ "criterion" = list(deviance=fit$rss),
+ "dispersion" = 1,
+ "df.residual" = fit$df.residual,
+ "df.total" = n*M,
+ "effects" = fit$effects,
+ "fitted.values"= as.matrix(fit$fitted.values),
+ "misc" = fit$misc,
+ "model" = if(model) mf else data.frame(),
+ "R" = fit$R,
+ "rank" = fit$rank,
+ "residuals" = as.matrix(fit$residuals),
+ "rss" = fit$rss,
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=mt))
+
+ if(!smart) answer@smart.prediction <- list(smart.arg=FALSE)
+
+ slot(answer, "prior.weights") = prior.weights
+
+ if(length(attr(x, "contrasts")))
+ slot(answer, "contrasts") = attr(x, "contrasts")
+ slot(answer, "na.action") = if(length(na.act)) list(na.act) else list()
+ if(length(offset))
+ slot(answer, "offset") = as.matrix(offset)
+ if(qr.arg) {
+ class(fit$qr) = "list"
+ slot(answer, "qr") = fit$qr
+ }
+ if(x.arg)
+ slot(answer, "x") = x # The 'small' design matrix
+ if(control$save.weight)
+ slot(answer, "weights") = wz
+ if(length(xlev))
+ slot(answer, "xlevels") = xlev
+ if(y.arg)
+ slot(answer, "y") = as.matrix(y)
+
+ answer
+}
+attr(vlm, "smart") <- TRUE
+
+
+
+
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
new file mode 100644
index 0000000..4ef0109
--- /dev/null
+++ b/R/vlm.wfit.q
@@ -0,0 +1,150 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+
+
+
+vlm.wfit <- function(x, z, Blist, wz=NULL, U=NULL,
+ matrix.out=FALSE, XBIG=FALSE, rss=TRUE, qr=FALSE, x.ret=FALSE,
+ offset=NULL,
+ omit.these=NULL, only.rss=FALSE,
+ ncolx=if(matrix.out && XBIG) stop("need this argument") else ncol(x),
+ xij=NULL, Aarray=NULL, Aindex=NULL, lp.names=NULL,
+ Eta.range=NULL, ...)
+{
+
+ missing.Blist <- missing(Blist)
+ z = as.matrix(z)
+ n <- nrow(z)
+ M <- ncol(z)
+ if(!only.rss) {
+ contrast.save <- attr(x, "contrasts")
+ znames <- dimnames(z)[[2]]
+ }
+
+ if(length(offset))
+ z <- z - offset
+ if(missing(U) || !length(U)) {
+ U <- vchol(wz, M=M, n=n, silent=FALSE)
+ }
+ dU <- dim(U)
+ if(dU[2] != n)
+ stop("input unconformable")
+
+ xbig.save <- if(XBIG) {
+ x
+ } else {
+ if(missing.Blist || !length(Blist))
+ Blist = replace.constraints(vector("list", ncol(x)),
+ diag(M), 1:ncol(x)) # NULL
+ lm2vlm.model.matrix(x=x, Blist=Blist, M=M, assign.attributes=FALSE,
+ xij = xij, Aarray=Aarray, Aindex=Aindex)
+ }
+ xbig <- mux111(U, xbig.save, M=M)
+ z.big <- mux22(U, z, M=M, upper=TRUE, as.mat=FALSE)
+
+
+ if(length(omit.these)) {
+ xbig = xbig[!omit.these,,drop=FALSE]
+ z.big = z.big[!omit.these]
+ }
+
+ ans <- if(!is.R()) lm.fit.qr(x=xbig, y=z.big, qr=qr, ...) else
+ lm.fit(xbig, z.big, ...) # zz; also qr=qr,
+
+ if(rss) {
+ ans$rss <- sum(ans$resid^2)
+ if(only.rss) return(list(rss=ans$rss))
+ }
+
+
+ if(length(omit.these) && any(omit.these))
+ stop("code beyond here can't handle omitted observations")
+
+
+ fv <- ans$fitted.values
+ dim(fv) <- c(M, n)
+ fv <- vbacksub(U, fv, M=M, n=n) # Have to premultiply fv by U
+
+
+ if(length(Eta.range)) {
+ if(length(Eta.range) != 2)
+ stop("length(Eta.range) must equal 2")
+ fv = ifelse(fv < Eta.range[1], Eta.range[1], fv)
+ fv = ifelse(fv > Eta.range[2], Eta.range[2], fv)
+ }
+
+ ans$fitted.values <- if(M==1) c(fv) else fv
+ if(M > 1)
+ dimnames(ans$fitted.values) <- list(dimnames(z)[[1]], znames)
+ ans$residuals <- if(M==1) c(z-fv) else z-fv
+ if(M > 1)
+ dimnames(ans$residuals) <- list(dimnames(ans$residuals)[[1]], znames)
+ ans$misc <- list(M=M, n=n)
+ ans$call <- match.call()
+
+ ans$constraints <- Blist
+ ans$contrasts <- contrast.save
+ if(x.ret)
+ ans$xbig <- xbig.save
+
+ if(!is.null(offset))
+ ans$fitted.values <- ans$fitted.values + offset
+
+
+
+
+ if(!matrix.out)
+ return(ans)
+
+
+ dx2 = if(XBIG) NULL else dimnames(x)[[2]]
+ B <- matrix(as.numeric(NA), nrow=M, ncol=ncolx, dimnames=list(lp.names, dx2))
+ if(is.null(Blist)) {
+ Blist = replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
+ }
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ temp <- c(0, cumsum(ncolBlist))
+ for(i in 1:ncolx) {
+ index <- (temp[i]+1):temp[i+1]
+ cm <- Blist[[i]]
+ B[,i] <- cm %*% ans$coef[index]
+ }
+ ans$mat.coefficients <- t(B) # 20/9/99; not $coefficients
+ ans
+}
+
+
+print.vlm.wfit <- function(x, ...)
+{
+ if(!is.null(cl <- x$call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x$coefficients
+ cat("\nCoefficients:\n")
+ print(coef, ...)
+
+ rank <- x$rank
+ if(is.null(rank))
+ rank <- sum(!is.na(coef))
+ n <- x$misc$n
+ M <- x$misc$M
+ rdf <- x$df.resid
+ if(is.null(rdf))
+ rdf <- (n - rank) * M
+ cat("\nDegrees of Freedom:", n*M, "Total;", rdf, "Residual\n")
+
+ if(!is.null(x$rss))
+ cat("Residual Sum of Squares:", format(x$rss), "\n")
+
+ invisible(x)
+}
+
+
+
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
new file mode 100644
index 0000000..70cd95a
--- /dev/null
+++ b/R/vsmooth.spline.q
@@ -0,0 +1,564 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+
+
+if(!exists("is.R")) is.R <- function()
+ exists("version") && !is.null(version$language) && version$language=="R"
+
+setClass("vsmooth.spline.fit", representation(
+ "Bcoefficients"= "matrix",
+ "knots" = "numeric",
+ "xmin" = "numeric",
+ "xmax" = "numeric"))
+
+setClass("vsmooth.spline", representation(
+ "call" = "call",
+ "constraints" = "list",
+ "df" = "numeric",
+ "nlfit" = "vsmooth.spline.fit", # is the nonlinear component
+ "lev" = "matrix",
+ "lfit" = "vlm", # 6/6/02: was "vlm.wfit"; is the linear component
+ "spar" = "numeric",
+ "var" = "matrix",
+ "w" = "matrix",
+ "x" = "numeric",
+ "y" = "matrix",
+ "yin" = "matrix"))
+
+
+setMethod("coefficients", signature(object="vsmooth.spline"),
+ function(object, ...)
+ coefvsmooth.spline(object, ...))
+setMethod("coef", signature(object="vsmooth.spline"),
+ function(object, ...)
+ coefvsmooth.spline(object, ...))
+
+setMethod("coefficients", signature(object="vsmooth.spline.fit"),
+ function(object, ...)
+ coefvsmooth.spline.fit(object, ...))
+setMethod("coef", signature(object="vsmooth.spline.fit"),
+ function(object, ...)
+ coefvsmooth.spline.fit(object, ...))
+
+setMethod("fitted.values", signature(object="vsmooth.spline"),
+ function(object, ...)
+ fittedvsmooth.spline(object, ...))
+setMethod("fitted", signature(object="vsmooth.spline"),
+ function(object, ...)
+ fittedvsmooth.spline(object, ...))
+
+setMethod("residuals", signature(object="vsmooth.spline"),
+ function(object, ...)
+ residvsmooth.spline(object, ...))
+setMethod("resid", signature(object="vsmooth.spline"),
+ function(object, ...)
+ residvsmooth.spline(object, ...))
+
+setMethod("predict", signature(object="vsmooth.spline"),
+ function(object, ...)
+ predictvsmooth.spline(object, ...))
+setMethod("print", "vsmooth.spline",
+ function(x, ...)
+ invisible(printvsmooth.spline(x, ...)))
+setMethod("show", "vsmooth.spline",
+ function(object)
+ printvsmooth.spline(object))
+setMethod("plot", "vsmooth.spline",
+ function(x, y, ...) {
+ if(!missing(y)) stop("can't process the \"y\" argument")
+ invisible(plotvsmooth.spline(x, ...))})
+setMethod("predict", "vsmooth.spline.fit",
+ function(object, ...)
+ predictvsmooth.spline.fit(object, ...))
+
+
+vsmooth.spline <- function(x, y, w, df=rep(5,M), spar=NULL, # rep(0,M),
+ all.knots=FALSE,
+ iconstraint=diag(M),
+ xconstraint=diag(M),
+ constraints=list("(Intercepts)"=diag(M), x=diag(M)),
+ tol.nl=0.01, var.arg=FALSE,
+ scale.w=TRUE,
+ nk=NULL)
+{
+
+
+ if(var.arg) {
+ warning("@var will be returned, but no use will be made of it")
+ }
+
+
+ missing.constraints <- missing(constraints)
+
+ if(!(missing.spar <- missing(spar)) && !missing(df))
+ stop("can't specify both spar and df")
+
+
+ my.call <- match.call()
+ if(missing(y)) {
+ if(is.list(x)) {
+ if(any(is.na(match(c("x", "y"), names(x)))))
+ stop("cannot find x and y in list")
+ y <- x$y
+ x <- x$x
+ } else if(is.complex(x)) {
+ y <- Im(x)
+ x <- Re(x)
+ } else if(is.matrix(x)) {
+ y <- x[,-1]
+ x <- x[,1]
+ } else {
+ y <- x
+ x <- time(x)
+ }
+ }
+
+ n <- length(x)
+ y <- as.matrix(y)
+ ny2 <- dimnames(y)[[2]] # NULL if vector
+ M <- ncol(y)
+ if(n != nrow(y))
+ stop("lengths of x and y must match")
+
+ if(any(is.na(x)) || any(is.na(y)))
+ stop("NAs not allowed in x or y")
+
+ if(missing(w)) {
+ w <- matrix(1, n, M)
+ } else {
+ if(any(is.na(w)))
+ stop("NAs not allowed in w")
+
+ w <- as.matrix(w)
+
+ if(nrow(y) != nrow(w) || ncol(w)>M*(M+1)/2)
+ stop("w and y don't match")
+
+ if(scale.w)
+ w <- w / mean(w[,1:M]) # 'Average' value is 1
+ }
+ dimw <- ncol(w)
+
+ if(missing.constraints)
+ constraints <- list("(Intercepts)"=eval(iconstraint),
+ x=eval(xconstraint))
+ constraints <- eval(constraints)
+ if(is.matrix(constraints))
+ constraints <- list("(Intercepts)"=constraints, x=constraints)
+ if(!is.list(constraints) || length(constraints)!=2)
+ stop("constraints must equal a list (of length 2) or a matrix")
+ for(i in 1:2)
+ if(!is.numeric(constraints[[i]]) || !is.matrix(constraints[[i]]) ||
+ nrow(constraints[[i]])!=M || ncol(constraints[[i]])>M)
+ stop("something wrong with the constraints")
+ names(constraints) <- c("(Intercepts)", "x")
+
+
+ sx <- unique(sort(as.vector(x)))
+ o <- match(x, sx) # sx[o]==x
+ nef <- length(sx)
+ if(nef < 7)
+ stop("not enough unique x values (need 7 or more)")
+
+
+ index <- iam(NA, NA, M, both=TRUE, diagonal=TRUE)
+ template1 <- template2 <- matrix(0, nef, M) # Must have M columns
+ ncb <- M
+ dimu <- dimw # 10/1/00; was M*(M+1)/2
+
+ collaps <- dotFortran(name="vsuff9",
+ as.integer(n), as.integer(nef), as.integer(o),
+ as.double(x), as.double(y), as.double(w),
+ xbar=double(nef), ybar=as.double(template1), wbar=double(nef*dimu),
+ uwbar=as.double(0), wz=as.double(template2),
+ as.integer(M), dimw=as.integer(dimw), dimu=as.integer(dimu),
+ as.integer(index$row), as.integer(index$col),
+ double(M*(M+1)), double(ncb*(ncb+1)),
+ as.double(diag(M)), as.integer(M),
+ triv=as.integer(1), wuwbar=as.integer(0), ok=as.integer(0))
+
+
+ if(collaps$ok != 1)
+ stop("some non-positive-definite weight matrices detected in \"vsuff9\"")
+ dim(collaps$ybar) <- dim(collaps$wz) <- c(nef, M)
+
+
+ yin = collaps$ybar # Includes both linear and nonlinear parts
+ junk.frame = data.frame(x=collaps$xbar, yin = yin)
+ x = collaps$xbar # Warning: From now on "x" is no longer the original x
+
+ lfit = vlm(yin ~ 1 + x,
+ constraints = constraints,
+ save.weight=FALSE, qr=FALSE, x=FALSE, y=FALSE,
+ smart = FALSE,
+ weight=matrix(collaps$wbar, nrow=nrow(yin), byrow=FALSE))
+
+ ncb <- ncol(constraints[[2]]) # Of x and not of the intercept
+ spar <- if(length(spar)) rep(spar, length=ncb) else rep(0, length=ncb)
+ df <- rep(df, length=ncb)
+
+ if(!missing.spar) {
+ ispar <- 1
+ if(any(spar <= 0) || !is.numeric(spar))
+ stop("not allowed non-positive or non-numeric smoothing parameters")
+ nonlin <- if(is.R()) (spar != Inf) else (!is.inf(spar))
+ } else {
+ ispar <- 0
+ if(!is.numeric(df) || any(df < 2 | df > nef))
+ stop(paste("you must supply 2 <= df <=", nef))
+ if(tol.nl <= 0) stop("bad value for tol.nl")
+ nonlin <- abs(df-2) > tol.nl
+ }
+
+
+ if(all(!nonlin)) {
+
+ junk.fill = new("vsmooth.spline.fit",
+ "Bcoefficients"= matrix(as.numeric(NA), 1, 1),
+ "knots" = numeric(0),
+ "xmin" = numeric(0),
+ "xmax" = numeric(0)) # 8/11/03
+ object =
+ new("vsmooth.spline",
+ "call" = my.call,
+ "constraints" = constraints,
+ "df" = if(ispar==0) df else rep(2, length(spar)),
+ "lfit" = lfit,
+ "nlfit" = junk.fill,
+ "spar" = if(ispar==1) spar else rep(Inf, length(df)),
+ "w" = as.matrix(collaps$wbar),
+ "x" = sx,
+ "y" = lfit at fitted.values,
+ "yin" = yin)
+
+
+ return(object)
+ }
+
+
+
+ xbar <- (sx - sx[1]) / (sx[nef] - sx[1])
+ noround = TRUE # Improvement 3/8/02
+ if(all.knots) {
+ if(noround) {
+ knot = valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3)))
+ } else {
+ knot <- c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3))
+ }
+ if(length(nk)) warning("all.knots=TRUE overrides the nk argument")
+ nk <- length(knot) - 4 # No longer nef + 2
+ } else {
+ chosen = length(nk)
+ if(chosen && (nk > nef+2 || nk <= 5))
+ stop("bad value for nk")
+ if(!chosen) nk = 0
+ knot.list <- dotFortran(name="vknotl2", as.double(xbar), as.integer(nef),
+ knot=double(nef+6), k=as.integer(nk+4),
+ chosen=as.integer(chosen))
+ if(noround) {
+ knot = valid.vknotl2(knot.list$knot[1:(knot.list$k)])
+ knot.list$k = length(knot)
+ } else {
+ knot <- knot.list$knot[1:(knot.list$k)]
+ }
+ nk <- knot.list$k - 4
+ }
+ if(nk <= 5) stop("not enough distinct knots found")
+
+ conmat <- (constraints[[2]])[,nonlin,drop=FALSE]
+ ncb <- sum(nonlin)
+ trivc <- trivial.constraints(conmat)
+ resmat <- collaps$ybar - lfit@fitted.values # nef by M
+ spar.nl <- spar[nonlin]
+ df.nl <- df[nonlin]
+
+ edimu <- if(trivc) dimw else max(ncb*(ncb+1)/2, dimw) # for wbar's size
+ dimu <- if(trivc) dimw else ncb*(ncb+1)/2
+ o <- 1:nef # Already sorted
+
+ collaps <- dotFortran(name="vsuff9",
+ as.integer(nef), as.integer(nef), as.integer(o),
+ as.double(collaps$xbar), as.double(resmat), as.double(collaps$wbar),
+ xbar=double(nef), ybar=as.double(template1),
+ wbar=double(nef*edimu), uwbar=as.double(0), wz=as.double(template2),
+ M=as.integer(M), dimw=as.integer(dimw), dimu=as.integer(dimu),
+ as.integer(index$row), as.integer(index$col),
+ double(M*(M+1)), double(ncb*(ncb+1)),
+ as.double(conmat), as.integer(ncb),
+ as.integer(trivc), wuwbar=as.integer(0), ok=as.integer(0))
+ if(collaps$ok != 1)
+ stop("some non-positive-definite weight matrices detected in \"vsuff9\"")
+
+ dim(collaps$ybar) <- dim(collaps$wz) <- c(nef, M)
+ collaps$ybar = collaps$ybar[,1:ncb,drop=FALSE]
+ collaps$wz = collaps$wz[,1:ncb,drop=FALSE]
+ dim(collaps$wbar) <- c(nef, edimu)
+
+
+ ldk = 3 * ncb + 1 # 10/7/02; Previously 4 * ncb
+ lev <- if(ncb > 1) matrix(0, nef, ncb) else rep(0, nef)
+ varmat <- if(var.arg) {if(ncb > 1) matrix(0, nef, ncb) else
+ rep(0, nef)} else double(1)
+ index <- iam(NA, NA, ncb, both=TRUE, diagonal=TRUE)
+ dimwbar <- if(trivc) dimw else ncb*(ncb+1)/2
+
+ vsplin <- dotFortran(name="vsplin",
+ xs=as.double(xbar), wz=as.double(collaps$wz),
+ w=as.double(collaps$wbar), n=as.integer(nef),
+ xknot=as.double(knot),
+ nk=as.integer(nk), as.integer(ldk),
+ M=as.integer(ncb), dimw=as.integer(dimwbar),
+ as.integer(index$row), as.integer(index$col),
+ wkmm=double(ncb*ncb*16), spar.nl=as.double(spar.nl),
+ info=integer(1), fv=double(nef*ncb), Bcoef=double(nk*ncb),
+ hs=double(ldk*nk*ncb), btwy=double(ncb*nk),
+ sgdub=double(nk * max(4,ncb)),
+ var=as.double(varmat), ifvar=as.integer(var.arg),
+ bmb=double(ncb*ncb),
+ lev=as.double(lev),
+ as.double(df.nl),
+ scrtch=double(min((17+nk)*nk, nk*17+1)),
+ ier=as.integer(0),
+ truen=as.integer(nef))
+
+
+ if(vsplin$ier != 0) {
+ cat("vsplin$ier ==", vsplin$ier, "\n")
+ stop("something gone wrong in \"vsplin\"")
+ }
+ if(vsplin$info != 0)
+ stop(paste("leading minor of order", vsplin$info,
+ "is not positive definite"))
+
+ dim(vsplin$lev) <- c(nef, ncb) # A matrix even when ncb==1
+ if(ncb > 1) {
+ dim(vsplin$fv) <- c(nef, ncb)
+ if(var.arg)
+ dim(vsplin$var) <- c(nef, ncb)
+ }
+
+ df.nl <- apply(vsplin$lev, 2, sum) # Actual EDF used
+
+
+ fv <- lfit@fitted.values + vsplin$fv %*% t(conmat)
+ if(M > 1)
+ dimnames(fv) <- list(NULL, ny2)
+
+ df[!nonlin] = 2
+ df[ nonlin] = df.nl
+ if(ispar==0) {
+ spar[!nonlin] = Inf
+ spar[ nonlin] = vsplin$spar.nl # Actually used
+ }
+
+ fit.object = new("vsmooth.spline.fit",
+ "Bcoefficients" = matrix(vsplin$Bcoef, nrow=nk, ncol=ncb),
+ "knots" = knot,
+ "xmax" = sx[nef],
+ "xmin" = sx[1])
+
+ object =
+ new("vsmooth.spline",
+ "call" = my.call,
+ "constraints" = constraints,
+ "df" = df,
+ "nlfit" = fit.object,
+ "lev" = vsplin$lev,
+ "lfit" = lfit,
+ "spar" = spar, # if(ispar==1) spar else vsplin$spar,
+ "w" = collaps$wbar,
+ "x" = sx,
+ "y" = fv,
+ "yin" = yin)
+
+ if(var.arg)
+ object@var = vsplin$var
+
+ object
+}
+
+
+printvsmooth.spline <- function(x, ...)
+{
+ if(!is.null(cl <- x@call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ ncb <- if(length(x@nlfit)) ncol(x@nlfit@Bcoefficients) else NULL
+ cat("\nSmoothing Parameter (Spar):",
+ if(length(ncb) && ncb==1) format(x@spar) else
+ paste(format(x@spar), collapse=", "), "\n")
+
+ cat("\nEquivalent Degrees of Freedom (Df):",
+ if(length(ncb) && ncb==1) format(x@df) else
+ paste(format(x@df), collapse=", "), "\n")
+
+ if(!all(trivial.constraints(x@constraints))) {
+ cat("\nConstraint matrices:\n")
+ print(x@constraints)
+ }
+
+ invisible(x)
+}
+
+
+coefvsmooth.spline = function(object, matrix=FALSE, ...) {
+ list(lfit=coef(object@lfit, matrix=matrix), nlfit=coef(object@nlfit))
+}
+
+
+coefvsmooth.spline.fit = function(object, ...) {
+ object@Bcoefficients
+}
+
+
+fittedvsmooth.spline = function(object, ...) {
+ object@y
+}
+
+residvsmooth.spline = function(object, ...) {
+ as.matrix(object@yin - object@y)
+}
+
+
+
+plotvsmooth.spline <- function(x, xlab="x", ylab="", points=TRUE,
+ pcol=par()$col, pcex=par()$cex,
+ pch=par()$pch,
+ lcol=par()$col, lwd=par()$lwd, lty=par()$lty,
+ add=FALSE, ...)
+{
+ M = ncol(x@y)
+ pcol = rep(pcol, length=M)
+ pcex = rep(pcex, length=M)
+ pch = rep(pch, length=M)
+ lcol = rep(lcol, length=M)
+ lwd = rep(lwd, length=M)
+ lty = rep(lty, length=M)
+ if(!add)
+ matplot(x@x, x@yin, type="n", xlab=xlab, ylab=ylab, ...)
+ for(i in 1:ncol(x@y)) {
+ if(points)
+ points(x@x, x@yin[,i], col=pcol[i], pch=pch[i], cex=pcex[i])
+ lines(x@x, x@y[,i], col=lcol[i], lwd=lwd[i], lty=lty[i])
+ }
+ invisible(x)
+}
+
+
+
+predictvsmooth.spline <- function(object, x, deriv=0, se.fit=FALSE)
+{
+ if(se.fit)
+ warning("se.fit=TRUE is not currently implemented. Using se.fit=FALSE")
+
+ lfit <- object@lfit # Linear part of the vector spline
+ nlfit <- object@nlfit # Nonlinear part of the vector spline
+
+ if(missing(x)) {
+ if(deriv==0) {
+ return(list(x=object@x, y=object@y))
+ } else {
+ x <- object@x
+ return(Recall(object, x, deriv))
+ }
+
+ }
+
+ mat.coef = coef(lfit, matrix=TRUE)
+ coeflfit <- t(mat.coef) # M x p now
+ M <- nrow(coeflfit) # if(is.matrix(object@y)) ncol(object@y) else 1
+
+ pred <- if(deriv==0) predict(lfit, data.frame(x=x)) else
+ if(deriv==1) matrix(coeflfit[,2], length(x), M, byrow=TRUE) else
+ matrix(0, length(x), M)
+ if(!length(nlfit@knots))
+ return(list(x=x, y=pred))
+
+ nonlin <- if(is.R()) (object@spar != Inf) else (!is.inf(object@spar))
+
+ conmat <- if(!length(lfit@constraints)) diag(M) else lfit@constraints[[2]]
+ conmat <- conmat[,nonlin,drop=FALSE] # Of nonlinear functions
+
+ list(x=x, y=pred + predict(nlfit, x, deriv)$y %*% t(conmat))
+}
+
+
+predictvsmooth.spline.fit <- function(object, x, deriv=0)
+{
+ nk = nrow(object@Bcoefficients)
+ drangex <- object@xmax - object@xmin
+ if(missing(x))
+ x <- seq(from=object@xmin, to=object@xmax, length=nk-4)
+
+ xs <- as.double((x - object@xmin)/drangex)
+
+ bad.left <- xs < 0
+ bad.right <- xs > 1
+ good <- !(bad.left | bad.right)
+
+ ncb <- ncol(object@Bcoefficients)
+ y <- matrix(as.numeric(NA), length(xs), ncb)
+ if(any(good)) {
+ ngood <- sum(good)
+ junk <- dotFortran(name="vbvs", as.integer(ngood),
+ as.double(object@knots), as.double(object@Bcoefficients),
+ as.integer(nk),
+ as.double(xs[good]), s=double(ngood*ncb),
+ as.integer(deriv), as.integer(ncb))
+ y[good,] <- junk$s
+
+ if(TRUE && deriv > 1) {
+ edges <- xs <= 0 | xs >= 1 # Zero the edges & beyond explicitly
+ y[edges,] <- 0
+ }
+ }
+ if(any(!good)) {
+ xrange <- c(object@xmin, object@xmax)
+ if(deriv == 0) {
+ end.object <- Recall(object, xrange)$y
+ end.slopes <- Recall(object, xrange, 1)$y * drangex
+
+ if(any(bad.left))
+ y[bad.left,] = rep(end.object[1,], rep(sum(bad.left), ncb)) +
+ rep(end.slopes[1,], rep(sum(bad.left), ncb)) *
+ xs[bad.left]
+ if(any(bad.right))
+ y[bad.right,] = rep(end.object[2,], rep(sum(bad.right), ncb)) +
+ rep(end.slopes[2,], rep(sum(bad.right), ncb)) *
+ (xs[bad.right] - 1)
+ } else if(deriv == 1) {
+ end.slopes <- Recall(object, xrange, 1)$y * drangex
+ y[bad.left,] <- rep(end.slopes[1,], rep(sum(bad.left), ncb))
+ y[bad.right,] <- rep(end.slopes[2,], rep(sum(bad.right), ncb))
+ } else
+ y[!good,] <- 0
+ }
+ if(deriv > 0)
+ y <- y / (drangex^deriv)
+ list(x=x, y=y)
+}
+
+
+valid.vknotl2 = function(knot, tol=1/1000) {
+
+ junk = dotFortran(name="pknotl2", knot=as.double(knot), as.integer(length(knot)),
+ keep=integer(length(knot)), as.double(tol))
+ keep = as.logical(junk$keep)
+ knot = junk$knot[keep]
+ if(length(knot) <= 11)
+ stop("too few (distinct) knots")
+ knot
+}
+
+
+
+
+
diff --git a/R/zzz.R b/R/zzz.R
new file mode 100644
index 0000000..616e191
--- /dev/null
+++ b/R/zzz.R
@@ -0,0 +1,12 @@
+# These functions are
+# Copyright (C) 1998-2006 T.W. Yee, University of Auckland. All rights reserved.
+
+
+
+.First.lib <- function(lib, pkg) {
+ library.dynam("VGAM", pkg, lib)
+}
+
+
+
+
diff --git a/data/auuc.R b/data/auuc.R
new file mode 100644
index 0000000..e3d9beb
--- /dev/null
+++ b/data/auuc.R
@@ -0,0 +1,6 @@
+"auuc" <-
+structure(list(Commerce = c(446, 937, 311, 49), Arts = c(895,
+1834, 805, 157), SciEng = c(496, 994, 430, 62), Law = c(170,
+246, 95, 15), Medicine = c(184, 198, 48, 9)), .Names = c("Commerce",
+"Arts", "SciEng", "Law", "Medicine"), row.names = c("SES1", "SES2",
+"SES3", "SES4"), class = "data.frame")
diff --git a/data/bminz.txt b/data/bminz.txt
new file mode 100644
index 0000000..02393e7
--- /dev/null
+++ b/data/bminz.txt
@@ -0,0 +1,701 @@
+ age BMI
+ 31.52966 22.77107
+ 39.38045 27.70033
+ 43.38940 28.18127
+ 34.84894 25.08380
+ 53.81990 26.46388
+ 39.17002 36.19648
+ 33.51872 24.66313
+ 49.03421 28.72118
+ 36.25354 25.48609
+ 40.30528 26.71721
+ 36.73073 21.28543
+ 28.42404 24.92885
+ 21.30212 27.96330
+ 38.91421 29.62630
+ 48.49673 26.33872
+ 58.94552 24.57081
+ 36.48628 27.89536
+ 59.80134 30.14546
+ 64.40486 25.12497
+ 69.17766 29.50876
+ 30.93609 28.29902
+ 55.28165 34.34088
+ 35.41093 29.03532
+ 27.27993 24.20983
+ 64.87958 30.88243
+ 61.69701 24.83798
+ 75.59529 24.47465
+ 36.58378 26.68803
+ 38.07413 33.86913
+ 27.99299 20.92162
+ 51.39876 31.86141
+ 51.96557 28.92970
+ 42.38411 16.62793
+ 60.49754 25.86957
+ 31.72362 24.72202
+ 30.27677 25.96939
+ 40.04436 18.04338
+ 33.18791 27.45061
+ 25.82706 25.45475
+ 39.73441 22.80517
+ 71.00441 25.52747
+ 46.93339 26.08018
+ 33.38008 25.28969
+ 61.38006 30.69433
+ 30.88379 39.06963
+ 26.57036 21.73781
+ 85.05665 20.97085
+ 22.01522 27.17437
+ 21.07371 22.70368
+ 46.28684 23.09759
+ 52.38596 16.52431
+ 39.86237 30.39876
+ 18.31478 22.30990
+ 44.58452 27.62018
+ 37.50505 21.84954
+ 33.55295 30.01736
+ 48.58470 27.00412
+ 35.02938 33.94024
+ 56.74282 26.12108
+ 27.01597 35.78276
+ 42.12292 21.45356
+ 53.47645 29.93081
+ 73.57362 28.80390
+ 33.07897 29.89490
+ 27.71434 23.48019
+ 26.56032 19.46313
+ 56.88176 31.75842
+ 40.04743 29.23342
+ 43.46757 27.66600
+ 48.40347 45.84425
+ 51.94451 34.52123
+ 22.81941 25.12492
+ 47.31553 29.41774
+ 56.90261 32.57713
+ 55.64545 21.59294
+ 32.71280 29.03342
+ 39.61485 31.07253
+ 34.29674 31.27179
+ 75.03272 23.26198
+ 42.52394 26.06924
+ 63.50737 26.19967
+ 41.45791 26.15889
+ 26.48068 23.28205
+ 29.66578 23.76932
+ 55.39052 21.38899
+ 33.93638 21.50787
+ 41.16556 29.31633
+ 55.28719 26.41008
+ 48.96903 21.81375
+ 39.43987 25.20258
+ 54.69976 31.80343
+ 61.81926 23.78463
+ 29.83802 31.30212
+ 36.10928 27.78490
+ 38.88342 26.29163
+ 21.69252 24.09296
+ 40.65945 24.49848
+ 45.41919 24.61197
+ 51.04320 23.25319
+ 51.76804 31.14619
+ 50.07597 26.15693
+ 31.72125 23.02196
+ 42.54576 24.56047
+ 33.84730 24.49910
+ 43.16923 30.89444
+ 32.48236 30.59186
+ 27.07331 30.24490
+ 24.40670 23.41133
+ 38.33903 27.60308
+ 40.66055 23.13918
+ 33.89193 18.73414
+ 28.86946 17.23473
+ 54.29757 29.46142
+ 49.89804 24.83382
+ 24.50766 29.16544
+ 29.16440 34.56097
+ 60.04799 28.16589
+ 30.89256 19.40223
+ 63.13626 21.74822
+ 54.85329 25.26185
+ 78.96306 20.95348
+ 78.01710 21.24361
+ 50.47371 30.81535
+ 34.08666 24.19961
+ 50.69744 56.39979
+ 25.29732 28.08855
+ 64.83965 25.95777
+ 21.06325 25.73138
+ 49.60311 29.66317
+ 61.46347 35.88379
+ 32.10376 25.98025
+ 30.43664 23.82905
+ 33.89039 22.33076
+ 27.23587 24.80311
+ 65.21659 34.55600
+ 24.94063 26.89007
+ 57.41561 22.96084
+ 43.18235 23.51734
+ 23.45612 26.43305
+ 67.44293 27.23271
+ 75.21263 29.53429
+ 36.35890 23.16634
+ 41.13786 24.64723
+ 46.11416 24.32160
+ 78.32855 22.34879
+ 56.22332 22.20308
+ 40.92859 27.41183
+ 74.37957 24.87848
+ 82.94280 26.32849
+ 38.23325 28.22564
+ 28.50112 21.52640
+ 83.50588 22.53243
+ 47.39850 21.92371
+ 63.18745 19.29737
+ 30.83350 21.58011
+ 47.27177 25.67939
+ 76.68825 23.46654
+ 35.72061 23.60277
+ 35.58604 29.63724
+ 49.17131 29.98912
+ 39.02782 25.72651
+ 30.67334 32.80155
+ 45.51781 30.89201
+ 70.39287 32.74232
+ 41.74017 20.21025
+ 33.55806 23.12314
+ 21.55016 26.18381
+ 40.72792 19.64126
+ 47.01347 28.57987
+ 37.42738 20.35258
+ 47.91820 27.95266
+ 38.70494 26.32271
+ 26.58318 38.73875
+ 27.40259 22.45829
+ 32.12200 27.23851
+ 55.61718 32.19144
+ 29.10814 28.72264
+ 43.45972 28.04374
+ 52.66468 29.56594
+ 36.05022 29.32213
+ 53.83558 25.49865
+ 30.80845 27.30202
+ 29.33553 26.27864
+ 45.00041 24.30341
+ 36.77749 32.27165
+ 37.03259 22.24966
+ 44.36336 32.04588
+ 50.15041 30.48021
+ 39.19250 24.14858
+ 23.73404 34.87301
+ 34.33628 22.13615
+ 82.02401 22.55500
+ 25.48160 27.38973
+ 45.76363 26.61516
+ 70.17821 30.39693
+ 43.11517 37.33708
+ 54.43810 33.54397
+ 25.44707 22.96111
+ 49.46259 26.52552
+ 30.88428 23.43050
+ 21.14543 31.28093
+ 56.39559 25.97359
+ 42.53512 30.54994
+ 40.25480 24.17316
+ 35.22250 28.13546
+ 37.70700 19.94863
+ 49.89107 23.43994
+ 43.42424 23.60671
+ 78.25495 21.56012
+ 47.47255 32.80983
+ 44.90691 25.88567
+ 27.52709 22.13972
+ 33.91070 20.40207
+ 43.99000 40.05864
+ 43.31449 32.97531
+ 61.74588 22.26506
+ 33.18188 25.19256
+ 52.27435 25.14232
+ 68.01345 28.86970
+ 34.15872 28.30390
+ 69.57542 34.58239
+ 64.99171 25.72063
+ 41.98288 22.84582
+ 48.89752 26.64060
+ 24.79011 25.06266
+ 42.36029 19.08029
+ 24.81843 34.94792
+ 32.34701 25.34357
+ 45.00538 31.18279
+ 36.49979 25.49393
+ 42.63332 21.16398
+ 33.69105 25.26029
+ 44.72925 32.50419
+ 65.81059 33.69888
+ 42.65160 37.67878
+ 45.18119 34.08004
+ 49.68979 28.13514
+ 56.20241 23.94545
+ 31.18923 28.24754
+ 67.69225 29.79062
+ 23.55535 22.29876
+ 55.67679 24.64444
+ 28.55048 25.49393
+ 24.14786 27.70837
+ 42.47740 27.26170
+ 63.14416 25.29962
+ 56.01077 22.61658
+ 21.91515 31.32883
+ 35.40591 23.29598
+ 56.46014 25.85095
+ 40.55150 27.36237
+ 21.71111 22.54526
+ 29.94185 28.40865
+ 76.55133 19.81970
+ 36.24921 27.05104
+ 55.43160 26.53046
+ 48.75538 26.98030
+ 45.45938 23.49878
+ 41.57688 25.98898
+ 55.17125 22.93940
+ 35.41448 23.14167
+ 36.88115 20.82436
+ 44.45453 36.05414
+ 57.20137 23.08461
+ 52.59177 29.45599
+ 54.08459 25.04709
+ 48.51428 28.72275
+ 22.17651 31.58727
+ 43.56249 29.60719
+ 28.56742 23.47788
+ 45.39733 30.82735
+ 40.11140 22.49939
+ 46.44631 28.96823
+ 36.06642 29.92212
+ 30.29033 25.71443
+ 35.92196 29.62183
+ 53.06952 25.07546
+ 65.67793 26.47240
+ 43.91989 32.63428
+ 63.15732 35.89691
+ 70.78002 18.89236
+ 58.07750 24.11372
+ 28.82403 20.88626
+ 43.95267 30.60240
+ 47.15775 24.78813
+ 72.76805 20.53834
+ 29.88184 23.29847
+ 56.31945 23.18515
+ 63.92690 25.37558
+ 55.45586 24.44225
+ 39.20491 25.10269
+ 46.02750 25.37425
+ 31.20204 24.44160
+ 72.42475 21.27856
+ 73.93593 23.67095
+ 25.69724 27.88811
+ 46.24666 22.03448
+ 19.75632 21.43243
+ 32.90972 31.65328
+ 24.29984 22.55361
+ 21.37466 25.48046
+ 53.08102 27.19554
+ 51.71619 25.99624
+ 82.10258 25.30308
+ 27.55140 24.97988
+ 35.28401 25.80495
+ 28.78350 27.10437
+ 37.79290 25.85233
+ 44.81091 25.55065
+ 28.36307 23.45976
+ 30.81308 26.38828
+ 23.84566 30.51769
+ 58.02867 25.40111
+ 44.22382 27.63402
+ 36.73311 22.78040
+ 34.97739 30.09005
+ 26.56001 23.18376
+ 31.00518 29.06396
+ 39.71215 30.28992
+ 56.62149 30.28237
+ 44.29298 27.86364
+ 62.19078 32.15184
+ 75.69354 35.97990
+ 29.74094 26.00676
+ 33.64277 23.23515
+ 26.15642 20.54699
+ 53.29250 25.79836
+ 45.65686 31.53945
+ 47.02932 36.10523
+ 49.17053 22.61946
+ 41.01243 28.52778
+ 38.08636 22.67711
+ 40.50317 21.48441
+ 81.45242 24.94650
+ 46.04637 27.76951
+ 35.20837 24.80653
+ 37.40834 26.30482
+ 55.68801 28.13275
+ 57.02114 29.16809
+ 60.70621 27.45068
+ 29.02393 26.97608
+ 53.47975 27.72291
+ 23.00122 25.23883
+ 63.57340 24.03611
+ 63.13391 36.37257
+ 64.77770 26.08616
+ 49.63202 19.60818
+ 31.80328 19.49399
+ 27.65714 28.24291
+ 45.58329 30.81453
+ 43.69298 36.42976
+ 27.07073 31.60704
+ 35.38172 31.52190
+ 34.04135 32.69671
+ 30.47870 19.75769
+ 44.71175 26.30368
+ 25.90396 22.77236
+ 55.62400 32.48291
+ 23.64825 19.91489
+ 64.69763 23.75995
+ 24.10224 27.65047
+ 43.93715 19.09906
+ 20.92866 21.14471
+ 56.35803 27.45068
+ 32.83191 19.92130
+ 33.57491 18.97285
+ 51.39979 26.50756
+ 35.91443 23.68267
+ 29.08776 27.67255
+ 53.48770 21.77455
+ 32.28868 23.31358
+ 59.62776 41.14084
+ 66.35298 20.53565
+ 71.17516 22.59524
+ 68.95668 31.76114
+ 50.61098 29.39348
+ 32.47020 23.20174
+ 40.37709 29.04672
+ 60.77362 27.25457
+ 51.87677 26.17666
+ 27.04370 29.13125
+ 61.55968 28.71037
+ 45.57977 22.95518
+ 44.02950 26.16537
+ 34.40152 22.05288
+ 39.96531 27.67567
+ 55.84456 29.38718
+ 35.01607 26.01437
+ 63.91591 26.82824
+ 79.57355 26.85985
+ 36.26843 30.29892
+ 59.31089 26.01033
+ 42.28652 22.00992
+ 35.05846 27.89526
+ 31.18635 29.99768
+ 58.42965 26.19802
+ 54.03744 26.45203
+ 18.98587 20.37107
+ 43.15795 21.47711
+ 31.91582 22.40004
+ 41.65393 23.79026
+ 55.34468 31.67017
+ 30.83594 24.21585
+ 23.11364 31.00124
+ 40.35013 29.51555
+ 49.77633 34.24107
+ 53.08469 22.44848
+ 43.72293 28.27124
+ 40.15939 30.68685
+ 27.25035 23.85673
+ 58.15061 25.51259
+ 31.73202 22.37813
+ 38.23582 26.95630
+ 70.46463 27.12393
+ 78.82369 27.95337
+ 34.13841 22.02694
+ 23.57778 22.65422
+ 44.21422 28.39160
+ 66.83015 21.58463
+ 43.64449 30.13751
+ 48.88550 26.13202
+ 52.33360 28.30391
+ 45.18157 41.89273
+ 78.26361 27.24427
+ 29.88389 26.63379
+ 30.94319 23.77762
+ 56.51968 33.27705
+ 39.94894 22.21688
+ 31.38151 20.92965
+ 39.15090 23.80694
+ 36.75032 26.18486
+ 30.35745 25.90498
+ 45.89886 25.79803
+ 22.97356 26.67575
+ 36.64776 30.95431
+ 37.43784 36.03451
+ 37.88910 20.43014
+ 37.80332 25.32069
+ 35.87383 36.55644
+ 23.28766 23.83804
+ 41.42498 28.88880
+ 58.98015 24.26709
+ 24.99242 24.45415
+ 45.78202 32.32184
+ 45.28900 33.67970
+ 24.19064 23.73958
+ 65.40365 23.17002
+ 26.47503 31.19049
+ 37.23060 23.69885
+ 40.83703 31.78726
+ 21.67654 22.91531
+ 56.20328 30.89455
+ 73.45188 32.94969
+ 41.50183 29.52258
+ 24.15896 27.02866
+ 42.72720 22.64554
+ 31.43625 22.84349
+ 39.86141 24.17527
+ 55.18267 25.03010
+ 55.07938 23.84948
+ 73.46461 19.68267
+ 49.08313 35.69100
+ 48.68449 26.75326
+ 54.58864 25.86462
+ 32.95040 36.44769
+ 57.21915 28.30972
+ 41.24059 23.85056
+ 28.98524 23.28421
+ 41.50114 33.73705
+ 43.35912 25.30764
+ 22.55027 29.36313
+ 27.63637 24.18665
+ 24.86806 26.15232
+ 23.34930 22.47438
+ 46.52759 25.73626
+ 38.98288 29.70823
+ 32.32178 26.39903
+ 56.43930 29.58057
+ 52.63055 28.59816
+ 62.44314 29.22467
+ 60.73191 25.47479
+ 52.86880 38.37063
+ 37.45048 21.23562
+ 45.62444 29.66516
+ 41.21803 26.75888
+ 29.48350 20.45625
+ 46.45759 26.57750
+ 40.22980 23.46263
+ 35.34829 22.35871
+ 30.86723 22.61507
+ 46.32276 23.27721
+ 54.15260 25.16281
+ 53.32966 19.11416
+ 30.43213 33.47223
+ 21.74999 23.10042
+ 51.50626 22.92780
+ 40.49315 22.36288
+ 37.15326 27.89680
+ 39.19288 26.19343
+ 60.54843 25.33996
+ 40.67981 24.81465
+ 40.22503 23.39227
+ 25.43711 25.12216
+ 55.47331 31.34918
+ 59.09021 26.29375
+ 34.10491 27.36621
+ 34.37518 28.92030
+ 42.31397 30.55879
+ 39.98724 31.80141
+ 49.89564 26.79459
+ 26.59876 19.45114
+ 39.04950 25.85431
+ 54.55610 25.57969
+ 52.98590 26.10389
+ 28.26060 25.42799
+ 29.60006 26.94916
+ 22.08088 27.80820
+ 51.24027 26.79131
+ 42.96648 35.57131
+ 22.15670 18.28610
+ 27.46471 45.34389
+ 54.31407 33.08023
+ 33.02283 27.22304
+ 51.14024 24.84439
+ 31.63079 33.92747
+ 26.24003 58.46019
+ 42.09502 26.09479
+ 27.66505 22.52645
+ 52.68118 27.07099
+ 42.37960 24.85478
+ 37.54313 24.87599
+ 43.01623 30.52789
+ 37.91853 26.33468
+ 44.17362 28.55548
+ 45.33204 30.91687
+ 52.56293 28.79118
+ 37.21896 23.54102
+ 28.09818 23.80765
+ 60.81037 28.01500
+ 81.05304 19.05995
+ 69.97036 20.01497
+ 41.15605 34.93174
+ 68.36251 23.10081
+ 39.26193 22.11617
+ 29.69093 24.50141
+ 40.52842 25.50279
+ 30.91302 27.96096
+ 37.80699 29.54781
+ 67.56905 31.13175
+ 53.25386 33.45877
+ 38.15878 31.60473
+ 23.46046 28.95935
+ 27.77209 23.25381
+ 30.92026 24.63706
+ 36.23157 20.23759
+ 37.07129 21.28394
+ 34.07756 26.81417
+ 29.61593 28.31304
+ 32.72098 27.89622
+ 61.11371 25.24639
+ 50.54263 20.37285
+ 41.56782 28.46119
+ 26.11217 24.27699
+ 35.25266 24.89300
+ 33.40608 25.25724
+ 79.66995 15.21701
+ 57.13734 24.56645
+ 52.95081 31.49002
+ 51.46670 30.03681
+ 39.37912 29.47748
+ 71.60168 29.50156
+ 59.12662 28.23213
+ 50.87476 24.65928
+ 52.67663 31.46514
+ 23.06647 25.85981
+ 33.53777 20.45197
+ 40.63812 33.96489
+ 42.77290 22.80973
+ 49.28024 25.32790
+ 33.26269 28.23219
+ 29.74569 30.54155
+ 47.34192 27.70290
+ 80.76579 25.70749
+ 62.95430 24.10388
+ 31.36708 23.52038
+ 23.39756 22.41462
+ 41.58252 25.48043
+ 36.54170 33.87073
+ 40.00033 16.96053
+ 58.10562 25.77635
+ 52.11137 26.98397
+ 56.91456 27.24343
+ 47.97594 28.69994
+ 33.09406 27.44111
+ 49.22106 26.35344
+ 45.73891 25.21268
+ 37.13081 21.33042
+ 52.90884 24.07418
+ 42.49667 26.30556
+ 60.81374 32.04345
+ 25.65456 21.18742
+ 62.75386 31.47147
+ 47.73567 33.96991
+ 32.60075 34.94423
+ 56.99292 27.52372
+ 48.79374 29.38628
+ 40.19805 25.35993
+ 78.38822 29.46423
+ 37.27869 25.09015
+ 57.47466 30.70793
+ 35.42958 28.55212
+ 44.49937 25.45300
+ 36.25600 28.19482
+ 61.01008 35.87104
+ 64.61303 33.28117
+ 49.91383 25.10920
+ 29.35288 24.17736
+ 45.67369 25.12368
+ 31.40035 21.31946
+ 49.13794 26.45360
+ 43.83834 27.40322
+ 64.53794 24.01352
+ 68.43014 32.70878
+ 40.98285 20.55172
+ 42.71057 24.38935
+ 24.84875 26.15615
+ 46.89653 24.86133
+ 44.47789 24.82950
+ 47.70465 28.62602
+ 53.46058 29.85526
+ 47.47699 23.94518
+ 32.64402 27.20224
+ 20.38963 22.70149
+ 68.86426 19.81621
+ 35.72066 24.47809
+ 59.60039 31.69577
+ 52.16699 26.02872
+ 53.30722 23.44785
+ 58.28852 30.03706
+ 20.97854 29.74651
+ 36.47624 33.19249
+ 26.74878 27.57238
+ 41.44547 26.32236
+ 29.58364 25.77652
+ 40.52393 22.71014
+ 34.86628 28.60414
+ 72.82124 24.59530
+ 36.17020 31.35266
+ 44.98027 23.51846
+ 39.92309 28.36976
+ 26.39613 20.89233
+ 37.86704 23.31549
+ 29.20308 23.49578
+ 45.15887 35.57963
+ 36.92703 21.83052
+ 56.34293 20.55390
+ 44.92657 24.09017
+ 37.51007 27.97622
+ 40.08890 26.53310
+ 66.48486 26.84477
+ 76.36312 19.12098
+ 41.76863 28.49602
+ 43.54191 22.47352
+ 58.53230 23.65549
+ 61.26342 34.03914
+ 40.67457 29.59000
+ 28.68782 24.00571
+ 59.07816 26.37625
+ 48.43925 27.06631
+ 24.38647 18.88834
+ 54.42330 25.25450
+ 40.27098 25.82104
+ 45.86077 35.17492
+ 24.99284 20.21561
+ 35.13303 28.55774
+ 41.87684 26.36686
+ 43.63938 30.21256
+ 26.41563 26.25695
+ 25.16920 26.09072
+ 37.39686 33.83508
+ 40.32570 23.81549
+ 57.35052 23.73345
+ 26.84973 24.46207
+ 49.35000 28.47002
+ 36.06802 25.09165
+ 75.92164 26.05036
+ 22.48510 23.24938
+ 35.45687 23.38356
+ 28.06145 30.99933
+ 26.25515 24.44475
+ 35.64301 25.39837
+ 26.64653 21.23040
+ 45.37849 35.30793
+ 37.51152 22.75663
+ 51.91544 26.70491
+ 46.87006 24.36511
+ 72.80836 31.83121
+ 31.13543 22.93118
+ 37.74808 28.81083
+ 36.06805 27.00199
diff --git a/data/car.all.R b/data/car.all.R
new file mode 100644
index 0000000..d1f580c
--- /dev/null
+++ b/data/car.all.R
@@ -0,0 +1,362 @@
+"car.all"<-
+structure(.Data = list(Length = c(177, 191, 193, 176, 175, 186, 189, 197, 197,
+ 192, 198, 221, 206, 191, 191, 177, 187, 192, 212, 179, 183, 177, 198,
+ 194, 203, 183, 185, 176, 159, 179, 192, 191, 163, 172, 181, 193, 170,
+ 175, 169, 141, 211, 180, 177, 188, 177, 199, 150, 171, 163, 185, 169,
+ 149, 178, 168, 184, 200, 197, 205, 203, 220, 164, 179, 194, 176, 155,
+ 177, 172, 170, 175, 187, 162, 184, 161, 186, 175, 178, 170, 172, 188,
+ 167, 172, 180, 178, 175, 181, 171, 199, 180, 172, 169, 184, 188, 189,
+ 146, 178, 175, 178, 182, 174, 170, 190, 182, 167, 159, 163, 158, 158,
+ 172, 180, 190, 188), Wheel.base = c(102, 109, 106, 100, 101, 109, 105,
+ 111, 111, 108, 108, 122, 114, 108, 108, 111, 103, 101, 116, 101, 103,
+ 96, 108, 110, 109, 103, 100, 112, 94, 97, 104, 119, 99, 97, 103, 106,
+ 97, 119, 94, 90, 114, 101, 99, 106, 100, 113, 93, 96, 97, 107, 98, 91,
+ 101, 94, 104, 113, 111, 109, 109, 117, 97, 101, 107, 110, 89, 99, 98,
+ 96, 105, 110, 95, 102, 94, 102, 88, 97, 97, 103, 104, 96, 96, 100, 93,
+ 105, 108, 97, 111, 103, 99, 95, 99, 105, 109, 90, 102, 97, 97, 102, 99,
+ 96, 106, 102, 94, 97, 93, 97, 97, 97, 97, 104, 109), Width = c(67, 69,
+ 71, 67, 65, 69, 69, 72, 72, 71, 73, 77, 73, 72, 72, 77, 68, 73, 75, 66,
+ 68, 71, 71, 74, 69, 68, 69, 72, 66, 69, 69, 72, 67, 67, 67, 70, 66, 72,
+ 66, 63, 78, 68, 68, 71, 68, 73, 63, 65, 67, 68, 67, 66, 67, 63, 69, 72,
+ 72, 73, 71, 78, 66, 67, 68, 72, 66, 67, 66, 67, 67, 69, 65, 67, 63, 67,
+ 67, 67, 71, 67, 69, 66, 65, 67, 67, 68, 68, 67, 72, 67, 66, 68, 67, 69,
+ 68, 60, 67, 65, 67, 67, 69, 65, 67, 69, 64, 66, 63, 66, 66, 66, 73, 67,
+ 69), Height = c(47.5, 50, 51.5, 50.5, 49.5, 51, 49.5, 50.5, 51, 50.5,
+ 48.5, 52.5, 51, 49, 49, 68.5, 49, 46.5, 52.5, 50, 51, 43.5, 51, 60, 51,
+ 51, 49, 60.5, 50, 47, 51, 61, 51, 51, 51, 51, 49.5, 66.5, 51.5, 52, 52,
+ 49, 47, 50.5, 49.5, 49, 48.5, 49.5, 46.5, 49.5, 48, 45, 47, 50, 50.5,
+ 50.5, 50.5, 50.5, 49, 53, 49.5, 49.5, 51, 61.5, 43.5, 48.5, 49, 45.5,
+ 50.5, 52, 49, 50.5, 50, 49.5, 65.5, 46.5, 45, 58, 49.5, 46, 49, 49.5,
+ 65.5, 51, 52, 47, 50.5, 48.5, 50, 46, 50.5, 52, 50, 50.5, 49, 49, 46,
+ 50.5, 47, 49.5, 50.5, 46.5, 49.5, 47, 49.5, 50.5, 51, 51, 70.5, 52.5,
+ 50.5), Front.Hd. = c(3.5, 2, 2.5, 4, 2, 3, 4, 6, 5, 5.5, 3.5, 5, 6, 3,
+ 3, 5, 3.5, 3.5, 5, 4, 4, 2, 4.5, 4.5, 4, 4, 3.5, 4.5, 3, 3, 4, 4, 3, 4,
+ 3.5, 3.5, 4.5, 5, 3.5, 4, 3.5, 3.5, 3, 4, 3.5, 3.5, 2.5, 3.5, 3, 4.5,
+ 3.5, 1.5, 1.5, 2.5, 3.5, 2.5, 3, 4, 3, 4.5, 4, 3.5, 3, 5, 3.5, 2, 4,
+ 1.5, 2, 3, 3.5, 4, 4, 1.5, 4, 3, 3, 4, 2, 4, 3, 3.5, 3.5, 4, 2.5, 3.5,
+ 4, 3.5, 5, 3.5, 3.5, 2.5, 2, 2.5, 2, 2.5, 2, 3, 2, 4, 2, 2.5, 3, 2, 3,
+ 3.5, 3, 3.5, 5, 3.5, 2.5), Rear.Hd = c(1.5, 2, 3, 1, 1, 2.5, 2.5, 4.5,
+ 3.5, 3.5, 2, 3.5, 4, 2, 2, 4.5, 0.5, 0.5, 2.5, 3, 3, -2, 3.5, 4, 1.5,
+ 2.5, 0.5, 4.5, 2, 0, 1.5, 4, 0.5, 2.5, 3, 2, 2, 3.5, 1, 3, 2, 0.5, 0, 2,
+ 1.5, 0.5, 1.5, 1, 0, 2, 2.5, -2, 0, 1.5, 2.5, 1.5, 2, 3, 0.5, 3, 2.5, 2,
+ 2.5, 5.5, -2, 2, 2.5, -2, 0.5, 1.5, 2, 2.5, 3, 1.5, 4.5, 0, -2, 4.5,
+ 1.5, 0, 2, 2, 5, 1.5, 1, 0, 4, 1, 2.5, 0, 2, 2.5, 1.5, 0.5, 2, 2, 0, 2,
+ 0, 2, 1.5, 0, 1.5, 0, 2, 2, 2, 2.5, 9, 2, 3), Frt.Leg.Room = c(41.5,
+ 41.5, 41.5, 42, 42, 42, 42, 42, 41, 41, 41.5, 43, 42, 42, 42, 42, 42,
+ 41.5, 41, 41, 41.5, 42, 42, 41, 42, 41, 41.5, 41.5, 40.5, 41, 41, 40.5,
+ 41, 41.5, 41, 42, 41, 42, 41, 40.5, 41.5, 41, 41.5, 42, 41, 41.5, 41,
+ 40.5, 41.5, 42, 41, 42.5, 42.5, 41, 41, 41.5, 43, 41, 42, 41, 41.5,
+ 40.5, 41.5, 41, 40.5, 41.5, 40.5, 42, 44, 42.5, 40.5, 41, 40, 40, 40.5,
+ 41, 42, 40.5, 41.5, 41, 40, 40.5, 41, 41, 42, 41.5, 42, 41.5, 41.5,
+ 43.5, 41.5, 42, 42, 40.5, 41, 41, 42, 42, 42, 40.5, 41, 42.5, 40.5, 42,
+ 42, 41.5, 41, 41, 39, 41.5, 42), Rear.Seating = c(26.5, 28.5, 31, 28,
+ 25.5, 27, 28, 30.5, 28.5, 27.5, 26.5, 32, 30, 28, 29, 28, 27.5, 24, 28,
+ 26.5, 26.5, -2, 30, 32.5, 31.5, 30.5, 26, 27, 24.5, 22.5, 31, 31, 26,
+ 25, 30.5, 30, 27.5, 30, 26.5, 26, 29, 24, 23.5, 29, 27.5, 28, 27.5,
+ 25.5, 24.5, 27, 27.5, -2, 23.5, 26, 31, 29.5, 29, 29.5, 28.5, 31.5, 27,
+ 27, 29, 27.5, -2, 26, 26.5, -2, 26, 27, 27, 29.5, 26.5, 28, -1, 21.5,
+ -2, 28.5, 29, 21, 24.5, 26.5, -1, 29, 29.5, 23, 29.5, 26.5, 25.5, 18,
+ 27.5, 31, 29.5, 23.5, 27.5, 27.5, 22.5, 28.5, 23, 26, 27.5, 23, 25, 26,
+ 26, 27, 27, 28, 37, 29.5, 29.5), Frt.Shld = c(53, 55.5, 56.5, 52.5, 52,
+ 54.5, 56.5, 58.5, 59, 58, 58, 60.5, 59.5, 58, 57.5, 61.5, 55.5, 58, 61,
+ 53.5, 54.5, 54, 58.5, 60.5, 56, 54, 56, 58.5, 54, 56, 56, 58, 51.5,
+ 54.5, 54, 57.5, 53.5, 60, 51.5, 52, 61.5, 55.5, 54.5, 57.5, 53, 59,
+ 50.5, 53, 51, 54.5, 53.5, 53, 52.5, 52, 57, 57.5, 57, 57.5, 56, 62, 52,
+ 55, 55.5, 57.5, 50.5, 55, 54, 53, 53.5, 56, 52, 55, 52, 53.5, 56.5,
+ 53.5, 56.5, 55.5, 56.5, 52, 52.5, 55, 57, 55.5, 55.5, 54, 59, 55, 53.5,
+ 55.5, 53, 57, 55, 51, 54, 53.5, 53, 54.5, 51.5, 53, 55, 52.5, 53, 53.5,
+ 51.5, 53, 53, 53, 61.5, 54, 56.5), RearShld = c(52, 55.5, 55, 52, 51.5,
+ 55.5, 56, 58.5, 58.5, 56.5, 56.5, 60, 58.5, 57, 56.5, 50.5, 55, 55.5,
+ 61, 53.5, 55, -2, 56.5, 62, 55.5, 53.5, 55, 41.5, 52.5, 54, 55.5, 60.5,
+ 51, 53.5, 54.5, 56.5, 52.5, 62.5, 51.5, 51, 61.5, 54.5, 52, 57.5, 54,
+ 59.5, 50, 52.5, 47.5, 54, 53, -2, 50.5, 51.5, 56.5, 57, 56, 57.5, 57.5,
+ 62, 52, 55, 55.5, 43, -2, 52.5, 54, -2, 53, 56, 52, 55, 52.5, 53, 59,
+ 50.5, -2, 56.5, 55.5, 49.5, 52.5, 54, 58.5, 55, 54.5, 52, 59, 54, 53,
+ 51.5, 53, 57, 54, 50.5, 53.5, 53.5, 51, 53.5, 48, 52.5, 54.5, 50.5,
+ 52.5, 51, 51, 54, 53, 53.5, 39.5, 53, 56.5), Luggage = c(16, 14, 17, 10,
+ 12, 12, 16, 16, 16, 15, 14, 19, 16, 14, 14, -2, 14, 12, 20, 14, 14, -2,
+ 15, -2, 17, 14, 14, -2, 12, 17, 16, -2, 15, 13, 14, 17, 10, -2, 18, 12,
+ 22, 12, 12, 17, 13, 15, 10, 11, 11, 14, 12, -2, 11, 11, 14, 15, 14, 19,
+ 14, 22, 13, 15, 15, -2, -2, 15, 13, -2, 12, 15, 15, 11, 11, 12, -2, 14,
+ -2, -3, 14, 7, 12, 14, -2, 14, 11, 10, 15, 13, 18, 12, 14, 18, 12, 9, 14,
+ 15, 10, 12, 13, 11, 12, 11, 13, 19, 10, 18, 18, 17, -2, 14, 17), Weight
+ = c(2700, 3265, 2935, 2670, 2895, 3640, 2880, 3350, 3325, 3320, 3465,
+ 4285, 3545, 3480, NA, 4025, 2655, 3110, 3855, 2485, NA, 3280, 3195,
+ 3630, 3570, 2975, 2975, 3385, 2270, 2935, 3080, 3735, 2300, 2670, 2940,
+ 3145, 2560, 3665, 2345, 1845, 3850, 2850, 2695, 3015, 2750, 3610, 1695,
+ 2255, 2455, 2920, 2260, 2170, 2710, 2345, 2885, 4000, 3930, 3695, 3780,
+ 4040, NA, 2780, 3480, 3735, 2210, 2690, 2440, 2790, 3020, 3315, 2285,
+ 2745, 2185, 3110, 3415, 2775, 3350, 3185, 3200, 2390, 2275, 2920, 3690,
+ 2575, 3000, 2840, 3360, 2770, 2350, 2900, 2775, 3065, 3295, 1900, 2935,
+ 2295, 2485, 2920, 2885, 2390, 3480, 3470, 2075, 2680, 2190, 2270, 2215,
+ 2330, 3460, 2985, 3140), Tires = structure(.Data = c(16, 20, 20, 8, 17,
+ 28, 13, 23, 23, 22, 22, 29, 22, 22, NA, 23, 18, 25, 23, 14, NA, 30, 19,
+ 22, 19, 18, 18, 19, 5, 20, 19, 22, 7, 12, 18, 20, 16, 26, 8, 1, 26, 19,
+ 12, 22, 12, 22, 2, 8, 10, 12, 8, 10, 16, 5, 12, 25, 21, 22, 26, 26, NA,
+ 12, 17, 22, 10, 12, 8, 20, 11, 17, 8, 12, 5, 17, 23, 16, 27, 18, 21, 12,
+ 8, 17, 19, 11, 11, 12, 25, 18, 8, 17, 11, 17, 16, 6, 8, 8, 12, 12, 24,
+ 4, 17, 27, 3, 15, 8, 10, 8, 8, 9, 12, 11), .Label = c("145", "145/80",
+ "155", "155/65", "155/80", "165/65", "165/80", "175/70", "185",
+ "185/60", "185/65", "185/70", "185/75", "185/80", "195/50", "195/60",
+ "195/65", "195/70", "195/75", "205/60", "205/65", "205/70", "205/75",
+ "215/50", "215/65", "215/70", "225/50", "225/60", "225/75", "275/40"),
+ class = "factor"), Steering = structure(.Data = c(1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 1, 1, 16, NA, 1, 1, 1, 1, 1, 1, 1, 14, 1,
+ 1, 1, 20, 1, 1, 1, 14, 1, 10, 4, 1, 1, 1, 1, 1, 1, 3, 17, 1, 1, 12, 6,
+ 1, 15, 1, 1, 1, 1, 1, 1, NA, 1, 1, 1, 2, 1, 5, 9, 1, 1, 6, 1, 15, 1, 13,
+ 1, 1, 1, 1, 1, 12, 1, 1, 1, 1, 18, 1, 1, 19, 1, 1, 1, 1, 11, 1, 1, 1, 1,
+ 1, 20, 1, 1, 8, 1, 7, 1, 12, 12, 1, 1, 1), .Label = c("", "0.68 man.",
+ "0.72 man.", "0.78 man.", "0.80 man.", "0.81 man.", "0.82 man.",
+ "0.83 man.", "0.84 man.", "0.85 man.", "0.86 man.", "0.87 man.",
+ "0.89 man.", "0.90 man.", "0.93 man.", "0.95 man.", "1.00 man.",
+ "1.01 man.", "1.05 man.", "1.06 man."), class = "factor"), Turning = c(
+ 37, 42, 39, 35, 35, 39, 41, 43, 42, 41, 41, 44, 43, 42, NA, 42, 38, 41,
+ 42, 38, NA, 42, 42, 42, 43, 39, 39, 42, 32, 38, 42, 47, 40, 38, 41, 39,
+ 36, 42, 37, 33, 45, 40, 38, 42, 39, 38, 34, 35, 37, 41, 34, 32, 34, 35,
+ 41, 42, 40, 42, 43, 45, NA, 39, 39, 39, 33, 36, 36, 37, 34, 37, 36, 39,
+ 37, 41, 36, 38, 38, 38, 42, 38, 37, 39, 38, 37, 39, 39, 43, 39, 35, 36,
+ 37, 40, 42, 32, 37, 35, 36, 39, 39, 36, 36, 40, 35, 34, 34, 35, 35, 35,
+ 37, 37, 37), Disp. = c(112, 163, 141, 121, 152, 209, 151, 231, 231, 189,
+ 231, 307, 273, 273, NA, 262, 133, 191, 305, 133, NA, 350, 151, 191, 202,
+ 153, 153, 153, 90, 135, 153, 202, 135, 153, 181, 180, 97, 182, 114, 81,
+ 302, 140, 133, 153, 141, 232, 61, 97, 97, 132, 91, 97, 125, 90, 143,
+ 274, 242, 232, 302, 302, NA, 133, 180, 181, 97, 133, 113, 80, 159, 181,
+ 97, 122, 90, 181, 143, 146, 180, 146, 180, 97, 97, 146, 146, 116, 132,
+ 107, 231, 138, 98, 165, 121, 121, 163, 73, 135, 109, 109, 122, 132, 97,
+ 180, 180, 89, 109, 109, 109, 109, 109, 129, 141, 141), HP = c(130, 160,
+ 130, 108, 168, 208, 110, 165, 165, 101, 165, 140, 180, 180, NA, 150, 95,
+ 140, 170, 95, NA, 250, 110, 120, 150, 150, 150, 100, 81, 150, 100, 150,
+ 93, 150, 141, 150, 113, 145, 90, 63, 150, 88, 110, 90, 98, 140, 55, 102,
+ 95, 125, 92, 108, 140, 81, 110, 278, 250, 140, 225, 150, NA, 110, 158,
+ 150, 116, 110, 103, 160, 158, 177, 82, 102, 81, 142, 107, 140, 180, 138,
+ 160, 90, 90, 138, 106, 120, 120, 92, 165, 160, 74, 165, 128, 130, 160,
+ 73, 130, 90, 97, 115, 130, 102, 190, 200, 78, 158, 81, 105, 100, 100,
+ 90, 114, 114), Trans1 = structure(.Data = c(3, 3, 3, 3, 3, 3, 1, 1, 1,
+ 1, 1, 1, 1, 1, NA, 3, 3, 3, 1, 3, NA, 4, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1,
+ 3, 3, 1, 1, 3, 3, 3, 3, 1, 3, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
+ 1, 1, 1, 1, NA, 3, 1, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 1, 1, 3, 3, 1, 3, 3,
+ 3, 3, 1, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3,
+ 2, 3, 3, 3, 2, 3, 3), .Label = c("", "man.4", "man.5", "man.6"), class
+ = "factor"), Gear.Ratio = structure(.Data = c(39, 26, 40, 38, 28, 19,
+ 1, 1, 1, 1, 1, 1, 1, 1, NA, 3, 21, 8, 1, 6, NA, 2, 1, 1, 1, 1, 1, 17,
+ 35, 5, 1, 1, 6, 5, 1, 1, 45, 7, 11, 9, 1, 14, 25, 1, 6, 1, 35, 31, 37,
+ 18, 36, 40, 44, 35, 12, 1, 1, 1, 1, 1, NA, 25, 1, 30, 46, 25, 27, 23,
+ 40, 1, 20, 30, 35, 1, 1, 34, 32, 1, 24, 43, 25, 16, 1, 33, 1, 40, 1, 8,
+ 10, 22, 29, 35, 26, 28, 15, 30, 41, 14, 4, 29, 1, 42, 29, 25, 29, 39,
+ 40, 16, 47, 13, 13), .Label = c("", "1.67", "2.07", "2.49", "2.51",
+ "2.55", "2.58", "2.60", "2.61", "2.64", "2.66", "2.68", "2.71", "2.73",
+ "2.74", "2.75", "2.76", "2.78", "2.80", "2.81", "2.83", "2.84", "2.86",
+ "2.90", "2.94", "2.95", "3.00", "3.02", "3.03", "3.04", "3.05", "3.07",
+ "3.08", "3.10", "3.12", "3.13", "3.18", "3.25", "3.26", "3.27", "3.29",
+ "3.30", "3.38", "3.46", "3.47", "3.50", "4.11"), class = "factor"),
+ Eng.Rev = structure(.Data = c(50, 26, 42, 46, 33, 14, 1, 1, 1, 1, 1, 1,
+ 1, 1, NA, 3, 20, 5, 1, 7, NA, 2, 1, 1, 1, 1, 1, 12, 45, 6, 1, 1, 13, 7,
+ 1, 1, 58, 4, 17, 29, 1, 10, 28, 1, 8, 1, 56, 43, 49, 22, 48, 54, 57, 45,
+ 16, 1, 1, 1, 1, 1, NA, 28, 1, 27, 59, 28, 38, 24, 43, 1, 30, 32, 45, 1,
+ 1, 35, 34, 1, 19, 55, 36, 23, 1, 39, 1, 44, 1, 11, 15, 21, 31, 32, 25,
+ 51, 22, 41, 50, 19, 9, 40, 1, 47, 40, 37, 40, 53, 52, 27, 60, 18, 16),
+ .Label = c("", "1335", "1630", "2075", "2085", "2130", "2160", "2195",
+ "2215", "2225", "2245", "2250", "2280", "2285", "2295", "2305", "2320",
+ "2330", "2350", "2375", "2380", "2390", "2395", "2430", "2470", "2505",
+ "2510", "2530", "2545", "2565", "2575", "2615", "2625", "2645", "2670",
+ "2680", "2685", "2735", "2760", "2765", "2770", "2775", "2780", "2810",
+ "2825", "2835", "2840", "2855", "2930", "2935", "2945", "2980", "3000",
+ "3010", "3015", "3025", "3115", "3125", "3225", "3370"), class =
+ "factor"), Tank = c(13.199999999999999, 18, 21.100000000000001, 15.9,
+ 16.399999999999999, 21.100000000000001, 15.699999999999999, 18, 18,
+ 16.5, 18.800000000000001, 25, 18, 18.800000000000001, NA, 27, 15.6,
+ 15.5, 25, 13.6, NA, 20, 17.100000000000001, 20, 16, 14, 14, 20,
+ 13.199999999999999, 14, 16, 20, 13, 14, 16, 17, 13.199999999999999, 21,
+ 13, 10, 18, 15.4, 15.1, 16, 15.4, 19, 10.6, 13.199999999999999, 12.4,
+ 17, 11.9, 11.9, 15.9, 11.9, 15.9, 22.5, 22.5, 18.600000000000001,
+ 22.100000000000001, 18, NA, 15.9, 18.5, 19.600000000000001, 11.9, 15.9,
+ 14.5, 18.5, 14.5, 18.5, 11.9, 15.9, 11.9, 15.9, 14.199999999999999,
+ 15.9, 18.699999999999999, 17.199999999999999, 18.5, 13.199999999999999,
+ 13.199999999999999, 16.399999999999999, 17.699999999999999,
+ 17.199999999999999, 18, 15.9, 18, 13.6, 13.199999999999999,
+ 21.100000000000001, 18, 17.899999999999999, 17, 9.1999999999999993,
+ 15.9, 15.9, 15.9, 15.9, 15.9, 13.199999999999999, 18.5, 18.5, 11.9,
+ 14.5, 12.4, 14.5, 14.5, 14.5, 15.9, 15.800000000000001,
+ 15.800000000000001), Model2 = structure(.Data = c(1, 1, 1, 1, 1, 1, 1,
+ 14, 13, 1, 1, 1, 3, 4, NA, 7, 5, 19, 1, 1, NA, 1, 1, 20, 1, 2, 2, 18,
+ 12, 1, 1, 21, 16, 17, 15, 6, 1, 1, 1, 1, 9, 1, 1, 1, 10, 8, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Label = c("",
+ " Turbo 4 (3)", "Cadillac Fleetwood V8", "Cadillac Seville V8",
+ "Chevrolet Corsica 4", "Dodge Monaco V6", "GMC Safari V6",
+ "Mercury Cougar V6", "Mercury Grand Marquis V8", "Mercury Topaz 4",
+ "Mitsubishi Eclipse", "Mitsubishi Mirage 4", "Oldsmobile 88 Royale V6",
+ "Oldsmobile 98 Regency V6", "Plymouth Acclaim V6", "Plymouth Horizon 4",
+ "Plymouth Sundance Turbo 4", "Plymouth Voyager 4",
+ "Pontiac Firebird V6", "Silhouette V6/ Pontiac Trans Sport V6",
+ "Voyager V6/Chrysler Town & Country V6"), class = "factor"), Dist.n =
+ structure(.Data = c(19, 19, 17, 18, 6, 3, 19, 19, 20, 20, 20, 8, 19, 20,
+ NA, 9, 20, 8, 9, 21, NA, 4, 19, 14, 18, 19, 19, 15, 18, 18, 18, 14, 19,
+ 18, 18, 19, 18, 10, 16, 20, 10, 9, 19, 19, 17, 10, 14, 14, 19, 18, 17,
+ 18, 17, 18, 17, 10, 7, 18, 9, 9, NA, 19, 7, 7, 6, 16, 19, 4, 9, 8, 17,
+ 18, 17, 21, 11, 6, 7, 16, 19, 20, 17, 21, 12, 13, 8, 19, 19, 22, 16, 3,
+ 16, 18, 18, 18, 14, 17, 17, 1, 1, 19, 5, 5, 17, 2, 16, 17, 16, 17, 9, 6,
+ 7), .Label = c("(62)/38", "(64)/36", "50/[50]", "51/[49]", "52/[48]",
+ "53/[47]", "54/[46]", "55/[45]", "56/[44]", "57/[43]", "58/[42]",
+ "60/[40]", "62/[38]", "[58]/42", "[59]/41", "[60]/40", "[61]/39",
+ "[62]/38", "[63]/37", "[64]/36", "[65]/35", "[66]/34"), class =
+ "factor"), Tires2 = structure(.Data = c(3, 4, 4, 3, 3, 4, 3, 3, 3, 3, 4,
+ 4, 4, 4, NA, 4, 3, 4, 4, 2, NA, 6, 3, 3, 3, 3, 3, 3, 2, 4, 3, 3, 2, 3,
+ 3, 3, 3, 3, 3, 1, 4, 3, 3, 3, 3, 4, 1, 2, 3, 3, 2, 3, 3, 2, 3, 4, 4, 4,
+ 4, 4, NA, 3, 4, 3, 3, 3, 2, 4, 4, 4, 2, 3, 2, 4, 3, 4, 5, 3, 4, 2, 2, 3,
+ 3, 3, 4, 3, 4, 3, 2, 4, 4, 4, 4, 2, 3, 2, 2, 3, 4, 2, 4, 5, 2, 4, 2, 3,
+ 2, 2, 3, 3, 4), .Label = c("R12", "R13", "R14", "R15", "R16", "R17"),
+ class = "factor"), Pwr.Steer = structure(.Data = c(29, 34, 35, 14, 31,
+ 21, 19, 26, 30, 31, 19, 33, 26, 20, NA, 18, 15, 10, 23, 13, NA, 9, 31,
+ 19, 27, 11, 11, 25, 9, 4, 23, 32, 20, 3, 4, 20, 10, 35, 17, 6, 26, 21,
+ 17, 14, 21, 8, 1, 22, 2, 26, 18, 1, 7, 10, 23, 12, 30, 11, 31, 28, NA,
+ 15, 29, 36, 2, 14, 14, 5, 19, 15, 13, 23, 7, 20, 22, 9, 11, 24, 16, 11,
+ 11, 17, 17, 17, 23, 10, 21, 15, 21, 17, 28, 22, 34, 1, 20, 25, 17, 19,
+ 16, 22, 31, 15, 12, 14, 1, 18, 13, 13, 31, 18, 20), .Label = c("",
+ "0.57 pwr.", "0.58 pwr.", "0.60 pwr.", "0.61 pwr.", "0.62 pwr.",
+ "0.63 pwr.", "0.64 pwr.", "0.66 pwr.", "0.67 pwr.", "0.68 pwr.",
+ "0.69 pwr.", "0.70 pwr.", "0.71 pwr.", "0.72 pwr.", "0.73 pwr.",
+ "0.74 pwr.", "0.75 pwr.", "0.76 pwr.", "0.77 pwr.", "0.78 pwr.",
+ "0.79 pwr.", "0.80 pwr.", "0.81 pwr.", "0.82 pwr.", "0.83 pwr.",
+ "0.84 pwr.", "0.85 pwr.", "0.86 pwr.", "0.87 pwr.", "0.88 pwr.",
+ "0.91 pwr.", "0.93 pwr.", "0.96 pwr.", "0.97 pwr.", "1.06 pwr."), class
+ = "factor"), .empty. = structure(.Data = c(1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, NA, 1, 1, 1, 1, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1), .Label = "", class = "factor"), Disp2 = c(1.8,
+ 2.7000000000000002, 2.2999999999999998, 2, 2.5, 3.5, 2.5,
+ 3.7999999999999998, 3.7999999999999998, 3.1000000000000001,
+ 3.7999999999999998, 5, 4.5, 4.5, NA, 4.2999999999999998,
+ 2.2000000000000002, 3.1000000000000001, 5, 2.2000000000000002, NA,
+ 5.7000000000000002, 2.5, 3.1000000000000001, 3.2999999999999998, 2.5,
+ 2.5, 2.5, 1.5, 2.2000000000000002, 2.5, 3.2999999999999998,
+ 2.2000000000000002, 2.5, 3, 3, 1.6000000000000001, 3,
+ 1.8999999999999999, 1.3, 5, 2.2999999999999998, 2.2000000000000002, 2.5,
+ 2.2999999999999998, 3.7999999999999998, 1, 1.6000000000000001,
+ 1.6000000000000001, 2.2000000000000002, 1.5, 1.6000000000000001, 2, 1.5,
+ 2.3999999999999999, 4.5, 4, 3.7999999999999998, 5, 5, NA,
+ 2.2000000000000002, 3, 3, 1.6000000000000001, 2.2000000000000002, 1.8,
+ 1.3, 2.6000000000000001, 3, 1.6000000000000001, 2, 1.5, 3,
+ 2.3999999999999999, 2.5, 3, 2.3999999999999999, 3, 1.6000000000000001,
+ 1.6000000000000001, 2.3999999999999999, 2.3999999999999999, 2,
+ 2.2000000000000002, 1.8, 3.7999999999999998, 2.2999999999999998,
+ 1.6000000000000001, 2.7000000000000002, 2, 2, 2.7000000000000002, 1.2,
+ 2.2000000000000002, 1.8, 1.8, 2, 2.2000000000000002, 1.6000000000000001,
+ 3, 3, 1.5, 1.8, 1.8, 1.8, 1.8, 1.8, 2.1000000000000001,
+ 2.2999999999999998, 2.2999999999999998), HP.revs = c(6000, 5900, 5500,
+ 5300, 5800, 5700, 5200, 4800, 4800, 4400, 4800, 3200, 4300, 4300, NA,
+ 4000, 5200, 4400, 4400, 5200, NA, 4400, 5200, 4200, 5000, 4800, 4800,
+ 4800, 5500, 4800, 4800, 4800, 4800, 4800, 5000, 5000, 6500, 4800, 4600,
+ 5000, 3200, 4000, 4700, 4400, 4400, 3800, 5700, 5800, 5800, 5200, 6000,
+ 6000, 6200, 5500, 4500, 6000, 5600, 3800, 4200, 3200, NA, 4700, 5500,
+ 5000, 6500, 4700, 5500, 7000, 5800, 5700, 5000, 5000, 5500, 5000, 5000,
+ 5600, 6400, 5600, 5200, 6000, 6000, 5600, 4800, 5200, 5000, 5000, 4800,
+ 6200, 5600, 5900, 6000, 6000, 5900, 5600, 5400, 5200, 5200, 5200, 5400,
+ 5800, 5600, 6000, 6000, 5600, 5500, 5400, 5400, 5400, 4800, 5400, 5400),
+ Trans2 = structure(.Data = c(4, 4, 3, 3, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4,
+ NA, 4, 3, 4, 4, 3, NA, 4, 3, 3, 4, 3, 3, 3, 3, 3, 3, 4, 3, 3, 4, 4, 4,
+ 4, 3, 3, 4, 4, 4, 3, 3, 4, 3, 3, 3, 4, 4, 1, 4, 4, 4, 4, 4, 4, 4, 4, NA,
+ 4, 4, 4, 1, 4, 4, 4, 4, 4, 3, 4, 4, 2, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4,
+ 4, 4, 3, 3, 3, 3, 4, 4, 5, 4, 3, 4, 4, 4, 3, 4, 4, 3, 1, 1, 1, 3, 3, 3,
+ 4, 4), .Label = c("", "auto 4", "auto.3", "auto.4", "auto.CVT"), class
+ = "factor"), Gear2 = structure(.Data = c(42, 36, 45, 45, 35, 31, 30, 5,
+ 5, 10, 5, 4, 7, 7, NA, 9, 41, 9, 3, 30, NA, 2, 41, 41, 11, 36, 36, 43,
+ 55, 36, 36, 11, 26, 36, 11, 20, 26, 13, 46, 44, 3, 27, 19, 46, 46, 8,
+ 59, 54, 13, 33, 48, 1, 44, 26, 24, 14, 17, 10, 6, 6, NA, 19, 28, 23, 1,
+ 19, 12, 28, 47, 32, 57, 24, 26, 16, 34, 29, 29, 22, 15, 55, 55, 15, 40,
+ 39, 49, 47, 5, 41, 51, 52, 56, 38, 36, 35, 18, 55, 21, 24, 53, 54, 25,
+ 37, 58, 1, 1, 1, 50, 50, 60, 18, 29), .Label = c("", "1.81", "1.83",
+ "1.96", "1.99", "2.06", "2.08", "2.19", "2.26", "2.33", "2.36", "2.43",
+ "2.45", "2.46", "2.53", "2.54", "2.55", "2.57", "2.59", "2.66", "2.69",
+ "2.70", "2.71", "2.74", "2.76", "2.78", "2.80", "2.81", "2.83", "2.84",
+ "2.85", "2.87", "2.89", "2.90", "2.99", "3.02", "3.03", "3.10", "3.12",
+ "3.17", "3.18", "3.21", "3.22", "3.23", "3.25", "3.26", "3.27", "3.28",
+ "3.38", "3.41", "3.43", "3.45", "3.52", "3.53", "3.60", "3.62", "3.63",
+ "3.72", "3.87", "4.09"), class = "factor"), Eng.Rev2 = structure(.Data
+ = c(63, 45, 55, 60, 49, 32, 34, 7, 7, 15, 6, 4, 9, 9, NA, 11, 52, 12,
+ 2, 36, NA, 5, 48, 36, 15, 44, 44, 51, 74, 45, 39, 14, 40, 48, 17, 27,
+ 41, 16, 61, 71, 3, 29, 24, 53, 58, 10, 79, 73, 26, 40, 67, 1, 64, 42,
+ 33, 16, 20, 13, 8, 8, NA, 24, 33, 18, 1, 24, 23, 35, 56, 46, 76, 33, 42,
+ 22, 30, 38, 38, 28, 19, 72, 75, 21, 47, 57, 62, 59, 8, 54, 66, 63, 68,
+ 49, 43, 65, 25, 75, 36, 33, 70, 73, 31, 50, 78, 1, 1, 1, 69, 69, 77, 22,
+ 37), .Label = c(" ", "1410", "1415", "1445", "1450", "1570", "1585",
+ "1595", "1645", "1730", "1740", "1810", "1840", "1880", "1925", "1970",
+ "1980", "2045", "2050", "2065", "2200", "2210", "2215", "2225", "2245",
+ "2255", "2260", "2270", "2285", "2310", "2315", "2325", "2355", "2380",
+ "2385", "2400", "2405", "2435", "2465", "2485", "2500", "2520", "2530",
+ "2535", "2565", "2575", "2585", "2595", "2600", "2610", "2630", "2670",
+ "2695", "2745", "2760", "2780", "2800", "2805", "2810", "2835", "2845",
+ "2875", "2890", "2905", "2910", "2980", "2990", "3075", "3110", "3130",
+ "3150", "3210", "3220", "3260", "3285", "3310", "3355", "3395", "3755"),
+ class = "factor"), Price = c(11950, 24760, 26900, 18900, 24650, 33200,
+ 13150, 20225, 16145, NA, 23040, 27400, 26960, 28855, NA, 13790, 10320,
+ 10995, 14525, 8620, NA, 31979, 12140, 13995, 25495, 10945, 12495, 11995,
+ 6851, 9745, 12995, 15395, 6995, 8845, NA, 15350, 8895, 12267, 7402,
+ 6319, 17257, 9456, 11470, 12640, 9483, 14980, 6695, 10125, 10390, 12145,
+ 6635, 9410, 13945, 5899, 9999, 38000, 35000, 29422, 29246, 27986, NA,
+ 12459, 23300, 14944, 13800, 12279, 6599, 17880, 31600, 39950, 8672,
+ 10989, 5899, 17879, 14929, 13249, 27900, 13949, 17899, 12249, 7399,
+ 11650, 14799, 15930, 19945, 10855, NA, 10565, 7254, 41990, 16995, 25995,
+ 23550, 5866, 11499, 9599, 13071, 11588, 12268, 8748, 21498, 22860, 6488,
+ 17900, 7225, 9995, 8695, 9995, 14080, 18450, 21700), Country =
+ structure(.Data = c(5, 5, 4, 4, 4, 4, 10, 10, 10, NA, 10, 10, 10, 10,
+ NA, 10, 10, 10, 10, 10, NA, 10, 10, 10, 10, 10, 10, 10, 5, 10, 10, 10,
+ 10, 10, NA, 10, 10, 10, 10, 7, 10, 10, 10, 10, 10, 10, 5, 6, 5, 6, 6, 5,
+ 5, 7, 7, 5, 5, 10, 10, 10, NA, 6, 5, 5, 5, 6, 5, 5, 4, 4, 8, 5, 7, 5, 5,
+ 5, 5, 5, 5, 5, 6, 5, 5, 3, 3, 10, NA, 10, 7, 4, 9, 9, 2, 5, 6, 5, 5, 6,
+ 5, 6, 5, 5, 5, 4, 1, 8, 8, 4, 4, 9, 9), .Label = c("Brazil", "England",
+ "France", "Germany", "Japan", "Japan/USA", "Korea", "Mexico", "Sweden",
+ "USA"), class = "factor"), Reliability = structure(.Data = c(5, 5, NA,
+ NA, 4, NA, 3, 3, 3, NA, 3, 1, 3, 3, NA, 1, 1, 1, 1, 1, NA, 1, NA, NA, 3,
+ 4, 2, 3, NA, 1, 3, 3, 1, 3, NA, 2, 4, 3, 2, 4, 3, 1, 3, 1, 2, 1, NA, 5,
+ NA, 5, 5, 5, 5, 2, NA, NA, NA, 2, 1, 3, NA, 4, 5, 5, NA, 5, 5, 5, 3, 1,
+ 4, 5, 2, 4, NA, 3, NA, NA, 5, 3, 5, 5, NA, NA, NA, NA, NA, 2, 1, NA, 2,
+ NA, NA, NA, 5, 5, NA, 5, NA, 5, 3, 4, 5, NA, NA, NA, 3, 3, NA, 3, 3),
+ .Label = c("1", "2", "3", "4", "5"), class = "factor"), Mileage = c(NA,
+ 20, NA, 27, NA, NA, 21, NA, 23, NA, NA, NA, NA, NA, NA, NA, 26, NA, 18,
+ NA, NA, NA, NA, 18, NA, 25, 22, NA, NA, 27, NA, 18, NA, NA, NA, 22, 33,
+ 18, 33, 37, 20, NA, 30, NA, 24, 23, NA, NA, NA, 26, 32, 33, 27, NA, 23,
+ NA, NA, NA, NA, NA, NA, 24, 21, 19, NA, NA, 32, NA, NA, NA, 26, 25, NA,
+ 21, 20, 24, NA, 20, 22, NA, 33, 21, 19, 24, NA, 26, NA, 23, 28, NA, NA,
+ NA, NA, 34, 23, 25, 28, 27, NA, 29, 23, NA, 35, NA, NA, NA, NA, 26, NA,
+ 23, NA), Type = structure(.Data = c(4, 3, 3, 1, 1, 3, 3, 2, 2, NA, 3, 2,
+ 2, 3, NA, 6, 1, 5, 2, 1, NA, 5, 3, 6, 3, 1, 3, 6, 4, 5, 3, 6, 4, 1, NA,
+ 3, 4, 6, 4, 4, 2, 5, 5, 3, 1, 3, 4, 4, 5, 1, 4, 5, 5, 4, 3, 3, 3, 3, 3,
+ 2, NA, 1, 3, 6, 5, 5, 4, 5, 1, 3, 4, 1, 4, 1, 6, 5, 5, 6, 3, 5, 4, 1, 6,
+ 1, 3, 5, NA, 1, 4, 5, 1, 3, 3, 4, 1, 4, 5, 1, 5, 4, 3, 5, 4, 5, 4, 5, 4,
+ 4, 6, 1, 3), .Label = c("Compact", "Large", "Medium", "Small", "Sporty",
+ "Van"), class = "factor")), row.names = c("Acura Integra",
+ "Acura Legend", "Audi 100", "Audi 80", "BMW 325i", "BMW 535i",
+ "Buick Century", "Buick Electra", "Buick Le Sabre", "Buick Regal",
+ "Buick Riviera", "Cadillac Brougham", "Cadillac De Ville",
+ "Cadillac Eldorado", "Cadillac Seville", "Chevrolet Astro",
+ "Chevrolet Beretta", "Chevrolet Camaro", "Chevrolet Caprice",
+ "Chevrolet Cavalier", "Chevrolet Corsica", "Chevrolet Corvette",
+ "Chevrolet Lumina", "Chevrolet Lumina APV", "Chrysler Imperial",
+ "Chrysler Le Baron", "Chrysler Le Baron Coupe", "Dodge Caravan",
+ "Dodge Colt", "Dodge Daytona", "Dodge Dynasty", "Dodge Grand Caravan",
+ "Dodge Omni", "Dodge Shadow", "Dodge Spirit", "Eagle Premier",
+ "Eagle Summit", "Ford Aerostar", "Ford Escort", "Ford Festiva",
+ "Ford LTD Crown Victoria", "Ford Mustang", "Ford Probe", "Ford Taurus",
+ "Ford Tempo", "Ford Thunderbird", "GEO Metro", "GEO Prizm", "GEO Storm",
+ "Honda Accord", "Honda Civic", "Honda Civic CRX", "Honda Prelude",
+ "Hyundai Excel", "Hyundai Sonata", "Infiniti Q45", "Lexus LS400",
+ "Lincoln Continental", "Lincoln Mark VII", "Lincoln Town Car",
+ "Mazda 323", "Mazda 626", "Mazda 929", "Mazda MPV", "Mazda MX-5 Miata",
+ "Mazda MX-6", "Mazda Protege", "Mazda RX7", "Mercedes-Benz 190",
+ "Mercedes-Benz 300E", "Mercury Tracer", "Mitsubishi Galant",
+ "Mitsubishi Precis", "Mitsubishi Sigma", "Mitsubishi Wagon",
+ "Nissan 240SX", "Nissan 300ZX", "Nissan Axxess", "Nissan Maxima",
+ "Nissan Pulsar NX", "Nissan Sentra", "Nissan Stanza", "Nissan Van",
+ "Peugeot 405", "Peugeot 505", "Plymouth Laser", "Pontiac Bonneville",
+ "Pontiac Grand Am", "Pontiac LeMans", "Porsche 944", "Saab 900",
+ "Saab 9000", "Sterling 827", "Subaru Justy", "Subaru Legacy",
+ "Subaru Loyale", "Subaru XT", "Toyota Camry", "Toyota Celica",
+ "Toyota Corolla", "Toyota Cressida", "Toyota Supra", "Toyota Tercel",
+ "Volkswagen Corrado", "Volkswagen Fox", "Volkswagen GTI",
+ "Volkswagen Golf", "Volkswagen Jetta", "Volkswagen Vanagon",
+ "Volvo 240", "Volvo 740"), class = "data.frame")
diff --git a/data/chest.txt b/data/chest.txt
new file mode 100644
index 0000000..c319cfd
--- /dev/null
+++ b/data/chest.txt
@@ -0,0 +1,74 @@
+age nolnor nolr lnor lr
+ 16 2 0 0 0
+ 17 16 0 0 1
+ 18 34 1 2 0
+ 19 65 1 4 3
+ 20 126 4 6 1
+ 21 117 6 4 0
+ 22 115 4 12 6
+ 23 160 6 13 10
+ 24 174 2 8 5
+ 25 153 3 10 2
+ 26 164 2 18 12
+ 27 178 4 21 4
+ 28 184 2 11 4
+ 29 216 4 17 6
+ 30 209 2 23 9
+ 31 198 5 19 9
+ 32 216 7 19 11
+ 33 217 6 14 6
+ 34 196 0 16 6
+ 35 239 3 16 12
+ 36 257 7 22 6
+ 37 233 3 20 5
+ 38 199 8 23 9
+ 39 231 4 20 11
+ 40 240 5 21 9
+ 41 250 1 16 12
+ 42 216 2 16 10
+ 43 258 2 14 7
+ 44 261 12 15 7
+ 45 255 5 10 5
+ 46 260 3 18 4
+ 47 194 2 18 7
+ 48 207 4 14 6
+ 49 177 7 13 5
+ 50 184 2 14 7
+ 51 232 7 9 3
+ 52 172 3 15 7
+ 53 157 4 12 6
+ 54 188 2 10 4
+ 55 138 2 4 2
+ 56 151 1 12 3
+ 57 122 1 6 6
+ 58 113 3 7 2
+ 59 106 3 4 2
+ 60 106 1 8 6
+ 61 69 1 3 3
+ 62 52 0 3 3
+ 63 50 1 5 0
+ 64 62 0 1 2
+ 65 49 0 2 1
+ 66 39 0 6 2
+ 67 44 0 2 0
+ 68 41 0 3 2
+ 69 40 1 1 1
+ 70 43 1 0 5
+ 71 38 0 2 2
+ 72 60 1 3 2
+ 73 63 1 9 1
+ 74 45 2 1 3
+ 75 43 0 3 2
+ 76 51 0 4 3
+ 77 49 3 1 1
+ 78 73 1 2 5
+ 79 43 0 7 3
+ 80 39 0 6 2
+ 81 51 1 2 2
+ 82 30 0 5 4
+ 83 28 1 2 4
+ 84 27 0 5 2
+ 85 20 0 3 2
+ 86 5 0 0 0
+ 87 3 0 0 0
+ 88 3 0 0 0
diff --git a/data/coalminers.txt b/data/coalminers.txt
new file mode 100644
index 0000000..ee09e88
--- /dev/null
+++ b/data/coalminers.txt
@@ -0,0 +1,10 @@
+ BW BnW nBW nBnW age
+ 9 7 95 1841 22
+ 23 9 105 1654 27
+ 54 19 177 1863 32
+ 121 48 257 2357 37
+ 169 54 273 1778 42
+ 269 88 324 1712 47
+ 404 117 245 1324 52
+ 406 152 225 967 57
+ 372 106 132 526 62
diff --git a/data/enzyme.txt b/data/enzyme.txt
new file mode 100644
index 0000000..aad5404
--- /dev/null
+++ b/data/enzyme.txt
@@ -0,0 +1,13 @@
+conc velocity
+2 0.0615
+2 0.0527
+0.667 0.0334
+0.667 0.0258
+0.40 0.0138
+0.40 0.0258
+0.286 0.0129
+0.286 0.0183
+0.222 0.0083
+0.222 0.0169
+0.2 0.0129
+0.2 0.0087
diff --git a/data/gew.txt b/data/gew.txt
new file mode 100644
index 0000000..299874c
--- /dev/null
+++ b/data/gew.txt
@@ -0,0 +1,21 @@
+ y1 x1 x2 y2 x3 x4
+ 40.05292 1170.6 97.8 2.52813 191.5 1.8
+ 54.64859 2015.8 104.4 24.91888 516.0 0.8
+ 40.31206 2803.3 118.0 29.34270 729.0 7.4
+ 84.21099 2039.7 156.2 27.61823 560.4 18.1
+127.57240 2256.2 172.6 60.35945 519.9 23.5
+124.87970 2132.2 186.6 50.61588 628.5 26.5
+ 96.55514 1834.1 220.9 30.70955 537.1 36.2
+131.16010 1588.0 287.8 60.69605 561.2 60.8
+ 77.02764 1749.4 319.9 30.00972 617.2 84.4
+ 46.96689 1687.2 321.3 42.50750 626.7 91.2
+100.65970 2007.7 319.6 58.61146 737.2 92.4
+115.74670 2208.3 346.0 46.96287 760.5 86.0
+114.58260 1656.7 456.4 57.87651 581.4 111.1
+119.87620 1604.4 543.4 43.22093 662.3 130.6
+105.56990 1431.8 618.3 22.87143 583.8 141.8
+148.42660 1610.5 647.4 52.94754 635.2 136.7
+194.36220 1819.4 671.3 71.23030 723.8 129.7
+158.20370 2079.7 726.1 61.72550 864.1 145.5
+163.09300 2371.6 800.3 85.13053 1193.5 174.8
+227.56340 2759.9 888.9 88.27518 1188.9 213.5
diff --git a/data/hspider.R b/data/hspider.R
new file mode 100644
index 0000000..b3143fd
--- /dev/null
+++ b/data/hspider.R
@@ -0,0 +1,47 @@
+"hspider" <-
+structure(list(WaterCon = c(2.3321, 3.0493, 2.5572, 2.6741, 3.0155,
+3.381, 3.1781, 2.6247, 2.4849, 2.1972, 2.2192, 2.2925, 3.5175,
+3.0865, 3.2696, 3.0301, 3.3322, 3.1224, 2.9232, 3.1091, 2.9755,
+1.2528, 1.1939, 1.6487, 1.8245, 0.9933, 0.9555, 0.9555), BareSand = c(0,
+0, 0, 0, 0, 2.3979, 0, 0, 0, 3.9318, 0, 0, 1.7918, 0, 0, 0, 0,
+0, 0, 0, 0, 3.2581, 3.0445, 3.2581, 3.5835, 4.5109, 2.3979, 3.434
+), FallTwig = c(0, 1.7918, 0, 0, 0, 3.434, 0, 4.2627, 0, 0, 0,
+0, 1.7918, 0, 4.3944, 4.6052, 4.4543, 4.3944, 4.5109, 4.5951,
+4.5643, 0, 0, 0, 0, 0, 0, 0), CoveMoss = c(3.0445, 1.0986, 2.3979,
+2.3979, 0, 2.3979, 0.6931, 1.0986, 4.3307, 3.434, 4.1109, 3.8286,
+0.6931, 1.7918, 0.6931, 0.6931, 0.6931, 0, 1.6094, 0.6931, 0.6931,
+4.3307, 4.0254, 4.0254, 1.0986, 1.7918, 3.8286, 3.7136), CoveHerb = c(4.4543,
+4.5643, 4.6052, 4.6151, 4.6151, 3.434, 4.6151, 3.434, 3.2581,
+3.0445, 3.7136, 4.0254, 4.5109, 4.5643, 3.0445, 0.6931, 3.0445,
+3.0445, 1.6094, 0.6931, 1.7918, 0.6931, 3.2581, 3.0445, 4.1109,
+1.7918, 3.434, 3.434), ReflLux = c(3.912, 1.6094, 3.6889, 2.9957,
+2.3026, 0.6931, 2.3026, 0.6931, 3.4012, 3.6889, 3.6889, 3.6889,
+3.4012, 1.0986, 0.6931, 0, 1.0986, 1.0986, 0, 0, 0, 3.912, 4.0943,
+4.0073, 2.3026, 4.382, 3.6889, 3.6889), Alopacce = c(25, 0, 15,
+2, 1, 0, 2, 0, 1, 3, 15, 16, 3, 0, 0, 0, 0, 0, 0, 0, 0, 7, 17,
+11, 9, 3, 29, 15), Alopcune = c(10, 2, 20, 6, 20, 6, 7, 11, 1,
+0, 1, 13, 43, 2, 0, 3, 0, 1, 1, 2, 1, 0, 0, 0, 1, 0, 0, 0), Alopfabr = c(0,
+0, 2, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 16,
+15, 20, 9, 6, 11, 14), Arctlute = c(0, 0, 2, 1, 2, 6, 12, 0,
+0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ Arctperi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 4, 7, 5, 0, 18, 4, 1), Auloalbi = c(4,
+ 30, 9, 24, 9, 6, 16, 7, 0, 0, 1, 0, 18, 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2, 0, 0, 0), Pardlugu = c(0, 1, 1, 1, 1, 0,
+ 1, 55, 0, 0, 0, 0, 1, 3, 6, 6, 2, 5, 12, 13, 16, 0, 2, 0,
+ 1, 0, 0, 0), Pardmont = c(60, 1, 29, 7, 2, 11, 30, 2, 26,
+ 22, 95, 96, 24, 14, 0, 0, 0, 0, 0, 0, 1, 2, 6, 3, 11, 0,
+ 1, 6), Pardnigr = c(12, 15, 18, 29, 135, 27, 89, 2, 1, 0,
+ 0, 1, 53, 15, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 6, 0, 0, 0),
+ Pardpull = c(45, 37, 45, 94, 76, 24, 105, 1, 1, 0, 1, 8,
+ 72, 72, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0), Trocterr = c(57,
+ 65, 66, 86, 91, 63, 118, 30, 2, 1, 4, 13, 97, 94, 25, 28,
+ 23, 25, 22, 22, 18, 1, 1, 0, 16, 1, 0, 2), Zoraspin = c(4,
+ 9, 1, 25, 17, 34, 16, 3, 0, 0, 0, 0, 22, 32, 3, 4, 2, 0,
+ 3, 2, 2, 0, 0, 0, 6, 0, 0, 0)), .Names = c("WaterCon", "BareSand",
+"FallTwig", "CoveMoss", "CoveHerb", "ReflLux", "Alopacce", "Alopcune",
+"Alopfabr", "Arctlute", "Arctperi", "Auloalbi", "Pardlugu", "Pardmont",
+"Pardnigr", "Pardpull", "Trocterr", "Zoraspin"), row.names = c("1",
+"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
+"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
+"25", "26", "27", "28"), class = "data.frame")
diff --git a/data/hunua.txt b/data/hunua.txt
new file mode 100644
index 0000000..4b2d076
--- /dev/null
+++ b/data/hunua.txt
@@ -0,0 +1,393 @@
+agaaus beitaw corlae cyadea cyamed daccup dacdac eladen hedarb hohpop kniexc kuneri lepsco metrob neslan rhosap vitluc altitude
+0 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 90
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 80
+0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 50
+0 0 0 1 1 0 1 0 0 0 0 0 0 0 0 0 0 40
+0 1 1 1 0 0 0 0 0 0 1 0 0 0 0 0 1 40
+0 1 1 1 0 0 0 0 0 0 1 0 0 0 0 1 0 50
+0 1 0 0 0 0 1 0 0 0 1 1 0 1 0 0 1 70
+1 1 0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 100
+1 1 0 0 0 1 1 0 0 0 1 1 0 0 1 0 0 120
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 60
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 70
+1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 90
+0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 30
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 110
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 40
+1 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 50
+1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 60
+0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 40
+1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 80
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 50
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 50
+0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 100
+0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 60
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+0 0 1 1 1 0 0 0 0 0 0 1 0 0 0 0 1 20
+0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 140
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 80
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 60
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 50
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 40
+0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 1 60
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 90
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 140
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 140
+0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 180
+0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 200
+0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 1 0 160
+0 0 0 1 1 1 0 0 0 0 1 0 0 0 0 1 0 80
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 200
+0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 90
+0 0 0 1 0 1 0 0 0 0 1 0 0 0 0 0 0 60
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 40
+0 1 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 60
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 75
+0 1 0 0 0 1 1 0 0 0 0 0 0 0 1 0 1 78
+0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 60
+1 1 0 1 0 1 0 0 0 0 1 0 0 0 0 1 1 60
+0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 60
+0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 100
+0 1 0 1 1 1 1 0 0 0 0 0 0 0 0 1 0 80
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 60
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 170
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 140
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 100
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 80
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 80
+0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 80
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 70
+0 1 0 0 0 0 1 0 0 0 1 0 0 0 0 1 1 90
+0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 70
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 1 50
+1 1 0 1 0 1 1 0 0 0 0 0 0 0 0 1 1 100
+1 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 1 95
+0 1 0 1 1 1 0 0 0 0 1 0 0 0 0 1 1 100
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 90
+0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 90
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 80
+0 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 70
+1 1 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 35
+0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 25
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 1 0 0 60
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 35
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 60
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 3
+0 0 0 0 0 0 1 0 0 1 1 1 0 0 0 0 0 20
+1 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 60
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 220
+0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 200
+1 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 160
+0 1 1 0 0 0 0 0 0 0 1 0 0 0 0 1 1 140
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 160
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 180
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 40
+1 1 0 0 0 1 0 0 0 0 1 1 0 1 0 0 0 200
+0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 70
+0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 80
+0 0 0 1 1 0 0 0 0 0 0 1 0 1 0 0 0 60
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+1 1 0 0 1 0 0 1 0 0 1 0 0 0 0 0 1 100
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 150
+1 0 0 1 0 0 0 1 0 0 1 1 0 1 0 0 0 180
+1 0 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 160
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 260
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0 115
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 270
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 240
+0 1 0 0 0 0 1 0 0 0 1 1 0 0 0 0 0 90
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 100
+0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 100
+0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 1 0 340
+1 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 300
+0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 290
+0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 1 0 380
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 240
+1 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 40
+1 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 50
+0 1 1 0 1 0 0 0 0 0 1 1 0 0 0 0 1 60
+0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 105
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 0 200
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 140
+0 1 0 0 1 0 0 0 0 0 0 0 0 0 1 0 1 60
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 80
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 1 1 1 40
+0 1 1 1 0 0 0 0 1 0 1 0 0 0 0 1 0 220
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 80
+1 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 120
+0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 200
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 40
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 60
+0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 70
+0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 220
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 160
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 180
+0 1 1 1 0 0 0 0 0 0 1 0 0 0 0 1 1 180
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 120
+0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 1 1 120
+0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 130
+0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 190
+1 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 0 100
+0 0 1 1 1 0 1 0 0 0 1 0 0 0 0 0 1 90
+1 1 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 80
+1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 220
+0 0 0 1 0 1 0 0 1 0 1 0 0 0 0 1 1 220
+1 1 0 0 0 1 0 0 1 0 1 0 0 0 0 0 0 190
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 1 1 0 140
+0 1 1 1 0 0 0 0 0 0 1 0 0 0 0 1 1 100
+1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 180
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 100
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 80
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 85
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 60
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 60
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 80
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 160
+0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 150
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 130
+0 1 0 1 1 1 1 0 0 0 1 1 0 0 0 0 0 30
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 160
+0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 160
+0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 170
+0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 180
+0 1 0 1 0 1 1 0 0 0 1 0 0 0 0 0 0 185
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0 180
+0 1 0 1 1 1 1 0 1 0 1 0 0 0 0 0 0 140
+0 0 0 1 1 0 0 0 1 0 1 0 0 0 0 1 0 130
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 1 0 100
+0 1 0 0 0 0 1 0 1 0 1 0 0 0 0 1 1 60
+0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 1 1 80
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 110
+0 0 0 0 0 0 0 0 1 0 1 1 0 0 0 0 0 200
+0 1 0 1 1 1 0 0 1 0 1 0 0 0 0 1 0 220
+0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 240
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 240
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 240
+0 1 0 0 0 1 1 0 0 0 0 1 0 0 0 0 1 80
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 240
+0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 260
+0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 260
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 200
+0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 80
+0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 220
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 280
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 200
+0 1 0 1 1 1 0 0 0 0 1 0 0 0 0 0 1 200
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 1 1 0 80
+0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 1 0 240
+0 0 0 1 1 0 0 0 0 0 0 1 1 0 0 0 0 280
+0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 300
+0 0 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 240
+0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 1 200
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 1 1 160
+0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 210
+0 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 240
+0 1 0 1 1 0 0 0 1 0 1 0 0 0 0 1 0 220
+0 0 0 0 0 0 0 0 1 0 1 1 0 0 0 0 1 100
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+0 0 0 1 1 0 1 0 0 0 1 1 0 0 0 0 1 60
+0 1 0 0 0 1 1 0 0 0 1 1 0 1 0 0 1 75
+0 0 0 1 1 1 1 0 1 0 0 0 0 0 0 1 0 50
+0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 60
+1 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 70
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 1 1 90
+1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 95
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 65
+0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 80
+0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 1 100
+0 1 1 1 0 0 0 0 1 0 1 0 0 0 0 1 1 100
+0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 200
+0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 200
+0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 260
+0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 220
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 180
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 180
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 180
+0 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 120
+0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 120
+0 1 0 1 1 1 1 0 0 0 1 0 0 0 0 0 0 190
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 1 0 180
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 1 0 0 180
+0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 160
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 200
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 160
+0 0 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 120
+0 1 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 110
+0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 1 0 200
+0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 240
+0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 290
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 210
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 200
+1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 290
+0 1 0 1 1 1 1 0 0 0 1 0 0 0 0 1 0 120
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 1 0 300
+0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 1 0 240
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 300
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 320
+1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 270
+1 0 0 1 1 1 0 0 0 0 1 1 0 0 0 0 0 210
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 120
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 40
+1 1 0 0 0 1 0 0 0 0 1 0 0 0 1 0 0 80
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 260
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 200
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 130
+1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 150
+0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 170
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 1 0 150
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 150
+0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 250
+0 0 0 1 0 1 0 1 0 0 1 0 0 0 0 0 0 120
+0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 0 180
+1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 10
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 160
+0 1 0 1 0 1 1 0 0 0 1 0 0 0 0 1 0 280
+0 1 0 1 1 1 0 0 1 0 0 0 0 0 0 1 0 210
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 260
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 170
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 80
+0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 60
+0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 100
+1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 160
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 90
+1 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 150
+1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 130
+0 1 0 0 0 0 0 1 1 0 1 1 0 0 0 0 0 200
+0 1 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 100
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 100
+1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 60
+0 1 0 1 1 0 0 0 0 0 0 0 0 0 1 1 0 20
+1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 1 0 300
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 300
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 300
+1 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 450
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 390
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 400
+0 1 0 1 0 0 0 0 1 0 1 0 0 1 0 0 0 400
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 1 0 300
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 160
+1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 230
+0 1 0 1 1 0 0 0 1 0 0 0 0 0 0 1 0 200
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 280
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 260
+0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 300
+0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 400
+0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 500
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 400
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 500
+0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 600
+0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 620
+0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 600
+0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 600
+0 1 0 1 1 0 0 0 1 0 1 0 0 0 0 0 1 20
+0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 100
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 1 1 0 300
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 500
+0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 660
+0 0 0 0 0 1 1 0 1 0 1 0 0 0 0 0 0 360
+0 1 0 1 1 0 0 1 1 0 1 0 0 0 0 0 0 360
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 100
+1 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+1 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 230
+0 1 0 0 0 0 0 0 1 0 0 0 0 1 1 0 0 300
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 220
+0 1 0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 280
+0 0 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 230
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 220
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 135
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 280
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 140
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 150
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 100
+0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 600
+0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 600
+0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 480
+0 1 0 1 1 1 0 0 1 0 1 0 0 1 0 0 0 300
+0 1 0 1 0 1 0 0 0 0 1 0 0 0 0 0 0 200
+0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 245
+0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 245
+0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 260
+0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 280
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 260
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 320
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0 300
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 1 1 0 180
+0 1 0 0 1 1 0 1 0 0 1 0 0 0 0 0 0 220
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 0 0 170
+0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 0 200
+0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 180
+0 1 0 1 0 1 0 1 0 0 1 0 0 0 0 0 0 150
+0 1 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0 160
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 210
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 300
+0 1 0 1 0 1 0 0 1 0 1 0 0 0 0 0 0 320
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 70
+0 0 0 1 1 1 1 0 0 0 1 1 0 0 0 0 0 190
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 370
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 420
+0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 400
+0 0 0 0 1 1 0 0 0 0 1 0 0 1 0 0 0 340
+0 1 0 1 0 1 0 0 0 0 1 0 0 0 0 0 0 380
+0 1 0 1 1 0 0 1 1 0 1 0 0 0 0 0 0 440
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 500
+0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 500
+0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 180
+0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 300
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 400
+0 1 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 200
+0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 110
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 180
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 300
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 300
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 320
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 130
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 110
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 90
+0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 120
+0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 300
+0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 180
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 190
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 240
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 20
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 20
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 8
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 15
+0 0 0 1 1 0 0 0 0 0 0 1 1 0 0 0 1 20
+0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 20
+0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 8
+0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 1 20
+0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1 10
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 20
+0 1 1 0 0 0 0 0 0 0 0 1 0 1 0 0 1 35
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 8
+0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 20
+0 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 20
+0 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 45
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 5
+0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 10
+0 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 40
diff --git a/data/lirat.txt b/data/lirat.txt
new file mode 100644
index 0000000..d27b396
--- /dev/null
+++ b/data/lirat.txt
@@ -0,0 +1,59 @@
+ N R hb grp
+10 1 4.1 1
+11 4 3.2 1
+12 9 4.7 1
+ 4 4 3.5 1
+10 10 3.2 1
+11 9 5.9 1
+ 9 9 4.7 1
+11 11 4.7 1
+10 10 3.5 1
+10 7 4.8 1
+12 12 4.3 1
+10 9 4.1 1
+ 8 8 3.2 1
+11 9 6.3 1
+ 6 4 4.3 1
+ 9 7 3.1 1
+14 14 3.6 1
+12 7 4.1 1
+11 9 4.8 1
+13 8 4.7 1
+14 5 4.8 1
+10 10 6.7 1
+12 10 5.2 1
+13 8 4.3 1
+10 10 3.9 1
+14 3 6.3 1
+13 13 4.4 1
+ 4 3 5.2 1
+ 8 8 3.9 1
+13 5 7.7 1
+12 12 5.0 1
+10 1 8.6 2
+ 3 1 11.1 2
+13 1 7.2 2
+12 0 8.8 2
+14 4 9.3 2
+ 9 2 9.3 2
+13 2 8.5 2
+16 1 9.4 2
+11 0 6.9 2
+ 4 0 8.9 2
+ 1 0 11.1 2
+12 0 9.0 2
+ 8 0 11.2 3
+11 1 11.5 3
+14 0 12.6 3
+14 1 9.5 3
+11 0 9.8 3
+ 3 0 16.6 4
+13 0 14.5 4
+ 9 2 15.4 4
+17 2 14.5 4
+15 0 14.6 4
+ 2 0 16.5 4
+14 1 14.8 4
+ 8 0 13.6 4
+ 6 0 14.5 4
+17 0 12.4 4
diff --git a/data/nzc.txt b/data/nzc.txt
new file mode 100644
index 0000000..e8b94c2
--- /dev/null
+++ b/data/nzc.txt
@@ -0,0 +1,27 @@
+year male female nz
+1867 1213 6 217416
+1871 2637 4 254948
+1878 4814 2 344985
+1881 4995 9 458007
+1886 4527 15 620451
+1891 4426 18 668652
+1896 3773 86 743214
+1901 2885 78 815862
+1906 2515 55 936309
+1911 2542 88 1058313
+1916 2017 130 1149225
+1921 2993 273 1271667
+1926 2927 447 1408140
+1936 2432 511 1573812
+1945 3414 1526 1702329
+1951 3633 2090 1939473
+1956 4026 2705 2174061
+1961 4913 3611 2414985
+1966 5700 4583 2676918
+1971 7025 4583 2862630
+1976 8081 6779 3129384
+1981 8649 8004 3143307
+1986 9903 9600 3263283
+1991 18750 18939 3373926
+1996 39624 42696 3618303
+2001 50460 55020 3737277
diff --git a/data/oxtemp.txt b/data/oxtemp.txt
new file mode 100644
index 0000000..bc4572e
--- /dev/null
+++ b/data/oxtemp.txt
@@ -0,0 +1,81 @@
+maxtemp year
+89 1901
+84 1902
+84 1903
+85 1904
+81 1905
+92 1906
+79 1907
+84 1908
+85 1909
+79 1910
+95 1911
+87 1912
+83 1913
+87 1914
+82 1915
+83 1916
+89 1917
+84 1918
+86 1919
+79 1920
+89 1921
+86 1922
+93 1923
+86 1924
+85 1925
+85 1926
+80 1927
+87 1928
+87 1929
+89 1930
+75 1931
+95 1932
+89 1933
+86 1934
+86 1935
+83 1936
+87 1937
+84 1938
+84 1939
+85 1940
+89 1941
+88 1942
+92 1943
+87 1944
+84 1945
+84 1946
+90 1947
+90 1948
+89 1949
+86 1950
+82 1951
+89 1952
+90 1953
+83 1954
+87 1955
+82 1956
+86 1957
+80 1958
+91 1959
+82 1960
+87 1961
+77 1962
+81 1963
+87 1964
+81 1965
+80 1966
+83 1967
+89 1968
+88 1969
+88 1970
+84 1971
+77 1972
+85 1973
+77 1974
+91 1975
+94 1976
+80 1977
+80 1978
+85 1979
+83 1980
diff --git a/data/pneumo.R b/data/pneumo.R
new file mode 100644
index 0000000..c61a1f0
--- /dev/null
+++ b/data/pneumo.R
@@ -0,0 +1,6 @@
+"pneumo" <-
+structure(list(exposure.time = c(5.8, 15, 21.5, 27.5, 33.5, 39.5,
+46, 51.5), normal = c(98, 51, 34, 35, 32, 23, 12, 4), mild = c(0,
+2, 6, 5, 10, 7, 6, 2), severe = c(0, 1, 3, 8, 9, 8, 10, 5)), .Names = c("exposure.time",
+"normal", "mild", "severe"), class = "data.frame", row.names = c("1",
+"2", "3", "4", "5", "6", "7", "8"))
diff --git a/data/rainfall.R b/data/rainfall.R
new file mode 100644
index 0000000..7d3cf26
--- /dev/null
+++ b/data/rainfall.R
@@ -0,0 +1,1000 @@
+"rainfall" <- c(0, 23, 13, 69, 46, 0, 10, 15, 18, 18, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 33, 28, 30,
+ 0, 81, 15, 41, 0, 0, 0, 0, 0, 48, 318, 0, 15, 254, 51, 150,
+ 168, 163, 0, 0, 117, 23, 20, 109, 81, 23, 15, 0, 0, 0, 30, 18,
+ 25, 30, 66, 20, 84, 74, 119, 325, 107, 25, 183, 51, 135, 109,
+ 81, 8, 127, 3, 157, 185, 23, 18, 58, 20, 71, 23, 0, 107, 69,
+ 48, 0, 38, 38, 58, 84, 76, 30, 36, 36, 48, 147, 5, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 142, 56,
+ 25, 18, 64, 8, 20, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13,
+ 0, 0, 0, 0, 23, 51, 0, 13, 46, 0, 0, 0, 0, 3, 8, 64, 170, 5,
+ 51, 0, 0, 5, 0, 0, 0, 0, 0, 0, 155, 13, 0, 0, 0, 0, 0, 0, 0, 0,
+ 30, 38, 0, 178, 0, 130, 81, 0, 0, 0, 0, 0, 51, 0, 23, 13, 5, 3,
+ 279, 0, 0, 0, 46, 20, 30, 79, 10, 0, 46, 0, 203, 147, 10, 76,
+ 36, 0, 33, 71, 41, 25, 0, 0, 0, 0, 241, 43, 0, 0, 0, 0, 41, 0,
+ 5, 0, 36, 175, 10, 0, 0, 0, 0, 0, 0, 0, 0, 56, 0, 0, 74, 0,
+ 117, 76, 10, 66, 114, 10, 36, 66, 13, 76, 18, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58, 13, 8,
+ 0, 0, 0, 0, 0, 81, 150, 20, 3, 119, 13, 51, 102, 18, 33, 147,
+ 130, 0, 0, 94, 0, 25, 0, 0, 0, 0, 20, 33, 114, 132, 79, 0, 0,
+ 0, 46, 0, 0, 0, 0, 25, 30, 89, 64, 64, 229, 157, 0, 168, 38,
+ 221, 51, 119, 69, 0, 0, 0, 119, 23, 43, 46, 183, 0, 318, 81,
+ 15, 58, 30, 0, 0, 0, 114, 213, 117, 203, 13, 445, 140, 150, 69,
+ 13, 10, 18, 229, 81, 51, 10, 84, 38, 43, 8, 48, 28, 15, 8, 5,
+ 10, 157, 86, 25, 58, 0, 0, 0, 0, 0, 0, 66, 18, 76, 137, 117,
+ 206, 287, 64, 203, 135, 51, 168, 0, 102, 305, 0, 0, 432, 43,
+ 66, 10, 0, 30, 36, 13, 0, 0, 91, 20, 18, 0, 25, 38, 0, 3, 0, 5,
+ 0, 0, 20, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8, 23, 0, 51, 112, 23, 0, 51, 20, 13, 0, 0, 0,
+ 0, 0, 0, 0, 5, 61, 0, 46, 0, 0, 0, 0, 0, 0, 0, 0, 25, 0, 56, 8,
+ 0, 0, 0, 0, 0, 0, 0, 152, 211, 0, 74, 76, 0, 0, 112, 0, 64, 0,
+ 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 41, 53, 41, 0, 36, 43, 127, 76, 66, 229,
+ 36, 0, 0, 28, 0, 0, 152, 155, 13, 0, 3, 10, 0, 0, 51, 56, 259,
+ 0, 0, 41, 0, 25, 76, 122, 28, 71, 51, 36, 0, 0, 5, 48, 198,
+ 142, 3, 0, 51, 13, 0, 18, 0, 69, 51, 218, 102, 25, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 3, 0, 8, 216, 20, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 13, 20, 18, 0, 0, 0, 0, 0, 0, 0, 25, 30, 0, 18,
+ 0, 69, 33, 28, 58, 0, 0, 0, 0, 0, 38, 0, 0, 28, 0, 48, 0, 15,
+ 0, 5, 0, 0, 0, 43, 13, 8, 193, 84, 0, 0, 165, 84, 0, 127, 76,
+ 15, 0, 0, 0, 0, 0, 8, 13, 188, 41, 218, 94, 64, 0, 36, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 145, 79, 102, 135, 76, 262,
+ 33, 76, 224, 203, 30, 41, 13, 41, 356, 36, 25, 0, 0, 0, 0, 64,
+ 114, 229, 168, 142, 185, 0, 0, 43, 191, 102, 71, 89, 5, 74, 5,
+ 13, 41, 0, 0, 0, 0, 25, 0, 18, 5, 51, 0, 8, 64, 36, 25, 0, 18,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 381, 53, 13, 56, 76, 64, 69,
+ 201, 0, 10, 94, 130, 91, 38, 51, 86, 25, 76, 0, 0, 5, 127, 25,
+ 18, 8, 15, 36, 76, 20, 30, 23, 53, 15, 36, 8, 18, 0, 0, 0, 0,
+ 0, 0, 0, 0, 30, 0, 66, 28, 5, 0, 13, 69, 36, 277, 0, 5, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 28, 23, 0, 0, 41, 38, 25, 64, 8,
+ 0, 0, 0, 5, 66, 0, 0, 0, 0, 0, 0, 8, 15, 58, 28, 3, 43, 51, 46,
+ 8, 3, 3, 38, 0, 28, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 25, 104, 38, 46, 25, 0, 64, 0, 25, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 38, 38, 0, 0, 61, 0, 0, 127, 5, 64, 0, 51,
+ 43, 0, 157, 23, 0, 3, 0, 0, 41, 3, 0, 0, 10, 0, 0, 0, 0, 0, 0,
+ 43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 114, 43, 51, 152, 3, 0, 0, 0, 0, 0, 89, 38, 213, 66, 0, 3, 320,
+ 0, 23, 0, 64, 5, 30, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 102,
+ 69, 5, 0, 0, 0, 0, 0, 0, 43, 91, 61, 84, 0, 41, 51, 178, 20,
+ 25, 76, 8, 0, 76, 0, 0, 51, 18, 25, 3, 25, 66, 23, 0, 0, 0,
+ 229, 56, 127, 38, 191, 173, 127, 66, 97, 38, 38, 76, 127, 318,
+ 130, 229, 274, 43, 41, 5, 0, 0, 0, 0, 0, 0, 191, 5, 127, 41,
+ 56, 3, 76, 5, 86, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 51,
+ 165, 69, 0, 0, 8, 0, 0, 0, 0, 0, 0, 147, 56, 102, 51, 64, 0, 0,
+ 0, 165, 127, 0, 23, 5, 5, 25, 0, 64, 0, 155, 5, 0, 64, 0, 117,
+ 58, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 130, 66, 0,
+ 0, 25, 3, 13, 0, 0, 23, 0, 0, 0, 114, 107, 76, 0, 23, 5, 112,
+ 64, 0, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 66, 51, 66, 0, 130, 0, 0, 41, 25, 89, 64, 3, 5, 30, 0, 0,
+ 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 127, 0, 0, 0, 0, 0, 0, 0, 76, 0, 0, 127, 0, 183,
+ 0, 38, 0, 0, 102, 53, 25, 0, 0, 69, 76, 0, 41, 0, 0, 0, 0, 3,
+ 0, 0, 25, 0, 3, 38, 36, 0, 0, 0, 38, 0, 25, 64, 330, 64, 0, 0,
+ 0, 0, 0, 0, 3, 0, 51, 8, 0, 0, 0, 0, 0, 0, 51, 74, 76, 0, 0, 0,
+ 0, 0, 8, 3, 0, 0, 0, 0, 8, 0, 43, 0, 0, 0, 5, 91, 3, 109, 0,
+ 76, 155, 43, 165, 127, 10, 3, 48, 0, 0, 8, 0, 165, 264, 8, 97,
+ 107, 391, 173, 0, 64, 0, 25, 0, 0, 0, 0, 91, 0, 25, 0, 0, 0, 0,
+ 94, 28, 0, 0, 114, 165, 5, 25, 0, 0, 0, 0, 5, 30, 0, 0, 0, 0,
+ 0, 0, 89, 114, 48, 5, 76, 137, 127, 10, 102, 135, 102, 76, 114,
+ 66, 64, 3, 0, 0, 3, 51, 64, 109, 64, 8, 28, 3, 224, 5, 51, 3,
+ 3, 163, 0, 43, 66, 38, 74, 91, 15, 76, 0, 0, 3, 0, 0, 0, 0, 0,
+ 38, 69, 0, 23, 25, 3, 23, 5, 0, 0, 0, 0, 0, 0, 0, 0, 5, 25, 94,
+ 0, 0, 0, 0, 23, 3, 66, 152, 0, 0, 0, 0, 51, 0, 0, 0, 0, 0, 0,
+ 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 69, 0, 0, 38, 64, 0, 8, 89, 305,
+ 130, 102, 318, 66, 71, 81, 0, 38, 10, 0, 5, 0, 0, 0, 0, 0, 64,
+ 152, 89, 3, 0, 71, 69, 8, 0, 0, 25, 25, 3, 0, 0, 0, 0, 0, 15,
+ 38, 0, 0, 0, 48, 0, 66, 81, 64, 0, 0, 0, 0, 51, 51, 0, 0, 0, 0,
+ 64, 0, 0, 0, 0, 0, 0, 48, 25, 0, 0, 0, 0, 0, 0, 0, 30, 76, 38,
+ 127, 124, 41, 3, 25, 0, 25, 89, 5, 0, 0, 38, 18, 0, 0, 0, 0,
+ 165, 25, 0, 5, 48, 0, 0, 0, 0, 0, 76, 0, 0, 0, 64, 0, 0, 3, 0,
+ 48, 0, 0, 0, 0, 0, 0, 56, 66, 0, 0, 0, 3, 0, 0, 0, 0, 76, 0, 0,
+ 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 0, 89, 0, 0, 0, 0,
+ 25, 28, 0, 0, 201, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 89, 76, 38, 51, 25, 5, 127, 43, 25, 38, 74,
+ 38, 36, 254, 38, 0, 51, 94, 0, 0, 0, 0, 0, 117, 0, 0, 51, 61,
+ 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 99,
+ 132, 0, 48, 0, 0, 38, 0, 0, 0, 0, 43, 218, 0, 102, 56, 117, 0,
+ 130, 38, 86, 76, 41, 114, 36, 74, 51, 76, 137, 89, 33, 51, 0,
+ 20, 76, 0, 203, 8, 0, 0, 43, 0, 66, 53, 64, 127, 10, 43, 8, 0,
+ 25, 28, 127, 0, 0, 64, 0, 0, 5, 48, 0, 0, 23, 0, 0, 10, 0, 25,
+ 23, 323, 23, 74, 229, 3, 0, 155, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 51, 89, 51, 0, 84, 76, 30, 0, 0, 71, 5, 203, 51,
+ 76, 157, 127, 102, 51, 51, 64, 127, 33, 74, 91, 89, 0, 89, 84,
+ 0, 53, 74, 5, 25, 38, 0, 0, 0, 25, 0, 203, 79, 25, 102, 48,
+ 203, 102, 25, 58, 23, 3, 76, 69, 119, 48, 38, 0, 38, 119, 178,
+ 66, 0, 0, 0, 63, 43, 63, 0, 0, 0, 0, 0, 0, 0, 69, 71, 84, 0, 0,
+ 0, 0, 0, 0, 0, 38, 0, 198, 119, 38, 79, 0, 229, 0, 0, 0, 8, 0,
+ 0, 0, 61, 3, 76, 132, 165, 63, 74, 53, 10, 130, 190, 20, 28, 0,
+ 0, 0, 0, 165, 66, 3, 0, 0, 0, 0, 0, 25, 97, 13, 25, 97, 28, 51,
+ 0, 0, 0, 0, 0, 43, 23, 114, 25, 51, 23, 112, 152, 15, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 89, 38, 46, 25, 41, 33, 15, 18, 5, 0, 0,
+ 0, 0, 13, 28, 86, 0, 0, 142, 0, 8, 0, 0, 0, 13, 25, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 5, 0, 0, 0, 5, 25, 0, 0,
+ 0, 13, 0, 0, 28, 43, 3, 0, 0, 69, 8, 3, 0, 0, 0, 30, 23, 0, 0,
+ 69, 0, 0, 0, 0, 0, 0, 36, 0, 20, 20, 0, 0, 0, 46, 109, 0, 51,
+ 48, 0, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 53, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 18, 13, 30, 155, 36, 3, 0, 63, 0, 198, 36, 203,
+ 0, 69, 104, 0, 43, 13, 41, 170, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 5, 97, 46, 25, 81, 0, 13, 0, 41, 0, 0, 0, 0, 81, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 46, 51, 33, 33, 0, 0, 0, 0, 0, 0, 0,
+ 114, 63, 13, 0, 0, 18, 25, 0, 0, 0, 0, 5, 0, 0, 0, 0, 122, 147,
+ 0, 102, 0, 0, 0, 0, 132, 119, 99, 36, 0, 89, 0, 18, 51, 97, 23,
+ 0, 76, 0, 165, 254, 25, 36, 38, 25, 76, 41, 0, 25, 213, 140, 0,
+ 79, 84, 198, 0, 8, 0, 46, 18, 25, 38, 163, 104, 36, 163, 30,
+ 130, 30, 213, 43, 269, 41, 33, 18, 41, 89, 66, 20, 13, 46, 23,
+ 127, 56, 3, 20, 15, 38, 56, 0, 18, 13, 20, 36, 23, 25, 102, 30,
+ 28, 38, 58, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 91, 48, 0, 0, 0, 25, 0, 38, 41, 13, 5, 0,
+ 13, 0, 53, 0, 191, 330, 97, 89, 94, 41, 43, 38, 10, 18, 3, 0,
+ 0, 0, 23, 28, 20, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 41, 0, 51, 13, 64, 0, 0, 0, 0, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 18, 38, 56, 127, 122, 79, 3, 8, 3, 0, 89, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 10, 18, 86, 28, 25, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 10, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 23, 0, 0, 3,
+ 0, 0, 13, 56, 15, 18, 38, 0, 211, 0, 0, 13, 114, 10, 13, 41,
+ 203, 0, 13, 0, 0, 69, 71, 102, 38, 0, 0, 0, 0, 74, 0, 0, 0, 61,
+ 0, 0, 5, 0, 28, 8, 15, 0, 28, 109, 0, 0, 0, 0, 0, 0, 0, 30, 18,
+ 109, 25, 25, 13, 0, 0, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 18, 25, 41, 0, 0, 0, 0, 0, 0, 64, 0, 0, 20, 0, 0, 0, 0, 130,
+ 5, 8, 13, 38, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 94, 56, 15, 43,
+ 5, 46, 0, 0, 0, 0, 0, 284, 76, 43, 74, 0, 0, 0, 0, 48, 0, 0, 0,
+ 0, 33, 0, 0, 13, 178, 20, 0, 0, 0, 0, 0, 13, 15, 0, 23, 0, 0,
+ 0, 112, 0, 0, 13, 13, 38, 25, 74, 79, 5, 76, 61, 130, 33, 46,
+ 25, 66, 0, 0, 23, 46, 64, 28, 46, 33, 0, 58, 0, 0, 0, 168, 33,
+ 218, 0, 0, 246, 28, 147, 8, 0, 0, 0, 13, 0, 122, 15, 137, 71,
+ 145, 79, 117, 124, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 89,
+ 33, 28, 61, 76, 41, 97, 0, 76, 51, 191, 208, 135, 48, 79, 66,
+ 76, 71, 165, 20, 0, 0, 0, 0, 0, 109, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 112, 56, 0, 23, 28, 25, 20, 114, 0, 81, 94, 15, 38, 10, 28, 0,
+ 0, 0, 0, 130, 213, 86, 132, 91, 0, 0, 0, 0, 0, 0, 46, 38, 41,
+ 25, 13, 94, 38, 0, 0, 48, 147, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 142, 0, 28, 0, 0, 0, 18, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 10, 20, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0, 0,
+ 48, 0, 79, 79, 0, 0, 74, 23, 13, 196, 15, 76, 20, 216, 38, 130,
+ 124, 10, 0, 0, 18, 25, 38, 25, 0, 28, 0, 8, 15, 33, 0, 0, 0, 0,
+ 89, 0, 0, 25, 0, 36, 0, 13, 46, 23, 38, 64, 112, 119, 3, 165,
+ 13, 112, 0, 0, 0, 48, 0, 0, 0, 0, 13, 66, 0, 25, 0, 18, 38, 18,
+ 0, 117, 58, 86, 0, 33, 0, 0, 0, 0, 0, 0, 0, 0, 305, 33, 23, 0,
+ 15, 0, 0, 13, 0, 0, 0, 0, 0, 160, 152, 0, 0, 0, 61, 58, 0, 0,
+ 163, 58, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 104, 25, 165, 23, 36, 0, 28, 165, 221, 102, 28,
+ 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 86, 0, 0, 0, 0, 0, 43, 20, 0, 0, 0, 0, 0, 0, 46, 53, 89, 43,
+ 325, 13, 300, 13, 25, 51, 89, 25, 94, 97, 239, 61, 15, 41, 51,
+ 8, 13, 229, 114, 28, 25, 71, 64, 3, 0, 8, 13, 13, 0, 5, 0, 0,
+ 20, 28, 0, 0, 8, 0, 10, 13, 18, 53, 69, 36, 25, 43, 0, 0, 0,
+ 485, 353, 18, 406, 157, 38, 61, 107, 64, 81, 208, 130, 51, 0,
+ 33, 79, 196, 160, 71, 165, 28, 51, 51, 114, 89, 10, 0, 15, 33,
+ 23, 0, 0, 0, 0, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 0, 0,
+ 51, 132, 30, 0, 0, 0, 23, 0, 0, 56, 20, 0, 0, 20, 0, 183, 15,
+ 0, 127, 119, 107, 0, 15, 25, 0, 0, 0, 38, 0, 0, 102, 51, 0, 74,
+ 0, 0, 0, 28, 0, 0, 36, 0, 0, 0, 25, 76, 61, 132, 51, 69, 0, 66,
+ 0, 0, 0, 41, 0, 0, 28, 0, 51, 0, 10, 0, 51, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 20, 0, 0, 0, 0, 0, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 64, 0,
+ 0, 15, 0, 51, 0, 0, 0, 3, 0, 5, 25, 18, 0, 0, 18, 0, 191, 0, 0,
+ 0, 61, 0, 0, 0, 0, 0, 10, 8, 13, 3, 0, 0, 8, 53, 0, 30, 8, 0,
+ 0, 178, 8, 18, 0, 81, 10, 264, 20, 36, 41, 0, 94, 3, 0, 0, 10,
+ 0, 0, 0, 0, 0, 15, 127, 38, 8, 272, 23, 58, 66, 3, 109, 99, 3,
+ 185, 10, 38, 0, 8, 0, 64, 81, 38, 0, 5, 8, 173, 25, 5, 76, 292,
+ 124, 64, 25, 0, 0, 25, 76, 25, 76, 127, 76, 33, 43, 89, 305,
+ 58, 3, 58, 64, 0, 0, 0, 41, 38, 114, 66, 3, 76, 56, 0, 0, 0,
+ 241, 25, 127, 64, 89, 25, 38, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 267, 132, 114, 0, 0, 64, 114, 25, 0, 23, 25, 41, 0, 0, 0, 5,
+ 13, 23, 0, 18, 23, 25, 66, 114, 127, 208, 51, 0, 43, 51, 0, 0,
+ 76, 71, 13, 0, 15, 28, 157, 94, 33, 114, 157, 61, 25, 3, 41,
+ 76, 132, 51, 25, 127, 130, 3, 132, 0, 0, 0, 0, 25, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 91, 0, 0, 114, 89, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 38, 0, 0, 3, 5, 25, 0, 0, 69, 51, 43, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 25, 241, 114, 36, 0, 30, 0, 8, 66, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, 114, 64, 0, 343, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 119, 53, 89, 244, 0, 89, 0, 0, 30, 0, 8, 38, 5,
+ 0, 38, 30, 193, 89, 48, 3, 0, 0, 0, 0, 46, 8, 25, 102, 25, 102,
+ 102, 0, 0, 0, 0, 51, 25, 48, 0, 0, 0, 51, 8, 0, 38, 0, 51, 76,
+ 292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 76, 56, 51, 135, 0, 0, 41, 0, 0, 0, 0, 0, 0, 0, 0, 191, 0,
+ 0, 292, 3, 0, 0, 0, 56, 0, 173, 132, 15, 0, 0, 64, 0, 114, 41,
+ 0, 41, 0, 0, 0, 0, 0, 127, 0, 46, 51, 117, 71, 163, 30, 25,
+ 132, 127, 51, 0, 25, 0, 0, 64, 81, 38, 46, 0, 0, 0, 0, 0, 25,
+ 76, 51, 66, 51, 25, 51, 229, 0, 23, 3, 25, 0, 66, 127, 8, 81,
+ 66, 160, 91, 5, 0, 0, 89, 33, 10, 5, 15, 0, 66, 79, 328, 23, 0,
+ 25, 0, 0, 0, 0, 0, 0, 0, 69, 38, 81, 51, 0, 0, 0, 48, 84, 135,
+ 76, 226, 196, 175, 274, 0, 0, 0, 0, 0, 0, 0, 38, 5, 74, 0, 0,
+ 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 114, 102, 25, 137, 279, 89,
+ 89, 5, 8, 140, 25, 0, 0, 0, 0, 0, 0, 0, 97, 114, 132, 0, 3, 5,
+ 0, 0, 0, 140, 102, 89, 140, 216, 94, 33, 241, 76, 46, 251, 89,
+ 20, 30, 18, 0, 0, 66, 0, 0, 0, 0, 8, 69, 15, 10, 0, 0, 0, 0, 0,
+ 64, 5, 76, 229, 38, 61, 86, 5, 0, 64, 0, 0, 13, 0, 114, 25,
+ 140, 23, 10, 61, 305, 170, 10, 51, 53, 76, 5, 18, 5, 8, 102,
+ 185, 76, 104, 191, 89, 0, 8, 0, 0, 0, 0, 0, 0, 5, 0, 13, 36, 0,
+ 0, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 25, 51, 0, 0, 0, 0, 0, 0,
+ 38, 8, 0, 203, 119, 36, 0, 5, 64, 30, 0, 175, 28, 119, 15, 0,
+ 0, 84, 0, 0, 0, 46, 23, 0, 8, 13, 5, 33, 25, 30, 0, 0, 69, 99,
+ 117, 203, 25, 13, 28, 0, 0, 0, 0, 0, 0, 56, 36, 25, 0, 0, 0,
+ 114, 25, 76, 41, 191, 46, 48, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 64, 0, 0, 53, 0, 0, 94, 23, 5, 0, 0, 0, 0, 0, 0, 30, 109,
+ 0, 0, 152, 122, 33, 0, 61, 241, 122, 51, 3, 152, 25, 8, 0, 0,
+ 0, 69, 5, 25, 170, 0, 198, 76, 191, 0, 0, 0, 0, 0, 0, 0, 25,
+ 127, 38, 0, 0, 0, 0, 89, 0, 0, 0, 0, 51, 0, 0, 0, 91, 3, 0, 23,
+ 25, 0, 0, 0, 0, 0, 51, 0, 5, 46, 457, 94, 43, 254, 15, 0, 97,
+ 0, 0, 8, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84,
+ 0, 8, 89, 180, 56, 318, 71, 25, 140, 127, 25, 25, 102, 130, 5,
+ 335, 127, 5, 89, 53, 206, 94, 36, 41, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 99, 76, 69, 25, 0, 0, 0, 119, 58, 25, 0, 0, 0, 0, 0,
+ 0, 99, 130, 20, 5, 0, 5, 0, 64, 25, 0, 0, 0, 173, 25, 43, 74,
+ 38, 23, 51, 79, 64, 160, 188, 140, 23, 335, 150, 8, 66, 18, 66,
+ 0, 18, 0, 0, 0, 0, 5, 10, 127, 0, 117, 81, 0, 76, 64, 107, 102,
+ 53, 18, 81, 25, 43, 64, 51, 198, 51, 0, 114, 119, 137, 0, 0, 0,
+ 0, 0, 0, 0, 38, 76, 25, 64, 152, 25, 28, 23, 0, 0, 3, 8, 0, 0,
+ 0, 0, 0, 0, 53, 86, 38, 3, 5, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 94,
+ 0, 0, 0, 0, 0, 0, 132, 102, 48, 71, 117, 18, 0, 0, 30, 0, 0, 0,
+ 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 140, 56, 0, 58, 23, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 10, 15, 43, 69, 81, 0, 191,
+ 25, 0, 0, 0, 0, 71, 0, 165, 64, 183, 25, 94, 0, 0, 5, 89, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 127, 41, 0, 28,
+ 0, 5, 0, 0, 0, 0, 0, 0, 13, 28, 180, 0, 3, 0, 0, 114, 0, 25, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 30, 58, 114, 0, 5, 0, 5, 0, 0,
+ 0, 64, 0, 23, 89, 0, 25, 10, 0, 0, 0, 0, 0, 0, 30, 0, 53, 0, 0,
+ 0, 5, 0, 43, 0, 0, 33, 0, 5, 0, 43, 0, 0, 0, 56, 0, 0, 0, 0, 0,
+ 0, 58, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 5, 3, 0, 76,
+ 25, 117, 23, 41, 0, 0, 0, 0, 0, 0, 0, 183, 89, 0, 64, 71, 0, 0,
+ 0, 318, 0, 0, 163, 0, 79, 51, 348, 353, 229, 0, 86, 89, 10, 89,
+ 102, 132, 64, 81, 38, 249, 64, 0, 0, 53, 25, 28, 119, 46, 5, 3,
+ 81, 33, 30, 38, 5, 3, 0, 0, 0, 0, 0, 0, 38, 0, 0, 8, 3, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 64, 30, 23, 41, 33, 76,
+ 0, 56, 20, 53, 191, 130, 102, 58, 74, 0, 0, 94, 36, 51, 0, 208,
+ 13, 157, 13, 378, 43, 64, 84, 86, 0, 0, 0, 30, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 114, 28, 99, 89, 114, 41, 79, 259, 51,
+ 140, 43, 8, 76, 66, 33, 56, 76, 25, 18, 41, 0, 0, 0, 0, 0, 3,
+ 5, 3, 0, 10, 13, 173, 127, 122, 102, 38, 28, 79, 46, 30, 203,
+ 0, 69, 10, 38, 114, 152, 33, 102, 10, 0, 0, 0, 0, 0, 13, 0, 0,
+ 0, 0, 0, 0, 20, 3, 8, 15, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 13, 0, 8, 0, 0, 18, 13, 104, 0, 0, 0, 33, 0, 0, 0, 0, 0, 0,
+ 30, 25, 33, 0, 0, 0, 0, 0, 0, 30, 0, 25, 0, 0, 0, 0, 0, 0, 0,
+ 0, 254, 28, 89, 0, 5, 25, 28, 64, 33, 56, 28, 0, 38, 33, 33,
+ 51, 0, 127, 284, 41, 69, 0, 0, 0, 0, 25, 0, 0, 767, 61, 0, 0,
+ 0, 3, 43, 8, 10, 5, 51, 58, 102, 259, 10, 48, 25, 0, 0, 0, 0,
+ 0, 0, 30, 89, 66, 38, 5, 25, 33, 69, 5, 81, 3, 173, 10, 53, 99,
+ 51, 0, 5, 28, 3, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0, 0, 208, 3, 48,
+ 112, 127, 23, 20, 51, 117, 155, 38, 43, 51, 0, 0, 152, 157,
+ 127, 61, 66, 69, 8, 79, 86, 25, 51, 8, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 69, 38, 3, 165, 160, 0, 46, 30, 15, 114,
+ 5, 76, 71, 5, 64, 0, 5, 46, 64, 89, 53, 0, 41, 46, 51, 0, 0,
+ 30, 5, 0, 157, 114, 99, 79, 64, 23, 66, 0, 0, 0, 23, 51, 0, 0,
+ 0, 0, 0, 0, 130, 203, 0, 36, 0, 0, 0, 0, 30, 66, 0, 0, 0, 0, 0,
+ 132, 323, 114, 81, 147, 254, 0, 0, 0, 0, 0, 0, 269, 30, 0, 30,
+ 20, 28, 0, 8, 15, 56, 0, 168, 0, 56, 64, 0, 0, 69, 0, 81, 272,
+ 0, 145, 81, 8, 86, 18, 340, 13, 61, 64, 36, 86, 23, 86, 30, 23,
+ 18, 36, 10, 79, 3, 279, 10, 36, 15, 43, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 33, 112, 0, 28, 38, 13, 5, 36, 8, 0, 0, 0, 0, 0, 0, 0,
+ 13, 69, 10, 61, 259, 208, 155, 86, 66, 3, 61, 36, 20, 79, 99,
+ 61, 13, 0, 41, 64, 18, 23, 0, 5, 81, 61, 46, 28, 30, 10, 18,
+ 18, 3, 0, 13, 0, 0, 0, 0, 0, 0, 0, 13, 18, 0, 0, 3, 0, 0, 0, 0,
+ 0, 0, 33, 0, 0, 0, 0, 0, 0, 0, 0, 3, 66, 10, 13, 38, 0, 13,
+ 191, 0, 0, 5, 3, 0, 0, 5, 46, 0, 0, 0, 89, 0, 43, 25, 91, 5,
+ 18, 0, 0, 0, 76, 5, 0, 0, 53, 25, 0, 25, 5, 0, 0, 15, 43, 8,
+ 30, 3, 0, 0, 0, 43, 0, 0, 8, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 15, 8, 25, 114, 51, 36, 165, 114, 0, 0, 0, 0,
+ 0, 0, 38, 0, 0, 33, 135, 38, 36, 41, 0, 0, 0, 0, 30, 122, 43,
+ 5, 0, 38, 3, 117, 46, 43, 8, 0, 0, 0, 0, 0, 0, 41, 0, 102, 0,
+ 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 127,
+ 173, 0, 0, 0, 0, 0, 175, 0, 0, 71, 76, 91, 104, 64, 20, 0, 81,
+ 38, 79, 0, 86, 196, 28, 10, 188, 48, 38, 18, 213, 61, 3, 36,
+ 38, 18, 25, 10, 0, 0, 15, 15, 0, 0, 0, 89, 38, 28, 3, 150, 91,
+ 124, 38, 48, 94, 102, 109, 86, 130, 69, 71, 51, 30, 18, 33, 3,
+ 13, 3, 0, 8, 5, 30, 10, 13, 58, 338, 3, 0, 0, 0, 76, 130, 5,
+ 18, 36, 0, 3, 8, 20, 43, 99, 20, 282, 46, 66, 61, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 36, 3, 0, 0, 0, 5, 18, 0, 13, 13, 0, 0, 0, 119,
+ 0, 0, 0, 0, 0, 180, 0, 61, 0, 366, 20, 0, 0, 33, 28, 0, 33, 43,
+ 15, 0, 0, 0, 0, 8, 13, 0, 0, 0, 33, 0, 0, 5, 0, 15, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 84,
+ 0, 0, 20, 18, 0, 0, 0, 0, 0, 0, 13, 0, 0, 51, 13, 0, 0, 0, 0,
+ 0, 3, 3, 0, 43, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 76, 5, 8,
+ 0, 5, 0, 15, 180, 287, 8, 28, 10, 0, 13, 71, 0, 25, 226, 0, 0,
+ 0, 0, 0, 0, 0, 3, 58, 0, 0, 0, 36, 0, 58, 0, 0, 23, 8, 0, 0,
+ 109, 5, 0, 41, 0, 0, 0, 180, 71, 117, 64, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 58, 74, 0, 0, 38, 5, 112, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 20, 3, 0, 0, 0, 0, 305, 13, 18,
+ 99, 3, 0, 457, 117, 91, 109, 0, 43, 0, 0, 20, 0, 5, 0, 38, 61,
+ 51, 23, 0, 25, 25, 0, 3, 46, 0, 0, 0, 0, 0, 0, 36, 0, 0, 3, 0,
+ 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 5, 13, 0, 3, 0, 0, 0,
+ 0, 0, 0, 0, 46, 86, 137, 71, 5, 132, 866, 25, 213, 30, 0, 0,
+ 46, 3, 0, 0, 0, 0, 18, 122, 0, 127, 0, 18, 38, 211, 20, 0, 36,
+ 79, 56, 0, 0, 0, 23, 0, 0, 244, 23, 140, 0, 89, 25, 295, 48, 5,
+ 0, 127, 18, 0, 356, 478, 81, 124, 201, 91, 475, 74, 20, 282,
+ 175, 191, 23, 135, 109, 48, 343, 81, 229, 201, 132, 109, 114,
+ 10, 23, 25, 64, 0, 0, 0, 0, 46, 267, 5, 91, 175, 277, 36, 53,
+ 33, 231, 86, 109, 10, 43, 3, 46, 211, 53, 8, 109, 3, 91, 163,
+ 94, 76, 20, 188, 8, 0, 0, 0, 0, 0, 0, 239, 10, 112, 13, 114, 0,
+ 23, 46, 20, 300, 64, 0, 119, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
+ 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 97, 33, 0, 0, 0, 0, 0, 18, 0, 94,
+ 0, 0, 165, 58, 46, 10, 23, 79, 13, 0, 23, 25, 5, 5, 10, 48, 69,
+ 0, 0, 5, 20, 28, 10, 137, 3, 206, 18, 104, 53, 0, 0, 0, 61,
+ 160, 8, 28, 10, 86, 5, 18, 0, 5, 0, 33, 10, 0, 10, 10, 8, 28,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 5, 13, 53, 51, 0, 132, 71, 5,
+ 0, 0, 41, 48, 15, 0, 0, 23, 0, 0, 51, 30, 0, 10, 15, 25, 8, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 13, 18, 0, 0, 23, 0, 0, 0, 0, 20, 20,
+ 28, 0, 74, 10, 5, 0, 18, 0, 0, 18, 3, 112, 33, 0, 18, 3, 18, 3,
+ 0, 0, 0, 0, 28, 23, 132, 13, 30, 20, 48, 109, 3, 0, 0, 0, 30,
+ 18, 66, 18, 69, 0, 3, 157, 20, 168, 28, 178, 117, 3, 3, 0, 18,
+ 3, 13, 36, 0, 18, 0, 41, 28, 18, 485, 10, 137, 3, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 15, 3, 46, 0, 84, 10, 155, 0, 15, 145, 3,
+ 33, 46, 254, 267, 175, 155, 15, 23, 25, 25, 41, 3, 0, 15, 0, 0,
+ 0, 0, 48, 86, 69, 58, 94, 25, 5, 3, 18, 0, 10, 119, 241, 18,
+ 25, 53, 84, 46, 56, 216, 13, 216, 18, 25, 38, 25, 10, 5, 122,
+ 94, 122, 0, 33, 122, 8, 114, 13, 10, 0, 0, 0, 0, 3, 61, 0, 0,
+ 76, 183, 185, 175, 30, 241, 41, 48, 86, 0, 122, 0, 0, 0, 25, 0,
+ 0, 0, 10, 38, 71, 104, 51, 226, 51, 38, 38, 208, 41, 20, 76,
+ 15, 0, 0, 0, 13, 13, 25, 18, 36, 130, 76, 107, 15, 0, 33, 91,
+ 10, 0, 0, 0, 0, 0, 18, 107, 13, 0, 23, 5, 13, 5, 13, 25, 8,
+ 107, 175, 74, 53, 25, 3, 41, 18, 13, 20, 234, 8, 8, 0, 0, 160,
+ 15, 23, 36, 61, 25, 109, 58, 43, 38, 30, 56, 10, 0, 36, 41, 28,
+ 18, 10, 25, 30, 41, 36, 56, 0, 0, 160, 5, 10, 0, 0, 0, 0, 0, 0,
+ 0, 10, 0, 0, 0, 0, 0, 51, 86, 5, 10, 0, 0, 0, 0, 0, 58, 5, 0,
+ 0, 61, 122, 0, 5, 86, 18, 0, 46, 0, 0, 0, 0, 0, 0, 3, 25, 10,
+ 25, 13, 0, 8, 0, 246, 160, 46, 53, 5, 8, 0, 0, 0, 0, 0, 0, 3,
+ 36, 69, 0, 0, 58, 0, 13, 30, 46, 23, 25, 0, 0, 0, 0, 46, 94,
+ 135, 3, 0, 0, 173, 0, 38, 71, 175, 0, 0, 0, 0, 112, 46, 53,
+ 107, 18, 33, 58, 0, 0, 53, 0, 269, 0, 48, 33, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 36, 0, 46, 74, 0, 46, 102, 0, 15, 51, 56, 206,
+ 51, 183, 23, 5, 86, 51, 13, 3, 0, 5, 0, 284, 25, 71, 28, 3, 3,
+ 18, 5, 0, 0, 0, 307, 94, 3, 56, 150, 15, 28, 5, 8, 15, 140,
+ 236, 38, 0, 239, 51, 94, 0, 0, 0, 30, 0, 0, 0, 8, 71, 0, 0,
+ 163, 119, 434, 25, 56, 3, 0, 0, 0, 5, 10, 30, 0, 28, 0, 23, 18,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 3, 5, 0, 5, 0, 155, 0,
+ 0, 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
+ 53, 13, 3, 0, 0, 74, 594, 0, 3, 15, 150, 8, 109, 112, 124, 0,
+ 5, 231, 0, 0, 3, 102, 99, 25, 13, 10, 191, 0, 36, 290, 48, 5,
+ 43, 0, 0, 84, 43, 8, 76, 107, 36, 15, 5, 13, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 5, 10, 0, 0, 86, 8, 10, 130, 38, 3, 10,
+ 25, 114, 157, 99, 0, 351, 170, 15, 239, 30, 64, 13, 114, 10, 5,
+ 10, 3, 5, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0,
+ 0, 0, 41, 0, 10, 10, 5, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 38, 3, 0, 0, 43, 58, 114, 28, 188, 97, 58, 30, 43, 0, 0, 0, 43,
+ 3, 5, 107, 94, 38, 3, 94, 15, 13, 36, 3, 10, 15, 8, 10, 23, 0,
+ 0, 18, 33, 99, 15, 76, 74, 0, 8, 0, 0, 3, 20, 0, 71, 28, 8, 94,
+ 157, 20, 64, 18, 0, 8, 180, 86, 183, 3, 10, 0, 0, 5, 3, 43, 38,
+ 51, 10, 5, 0, 0, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 152, 533, 30, 0, 15, 25, 30,
+ 64, 10, 3, 0, 0, 0, 0, 48, 5, 0, 38, 0, 0, 15, 5, 5, 23, 51,
+ 175, 79, 18, 8, 38, 5, 147, 0, 36, 5, 0, 15, 0, 0, 0, 0, 0, 0,
+ 0, 0, 30, 10, 0, 0, 0, 0, 0, 46, 259, 0, 0, 0, 0, 0, 0, 0, 119,
+ 10, 0, 10, 132, 135, 0, 38, 114, 188, 28, 0, 18, 0, 18, 3, 0,
+ 0, 0, 0, 216, 20, 0, 0, 112, 0, 28, 56, 0, 0, 0, 107, 193, 3,
+ 5, 38, 0, 0, 3, 335, 76, 3, 203, 25, 84, 234, 41, 43, 13, 13,
+ 3, 97, 175, 213, 107, 145, 36, 109, 51, 99, 10, 145, 33, 3, 5,
+ 5, 3, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 8, 97,
+ 20, 114, 137, 5, 38, 53, 10, 0, 66, 191, 3, 102, 10, 0, 0, 0,
+ 0, 0, 0, 0, 13, 0, 81, 18, 8, 0, 5, 0, 0, 20, 145, 33, 79, 0,
+ 3, 0, 48, 18, 231, 0, 188, 97, 198, 15, 51, 10, 0, 13, 25, 0,
+ 3, 28, 0, 18, 79, 109, 8, 53, 23, 51, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 30, 23, 76, 191, 5, 23, 5, 5, 140, 46, 3, 10, 43, 0, 0, 0,
+ 0, 0, 5, 33, 0, 5, 23, 20, 0, 244, 30, 56, 76, 84, 206, 94, 79,
+ 38, 0, 191, 13, 8, 0, 0, 0, 0, 0, 0, 0, 91, 236, 64, 61, 124,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 25, 36, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 61, 76, 28, 74, 0,
+ 58, 41, 86, 5, 43, 23, 38, 173, 114, 30, 18, 38, 0, 0, 3, 43,
+ 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 8, 8, 13, 0, 0, 0, 0, 13, 58,
+ 140, 10, 0, 0, 0, 0, 0, 0, 0, 0, 58, 33, 0, 0, 13, 30, 36, 91,
+ 89, 36, 15, 56, 3, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 41,
+ 30, 81, 20, 33, 58, 117, 112, 305, 8, 3, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 104, 20, 25, 0, 0, 0, 0, 0, 0, 0, 0, 15, 3, 0, 150, 0,
+ 25, 25, 5, 0, 3, 0, 0, 25, 56, 94, 3, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 41, 0, 89,
+ 185, 0, 117, 53, 13, 5, 0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 15, 5,
+ 0, 89, 302, 13, 0, 46, 0, 43, 0, 0, 3, 18, 0, 38, 0, 290, 20,
+ 0, 69, 99, 104, 8, 20, 28, 30, 25, 0, 0, 0, 0, 0, 0, 30, 48, 0,
+ 0, 41, 51, 25, 0, 0, 3, 119, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 218, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 231, 13, 18, 13, 0, 38, 30, 13, 38, 0, 38,
+ 18, 0, 0, 213, 104, 25, 142, 163, 97, 10, 89, 13, 0, 0, 0, 20,
+ 0, 0, 117, 5, 0, 0, 0, 5, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 0, 0, 20, 0,
+ 28, 28, 71, 0, 0, 79, 130, 15, 0, 0, 191, 41, 79, 28, 84, 30,
+ 0, 38, 18, 0, 13, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 51, 0,
+ 0, 0, 36, 41, 8, 165, 3, 0, 0, 5, 165, 0, 61, 0, 0, 69, 20, 94,
+ 10, 0, 13, 0, 0, 0, 0, 0, 130, 150, 203, 0, 28, 0, 0, 0, 0, 0,
+ 0, 38, 18, 3, 0, 18, 3, 18, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0,
+ 0, 0, 18, 13, 46, 0, 0, 0, 0, 0, 3, 0, 33, 0, 0, 3, 13, 0, 58,
+ 0, 94, 28, 206, 102, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 107, 0, 0, 0, 0, 0, 5, 0, 33, 0, 0, 0, 15, 0, 3, 0, 5, 15,
+ 5, 66, 218, 3, 76, 20, 409, 10, 13, 15, 0, 10, 66, 122, 0, 0,
+ 0, 0, 0, 0, 8, 0, 18, 18, 20, 0, 0, 0, 10, 155, 23, 122, 5, 0,
+ 234, 0, 0, 10, 15, 3, 15, 0, 0, 0, 0, 0, 0, 18, 122, 28, 0, 48,
+ 48, 0, 122, 61, 0, 3, 46, 10, 109, 10, 36, 0, 0, 53, 112, 64,
+ 36, 43, 0, 5, 0, 0, 0, 10, 18, 38, 15, 43, 5, 5, 0, 74, 0, 20,
+ 38, 180, 10, 28, 33, 18, 61, 58, 122, 20, 0, 0, 25, 48, 13, 79,
+ 41, 36, 0, 0, 0, 0, 61, 0, 0, 0, 0, 175, 10, 8, 0, 0, 0, 15,
+ 38, 5, 3, 13, 229, 10, 191, 81, 201, 130, 241, 427, 25, 66, 84,
+ 51, 48, 71, 79, 84, 79, 170, 74, 36, 10, 84, 43, 38, 157, 84,
+ 43, 300, 15, 66, 15, 61, 3, 8, 0, 0, 0, 0, 0, 0, 0, 66, 48, 25,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 64, 0, 0, 0, 0, 28, 0, 30,
+ 23, 3, 112, 64, 0, 0, 0, 0, 0, 0, 43, 38, 30, 15, 23, 0, 30,
+ 13, 104, 185, 10, 38, 130, 155, 145, 53, 56, 157, 30, 20, 0, 8,
+ 0, 0, 0, 0, 89, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 3, 41, 30, 5, 0,
+ 0, 0, 0, 0, 0, 0, 3, 0, 8, 28, 18, 114, 119, 76, 145, 53, 3, 0,
+ 71, 30, 157, 48, 28, 132, 28, 86, 8, 124, 13, 53, 0, 0, 0, 0,
+ 0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 198,
+ 36, 145, 10, 0, 0, 0, 8, 13, 0, 0, 0, 86, 0, 0, 53, 5, 30, 46,
+ 119, 157, 13, 0, 8, 23, 69, 81, 0, 71, 74, 10, 84, 3, 5, 3, 0,
+ 0, 0, 114, 112, 13, 0, 0, 0, 20, 10, 0, 3, 0, 0, 0, 0, 5, 0, 8,
+ 0, 0, 0, 0, 0, 5, 0, 0, 36, 10, 61, 0, 0, 0, 0, 0, 18, 25, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 0, 0, 0, 0, 10, 10,
+ 0, 0, 0, 0, 89, 61, 0, 76, 33, 15, 0, 163, 15, 122, 64, 145,
+ 10, 28, 0, 0, 0, 0, 0, 48, 10, 5, 160, 33, 104, 23, 79, 25, 0,
+ 48, 25, 69, 20, 0, 46, 30, 76, 81, 41, 28, 94, 86, 216, 833,
+ 18, 79, 38, 178, 13, 10, 3, 5, 3, 3, 20, 0, 23, 66, 8, 8, 30,
+ 53, 33, 0, 28, 23, 30, 5, 8, 137, 5, 33, 74, 46, 25, 10, 8, 81,
+ 206, 5, 157, 46, 107, 549, 56, 203, 3, 5, 43, 36, 0, 0, 0, 0,
+ 0, 25, 23, 18, 36, 201, 102, 38, 53, 89, 38, 56, 36, 18, 0, 0,
+ 0, 0, 0, 107, 33, 0, 0, 30, 0, 0, 0, 0, 43, 99, 38, 155, 76,
+ 13, 152, 198, 89, 10, 3, 25, 15, 99, 43, 66, 84, 94, 15, 79, 5,
+ 0, 5, 0, 3, 86, 20, 124, 53, 10, 79, 10, 58, 64, 137, 91, 183,
+ 20, 124, 46, 25, 3, 0, 13, 64, 0, 0, 0, 216, 137, 3, 0, 0, 3,
+ 10, 38, 239, 51, 13, 81, 0, 33, 23, 8, 0, 28, 51, 13, 36, 5, 0,
+ 36, 38, 25, 10, 46, 170, 56, 0, 0, 0, 0, 0, 0, 0, 0, 20, 43, 3,
+ 81, 3, 13, 28, 71, 20, 15, 246, 3, 0, 173, 79, 0, 132, 0, 0, 0,
+ 0, 0, 0, 0, 0, 25, 0, 0, 8, 5, 3, 0, 36, 18, 119, 0, 53, 15,
+ 53, 0, 13, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 5, 0,
+ 23, 43, 18, 0, 0, 0, 0, 0, 15, 30, 0, 0, 0, 0, 0, 28, 33, 15,
+ 99, 5, 18, 0, 8, 10, 0, 5, 0, 99, 8, 0, 13, 0, 0, 0, 0, 13, 15,
+ 0, 0, 0, 28, 15, 0, 0, 0, 592, 41, 269, 104, 10, 3, 178, 122,
+ 15, 213, 211, 48, 38, 86, 20, 229, 25, 28, 241, 119, 23, 13, 3,
+ 104, 25, 5, 23, 13, 41, 5, 0, 56, 81, 10, 3, 13, 0, 25, 8, 0,
+ 0, 0, 0, 38, 3, 5, 0, 3, 0, 5, 0, 51, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 155, 89, 107, 23, 124, 10, 0, 61, 3, 43, 28, 10,
+ 46, 10, 0, 0, 0, 0, 135, 3, 0, 0, 18, 18, 5, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 46, 0, 0, 18, 3, 8, 0, 8, 5,
+ 46, 107, 74, 20, 71, 5, 41, 10, 0, 109, 41, 30, 46, 5, 287,
+ 168, 203, 84, 38, 229, 51, 43, 5, 94, 38, 20, 0, 0, 0, 0, 0, 0,
+ 3, 0, 0, 18, 0, 30, 8, 5, 0, 51, 114, 38, 8, 8, 48, 0, 0, 18,
+ 20, 318, 208, 267, 58, 231, 15, 8, 5, 3, 8, 5, 13, 0, 0, 0, 76,
+ 43, 3, 168, 89, 23, 0, 46, 117, 25, 0, 0, 25, 48, 69, 295, 0,
+ 0, 274, 155, 284, 18, 56, 300, 373, 3, 130, 160, 3, 3, 25, 5,
+ 5, 112, 38, 10, 282, 191, 157, 3, 66, 122, 43, 30, 10, 0, 79,
+ 74, 33, 81, 28, 33, 94, 10, 3, 211, 10, 84, 157, 325, 86, 99,
+ 109, 13, 0, 0, 0, 0, 56, 8, 0, 38, 201, 107, 36, 61, 20, 91,
+ 127, 137, 185, 56, 122, 0, 25, 13, 18, 183, 0, 0, 0, 0, 0, 0,
+ 56, 71, 28, 3, 10, 94, 76, 5, 66, 170, 48, 51, 25, 0, 170, 15,
+ 0, 0, 48, 53, 18, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 48, 0,
+ 18, 0, 119, 13, 0, 76, 152, 0, 0, 0, 0, 0, 0, 33, 76, 130, 163,
+ 64, 5, 3, 13, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 74, 51, 5, 20,
+ 28, 84, 25, 3, 0, 3, 0, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 3,
+ 23, 5, 0, 48, 0, 168, 15, 0, 25, 10, 0, 193, 76, 0, 5, 0, 23,
+ 0, 0, 15, 0, 33, 36, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 58, 10, 0, 28, 218, 5, 0, 64, 3, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 0, 0, 0, 30, 0, 127, 0, 0, 170,
+ 76, 20, 137, 86, 71, 36, 61, 0, 48, 3, 0, 23, 0, 0, 0, 0, 0,
+ 10, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 66, 165, 86, 147, 56, 0, 137, 25, 137, 5, 64, 3, 0, 0, 0, 0,
+ 0, 13, 0, 0, 0, 0, 3, 0, 0, 0, 5, 23, 79, 33, 0, 43, 239, 0, 0,
+ 0, 0, 0, 0, 86, 157, 203, 5, 0, 183, 28, 0, 30, 0, 0, 114, 28,
+ 208, 53, 64, 48, 0, 0, 0, 30, 157, 18, 13, 5, 8, 13, 3, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 8, 15, 130, 236, 127, 119, 41, 81, 23,
+ 239, 137, 86, 0, 112, 0, 18, 0, 15, 3, 0, 51, 135, 201, 58, 51,
+ 20, 163, 66, 8, 0, 0, 0, 0, 0, 25, 46, 0, 0, 8, 0, 0, 0, 8, 28,
+ 0, 0, 0, 0, 0, 0, 13, 193, 74, 38, 53, 0, 0, 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 25, 0, 64, 0, 15, 20, 0, 5,
+ 18, 8, 13, 0, 33, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 109, 168, 117, 61, 10, 58, 18, 0, 0, 0, 0, 0, 0, 41,
+ 206, 58, 15, 91, 3, 89, 229, 0, 18, 3, 30, 94, 0, 0, 5, 0, 8,
+ 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 53, 15, 0, 0, 0, 64, 58, 30, 3,
+ 5, 3, 23, 127, 38, 340, 104, 81, 0, 81, 36, 0, 0, 119, 0, 0,
+ 15, 10, 10, 0, 0, 0, 0, 5, 41, 13, 198, 10, 3, 10, 0, 142, 0,
+ 673, 46, 30, 0, 20, 8, 20, 38, 0, 8, 3, 0, 15, 20, 0, 38, 0,
+ 18, 48, 312, 8, 13, 0, 0, 241, 104, 0, 0, 0, 0, 0, 0, 0, 23,
+ 25, 51, 3, 0, 0, 0, 0, 0, 20, 0, 0, 43, 135, 38, 13, 61, 18, 5,
+ 8, 0, 0, 0, 25, 48, 41, 196, 94, 254, 99, 114, 48, 36, 142, 13,
+ 3, 5, 147, 28, 0, 48, 0, 48, 25, 0, 0, 0, 18, 0, 0, 48, 43, 30,
+ 36, 43, 0, 86, 18, 0, 53, 10, 3, 5, 8, 0, 3, 0, 218, 302, 10,
+ 0, 69, 13, 5, 147, 259, 86, 124, 94, 132, 66, 361, 15, 122, 41,
+ 64, 61, 43, 18, 10, 66, 18, 15, 53, 23, 368, 165, 178, 38, 226,
+ 0, 185, 10, 8, 0, 0, 5, 0, 0, 0, 0, 0, 64, 0, 18, 28, 18, 86,
+ 66, 28, 109, 25, 0, 140, 0, 38, 25, 84, 36, 33, 3, 295, 170,
+ 38, 246, 20, 71, 152, 140, 198, 3, 155, 91, 8, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 89, 160, 8, 0, 3, 15, 0, 5, 3, 10, 18, 36, 0,
+ 0, 244, 127, 8, 10, 48, 43, 157, 38, 41, 0, 58, 160, 10, 18, 0,
+ 20, 36, 10, 48, 0, 0, 0, 0, 8, 0, 25, 0, 0, 20, 58, 36, 58, 3,
+ 0, 0, 0, 0, 0, 0, 48, 46, 178, 384, 20, 74, 0, 0, 0, 0, 0, 58,
+ 86, 43, 10, 36, 0, 0, 0, 0, 10, 8, 94, 38, 33, 10, 0, 3, 28,
+ 61, 3, 0, 0, 150, 74, 5, 0, 25, 0, 0, 0, 0, 0, 38, 15, 5, 3, 0,
+ 0, 23, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99,
+ 0, 0, 23, 0, 0, 30, 36, 218, 0, 61, 5, 0, 36, 41, 71, 0, 81,
+ 97, 0, 43, 20, 28, 5, 3, 5, 0, 122, 117, 277, 20, 41, 0, 0, 0,
+ 0, 188, 279, 211, 79, 99, 102, 76, 30, 76, 84, 64, 33, 18, 5,
+ 15, 30, 97, 25, 28, 15, 150, 5, 25, 5, 15, 23, 10, 69, 25, 0,
+ 0, 0, 0, 0, 0, 0, 0, 64, 109, 25, 28, 0, 0, 36, 74, 5, 0, 0, 0,
+ 94, 0, 81, 269, 5, 0, 0, 0, 0, 0, 41, 0, 64, 0, 0, 0, 0, 0, 0,
+ 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 91, 216, 20, 0,
+ 231, 0, 25, 25, 36, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 53, 38,
+ 25, 0, 0, 53, 30, 109, 94, 64, 36, 97, 310, 33, 124, 142, 41,
+ 0, 3, 10, 48, 89, 109, 102, 66, 292, 8, 0, 0, 165, 254, 0, 150,
+ 163, 48, 124, 23, 61, 33, 13, 198, 97, 53, 5, 152, 56, 36, 130,
+ 10, 0, 0, 8, 0, 0, 0, 15, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 18, 145, 20, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 203,
+ 0, 0, 0, 0, 0, 25, 165, 127, 152, 36, 25, 8, 3, 0, 30, 142,
+ 109, 30, 191, 8, 0, 0, 0, 0, 0, 0, 41, 23, 333, 119, 137, 30,
+ 0, 0, 0, 30, 0, 0, 246, 69, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 69, 102, 8, 0, 180, 150, 127, 89, 8, 0, 46, 71, 8, 0, 0,
+ 0, 10, 64, 8, 20, 20, 25, 20, 53, 0, 5, 13, 0, 0, 0, 0, 20, 0,
+ 0, 58, 15, 0, 117, 43, 51, 0, 23, 8, 5, 3, 15, 15, 20, 259, 48,
+ 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 18, 0, 10, 5, 79, 102, 84, 81, 0, 10, 0, 0, 0, 0, 46, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 150, 0, 3, 91, 28, 0,
+ 0, 36, 0, 0, 15, 0, 36, 51, 61, 20, 13, 173, 102, 124, 470,
+ 185, 10, 8, 38, 86, 15, 107, 5, 3, 0, 64, 0, 33, 30, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 25, 23, 3, 15, 0, 0, 0, 0, 30,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20,
+ 36, 5, 30, 0, 180, 5, 66, 61, 66, 15, 86, 76, 10, 0, 119, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 28, 69, 30, 89, 18, 107, 30, 56, 0, 0,
+ 38, 109, 33, 0, 0, 0, 25, 74, 0, 3, 18, 0, 0, 0, 0, 0, 0, 127,
+ 292, 142, 320, 330, 107, 127, 91, 0, 114, 249, 81, 267, 53,
+ 216, 28, 137, 28, 20, 13, 89, 38, 147, 0, 13, 13, 0, 0, 0, 0,
+ 0, 0, 0, 0, 56, 23, 109, 97, 23, 132, 119, 71, 0, 117, 297, 15,
+ 0, 0, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, 48, 109, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 51, 18, 114, 79, 58,
+ 91, 3, 38, 107, 97, 43, 97, 46, 18, 43, 3, 18, 142, 0, 0, 94,
+ 89, 91, 43, 46, 0, 8, 76, 165, 84, 46, 107, 5, 18, 0, 56, 0,
+ 130, 0, 0, 0, 163, 127, 86, 157, 28, 30, 8, 38, 150, 38, 8, 3,
+ 30, 23, 0, 0, 71, 0, 0, 0, 38, 0, 0, 13, 0, 18, 13, 74, 104,
+ 10, 79, 0, 0, 254, 15, 0, 10, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 23, 0, 0, 76, 15, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 81, 152, 13,
+ 38, 147, 109, 64, 109, 5, 0, 0, 0, 0, 84, 183, 292, 5, 8, 43,
+ 0, 20, 30, 0, 0, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 43, 41, 41, 58,
+ 91, 18, 0, 0, 0, 0, 0, 0, 0, 0, 41, 76, 81, 20, 0, 0, 0, 0, 0,
+ 0, 102, 36, 127, 109, 20, 33, 0, 10, 41, 208, 226, 0, 18, 0, 0,
+ 64, 381, 36, 5, 0, 89, 15, 18, 0, 0, 56, 13, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84, 0,
+ 30, 0, 0, 0, 0, 0, 30, 0, 0, 102, 58, 94, 0, 0, 0, 38, 0, 41,
+ 64, 104, 56, 33, 0, 0, 0, 0, 15, 0, 25, 38, 0, 56, 0, 0, 0, 0,
+ 0, 0, 46, 15, 10, 8, 91, 5, 8, 0, 0, 23, 99, 25, 0, 76, 43, 41,
+ 142, 8, 58, 0, 3, 114, 30, 0, 0, 71, 0, 0, 0, 0, 191, 0, 25,
+ 15, 28, 43, 38, 107, 102, 64, 38, 0, 8, 0, 13, 10, 33, 0, 3, 0,
+ 10, 206, 0, 0, 0, 0, 0, 0, 122, 56, 0, 0, 20, 30, 10, 0, 0,
+ 272, 0, 0, 58, 66, 91, 51, 107, 0, 0, 33, 264, 56, 15, 10, 152,
+ 66, 10, 79, 66, 28, 18, 25, 23, 0, 20, 30, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 216, 5, 0, 0,
+ 0, 0, 53, 81, 15, 0, 18, 66, 64, 262, 71, 51, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 5, 97, 28, 25, 3, 107, 191, 30, 119, 64, 180, 43,
+ 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 119, 30, 86, 0, 0, 213, 38, 305,
+ 196, 0, 43, 0, 23, 239, 135, 112, 124, 137, 51, 97, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 41, 51, 10, 41, 3, 51, 3, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 135, 259, 13, 5, 48, 20, 74,
+ 5, 10, 46, 5, 3, 18, 0, 0, 0, 0, 5, 5, 18, 0, 15, 0, 99, 0, 0,
+ 0, 0, 0, 8, 81, 84, 0, 0, 10, 66, 84, 25, 58, 33, 0, 0, 20, 91,
+ 0, 0, 58, 0, 0, 119, 15, 0, 58, 58, 0, 0, 0, 64, 165, 94, 43,
+ 58, 61, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 5, 0, 0, 97, 81,
+ 74, 53, 28, 33, 97, 0, 0, 124, 234, 0, 0, 0, 5, 58, 0, 0, 28,
+ 28, 38, 18, 8, 10, 36, 0, 0, 0, 28, 10, 8, 38, 13, 20, 178, 18,
+ 185, 38, 0, 0, 61, 0, 86, 0, 36, 0, 33, 102, 46, 20, 0, 0, 0,
+ 5, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 15, 30,
+ 58, 10, 147, 0, 5, 0, 36, 18, 91, 724, 41, 160, 41, 114, 249,
+ 150, 71, 0, 157, 76, 272, 0, 0, 0, 18, 0, 0, 53, 25, 64, 46,
+ 41, 25, 0, 71, 0, 25, 0, 0, 64, 114, 343, 124, 86, 58, 15, 99,
+ 20, 5, 132, 109, 23, 20, 8, 91, 114, 5, 20, 246, 193, 371, 267,
+ 185, 61, 58, 0, 58, 46, 0, 330, 66, 30, 8, 10, 3, 18, 23, 0, 5,
+ 0, 0, 0, 0, 0, 0, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
+ 13, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 10, 91, 36, 20, 18, 0, 18,
+ 38, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0,
+ 3, 0, 20, 23, 130, 102, 13, 0, 0, 23, 66, 0, 23, 0, 0, 0, 41,
+ 0, 196, 211, 284, 229, 10, 48, 0, 0, 0, 0, 0, 0, 0, 3, 0, 36,
+ 74, 119, 0, 20, 0, 0, 0, 0, 173, 409, 28, 0, 43, 8, 36, 76, 48,
+ 0, 30, 10, 0, 38, 28, 112, 13, 41, 5, 25, 8, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 25, 89, 28, 0, 0, 36, 81, 86, 20, 8, 38,
+ 216, 0, 20, 25, 201, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 56,
+ 8, 5, 124, 175, 0, 5, 33, 13, 20, 0, 13, 84, 3, 0, 0, 0, 0, 25,
+ 236, 112, 94, 30, 36, 51, 23, 28, 23, 74, 3, 51, 140, 3, 0, 89,
+ 5, 13, 23, 0, 3, 28, 0, 64, 208, 262, 0, 97, 71, 112, 51, 10,
+ 48, 0, 0, 10, 102, 15, 41, 0, 15, 5, 23, 3, 0, 8, 64, 48, 0, 0,
+ 0, 0, 0, 15, 5, 0, 51, 267, 53, 124, 211, 20, 8, 208, 94, 8, 0,
+ 0, 0, 0, 0, 23, 183, 53, 38, 20, 0, 36, 3, 23, 10, 0, 20, 38,
+ 51, 183, 15, 0, 8, 25, 0, 0, 0, 66, 15, 64, 58, 15, 10, 33, 58,
+ 122, 23, 79, 48, 0, 0, 0, 23, 13, 208, 15, 0, 0, 0, 0, 0, 0, 0,
+ 89, 282, 56, 43, 183, 18, 5, 5, 18, 20, 0, 0, 10, 0, 5, 5, 5,
+ 13, 0, 0, 0, 8, 33, 84, 64, 33, 48, 46, 0, 0, 0, 69, 46, 43,
+ 13, 20, 218, 53, 132, 91, 15, 3, 0, 0, 0, 10, 30, 33, 3, 10, 0,
+ 8, 51, 0, 76, 0, 0, 5, 15, 23, 81, 3, 0, 0, 5, 5, 0, 0, 0, 0,
+ 0, 0, 0, 0, 25, 3, 3, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0,
+ 15, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 30, 46, 23, 102,
+ 64, 3, 0, 0, 94, 3, 20, 20, 3, 79, 71, 48, 0, 0, 0, 137, 0, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 51, 33, 0, 0, 0, 0, 0, 0,
+ 0, 15, 0, 0, 48, 13, 3, 0, 0, 0, 0, 0, 0, 30, 0, 0, 0, 0, 36,
+ 41, 8, 0, 0, 36, 3, 0, 0, 48, 3, 0, 10, 46, 20, 0, 38, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 279, 48, 48, 208, 175, 201, 79, 147, 0, 13,
+ 3, 0, 74, 38, 33, 13, 0, 0, 51, 5, 61, 5, 0, 5, 0, 10, 91, 0,
+ 0, 0, 38, 135, 56, 46, 33, 0, 0, 0, 0, 0, 0, 0, 46, 10, 0, 0,
+ 0, 5, 0, 0, 0, 0, 48, 10, 188, 23, 5, 25, 262, 10, 15, 0, 0,
+ 66, 74, 30, 20, 41, 193, 33, 66, 221, 20, 0, 48, 15, 0, 0, 0,
+ 0, 0, 109, 43, 0, 0, 0, 3, 0, 0, 23, 132, 5, 25, 0, 0, 8, 25,
+ 130, 8, 51, 79, 43, 0, 0, 0, 0, 0, 157, 244, 20, 155, 5, 84,
+ 145, 152, 102, 267, 178, 20, 28, 58, 0, 0, 48, 74, 99, 0, 0, 0,
+ 0, 0, 23, 5, 137, 61, 124, 74, 0, 23, 25, 46, 71, 30, 0, 399,
+ 170, 203, 173, 99, 124, 211, 28, 46, 36, 114, 198, 23, 20, 3,
+ 48, 86, 58, 46, 142, 130, 91, 30, 51, 104, 53, 10, 0, 48, 43,
+ 470, 30, 18, 3, 10, 28, 0, 0, 0, 0, 0, 10, 0, 0, 15, 8, 3, 23,
+ 114, 25, 0, 10, 46, 208, 0, 0, 23, 0, 0, 0, 0, 30, 15, 94, 132,
+ 0, 0, 0, 15, 0, 0, 152, 0, 0, 127, 18, 74, 124, 0, 127, 91, 10,
+ 38, 28, 97, 163, 114, 66, 41, 0, 0, 0, 23, 5, 0, 8, 0, 0, 0,
+ 13, 0, 5, 8, 0, 10, 30, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
+ 8, 0, 0, 18, 147, 0, 0, 5, 0, 56, 0, 0, 36, 36, 30, 0, 53, 91,
+ 0, 0, 0, 0, 0, 0, 0, 0, 18, 25, 0, 0, 18, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 18, 28, 56, 0, 10, 8, 124, 236, 147, 89, 0, 13, 20,
+ 0, 74, 0, 23, 8, 15, 0, 0, 0, 5, 112, 46, 5, 41, 13, 0, 8, 38,
+ 0, 168, 76, 74, 5, 10, 114, 216, 20, 292, 61, 13, 10, 84, 84,
+ 23, 13, 0, 20, 0, 0, 0, 0, 58, 46, 0, 86, 5, 0, 0, 5, 89, 28,
+ 15, 56, 0, 0, 160, 10, 0, 0, 0, 0, 0, 0, 43, 15, 13, 102, 287,
+ 3, 0, 23, 183, 5, 10, 0, 0, 0, 135, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 15, 0, 36, 0, 0, 0, 0, 0, 0, 64, 3, 10, 15, 0, 18, 99, 279,
+ 51, 5, 20, 8, 0, 64, 0, 25, 0, 69, 135, 10, 5, 0, 0, 0, 0, 3,
+ 0, 46, 3, 0, 43, 48, 18, 0, 0, 43, 53, 18, 91, 33, 25, 0, 5,
+ 25, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 363, 290, 124, 292, 124, 175, 142, 147, 0, 0, 0, 0, 0,
+ 0, 33, 8, 0, 0, 13, 0, 0, 0, 5, 0, 0, 48, 0, 0, 0, 0, 51, 5, 8,
+ 36, 0, 0, 13, 0, 0, 0, 0, 0, 81, 46, 109, 180, 0, 20, 135, 0,
+ 8, 0, 5, 0, 13, 61, 81, 74, 165, 295, 13, 5, 56, 99, 130, 51,
+ 46, 46, 150, 86, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 117, 224, 145,
+ 183, 25, 0, 0, 0, 0, 0, 0, 20, 0, 0, 213, 15, 33, 198, 10, 15,
+ 124, 94, 20, 145, 69, 132, 102, 23, 94, 18, 15, 53, 0, 18, 15,
+ 18, 0, 0, 0, 0, 0, 15, 10, 0, 0, 13, 66, 33, 99, 23, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 0, 0, 0, 0, 41, 18, 0, 89,
+ 229, 36, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 119, 15, 0, 0,
+ 10, 0, 0, 0, 0, 0, 0, 0, 5, 20, 0, 0, 0, 0, 0, 0, 0, 76, 132,
+ 81, 30, 0, 0, 0, 0, 0, 0, 0, 0, 188, 0, 0, 0, 0, 0, 0, 23, 53,
+ 0, 102, 124, 81, 46, 25, 0, 0, 0, 46, 119, 43, 10, 23, 124,
+ 201, 43, 20, 0, 91, 8, 0, 0, 0, 109, 71, 43, 0, 0, 46, 46, 18,
+ 36, 76, 97, 0, 0, 0, 0, 23, 107, 0, 191, 0, 0, 0, 0, 15, 13,
+ 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58, 114, 140, 69, 18, 13, 0, 91,
+ 66, 0, 13, 0, 46, 41, 127, 0, 5, 8, 5, 3, 3, 0, 46, 5, 20, 25,
+ 305, 305, 28, 61, 38, 30, 0, 0, 51, 0, 0, 0, 23, 18, 0, 33, 0,
+ 48, 114, 142, 15, 150, 84, 46, 56, 559, 56, 76, 79, 318, 46,
+ 13, 18, 8, 99, 5, 20, 0, 3, 15, 185, 127, 165, 5, 69, 0, 5, 3,
+ 13, 0, 0, 163, 13, 71, 142, 15, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 38, 290, 74, 18, 0, 0, 0, 160, 30, 0, 0, 0, 0, 0,
+ 51, 3, 0, 5, 10, 0, 0, 0, 0, 0, 10, 20, 43, 79, 25, 13, 38, 53,
+ 513, 64, 0, 51, 853, 61, 51, 269, 419, 104, 64, 64, 109, 124,
+ 51, 0, 127, 140, 193, 48, 30, 244, 25, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 23, 229, 30, 38, 41, 107, 10, 0, 13, 114, 30, 53, 66, 145,
+ 0, 297, 79, 15, 218, 20, 168, 46, 191, 137, 0, 0, 66, 8, 13, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, 229, 0, 0, 0, 0, 46, 0, 8, 0, 0,
+ 15, 8, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20,
+ 0, 0, 0, 0, 0, 0, 0, 76, 89, 142, 61, 305, 48, 23, 76, 104, 91,
+ 107, 0, 130, 97, 170, 99, 5, 216, 69, 224, 56, 38, 152, 20, 5,
+ 43, 53, 36, 193, 13, 89, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 79, 0,
+ 117, 71, 91, 107, 13, 0, 0, 0, 5, 46, 8, 15, 46, 43, 48, 74,
+ 15, 0, 0, 30, 0, 0, 61, 0, 20, 28, 46, 208, 0, 0, 0, 0, 5, 0,
+ 81, 30, 0, 0, 0, 0, 119, 8, 0, 46, 91, 41, 213, 0, 0, 0, 0, 0,
+ 0, 0, 188, 23, 0, 0, 0, 23, 64, 0, 0, 0, 0, 0, 5, 117, 145, 0,
+ 0, 81, 0, 0, 0, 69, 0, 18, 76, 102, 64, 43, 3, 8, 0, 0, 23, 0,
+ 0, 0, 15, 330, 99, 0, 0, 0, 0, 0, 0, 0, 0, 30, 0, 56, 13, 53,
+ 0, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 356, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 23, 23, 0, 0, 102,
+ 89, 0, 15, 114, 0, 20, 46, 25, 48, 0, 71, 61, 0, 0, 0, 0, 8,
+ 20, 0, 0, 0, 0, 0, 0, 0, 0, 30, 46, 0, 0, 0, 25, 10, 0, 0, 0,
+ 0, 0, 0, 213, 218, 0, 0, 0, 0, 0, 0, 0, 56, 10, 61, 140, 25,
+ 36, 76, 0, 0, 0, 71, 15, 5, 81, 25, 56, 0, 15, 56, 20, 5, 8,
+ 20, 36, 23, 38, 89, 0, 0, 0, 0, 0, 20, 0, 20, 81, 86, 66, 114,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 122, 127, 147,
+ 46, 15, 25, 13, 109, 10, 61, 51, 216, 0, 140, 74, 53, 23, 206,
+ 127, 178, 107, 84, 58, 25, 130, 109, 48, 3, 15, 0, 5, 8, 213,
+ 183, 36, 211, 150, 18, 10, 127, 99, 109, 3, 86, 20, 15, 8, 18,
+ 0, 33, 30, 18, 18, 0, 0, 0, 0, 15, 20, 0, 0, 0, 0, 0, 0, 20, 0,
+ 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 10, 18, 41, 8, 15,
+ 3, 0, 0, 0, 0, 0, 0, 0, 51, 66, 86, 262, 46, 152, 155, 43, 8,
+ 41, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 51, 15, 112, 51, 0,
+ 0, 0, 0, 0, 58, 36, 30, 23, 30, 193, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 0, 94, 0, 0, 0, 25, 239, 89,
+ 109, 10, 8, 38, 25, 196, 0, 0, 23, 0, 0, 0, 0, 0, 15, 48, 43,
+ 140, 41, 56, 46, 25, 0, 0, 0, 132, 30, 46, 0, 0, 0, 0, 46, 64,
+ 0, 0, 0, 0, 0, 0, 23, 51, 3, 0, 0, 53, 201, 10, 18, 91, 0, 0,
+ 61, 13, 18, 0, 0, 0, 0, 0, 0, 0, 56, 0, 51, 38, 241, 71, 74,
+ 18, 23, 76, 8, 13, 25, 0, 23, 38, 5, 30, 0, 231, 0, 0, 23, 0,
+ 0, 0, 0, 0, 0, 36, 15, 292, 0, 89, 15, 0, 18, 15, 0, 20, 30,
+ 107, 33, 15, 5, 0, 0, 0, 18, 0, 0, 0, 0, 0, 0, 0, 89, 0, 0, 0,
+ 0, 0, 0, 0, 0, 18, 0, 0, 0, 0, 185, 0, 152, 25, 46, 97, 196,
+ 99, 41, 33, 18, 20, 8, 41, 145, 53, 36, 0, 0, 0, 28, 69, 15,
+ 94, 48, 13, 107, 0, 0, 0, 0, 0, 0, 0, 66, 213, 38, 0, 0, 30, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 559, 48, 10, 33, 53, 152,
+ 193, 201, 229, 254, 140, 58, 81, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 53, 64, 43, 160, 69, 84, 0, 74, 5, 25, 0, 15, 10, 13, 38,
+ 10, 5, 20, 66, 84, 0, 13, 5, 3, 10, 0, 3, 18, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 10, 155, 188, 5, 0, 0, 43, 18, 10, 0, 0,
+ 0, 36, 0, 0, 30, 23, 5, 13, 0, 0, 3, 0, 10, 0, 0, 152, 69, 117,
+ 0, 0, 0, 0, 0, 13, 23, 66, 0, 0, 0, 0, 0, 56, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 97, 178, 23, 53, 79, 13, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 107, 0, 0, 61, 0, 0, 13, 99, 0, 0, 0, 0, 0,
+ 13, 15, 3, 0, 0, 0, 0, 0, 0, 0, 5, 102, 58, 13, 20, 86, 0, 0,
+ 25, 109, 8, 0, 5, 196, 0, 71, 71, 15, 66, 61, 51, 3, 3, 0, 8,
+ 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 5, 0, 25, 0, 0, 0, 0, 0, 5, 0, 112, 23, 142,
+ 442, 5, 0, 0, 0, 0, 0, 0, 0, 10, 10, 13, 0, 41, 0, 140, 48, 18,
+ 0, 51, 13, 152, 20, 20, 38, 61, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0,
+ 0, 0, 0, 381, 0, 0, 0, 0, 0, 0, 0, 191, 10, 13, 0, 74, 0, 0, 0,
+ 0, 0, 0, 58, 267, 170, 0, 25, 46, 51, 191, 81, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 79, 74, 25, 23, 76, 64, 0, 0, 56,
+ 25, 89, 343, 318, 86, 119, 25, 155, 320, 127, 91, 38, 0, 0, 38,
+ 0, 0, 0, 0, 0, 64, 114, 61, 119, 135, 13, 28, 25, 132, 114,
+ 102, 71, 224, 318, 18, 25, 61, 99, 79, 18, 15, 5, 23, 38, 5,
+ 43, 46, 30, 13, 0, 0, 13, 18, 102, 64, 38, 0, 0, 0, 41, 91, 25,
+ 20, 0, 38, 25, 30, 0, 18, 0, 13, 0, 18, 0, 0, 0, 0, 0, 0, 13,
+ 5, 0, 51, 25, 0, 3, 0, 0, 10, 5, 8, 38, 114, 51, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 48, 38, 140, 188, 193, 48, 84, 178, 36,
+ 46, 140, 97, 124, 97, 124, 38, 56, 3, 0, 0, 0, 224, 28, 0, 0,
+ 69, 102, 5, 0, 0, 36, 0, 36, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 89, 142, 122, 36, 145, 112, 36, 13, 18, 48, 0, 0, 0, 0, 0, 0,
+ 0, 0, 15, 48, 38, 13, 0, 0, 0, 51, 15, 48, 38, 18, 0, 0, 23,
+ 46, 160, 51, 0, 5, 0, 25, 15, 30, 41, 33, 0, 64, 25, 20, 69,
+ 13, 0, 0, 10, 0, 51, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 8,
+ 20, 10, 56, 163, 0, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 89, 64, 0, 0, 20, 292, 30, 25, 135, 48, 20, 56, 5,
+ 3, 10, 0, 0, 0, 0, 0, 0, 8, 38, 13, 0, 0, 0, 114, 48, 0, 168,
+ 5, 236, 97, 13, 3, 5, 33, 46, 193, 18, 13, 66, 8, 0, 0, 0, 13,
+ 58, 112, 102, 0, 0, 0, 0, 0, 356, 5, 0, 5, 152, 0, 10, 140, 61,
+ 76, 33, 61, 452, 20, 8, 79, 56, 102, 5, 51, 81, 124, 25, 0,
+ 104, 66, 5, 23, 0, 107, 3, 259, 18, 13, 109, 18, 71, 89, 46,
+ 239, 30, 10, 86, 84, 36, 41, 3, 91, 117, 3, 114, 20, 25, 274,
+ 56, 23, 0, 0, 0, 5, 3, 5, 23, 5, 0, 0, 104, 23, 20, 5, 10, 5,
+ 0, 0, 0, 23, 10, 0, 0, 33, 104, 0, 10, 79, 0, 25, 114, 0, 0, 0,
+ 0, 0, 38, 36, 124, 183, 170, 130, 23, 43, 41, 76, 155, 231,
+ 305, 89, 64, 0, 0, 0, 229, 102, 5, 0, 213, 20, 30, 13, 8, 15,
+ 46, 20, 0, 124, 203, 48, 41, 188, 71, 25, 91, 56, 10, 8, 23, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 196, 135, 48, 0, 79, 109, 224, 94, 150,
+ 23, 25, 246, 46, 71, 43, 145, 0, 48, 0, 0, 13, 5, 10, 48, 5,
+ 10, 23, 48, 0, 0, 0, 8, 8, 10, 53, 394, 163, 43, 15, 61, 5, 28,
+ 10, 0, 0, 0, 0, 302, 117, 64, 97, 84, 56, 61, 8, 25, 36, 51,
+ 30, 0, 0, 0, 0, 0, 0, 0, 445, 8, 13, 3, 25, 51, 175, 170, 38,
+ 3, 109, 119, 13, 56, 0, 58, 0, 25, 3, 76, 38, 0, 38, 74, 0, 76,
+ 28, 0, 89, 15, 69, 114, 18, 191, 10, 10, 5, 152, 0, 0, 0, 13,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 20, 18, 33, 124, 117, 0, 74,
+ 0, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 25, 0, 13, 23, 107, 56, 318,
+ 152, 0, 0, 175, 102, 8, 13, 0, 0, 0, 0, 0, 0, 0, 8, 0, 8, 338,
+ 30, 36, 33, 38, 25, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 20,
+ 46, 15, 76, 0, 0, 0, 0, 8, 0, 5, 0, 23, 180, 175, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 267, 140, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 107,
+ 193, 5, 150, 112, 8, 0, 3, 51, 249, 20, 10, 0, 0, 15, 36, 157,
+ 15, 18, 36, 0, 56, 38, 145, 71, 43, 71, 48, 46, 0, 33, 0, 94,
+ 13, 10, 0, 0, 5, 0, 43, 10, 53, 53, 224, 5, 38, 0, 0, 0, 0, 0,
+ 79, 38, 269, 46, 51, 150, 3, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 13, 0, 0, 0, 8, 0, 0, 5, 48, 51, 3, 0, 0, 0, 0, 152, 58,
+ 119, 48, 53, 79, 107, 269, 516, 130, 69, 64, 5, 76, 20, 0, 0,
+ 0, 145, 61, 140, 297, 89, 74, 130, 36, 69, 51, 157, 15, 0, 23,
+ 0, 18, 20, 5, 13, 36, 0, 41, 36, 0, 170, 20, 0, 0, 0, 0, 0, 0,
+ 0, 8, 56, 64, 0, 0, 0, 114, 124, 74, 221, 84, 269, 58, 0, 36,
+ 185, 48, 13, 43, 0, 18, 0, 119, 140, 198, 18, 0, 51, 10, 56,
+ 112, 20, 0, 0, 0, 0, 0, 0, 46, 109, 81, 15, 58, 84, 102, 76,
+ 13, 25, 41, 0, 0, 38, 28, 5, 10, 28, 23, 41, 76, 25, 0, 18, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 81, 127, 33, 211, 46,
+ 130, 0, 0, 0, 0, 0, 0, 198, 0, 51, 0, 30, 10, 61, 10, 61, 91,
+ 0, 0, 0, 5, 8, 0, 0, 0, 0, 0, 97, 150, 74, 46, 13, 112, 41, 0,
+ 28, 86, 69, 5, 58, 0, 0, 28, 91, 48, 28, 0, 0, 0, 0, 8, 0, 15,
+ 76, 13, 74, 76, 10, 183, 58, 69, 18, 114, 254, 79, 3, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 25, 51, 58, 102, 81, 0,
+ 0, 0, 20, 18, 170, 0, 0, 0, 51, 13, 0, 0, 0, 48, 5, 0, 10, 41,
+ 5, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 3, 5, 38, 8, 353, 0, 0, 0, 0,
+ 0, 0, 8, 0, 8, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51,
+ 0, 36, 0, 107, 3, 51, 160, 292, 28, 8, 218, 5, 0, 147, 594, 99,
+ 33, 51, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 8, 18, 3, 20, 23,
+ 0, 38, 0, 198, 79, 8, 3, 5, 0, 0, 0, 0, 18, 28, 3, 135, 23, 0,
+ 0, 58, 48, 33, 107, 157, 64, 91, 69, 0, 0, 0, 0, 0, 0, 0, 20,
+ 0, 5, 114, 152, 0, 0, 5, 0, 30, 8, 0, 152, 107, 28, 69, 20, 79,
+ 287, 48, 58, 147, 130, 20, 30, 10, 74, 8, 3, 51, 36, 13, 5, 0,
+ 0, 0, 46, 48, 0, 0, 33, 51, 46, 203, 0, 0, 0, 36, 104, 89, 41,
+ 58, 0, 0, 0, 0, 0, 0, 13, 5, 10, 61, 23, 61, 71, 18, 30, 69,
+ 41, 20, 157, 10, 221, 23, 13, 38, 53, 43, 33, 28, 38, 5, 61,
+ 41, 0, 0, 0, 0, 61, 3, 0, 0, 20, 5, 8, 0, 3, 0, 15, 5, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 13, 3, 3, 132, 3, 0, 0, 0, 0, 5, 3, 0, 86,
+ 249, 38, 5, 3, 89, 0, 0, 8, 30, 36, 23, 18, 18, 8, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 61, 36, 36, 122, 28, 3, 13, 119, 33, 102, 74,
+ 18, 0, 0, 8, 0, 107, 28, 3, 0, 13, 28, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 102, 53, 254, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13,
+ 137, 335, 28, 53, 23, 353, 15, 152, 33, 89, 5, 0, 0, 3, 0, 0,
+ 0, 71, 3, 15, 25, 0, 0, 0, 0, 0, 0, 8, 0, 0, 8, 0, 43, 114, 36,
+ 5, 56, 69, 102, 305, 10, 0, 0, 0, 23, 20, 10, 0, 0, 0, 0, 0, 0,
+ 0, 0, 3, 84, 0, 0, 213, 13, 30, 10, 13, 114, 8, 18, 46, 10, 58,
+ 33, 3, 0, 86, 36, 13, 46, 8, 249, 0, 0, 0, 0, 20, 13, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 28, 23, 36, 0, 119, 48, 142, 25, 132, 5, 0,
+ 0, 0, 13, 127, 0, 0, 124, 23, 0, 20, 0, 0, 0, 0, 0, 0, 0, 20,
+ 0, 198, 33, 297, 5, 0, 259, 46, 201, 66, 10, 25, 0, 0, 56, 0,
+ 36, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 28, 0, 0,
+ 0, 0, 23, 30, 94, 368, 18, 478, 18, 0, 99, 46, 211, 168, 127,
+ 64, 15, 13, 8, 33, 140, 30, 0, 13, 10, 13, 8, 0, 0, 0, 0, 0, 0,
+ 0, 0, 114, 10, 69, 112, 259, 0, 5, 5, 0, 0, 122, 41, 0, 0, 0,
+ 5, 0, 0, 5, 0, 5, 18, 0, 0, 0, 0, 0, 0, 0, 10, 0, 23, 79, 18,
+ 3, 0, 0, 15, 0, 0, 18, 0, 0, 0, 25, 51, 33, 3, 0, 5, 56, 160,
+ 33, 5, 13, 0, 0, 0, 38, 23, 43, 0, 0, 119, 8, 0, 0, 0, 10, 0,
+ 0, 0, 0, 0, 0, 208, 0, 155, 119, 51, 30, 140, 117, 3, 0, 0,
+ 150, 84, 15, 25, 0, 41, 13, 0, 109, 91, 36, 94, 58, 152, 74, 5,
+ 10, 231, 0, 0, 10, 0, 0, 0, 0, 81, 0, 0, 0, 201, 25, 8, 226,
+ 51, 5, 0, 5, 10, 0, 0, 107, 51, 79, 38, 36, 61, 0, 43, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 13, 218, 130, 46, 43, 66, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 74, 282, 79, 20, 3, 201, 114, 43, 3, 0, 0, 0, 0,
+ 25, 13, 292, 102, 259, 84, 160, 89, 0, 0, 51, 25, 5, 23, 0, 25,
+ 10, 8, 0, 15, 20, 8, 43, 23, 0, 13, 46, 3, 69, 20, 119, 3, 41,
+ 25, 23, 0, 0, 0, 71, 0, 15, 137, 79, 3, 0, 13, 0, 0, 25, 28,
+ 429, 41, 43, 30, 10, 0, 18, 28, 8, 0, 3, 185, 33, 25, 89, 20,
+ 23, 33, 168, 0, 69, 0, 0, 152, 25, 13, 25, 48, 3, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 10, 23, 0, 18, 89, 43, 76, 48, 28, 114, 74, 23,
+ 51, 36, 66, 13, 10, 0, 28, 20, 5, 0, 277, 130, 58, 71, 13, 10,
+ 3, 23, 13, 0, 119, 112, 15, 0, 36, 18, 23, 8, 3, 8, 3, 5, 76,
+ 107, 28, 43, 38, 28, 30, 0, 185, 38, 10, 124, 5, 140, 23, 109,
+ 48, 18, 0, 0, 74, 46, 10, 0, 135, 30, 104, 66, 25, 48, 0, 0, 0,
+ 0, 0, 10, 0, 165, 376, 234, 180, 249, 554, 267, 43, 193, 23,
+ 20, 48, 0, 0, 94, 119, 279, 102, 43, 56, 226, 109, 25, 18, 5,
+ 46, 0, 0, 23, 0, 0, 38, 0, 8, 30, 10, 15, 0, 0, 0, 0, 0, 0,
+ 163, 99, 10, 0, 0, 0, 0, 147, 13, 0, 211, 84, 150, 0, 0, 0, 86,
+ 53, 28, 20, 0, 0, 38, 15, 0, 221, 25, 135, 69, 71, 168, 56,
+ 124, 0, 211, 23, 13, 20, 0, 0, 13, 30, 69, 5, 33, 0, 48, 0, 79,
+ 0, 0, 0, 0, 0, 69, 0, 0, 0, 0, 0, 0, 0, 43, 3, 0, 8, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 74, 0, 0, 99, 79, 5, 206, 33, 0, 0, 0, 0, 0,
+ 8, 0, 0, 30, 23, 0, 0, 74, 25, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 5, 48, 0, 64, 79, 43, 0, 89, 41, 43, 10, 94, 23, 3,
+ 38, 69, 18, 0, 71, 66, 79, 25, 48, 226, 25, 0, 0, 0, 0, 0, 0,
+ 0, 58, 64, 0, 0, 0, 0, 15, 10, 112, 23, 0, 137, 28, 5, 0, 0, 0,
+ 226, 0, 0, 0, 0, 0, 84, 0, 0, 0, 0, 28, 13, 0, 5, 0, 64, 0, 15,
+ 0, 0, 33, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 0, 53, 208, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 10,
+ 0, 0, 0, 0, 0, 8, 0, 28, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 25, 0, 0, 84, 0, 0, 0, 0, 28, 79, 0, 0, 145, 28, 25, 18, 0,
+ 0, 0, 30, 208, 28, 0, 3, 0, 0, 0, 0, 23, 0, 0, 15, 10, 33, 175,
+ 36, 109, 5, 0, 0, 0, 0, 0, 0, 8, 0, 23, 353, 36, 33, 0, 0, 0,
+ 0, 0, 0, 15, 0, 5, 0, 0, 0, 424, 330, 119, 25, 91, 41, 216, 41,
+ 290, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0,
+ 0, 0, 13, 0, 0, 8, 10, 10, 5, 51, 0, 58, 109, 269, 124, 56, 20,
+ 13, 0, 330, 25, 0, 48, 76, 25, 71, 41, 3, 64, 76, 208, 18, 8,
+ 5, 0, 0, 3, 0, 5, 33, 152, 112, 15, 43, 401, 41, 25, 8, 28, 0,
+ 36, 97, 66, 188, 23, 15, 107, 15, 8, 56, 10, 114, 13, 0, 0, 0,
+ 25, 3, 0, 0, 0, 0, 0, 0, 15, 3, 0, 0, 0, 20, 38, 8, 25, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 8, 13, 25, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 51, 3, 0, 0, 69, 135, 51, 28, 43, 3, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3, 25, 0, 0, 0, 3, 0, 0, 0, 97, 0, 43, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 20, 97, 89, 0, 0, 0, 43, 8, 91, 8, 0, 0, 0, 0, 20, 51,
+ 25, 5, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 5, 0, 30, 0, 0,
+ 0, 10, 5, 5, 188, 48, 8, 117, 66, 0, 0, 0, 0, 3, 0, 0, 71, 71,
+ 0, 5, 8, 8, 0, 0, 0, 0, 64, 0, 0, 8, 3, 140, 76, 41, 25, 43, 3,
+ 5, 0, 0, 0, 0, 0, 0, 8, 0, 3, 5, 249, 348, 25, 0, 0, 0, 5, 0,
+ 0, 0, 5, 381, 99, 15, 109, 163, 0, 0, 8, 43, 33, 0, 0, 0, 28,
+ 3, 99, 8, 0, 8, 30, 130, 48, 3, 0, 10, 0, 71, 56, 137, 229, 8,
+ 25, 20, 20, 0, 140, 155, 66, 64, 241, 91, 0, 282, 13, 0, 41, 3,
+ 0, 0, 0, 0, 56, 0, 0, 18, 119, 178, 15, 3, 0, 0, 394, 20, 3,
+ 18, 221, 3, 15, 64, 23, 15, 8, 33, 0, 0, 0, 0, 0, 0, 0, 66, 79,
+ 3, 48, 0, 0, 0, 5, 38, 58, 13, 10, 41, 15, 5, 0, 0, 0, 0, 0, 0,
+ 0, 0, 150, 10, 74, 28, 0, 20, 0, 0, 0, 0, 0, 51, 10, 0, 0, 0,
+ 8, 43, 23, 147, 69, 18, 38, 8, 15, 36, 15, 0, 3, 10, 5, 48, 41,
+ 10, 66, 43, 165, 277, 58, 5, 0, 0, 0, 0, 0, 119, 0, 211, 36,
+ 340, 20, 216, 33, 0, 51, 10, 15, 28, 104, 0, 18, 3, 13, 0, 10,
+ 20, 0, 0, 0, 0, 0, 0, 0, 46, 53, 5, 94, 43, 79, 53, 3, 102,
+ 114, 91, 356, 18, 5, 178, 226, 150, 234, 343, 135, 30, 157, 58,
+ 74, 94, 97, 20, 58, 23, 30, 0, 38, 10, 36, 74, 71, 61, 10, 0,
+ 0, 0, 0, 0, 23, 142, 89, 33, 221, 335, 15, 0, 0, 0, 0, 0, 69,
+ 18, 20, 185, 3, 3, 0, 10, 20, 102, 64, 20, 3, 0, 0, 15, 0, 3,
+ 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 5, 8, 3, 0, 5, 13, 8, 10, 0, 0,
+ 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 53, 0, 89, 114,
+ 56, 10, 20, 185, 10, 170, 15, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 8, 0, 0, 15, 51, 170, 203, 0, 0, 0, 0, 0, 0, 0, 0,
+ 218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157, 0, 74, 0, 0, 0, 51,
+ 107, 5, 23, 5, 150, 30, 145, 13, 25, 13, 147, 112, 13, 147, 15,
+ 25, 30, 0, 28, 5, 5, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 310,
+ 107, 152, 91, 157, 56, 18, 0, 5, 0, 0, 3, 0, 15, 20, 23, 58, 5,
+ 48, 30, 0, 0, 38, 15, 0, 25, 0, 46, 5, 107, 0, 0, 173, 241, 99,
+ 84, 188, 91, 10, 3, 0, 0, 0, 5, 152, 18, 41, 84, 69, 0, 0, 41,
+ 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 203, 231,
+ 38, 23, 18, 38, 0, 13, 28, 23, 119, 10, 119, 107, 5, 114, 107,
+ 152, 175, 61, 76, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 13, 0, 0,
+ 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 94, 61, 41, 94,
+ 366, 109, 25, 69, 0, 0, 0, 48, 10, 20, 51, 41, 25, 38, 41, 0,
+ 3, 18, 0, 0, 13, 5, 0, 0, 0, 221, 107, 79, 36, 104, 58, 168,
+ 58, 10, 0, 3, 10, 13, 8, 18, 20, 18, 15, 25, 91, 30, 168, 13,
+ 221, 61, 3, 0, 0, 0, 0, 0, 28, 74, 43, 25, 41, 363, 163, 69, 0,
+ 94, 0, 0, 8, 13, 0, 8, 3, 18, 145, 160, 152, 3, 0, 0, 76, 8, 0,
+ 0, 0, 18, 5, 36, 0, 8, 0, 43, 20, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 13, 18, 0, 160, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30,
+ 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 23, 132, 20, 0, 0, 0,
+ 0, 0, 0, 0, 0, 28, 41, 13, 28, 10, 5, 53, 0, 56, 0, 188, 10, 8,
+ 0, 5, 8, 5, 282, 257, 56, 0, 0, 0, 56, 8, 23, 20, 127, 64, 30,
+ 5, 10, 188, 5, 0, 74, 18, 0, 0, 0, 0, 0, 0, 0, 25, 79, 15, 84,
+ 165, 48, 89, 74, 0, 0, 0, 25, 0, 0, 0, 28, 3, 0, 36, 0, 0, 0,
+ 0, 112, 213, 8, 0, 28, 8, 0, 0, 13, 58, 76, 0, 28, 36, 10, 76,
+ 127, 43, 0, 8, 28, 23, 71, 109, 79, 38, 124, 145, 43, 3, 114,
+ 91, 8, 86, 10, 0, 8, 0, 43, 178, 25, 5, 64, 104, 384, 3, 3, 0,
+ 0, 0, 5, 8, 0, 381, 0, 0, 0, 51, 114, 0, 8, 0, 0, 5, 0, 191, 5,
+ 0, 18, 41, 124, 84, 89, 36, 213, 3, 0, 0, 58, 229, 180, 112,
+ 107, 218, 470, 79, 56, 99, 86, 5, 13, 3, 66, 18, 97, 5, 3, 5,
+ 0, 23, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 5, 81, 0,
+ 206, 79, 3, 61, 97, 0, 0, 104, 64, 48, 0, 0, 0, 0, 30, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 122, 170,
+ 53, 0, 0, 0, 188, 64, 0, 257, 208, 13, 0, 0, 0, 0, 0, 79, 81,
+ 117, 142, 117, 0, 170, 41, 76, 310, 132, 0, 0, 0, 0, 0, 0, 66,
+ 0, 0, 0, 0, 236, 221, 91, 226, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,
+ 0, 0, 0, 0, 3, 0, 8, 15, 218, 157, 211, 109, 122, 20, 0, 0,
+ 109, 43, 18, 30, 99, 0, 0, 0, 0, 0, 0, 30, 0, 137, 15, 38, 38,
+ 23, 152, 163, 15, 5, 3, 0, 0, 0, 0, 13, 23, 135, 58, 43, 102,
+ 58, 25, 33, 28, 234, 0, 0, 0, 56, 0, 0, 0, 94, 305, 36, 56, 5,
+ 0, 0, 46, 0, 56, 0, 0, 0, 0, 0, 46, 5, 13, 0, 0, 0, 0, 0, 0, 0,
+ 0, 13, 58, 5, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 0, 0, 23, 15, 76,
+ 20, 25, 18, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 0, 33,
+ 102, 8, 33, 28, 5, 20, 0, 0, 0, 0, 0, 0, 0, 0, 64, 38, 13, 0,
+ 0, 13, 71, 33, 0, 0, 0, 0, 0, 0, 0, 28, 71, 58, 188, 23, 0, 0,
+ 0, 5, 0, 0, 0, 0, 0, 0, 0, 64, 66, 46, 127, 3, 0, 0, 0, 0, 0,
+ 89, 10, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 0, 0, 0, 13, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 102, 0, 191, 15, 56, 86, 23, 0, 0, 0, 84,
+ 312, 51, 8, 84, 3, 23, 0, 30, 38, 185, 79, 107, 13, 38, 23, 23,
+ 130, 5, 3, 10, 0, 0, 36, 79, 109, 28, 183, 229, 356, 38, 13, 0,
+ 198, 188, 36, 0, 36, 109, 257, 488, 10, 5, 152, 203, 56, 38,
+ 51, 38, 41, 178, 246, 112, 25, 15, 0, 0, 0, 145, 76, 165, 30,
+ 91, 76, 191, 33, 23, 160, 23, 112, 109, 287, 236, 124, 33, 13,
+ 127, 18, 109, 33, 13, 0, 0, 0, 0, 0, 0, 0, 56, 0, 0, 0, 15, 51,
+ 15, 41, 119, 43, 236, 419, 36, 5, 13, 13, 33, 3, 0, 119, 41,
+ 292, 137, 38, 0, 0, 0, 0, 0, 0, 30, 0, 0, 51, 13, 0, 0, 51, 23,
+ 13, 15, 15, 0, 193, 38, 317, 18, 0, 15, 63, 56, 0, 0, 0, 0, 0,
+ 10, 8, 3, 41, 269, 38, 15, 8, 0, 5, 48, 8, 0, 0, 0, 0, 0, 0, 0,
+ 0, 46, 13, 8, 0, 312, 178, 130, 41, 15, 28, 0, 33, 0, 13, 0,
+ 142, 0, 13, 165, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 15, 0, 0, 0, 0, 0, 0, 86, 104, 140, 160, 43, 0, 36, 0, 0,
+ 0, 10, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 5,
+ 185, 102, 10, 5, 5, 56, 112, 5, 8, 13, 0, 0, 0, 0, 0, 0, 229,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 140, 10, 23, 30, 5, 102, 152, 3,
+ 0, 46, 15, 0, 180, 74, 185, 58, 0, 33, 66, 23, 5, 8, 3, 208,
+ 20, 5, 107, 48, 8, 0, 18, 0, 0, 15, 23, 33, 112, 30, 13, 33,
+ 102, 25, 0, 20, 0, 5, 3, 0, 51, 53, 142, 66, 76, 13, 152, 43,
+ 48, 13, 25, 5, 102, 84, 0, 0, 102, 0, 0, 0, 0, 13, 0, 3, 127,
+ 0, 152, 51, 5, 38, 5, 38, 229, 0, 0, 0, 0, 190, 38, 297, 264,
+ 178, 185, 79, 63, 79, 23, 25, 190, 48, 18, 63, 5, 0, 0, 0, 0,
+ 5, 127, 5, 28, 33, 13, 114, 23, 8, 513, 43, 0, 0, 8, 178, 112,
+ 43, 89, 43, 91, 0, 0, 109, 165, 58, 20, 107, 81, 51, 20, 183,
+ 58, 20, 71, 20, 56, 28, 38, 58, 122, 63, 5, 20, 89, 203, 130,
+ 84, 335, 81, 107, 15, 0, 0, 0, 0, 61, 8, 0, 0, 0, 5, 3, 13,
+ 127, 13, 15, 18, 8, 25, 190, 66, 160, 69, 81, 58, 25, 211, 102,
+ 38, 0, 160, 33, 38, 51, 38, 15, 0, 5, 28, 0, 0, 0, 28, 89, 74,
+ 15, 3, 13, 0, 25, 0, 284, 51, 287, 229, 43, 20, 43, 89, 58, 38,
+ 81, 69, 25, 33, 0, 20, 10, 0, 0, 5, 0, 3, 0, 0, 0, 0, 0, 3,
+ 251, 33, 102, 152, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 5, 0, 3, 0, 0, 0, 0, 0, 0, 10, 0, 5, 53, 0, 51, 51,
+ 20, 51, 165, 191, 13, 0, 0, 0, 76, 89, 51, 41, 0, 0, 0, 0, 10,
+ 257, 185, 48, 46, 5, 102, 89, 23, 23, 0, 10, 13, 58, 23, 86,
+ 89, 0, 43, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 86, 56, 79, 43,
+ 102, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 13, 8, 0, 0, 0, 0,
+ 206, 0, 0, 0, 0, 0, 0, 13, 0, 0, 107, 241, 5, 142, 20, 0, 0,
+ 10, 0, 0, 0, 5, 15, 0, 23, 0, 64, 0, 0, 0, 8, 0, 0, 0, 76, 13,
+ 0, 175, 15, 46, 376, 0, 15, 10, 13, 5, 18, 15, 13, 13, 5, 18,
+ 0, 0, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 18, 33, 0, 0, 51,
+ 76, 3, 152, 160, 28, 51, 25, 0, 0, 0, 0, 5, 8, 0, 0, 0, 38,
+ 107, 18, 191, 28, 43, 38, 157, 33, 127, 394, 25, 102, 183, 38,
+ 3, 0, 0, 0, 13, 28, 140, 58, 5, 8, 48, 157, 163, 38, 160, 36,
+ 3, 25, 8, 0, 13, 71, 20, 13, 0, 81, 18, 107, 15, 5, 51, 18, 13,
+ 0, 0, 0, 0, 0, 0, 0, 0, 13, 5, 0, 8, 10, 0, 0, 18, 56, 84, 53,
+ 51, 38, 5, 13, 86, 38, 64, 114, 76, 94, 104, 30, 5, 3, 0, 0,
+ 13, 5, 0, 0, 0, 0, 0, 3, 8, 0, 38, 127, 0, 0, 0, 0, 0, 0, 3,
+ 13, 10, 38, 216, 183, 117, 58, 38, 46, 394, 58, 13, 137, 8,
+ 267, 86, 41, 38, 61, 13, 66, 15, 0, 0, 5, 25, 5, 64, 10, 53,
+ 10, 64, 38, 0, 13, 0, 10, 69, 3, 8, 3, 5, 5, 0, 0, 0, 0, 0, 0,
+ 0, 0, 25, 51, 0, 0, 0, 8, 0, 0, 0, 46, 43, 30, 28, 10, 3, 0, 0,
+ 155, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 0, 142, 23, 5, 15, 15,
+ 127, 20, 97, 0, 0, 91, 10, 18, 38, 18, 5, 0, 0, 18, 8, 3, 43,
+ 23, 137, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107, 71, 102,
+ 25, 25, 127, 43, 0, 0, 3, 13, 30, 3, 15, 198, 5, 183, 8, 0, 0,
+ 0, 8, 0, 25, 10, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0,
+ 0, 0, 13, 0, 18, 13, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 51, 25, 91, 0, 38, 79, 0, 0, 0, 124, 5, 33,
+ 3, 33, 5, 0, 5, 0, 0, 0, 0, 20, 0, 33, 38, 173, 0, 25, 165, 3,
+ 0, 18, 25, 41, 15, 0, 74, 15, 94, 0, 0, 28, 38, 13, 0, 152, 0,
+ 8, 84, 25, 0, 0, 0, 0, 0, 5, 15, 102, 38, 20, 0, 0, 8, 64, 119,
+ 8, 0, 13, 25, 51, 0, 0, 0, 0, 0, 0, 0, 0, 33, 43, 64, 51, 457,
+ 102, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 71, 76, 0, 66, 3, 102, 64, 191, 102, 79, 0, 208, 0,
+ 5, 10, 3, 10, 23, 0, 0, 0, 61, 8, 249, 107, 18, 0, 137, 3, 51,
+ 25, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 51, 51, 130,
+ 13, 38, 20, 33, 0, 20, 30, 76, 13, 0, 0, 0, 0, 0, 23, 13, 0,
+ 18, 38, 51)
diff --git a/data/usagrain.txt b/data/usagrain.txt
new file mode 100644
index 0000000..5f7e1a8
--- /dev/null
+++ b/data/usagrain.txt
@@ -0,0 +1,143 @@
+ wheat.flour corn wheat rye
+ 5.58 1.12 1.62 1.075
+ 5.46 1.15 1.58 1.064
+ 5.50 1.18 1.56 1.158
+ 5.53 1.09 1.39 1.073
+ 5.63 1.15 1.28 1.125
+ 5.65 1.13 1.28 1.070
+ 5.70 1.18 1.36 1.298
+ 5.68 1.15 1.46 1.168
+ 5.75 1.11 1.45 1.205
+ 5.66 1.11 1.47 1.298
+ 5.70 1.14 1.51 1.320
+ 5.75 1.12 1.53 1.295
+ 5.75 1.09 1.52 1.298
+ 5.78 1.11 1.52 1.293
+ 5.72 1.11 1.54 1.253
+ 5.80 1.12 1.59 1.228
+ 5.95 1.16 1.65 1.230
+ 6.09 1.15 1.63 1.278
+ 6.25 1.12 1.62 1.125
+ 6.37 1.14 1.57 1.145
+ 6.45 1.13 1.59 1.120
+ 6.44 1.12 1.55 1.148
+ 6.46 1.10 1.59 1.164
+ 6.39 1.17 1.58 1.210
+ 6.03 1.21 1.60 1.258
+ 6.05 1.21 1.63 1.228
+ 6.02 1.20 1.64 1.195
+ 6.15 1.21 1.68 1.233
+ 6.09 1.23 1.63 1.196
+ 6.05 1.30 1.38 1.220
+ 5.70 1.34 1.30 1.205
+ 5.28 1.34 1.30 1.185
+ 5.76 1.36 1.47 1.355
+ 5.98 1.22 1.64 1.433
+ 6.41 1.18 1.69 1.420
+ 6.83 1.23 1.69 1.423
+ 6.45 1.26 1.78 1.463
+ 6.90 1.23 1.75 1.376
+ 6.64 1.24 1.48 1.303
+ 6.68 1.26 1.66 1.280
+ 6.70 1.29 1.54 1.263
+ 6.53 1.26 1.43 1.265
+ 7.09 1.23 1.46 1.245
+ 6.70 1.25 1.44 1.210
+ 6.73 1.30 1.52 1.280
+ 6.84 1.25 1.50 1.236
+ 6.90 1.19 1.56 1.233
+ 6.90 1.28 1.59 1.185
+ 6.68 1.29 1.57 1.184
+ 6.70 1.31 1.58 1.160
+ 6.70 1.34 1.56 1.190
+ 6.70 1.35 1.56 1.170
+ 6.70 1.37 1.46 1.173
+ 6.90 1.36 1.45 1.100
+ 7.07 1.33 1.43 1.001
+ 7.10 1.29 1.53 1.088
+ 7.10 1.31 1.54 1.111
+ 7.20 1.27 1.53 1.105
+ 7.28 1.17 1.66 1.125
+ 7.21 1.24 1.74 1.180
+ 7.22 1.32 1.71 1.243
+ 7.22 1.32 1.75 1.225
+ 7.22 1.26 1.67 1.155
+ 7.19 1.30 1.65 1.268
+ 7.20 1.32 1.67 1.153
+ 7.63 1.31 1.77 1.130
+ 8.07 1.42 1.90 1.280
+ 8.09 1.45 1.89 1.248
+ 7.92 1.46 1.92 1.243
+ 7.72 1.40 1.69 1.158
+ 7.73 1.35 1.75 1.160
+ 7.62 1.44 1.83 1.218
+ 7.35 1.44 1.80 1.203
+ 7.48 1.39 1.68 1.163
+ 7.55 1.42 1.83 1.208
+ 7.27 1.38 1.80 1.188
+ 7.43 1.39 1.67 1.180
+ 7.34 1.38 1.57 1.160
+ 7.40 1.33 1.48 1.191
+ 7.29 1.21 1.42 1.099
+ 7.27 1.20 1.45 1.173
+ 7.24 1.20 1.49 1.140
+ 7.18 1.08 1.44 1.118
+ 7.21 1.18 1.50 1.148
+ 7.18 1.15 1.52 1.154
+ 7.04 1.17 1.56 1.190
+ 6.94 1.14 1.51 1.179
+ 6.76 1.16 1.46 1.145
+ 6.78 1.21 1.47 1.130
+ 6.77 1.16 1.26 1.073
+ 6.73 1.15 1.29 1.125
+ 6.72 1.08 1.19 1.046
+ 6.85 1.08 1.18 1.119
+ 6.89 1.10 1.28 1.145
+ 6.90 1.17 1.36 1.169
+ 6.85 1.15 1.34 1.168
+ 6.85 1.19 1.43 1.205
+ 6.83 1.17 1.40 1.201
+ 6.76 1.17 1.35 1.215
+ 6.75 1.22 1.36 1.215
+ 6.75 1.31 1.40 1.240
+ 6.75 1.32 1.33 1.205
+ 6.92 1.30 1.30 1.185
+ 6.86 1.29 1.28 1.035
+ 6.78 1.27 1.33 1.055
+ 6.89 1.27 1.33 1.105
+ 6.95 1.16 1.42 1.115
+ 7.14 1.18 1.48 1.135
+ 7.14 1.26 1.50 1.135
+ 7.17 1.26 1.52 1.135
+ 7.19 1.25 1.54 1.145
+ 7.19 1.29 1.54 1.165
+ 7.19 1.32 1.55 1.165
+ 7.20 1.36 1.40 1.215
+ 7.16 1.39 1.43 1.085
+ 7.21 1.37 1.45 1.065
+ 7.20 1.45 1.68 1.085
+ 7.35 1.44 1.67 1.085
+ 7.43 1.42 1.74 1.145
+ 7.50 1.33 1.63 1.165
+ 7.45 1.39 1.72 1.155
+ 7.45 1.40 1.74 1.165
+ 7.45 1.36 1.68 1.135
+ 7.51 1.33 1.66 1.145
+ 7.58 1.31 1.66 1.185
+ 7.55 1.38 1.63 1.165
+ 7.52 1.30 1.44 1.035
+ 7.48 1.29 1.31 0.875
+ 7.48 1.17 1.33 0.945
+ 7.48 1.12 1.39 0.965
+ 7.48 1.09 1.49 0.895
+ 7.48 1.26 1.60 0.980
+ 7.28 1.23 1.58 1.030
+ 7.15 1.21 1.52 1.050
+ 7.15 1.23 1.58 1.030
+ 7.15 1.25 1.68 1.040
+ 7.07 1.28 1.67 1.060
+ 7.07 1.28 1.36 0.990
+ 7.07 1.30 1.47 0.980
+ 7.66 1.29 1.60 0.960
+ 8.39 1.39 1.86 0.990
+ 8.57 1.31 1.92 1.01
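
(A minimal sketch of loading the table above with base R, assuming the file sits at data/usagrain.txt relative to the working directory; the path is an assumption, not part of the diff. The file is whitespace-delimited with one header row and 142 data rows.)

    # Read the usagrain.txt series shown above; path is assumed.
    usagrain <- read.table("data/usagrain.txt", header = TRUE)
    str(usagrain)    # 142 obs. of 4 price columns: wheat.flour, corn, wheat, rye
    head(usagrain)   # first few rows, matching the top of the diff hunk
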
diff --git a/data/venice.R b/data/venice.R
new file mode 100644
index 0000000..63a2e25
--- /dev/null
+++ b/data/venice.R
@@ -0,0 +1,50 @@
+"venice" <-
+structure(list(year = c(1931, 1932, 1933, 1934, 1935, 1936, 1937,
+1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948,
+1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959,
+1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970,
+1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981
+), r1 = c(103, 78, 121, 116, 115, 147, 119, 114, 89, 102, 99,
+91, 97, 106, 105, 136, 126, 132, 104, 117, 151, 116, 107, 112,
+97, 95, 119, 124, 118, 145, 122, 114, 118, 107, 110, 194, 138,
+144, 138, 123, 122, 120, 114, 96, 125, 124, 120, 132, 166, 134,
+138), r2 = c(99, 78, 113, 113, 107, 106, 107, 97, 86, 101, 98,
+91, 88, 95, 102, 104, 108, 126, 102, 96, 117, 104, 102, 100,
+96, 91, 107, 114, 117, 126, 108, 110, 116, 104, 108, 127, 118,
+132, 120, 122, 116, 118, 111, 95, 110, 122, 102, 114, 140, 114,
+136), r3 = c(98, 74, 106, 91, 105, 93, 107, 85, 82, 98, 96, 87,
+82, 94, 98, 103, 101, 119, 102, 91, 114, 103, 98, 95, 96, 90,
+100, 113, 108, 123, 104, 108, 114, 104, 106, 126, 118, 123, 116,
+119, 116, 113, 99, 95, 109, 114, 100, 110, 131, 111, 130), r4 = c(96,
+73, 105, 91, 101, 90, 106, 83, 81, 97, 95, 83, 79, 90, 88, 101,
+99, 107, 101, 89, 109, 98, 98, 94, 95, 85, 98, 110, 107, 116,
+100, 107, 112, 103, 102, 104, 107, 114, 114, 110, 109, 111, 98,
+93, 103, 109, 98, 107, 130, 109, 128), r5 = c(94, 73, 102, 91,
+93, 87, 105, 82, 80, 96, 94, 83, 78, 89, 86, 100, 98, 101, 93,
+88, 106, 91, 92, 94, 94, 85, 98, 108, 105, 114, 100, 106, 110,
+102, 101, 103, 100, 112, 108, 105, 104, 96, 97, 92, 102, 108,
+96, 105, 122, 107, 119), r6 = c(89, 72, 89, 89, 91, 87, 102,
+81, 80, 94, 94, 81, 78, 84, 84, 91, 98, 98, 92, 86, 104, 91,
+89, 90, 92, 84, 97, 104, 102, 110, 95, 104, 109, 98, 101, 102,
+96, 110, 106, 99, 101, 92, 97, 90, 101, 108, 96, 102, 118, 106,
+110), r7 = c(86, 71, 89, 88, NA, 87, 98, 79, 78, 94, 89, 78,
+76, 84, 84, 88, 96, 92, 90, 86, 100, 90, 89, 86, 90, 82, 93,
+104, 96, 108, 94, 99, 104, 92, 100, 102, 95, 108, 104, 99, 100,
+91, 96, 90, 101, 104, 95, 100, 116, 104, 107), r8 = c(85, 70,
+88, 88, NA, 84, 95, 76, 78, 91, 88, 75, 76, 83, 80, 84, 96, 86,
+88, 86, 99, 90, 89, 82, 90, 82, 92, 102, 96, 108, 93, 99, 103,
+90, 98, 99, 93, 107, 103, 97, 100, 91, 92, 89, 97, 104, 94, 100,
+115, 103, 104), r9 = c(84, 70, 86, 86, NA, 82, 94, 74, 77, 90,
+87, 75, 74, 82, 80, 83, 94, 85, 88, 86, 99, 87, 84, 81, 90, 82,
+91, 100, 94, 107, 92, 98, 103, 90, 98, 98, 92, 107, 103, 96,
+99, 90, 90, 88, 90, 102, 91, 100, 115, 102, 104), r10 = c(79,
+69, 85, 81, NA, 81, 94, 69, 77, 89, 86, 73, 71, 80, 80, 82, 93,
+83, 86, 85, 99, 86, 84, 81, 88, 80, 90, 99, 94, 106, 91, 95,
+102, 90, 98, 97, 92, 106, 103, 96, 98, 89, 90, 88, 89, 100, 90,
+99, 112, 99, 104)), .Names = c("year", "r1", "r2", "r3", "r4",
+"r5", "r6", "r7", "r8", "r9", "r10"), class = "data.frame", row.names = c("1",
+"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
+"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
+"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35",
+"36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46",
+"47", "48", "49", "50", "51"))
diff --git a/data/waitakere.txt b/data/waitakere.txt
new file mode 100644
index 0000000..a15d254
--- /dev/null
+++ b/data/waitakere.txt
@@ -0,0 +1,580 @@
+agaaus beitaw corlae cyadea cyamed daccup dacdac eladen hedarb hohpop kniexc kuneri lepsco metrob neslan rhosap vitluc altitude
+0 0 1 1 1 0 1 0 1 0 0 0 0 0 0 1 1 260
+0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 200
+1 0 0 1 0 1 0 1 1 1 1 0 0 1 0 0 0 320
+0 0 0 0 1 0 0 0 0 1 1 0 0 1 0 1 0 320
+0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 260
+0 0 0 1 1 0 0 0 0 1 1 0 0 1 0 1 1 200
+0 1 1 1 1 0 0 0 1 0 1 0 0 0 1 1 1 160
+0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 80
+1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 0 1 1 1 0 0 0 0 1 0 1 1 0 0 1 0 60
+0 0 1 0 1 0 0 0 1 0 1 0 0 0 0 1 1 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 320
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 1 220
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 1 1 0 0 0 1 0 0 1 0 0 0 1 0 40
+0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 1 20
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 200
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 200
+0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 430
+0 0 0 1 0 1 0 1 0 0 0 0 1 0 0 0 0 410
+0 1 0 1 0 1 0 1 0 0 1 0 0 1 0 1 0 400
+0 0 0 0 1 1 1 1 0 0 1 0 0 0 0 0 0 410
+0 0 0 0 1 1 1 0 1 0 1 0 0 0 0 0 0 459
+1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 420
+0 1 0 1 1 0 0 1 1 0 1 0 0 0 0 1 0 410
+0 1 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 400
+0 1 0 1 0 0 0 1 0 1 1 1 0 0 0 0 0 428
+0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 0 0 400
+1 0 0 1 0 1 0 0 0 0 1 1 0 0 0 1 0 290
+0 0 0 1 1 1 0 1 1 0 1 0 0 1 0 1 0 300
+0 1 0 0 1 1 0 1 1 0 1 1 0 0 0 0 0 310
+0 0 0 1 1 0 0 1 0 0 1 0 0 1 0 1 0 280
+0 1 0 0 1 1 0 1 0 0 1 0 0 0 0 1 0 260
+0 0 0 1 0 1 0 1 1 0 1 0 0 1 1 1 0 290
+0 1 0 0 1 1 0 0 1 0 1 0 0 1 0 1 0 340
+1 1 0 1 1 0 0 1 1 0 1 1 1 0 0 1 0 310
+1 1 0 1 1 1 0 1 0 1 1 1 0 0 0 0 0 280
+0 1 0 1 0 1 0 0 0 0 1 0 0 0 0 1 0 300
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 310
+0 0 0 0 0 1 0 1 0 1 1 0 0 1 0 1 0 390
+0 1 0 1 1 1 0 0 0 0 1 0 0 1 0 1 0 370
+1 1 0 0 0 1 0 0 1 0 1 1 0 1 0 0 0 360
+0 1 0 0 0 1 0 1 0 0 0 0 0 1 1 0 0 310
+0 0 0 1 0 0 0 0 1 0 1 1 0 0 1 1 0 270
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 160
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 170
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 300
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 360
+0 0 0 0 0 1 0 0 1 0 1 1 1 0 0 0 0 360
+1 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 280
+0 0 0 0 0 1 0 0 1 0 1 1 0 0 1 1 0 280
+0 0 0 0 1 0 0 0 1 1 1 0 1 0 1 1 0 180
+1 0 0 0 0 0 0 1 0 1 1 1 0 0 0 0 0 150
+1 1 1 0 1 1 0 0 1 0 1 0 0 0 0 1 1 140
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 1 140
+0 1 1 1 1 0 0 0 1 1 1 0 0 1 0 1 1 160
+0 0 0 1 0 0 0 0 1 0 1 0 1 0 0 1 0 260
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 240
+0 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 300
+1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 300
+0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 0 280
+0 0 0 1 1 0 0 0 1 1 1 1 0 0 0 1 0 260
+1 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 220
+1 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 270
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 210
+1 1 0 1 1 0 0 0 1 1 0 0 0 0 1 1 0 240
+0 1 1 0 0 0 0 0 0 0 1 1 0 1 0 0 1 200
+0 1 0 0 1 0 0 1 0 1 1 0 1 0 0 1 0 100
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 120
+0 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 220
+0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 240
+0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 1 0 260
+1 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 180
+0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 0 1 70
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 150
+1 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 90
+0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 160
+1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 1 0 200
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 230
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 200
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 140
+0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 190
+0 1 0 1 1 0 0 0 1 1 1 0 0 0 0 1 0 270
+1 1 1 1 1 0 0 0 1 0 0 0 0 1 0 1 1 150
+1 0 0 1 0 1 0 0 0 0 1 1 0 0 0 0 0 200
+0 1 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 100
+0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 0 1 100
+0 1 0 1 1 0 1 0 0 0 1 1 0 0 1 1 1 130
+1 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0 0 200
+0 0 0 1 0 1 0 1 1 1 0 0 0 0 0 1 1 250
+0 1 1 1 1 0 0 0 1 0 1 0 0 0 0 1 1 120
+0 1 0 1 1 0 1 0 1 1 1 0 0 1 0 1 1 180
+0 1 1 1 1 0 0 0 1 0 0 0 0 0 0 1 1 80
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 1 0 370
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 240
+1 1 1 1 0 0 0 0 0 0 1 1 0 1 0 1 1 180
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 120
+1 1 0 1 0 1 0 1 0 0 1 1 0 0 1 0 1 125
+1 1 1 0 1 0 0 0 0 0 1 0 0 0 0 1 0 80
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 90
+0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 90
+0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 0 90
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 120
+0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 1 1 20
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 90
+0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 0 100
+1 1 0 1 0 1 0 0 1 0 1 1 0 0 0 1 0 120
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 180
+1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 150
+0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 1 0 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 160
+1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 60
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 150
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 180
+1 0 0 1 1 1 0 0 0 1 1 1 1 0 0 1 0 260
+1 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 225
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 290
+1 1 1 1 0 0 0 0 0 1 1 1 0 0 0 1 0 80
+1 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 180
+0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 1 0 60
+0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 1 1 30
+0 1 0 0 0 1 1 0 0 0 1 0 0 1 0 1 0 290
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 290
+0 0 1 1 1 0 0 0 1 1 1 0 0 1 0 1 1 140
+1 0 0 0 1 1 0 0 0 0 1 1 1 0 0 1 0 60
+1 1 1 1 1 0 0 0 0 1 1 0 0 0 0 1 1 60
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 70
+1 0 1 0 1 1 0 0 1 0 1 1 0 0 0 1 1 90
+0 0 1 1 1 0 0 0 1 1 1 0 0 0 0 1 1 40
+0 0 1 1 1 0 0 0 1 1 1 1 0 0 0 1 1 50
+1 0 1 0 1 0 0 0 0 1 1 1 0 0 0 1 1 85
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 140
+0 1 1 0 1 0 0 0 0 0 1 0 0 0 0 1 1 140
+1 1 1 0 1 0 0 0 0 1 1 0 0 0 0 1 1 220
+1 0 0 0 1 0 0 0 1 1 0 1 0 0 1 1 0 140
+0 0 0 1 1 1 0 0 1 1 1 0 0 0 0 1 0 250
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0 160
+1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 310
+0 1 1 1 1 0 0 0 1 0 1 0 0 1 0 1 1 180
+1 0 0 0 1 1 0 1 0 0 1 1 1 0 0 0 0 300
+0 0 0 1 1 1 0 1 1 1 1 0 1 0 0 1 0 300
+0 0 0 1 1 1 0 0 0 0 1 1 1 0 0 0 0 260
+1 0 0 1 1 0 0 1 1 0 1 0 0 1 0 1 0 150
+0 0 0 1 1 1 0 1 0 0 1 0 0 1 1 1 0 260
+0 1 1 0 1 0 0 1 1 1 1 0 0 1 0 1 1 200
+0 1 1 1 1 1 0 1 1 0 1 0 0 0 1 1 1 280
+0 1 1 1 1 0 0 1 0 0 1 1 0 0 0 1 1 250
+0 1 1 1 1 0 0 0 1 0 1 0 0 1 1 1 1 250
+1 1 0 1 1 0 0 1 1 1 1 0 0 1 1 0 0 230
+0 1 0 1 1 0 0 1 1 1 1 0 0 0 0 1 0 320
+1 1 0 1 1 1 0 1 1 1 1 0 0 0 1 0 0 200
+0 1 1 1 1 0 0 1 0 1 1 0 0 1 0 1 0 200
+0 1 0 1 1 0 0 1 1 0 1 0 0 0 0 1 0 250
+0 0 1 1 1 0 0 1 0 1 1 0 0 0 1 1 1 240
+1 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 200
+0 0 0 1 1 1 0 1 1 0 1 0 0 0 1 1 0 300
+0 1 1 1 1 0 1 0 1 1 1 0 0 1 0 1 1 140
+0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 1 1 90
+0 0 1 1 0 0 0 1 0 0 1 0 0 0 0 1 1 60
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 60
+0 0 0 1 1 1 1 0 0 0 1 0 0 0 0 1 1 260
+0 0 1 1 1 0 0 0 0 1 1 0 1 0 0 1 1 180
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 260
+0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 260
+1 0 0 1 1 1 0 0 0 0 1 1 1 0 0 0 0 140
+1 1 1 1 1 0 0 0 0 0 1 1 0 0 0 1 1 20
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 1 1 100
+0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 100
+0 1 1 1 1 0 0 0 1 1 0 0 0 0 0 0 1 80
+0 1 1 1 1 0 0 0 0 1 1 0 0 0 0 1 0 80
+0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 80
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 120
+0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 200
+0 0 1 1 1 0 0 0 1 1 1 0 0 0 0 1 0 250
+1 0 1 1 0 0 0 0 0 0 1 0 0 0 0 1 1 110
+0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0 60
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 115
+1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 190
+1 0 0 0 0 1 0 0 0 0 1 1 1 0 1 0 0 120
+1 0 0 0 1 1 0 0 0 1 1 1 1 0 1 0 0 140
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 190
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 130
+0 1 1 1 1 1 0 0 1 1 1 0 0 0 0 1 1 190
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 40
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
+0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 200
+0 0 0 1 1 1 1 0 0 1 0 1 1 0 0 1 0 160
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 240
+0 1 1 1 1 1 0 0 1 1 1 0 0 0 1 1 1 200
+1 0 0 1 1 1 0 0 1 0 1 1 0 0 0 1 1 200
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 220
+0 0 0 1 1 0 0 1 1 0 1 1 0 0 1 1 0 260
+0 1 1 1 1 0 0 1 1 1 1 0 0 0 0 1 0 160
+0 1 0 1 1 1 0 1 0 1 1 1 1 0 1 0 0 290
+0 1 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 140
+0 0 0 1 1 1 1 0 1 1 1 1 0 0 0 1 1 300
+0 0 0 0 1 1 0 0 1 0 1 1 0 0 0 0 0 320
+0 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 270
+0 1 0 1 1 0 0 1 1 1 1 1 0 0 1 0 0 260
+0 0 1 1 1 0 0 1 1 1 1 0 0 0 1 1 0 240
+0 0 0 0 1 1 1 0 0 0 0 1 0 0 1 0 0 310
+0 0 0 0 0 1 1 0 0 1 0 1 0 0 0 0 0 300
+0 0 0 0 0 1 1 1 0 0 0 0 1 0 1 0 0 300
+0 1 0 1 1 1 0 1 1 0 0 0 0 0 1 1 0 340
+0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 320
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 330
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 300
+0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 1 0 370
+0 1 0 0 0 1 0 1 0 0 1 0 0 0 0 0 0 320
+0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 1 0 390
+0 1 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 340
+0 1 0 0 0 1 1 1 0 0 1 0 0 1 1 0 0 305
+1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 220
+1 0 0 0 1 1 1 1 0 0 1 1 0 0 0 1 0 400
+0 0 0 0 0 1 1 0 0 0 1 0 0 1 0 0 0 230
+0 0 0 1 0 1 0 0 1 1 1 0 0 0 0 1 0 300
+0 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 340
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 50
+0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 1 1 10
+0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 250
+0 0 0 1 0 1 1 0 0 0 1 0 0 0 0 0 0 218
+0 0 0 0 1 0 0 0 1 0 1 1 1 0 1 0 0 245
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 100
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 200
+0 0 0 1 1 0 0 0 0 0 1 1 1 0 0 0 0 160
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 200
+0 0 0 1 1 1 0 0 1 0 1 0 0 1 1 1 0 250
+0 1 0 1 0 0 0 1 1 1 1 1 0 0 0 1 0 290
+1 0 0 1 1 1 0 0 0 0 1 0 0 0 1 1 0 280
+0 1 0 1 1 0 0 0 1 1 0 0 0 0 0 1 1 80
+0 1 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 270
+0 0 0 1 1 0 0 0 1 0 0 1 0 0 0 0 0 260
+0 0 0 1 1 0 1 1 1 0 1 0 0 0 1 1 1 110
+0 1 0 1 1 0 1 1 0 1 1 0 0 0 1 1 0 200
+0 1 1 1 1 0 0 0 1 0 1 0 0 0 0 1 1 120
+0 1 1 1 1 0 0 0 1 1 0 0 0 0 0 1 1 160
+0 1 0 0 1 1 0 1 1 0 1 0 0 0 0 1 0 320
+0 0 0 1 1 1 0 1 1 0 0 0 0 0 1 1 0 300
+0 0 0 0 1 1 0 1 0 0 1 0 0 1 0 0 0 370
+0 1 0 1 1 1 0 0 1 0 1 0 0 0 1 1 0 340
+0 1 0 1 0 1 0 1 0 0 1 1 0 1 1 0 0 280
+0 1 0 1 1 0 0 0 0 1 1 0 0 0 0 1 0 200
+0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 190
+0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1 0 200
+0 1 0 1 1 1 0 0 0 1 1 0 0 0 0 1 0 180
+0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 100
+1 0 0 0 1 1 0 0 0 0 1 1 1 0 0 0 0 120
+1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 140
+0 1 0 1 1 1 0 0 0 0 1 0 0 1 1 0 0 110
+0 0 0 0 1 1 1 0 0 0 1 0 0 0 0 1 1 200
+1 1 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 220
+0 1 0 0 0 1 1 0 0 0 1 0 0 0 0 1 0 110
+0 1 0 0 0 1 1 1 0 0 1 0 0 0 1 1 0 100
+0 0 0 0 1 1 1 0 0 0 1 0 0 1 0 1 0 0
+1 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 0 100
+0 1 0 0 1 0 1 0 0 0 1 0 0 0 0 1 0 180
+0 1 1 0 0 1 1 0 0 0 1 0 0 0 0 1 0 180
+0 1 0 1 1 1 0 0 0 0 1 0 0 1 0 1 0 320
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 360
+0 1 1 1 1 0 0 0 0 0 1 0 0 0 0 1 1 150
+0 1 0 0 1 0 1 0 1 0 1 1 0 1 0 1 0 140
+0 0 0 0 0 1 0 1 0 0 1 1 0 0 0 0 0 325
+0 1 1 0 1 1 0 0 1 0 1 0 0 0 0 1 0 240
+0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 1 0 150
+0 1 0 0 1 0 1 0 0 0 1 0 0 0 0 1 1 160
+0 1 0 1 1 1 1 0 1 0 1 0 0 0 1 1 0 140
+1 0 0 0 1 0 0 0 0 0 1 1 1 0 1 0 0 70
+0 0 0 1 1 1 1 0 1 0 1 0 0 1 0 1 0 160
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 110
+0 0 0 0 1 0 1 0 1 1 1 0 0 1 1 1 0 90
+0 0 0 0 1 1 1 0 1 0 1 0 0 1 0 1 0 200
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 70
+0 1 0 1 1 0 0 0 1 1 1 0 0 0 0 1 0 110
+1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 280
+1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 200
+0 1 1 1 1 0 0 0 1 1 1 0 0 0 0 1 1 220
+0 1 0 1 1 0 0 1 1 0 1 1 0 0 1 0 0 360
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 380
+0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 260
+0 0 0 0 0 1 0 0 0 0 1 0 0 1 0 1 0 390
+1 0 0 1 0 1 0 0 0 0 1 1 0 0 1 0 0 280
+1 0 1 1 1 1 0 0 1 0 1 0 0 1 1 1 1 160
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 320
+0 0 0 0 0 0 0 1 0 0 1 1 1 0 0 0 0 220
+1 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0 0 300
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 345
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 260
+0 1 0 1 1 1 0 1 1 1 1 0 0 1 0 1 0 180
+1 0 1 0 1 1 0 0 1 1 1 0 0 1 1 1 0 200
+0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 390
+0 1 0 1 1 1 1 0 1 1 1 0 0 1 0 1 0 180
+0 0 0 1 0 1 0 1 0 0 1 0 0 1 1 1 0 90
+0 0 0 1 1 1 0 1 1 0 1 0 0 1 1 1 0 270
+0 0 0 0 1 1 0 1 0 0 1 0 0 1 0 1 0 200
+0 0 0 1 1 1 1 1 1 0 1 0 0 1 1 1 0 340
+0 0 0 0 0 1 0 1 0 0 1 0 0 1 0 1 0 370
+0 0 0 1 1 1 1 1 1 0 1 0 0 1 1 1 0 260
+1 0 0 0 0 1 0 0 0 0 1 1 0 1 1 0 0 240
+0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 1 0 240
+0 1 0 0 1 1 0 0 1 1 0 0 0 1 1 1 0 210
+0 0 0 0 1 1 0 1 0 0 1 1 1 0 0 0 0 300
+0 0 0 1 1 0 0 1 1 0 1 0 0 0 0 0 0 360
+0 1 0 0 1 1 0 1 0 0 1 1 0 1 0 0 0 340
+0 0 0 0 1 1 0 1 0 0 1 0 0 1 0 1 0 200
+1 0 0 0 0 0 1 1 0 0 0 1 0 1 0 0 0 200
+0 0 0 0 0 1 0 0 0 0 0 0 0 1 1 0 0 300
+0 0 0 0 1 0 0 0 1 0 1 0 0 1 0 1 0 270
+0 0 0 1 1 1 0 0 1 0 1 0 0 0 0 1 0 280
+0 0 0 1 0 0 0 0 1 0 1 0 0 1 1 0 0 200
+0 1 0 1 1 1 0 0 1 1 1 0 0 0 1 1 1 170
+0 1 0 1 1 1 1 1 1 1 1 0 0 1 1 1 0 160
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 190
+0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 190
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 140
+1 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 0 140
+0 0 1 1 1 1 0 1 1 1 1 0 0 0 0 1 0 150
+0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 170
+1 0 0 0 0 1 0 0 0 0 0 1 0 1 1 0 0 360
+0 0 0 0 0 1 0 1 0 0 0 0 1 0 0 0 0 320
+0 0 0 0 0 1 1 0 1 0 1 1 0 0 0 0 0 325
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 1 1 0 0 1 0 1 1 0 0 0 0 1 0 320
+0 1 0 1 0 1 0 1 0 0 1 0 0 1 1 0 0 320
+0 0 0 0 0 1 0 1 1 1 1 0 0 1 0 1 0 340
+0 1 0 0 1 1 0 1 0 0 0 0 0 1 1 1 0 280
+0 0 0 0 1 1 0 0 1 1 1 0 0 1 1 1 0 260
+1 0 0 1 1 1 0 0 0 0 1 0 0 1 1 0 0 200
+0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 200
+0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 140
+0 1 0 1 1 1 0 0 0 1 0 0 0 1 0 1 0 240
+0 0 0 1 1 1 0 1 1 1 1 0 0 0 1 1 0 170
+0 0 0 1 1 1 0 0 1 1 1 0 0 0 0 1 0 190
+0 0 0 0 1 0 0 1 1 1 0 0 0 0 0 1 0 200
+0 1 0 0 1 0 0 0 0 1 1 0 0 0 1 1 0 150
+0 0 0 0 0 1 0 1 1 1 1 0 0 1 0 1 0 340
+0 1 0 0 1 0 0 0 0 1 1 1 0 0 0 0 1 220
+0 1 1 1 1 0 0 0 1 1 0 0 0 0 0 1 0 180
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 240
+0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 160
+0 1 0 0 1 1 0 1 1 1 1 0 0 0 0 1 0 230
+0 1 0 1 1 1 0 1 1 1 1 0 0 0 0 1 0 240
+0 0 0 1 1 1 0 1 1 1 1 0 0 0 1 1 0 220
+0 0 0 1 0 1 0 1 1 1 1 0 0 0 0 1 0 210
+1 0 0 1 1 1 1 0 1 0 1 1 0 0 0 1 1 220
+0 0 0 1 0 1 0 0 1 1 1 0 0 1 0 1 1 210
+0 0 0 1 1 1 0 1 1 1 1 0 0 0 0 1 1 210
+0 0 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 170
+1 0 0 1 0 1 1 1 1 0 1 1 0 1 0 1 0 280
+0 1 1 0 1 0 0 0 1 1 1 0 0 0 0 1 1 200
+0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 300
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 310
+0 1 0 1 1 0 0 0 1 1 1 0 0 0 0 1 0 120
+0 1 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 290
+0 0 0 1 0 1 1 0 1 0 1 0 0 1 0 1 0 120
+1 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 120
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 100
+0 0 0 1 1 1 0 0 1 1 1 0 0 1 0 1 0 140
+0 0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 1 100
+1 0 0 0 0 1 0 0 0 0 1 1 0 1 0 0 0 140
+0 1 0 1 1 0 1 1 0 1 1 0 0 0 1 1 0 200
+1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 200
+0 1 0 1 1 1 1 0 0 0 1 0 0 1 0 1 0 80
+0 0 0 1 1 0 0 0 1 0 1 0 0 0 0 1 0 120
+0 1 0 1 1 1 0 1 0 1 0 0 0 0 0 1 0 200
+0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 120
+1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 200
+0 1 0 1 1 0 1 0 1 0 1 0 0 0 0 1 0 100
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 220
+0 0 0 0 1 1 1 0 0 0 1 0 0 1 0 1 0 200
+0 0 0 0 0 0 1 0 0 0 1 1 1 0 0 0 0 220
+0 1 0 1 1 0 0 0 0 1 1 0 0 0 1 1 0 210
+0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 0 35
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 50
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 140
+0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 1 0 150
+0 0 0 1 0 1 0 0 1 1 1 0 1 0 0 1 0 210
+0 1 0 1 1 0 0 1 1 1 1 0 0 0 0 1 0 160
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 180
+0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 240
+0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 1 0 220
+1 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 270
+0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 240
+0 0 0 1 1 1 1 1 0 1 1 1 0 0 0 0 0 240
+0 0 0 0 1 1 1 0 0 1 1 0 0 0 0 1 0 250
+0 1 1 0 1 0 0 0 0 1 0 0 0 0 0 1 0 280
+0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 300
+0 1 0 1 1 0 0 0 1 1 0 0 0 0 0 1 1 220
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 90
+0 1 0 1 1 0 0 0 0 1 1 0 0 0 0 1 1 100
+1 0 0 1 1 1 0 0 0 0 0 1 0 0 0 0 0 140
+0 0 0 1 1 0 1 0 0 1 0 0 1 0 0 1 0 100
+0 1 0 1 1 0 0 0 1 1 1 0 0 0 0 1 0 140
+0 1 1 1 1 0 0 0 1 1 1 0 0 0 0 1 1 110
+0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 140
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 10
+0 1 0 1 0 0 0 0 0 0 1 1 0 0 0 1 1 60
+1 1 0 0 0 1 1 0 0 0 1 1 0 0 0 1 0 30
+0 0 0 1 1 0 1 0 1 0 1 0 0 0 0 1 1 140
+0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 5
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 50
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 1 25
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 50
+0 1 1 1 1 0 0 0 0 0 1 1 0 1 1 1 1 100
+0 1 0 1 1 0 0 0 1 0 1 0 0 0 0 1 0 100
+0 1 1 0 1 1 0 0 0 0 1 1 0 1 0 1 1 70
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 1 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 200
+0 0 0 1 1 0 0 0 1 0 0 0 1 0 0 1 0 180
+0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 1 120
+0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 1 0 120
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 100
+0 0 1 0 0 0 0 0 1 0 1 1 0 0 0 1 1 120
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 1 130
+0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 30
+1 1 0 0 0 0 1 0 0 1 0 1 0 0 0 0 1 70
+0 0 1 0 0 0 0 1 1 0 1 1 0 0 0 0 0 5
+0 1 1 1 0 0 0 0 1 1 1 0 1 0 0 0 0 25
+0 0 0 1 1 0 0 0 1 0 1 0 1 0 0 0 0 20
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 10
+0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 1 0 45
+0 0 1 0 0 0 0 0 0 1 1 1 0 1 1 1 1 180
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 200
+1 0 0 0 0 1 0 0 0 0 1 1 0 1 0 0 1 200
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 60
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 140
+0 0 0 0 1 0 0 0 0 1 0 0 1 0 0 1 0 170
+0 0 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 200
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 150
+0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 180
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 40
+0 0 1 0 0 0 0 0 0 0 0 1 1 0 0 1 1 40
+0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 1 30
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 90
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 200
+0 1 1 1 0 0 0 0 0 0 1 1 0 0 0 1 1 140
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 200
+0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1 0 60
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 1 20
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 100
+0 0 0 0 1 0 0 0 0 0 0 1 1 0 0 1 0 200
+0 0 0 1 1 0 0 0 0 0 1 0 1 0 0 0 0 180
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 50
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 100
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 45
+0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 100
+0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 1 0 80
+0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 1 0 70
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 60
+1 0 0 1 0 0 0 0 1 1 1 1 0 0 1 1 1 130
+0 0 1 1 0 0 0 0 0 0 1 1 0 0 1 1 1 100
+0 0 1 0 0 0 0 0 0 0 1 1 0 0 0 1 1 85
+0 1 1 1 1 0 0 1 1 0 1 1 0 0 0 1 1 30
+1 1 1 1 1 0 1 0 0 0 1 1 0 0 0 1 1 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 40
+0 0 1 0 1 0 0 0 0 0 0 1 1 0 0 1 1 80
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 100
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 110
+1 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0 0 200
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 270
+1 0 0 0 1 1 1 0 0 1 1 1 1 0 0 0 0 240
+1 0 0 1 1 1 1 0 1 1 1 1 0 0 1 1 0 210
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 200
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 140
+1 0 0 1 1 1 0 0 1 0 1 1 0 1 0 1 1 220
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 240
+0 0 0 1 1 1 1 0 1 1 1 1 0 0 0 1 1 240
+1 0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 1 200
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 220
+0 1 0 1 1 1 1 0 1 1 1 0 0 0 1 1 1 160
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 50
+0 1 1 1 0 0 1 0 0 0 0 1 0 0 0 1 1 80
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 90
+1 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 90
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 90
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 90
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 60
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 1 40
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 25
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 130
+0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 0 180
+0 0 1 0 0 0 0 0 0 0 1 1 0 0 0 1 1 25
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 50
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 120
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 130
+0 0 1 1 1 0 0 0 0 0 1 1 0 0 1 1 1 100
+1 0 1 0 1 0 0 1 1 0 1 1 0 0 0 0 1 120
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 100
+0 1 1 0 1 0 0 0 1 0 1 0 0 0 0 1 1 60
+1 0 1 1 1 0 0 1 0 1 1 0 0 0 1 1 0 60
+0 1 1 1 1 0 0 0 1 0 1 0 0 0 0 1 1 70
+0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 10
+0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 50
+0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 100
+0 0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 150
+1 1 0 0 1 1 1 0 0 0 1 1 0 0 0 1 0 150
+0 1 0 1 1 0 0 0 1 1 1 0 0 0 1 1 1 120
+1 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 100
+0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 110
+1 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 140
+0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 1 0 120
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 70
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 60
+0 1 1 0 0 0 1 0 1 0 1 1 0 0 0 1 1 70
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 5
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 50
+0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 70
+0 1 1 0 1 0 1 1 1 0 1 0 0 0 0 1 1 90
+1 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 80
+1 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 10
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 140
+1 1 1 0 1 0 0 0 0 0 1 0 0 0 0 1 1 90
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 70
+1 0 0 0 1 1 0 0 1 0 1 1 0 0 0 1 1 90
+1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 130
+0 0 1 1 1 1 1 0 1 0 1 0 0 0 1 1 1 120
+0 1 0 0 1 0 0 0 1 0 1 0 0 0 0 1 1 100
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 40
+1 0 0 1 0 1 0 0 1 0 1 1 0 0 1 1 0 160
+0 1 1 1 1 0 0 0 1 0 1 0 0 0 0 1 0 120
+1 0 0 0 1 1 0 0 0 0 1 0 0 0 1 0 0 140
+0 0 0 0 1 1 1 0 0 0 1 0 0 0 1 1 0 180
+0 0 0 1 1 1 1 0 1 0 1 0 0 0 1 1 0 130
+1 0 0 0 1 1 0 0 0 0 0 1 0 0 1 0 0 100
+1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 120
+1 0 0 1 1 1 1 0 0 0 1 1 0 0 1 1 0 80
+0 0 1 1 1 0 1 0 1 0 1 0 0 0 0 1 1 140
+1 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 150
+1 0 0 1 0 0 0 0 0 0 1 0 1 0 1 1 0 130
+1 1 1 1 1 0 0 0 0 0 1 0 0 0 0 1 1 60
+1 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 60
+0 0 1 1 1 0 0 0 0 0 1 0 0 0 0 1 1 80
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 80
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 40
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 20
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 60
+0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 280
+0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 0 0 80
+0 0 0 1 0 0 0 0 1 0 1 1 1 0 0 0 0 60
+0 0 0 1 1 0 1 0 1 1 0 1 0 0 0 1 1 45
+1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 40
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 60
+1 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 40
+0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 0 0 40
+0 1 0 0 0 0 1 0 0 0 1 1 0 0 0 1 1 60
+0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 1 1 20
+1 1 0 1 1 0 1 0 0 0 1 1 0 0 0 1 1 40
+1 0 0 1 0 1 0 0 0 0 1 1 0 0 0 1 0 40
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 50
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 15
+0 1 0 0 0 0 1 0 0 0 1 1 0 0 0 1 1 20
+1 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 50
+0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 60
+0 0 0 0 1 0 1 0 0 0 1 1 0 0 0 0 0 30
+0 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 50
+0 1 1 0 0 0 1 0 0 0 0 1 0 0 0 0 1 50
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20
+1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 80
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 15
+0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 50
+0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 70
+0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 40
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 15
+0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 30
+1 0 1 0 1 0 1 0 0 0 0 1 0 1 0 1 1 10
+0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 10
diff --git a/demo/00Index b/demo/00Index
new file mode 100755
index 0000000..84beef3
--- /dev/null
+++ b/demo/00Index
@@ -0,0 +1,6 @@
+binom2.or Bivariate logistic model
+cqo Constrained quadratic ordination
+distributions Maximum likelihood estimation of some distributions
+lmsqreg LMS quantile regression
+vgam Vector generalized additive models
+zipoisson Zero inflated Poisson
diff --git a/demo/binom2.or.R b/demo/binom2.or.R
new file mode 100755
index 0000000..d68e1cd
--- /dev/null
+++ b/demo/binom2.or.R
@@ -0,0 +1,40 @@
+# Demo for binom2.or
+
+if(dev.cur() <= 1) get(getOption("device"))()
+
+opar <- par(ask = interactive() &&
+ (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+
+
+data(hunua)
+attach(hunua)
+y00 = (1-agaaus) * (1-kniexc)
+y01 = (1-agaaus) * kniexc
+y10 = agaaus * (1-kniexc)
+y11 = agaaus * kniexc
+detach(hunua)
+
+fit = vgam(cbind(y00,y01,y10,y11) ~ s(altitude, df=c(4,4,2.5)),
+ binom2.or(zero=NULL), data=hunua)
+par(mfrow=c(1,1))
+plot(fit, se=TRUE, scol="darkgreen", lcol="blue")
+summary(fit)
+
+
+# Plot the marginal functions together
+mycols = c("blue","red")
+plot(fit, which.cf=1:2, lcol=mycols, scol=mycols,
+ overlay=TRUE, se=TRUE, llwd=2, slwd=2)
+legend(x=100, y=-4, leg=c("Agathis australis", "Knightia excelsa"),
+ col=mycols, lty=1)
+
+
+# Plot the odds ratio
+o = order(fit@x[,2])
+plot(fit@x[o,2], exp(predict(fit)[o,"log(OR)"]),
+ log="y", xlab="Altitude (m)", ylab="Odds ratio (log scale)",
+ col="blue", type="b")
+abline(h=1, lty=2) # Denotes independence between species
+
+
+
diff --git a/demo/cqo.R b/demo/cqo.R
new file mode 100755
index 0000000..c719c58
--- /dev/null
+++ b/demo/cqo.R
@@ -0,0 +1,99 @@
+# Demo for canonical Gaussian ordination
+
+if(dev.cur() <= 1) get(getOption("device"))()
+
+opar <- par(ask = interactive() &&
+ (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # standardize environmental vars
+
+
+## Rank-1 model (unequal tolerances, deviance=1176.0)
+
+set.seed(123)
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ family = quasipoissonff, data = hspider,
+ Bestof=10, Crow1positive=FALSE, EqualTolerances=FALSE,
+ ITolerances=FALSE)
+
+lvplot(p1, lcol=1:12, llwd=2, llty=1:12, y=TRUE, pch=1:12, pcol=1:12,
+ las=1, main="Hunting spider data")
+
+print(ccoef(p1), digits=3)
+print(Coef(p1), digits=3)
+
+# trajectory plot
+trplot(p1, which=1:3, log="xy", type="b", lty=1,
+ col=c("blue","red","green"), lwd=2, label=TRUE) -> ii
+legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
+ lwd=2, lty=1, col=c("blue","red","green"))
+abline(a=0, b=1, lty="dashed")
+
+
+
+
+## Rank-2 model (equal tolerances, deviance=856.5)
+
+set.seed(111)
+r2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardmont, Pardnigr, Pardpull, Trocterr) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ family = quasipoissonff, data = hspider, Rank = 2,
+ Bestof=10, ITolerances = TRUE,
+ EqualTolerances = TRUE, Crow1positive = c(FALSE, FALSE))
+print(ccoef(r2), digits=3)
+print(Coef(r2), digits=3)
+
+clr = (1:(10+1))[-7] # Omit yellow colour
+adj = c(-0.1, -0.1, -0.1, 1.1, 1.1, 1.1, -0.1, -0.1, -0.1, 1.1)
+# With C arrows
+lvplot(r2, label=TRUE, xlim=c(-2.8, 5.0), ellipse=FALSE, C=TRUE,
+ Cadj=c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj=adj,
+ las=1, chull=TRUE, pch="+", pcol=clr, sites=TRUE)
+
+# With circular contours
+lvplot(r2, label=TRUE, xlim=c(-2.8, 5.0), ellipse=TRUE, C=FALSE,
+ Cadj=c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj=adj,
+ las=1, chull=TRUE, pch="+", pcol=clr, sites=TRUE)
+
+# With neither C arrows nor circular contours
+lvplot(r2, label=TRUE, xlim=c(-2.8, 5.0), ellipse=FALSE, C=FALSE,
+ Cadj=c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj=adj,
+ las=1, chull=TRUE, pch="+", pcol=clr, sites=TRUE)
+
+# Perspective plot
+persp(r2, xlim=c(-5,5), ylim=c(-3,6), theta = 50, phi = 20)
+
+
+
+## Gaussian logit regression
+## Not recommended actually because the number of sites is far too low.
+## Deviance = 154.6, equal tolerances.
+
+attach(hspider)
+ybin = 0 + (cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) > 0) # Matrix of 0's and 1's
+detach(hspider)
+set.seed(1312)
+b1 = cqo(ybin[,-c(1,5)] ~ WaterCon + BareSand + FallTwig + CoveMoss +
+ CoveHerb + ReflLux, family = quasibinomialff(mv=TRUE),
+ Bestof=4, ITolerances=TRUE,
+ data = hspider, EqualTolerances=TRUE, Crow1positive=FALSE)
+lvplot(b1, type="predictors", llwd=2, las=1, ylab="logit mu",
+ ylim=c(-20,11), lcol=1:10)
+c1 = Coef(b1)
+cts = c("Trocterr", "Pardmont", "Alopfabr", "Arctlute")
+text(c1@Optimum[1,cts], logit(c1@Maximum[cts])+1.0, cts)
+
+round(t(Coef(b1, ITolerances=FALSE)@C), dig=3)
+
+# On the probability scale
+lvplot(b1, type="fitted", llwd=2, las=1, llty=1,
+ ylab="Probability of presence",
+ ylim=c(0,1), lcol=1:10)
+
+
diff --git a/demo/distributions.R b/demo/distributions.R
new file mode 100755
index 0000000..1dbfeb4
--- /dev/null
+++ b/demo/distributions.R
@@ -0,0 +1,30 @@
+# Demo for the maximum likelihood estimation of parameters from
+# some selected distributions
+# At the moment this is copied from some .Rd file
+
+## Negative binomial distribution
+## Data from Bliss and Fisher (1953).
+
+y = 0:7
+w = c(70, 38, 17, 10, 9, 3, 2, 1)
+fit = vglm(y ~ 1, negbinomial, weights=w)
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
+
+
+## Beta distribution
+
+set.seed(123)
+nn = 1000
+y = rbeta(nn, shape1=1, shape2=3)
+fit = vglm(y ~ 1, betaff(link="identity"), trace = TRUE, crit="c")
+fit = vglm(y ~ 1, betaff, trace = TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit) # Useful for intercept-only models
+
+Y = 5 + 8 * y # From 5 to 13, not 0 to 1
+fit = vglm(Y ~ 1, betaff(A=5, B=13), trace = TRUE)
+Coef(fit)
+fitted(fit)[1:4,]
+
diff --git a/demo/lmsqreg.R b/demo/lmsqreg.R
new file mode 100755
index 0000000..815ed3a
--- /dev/null
+++ b/demo/lmsqreg.R
@@ -0,0 +1,32 @@
+# Demo for lmsqreg
+# At the moment this is copied from lms.bcn.Rd
+
+if(dev.cur() <= 1) get(getOption("device"))()
+
+opar <- par(ask = interactive() &&
+ (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz, tr=TRUE)
+predict(fit)[1:3,]
+fitted(fit)[1:3,]
+bminz[1:3,]
+# Person 1 is near the lower quartile of BMI amongst people his age
+cdf(fit)[1:3]
+
+# Quantile plot
+par(bty="l", mar=c(5,4,4,3)+0.1, xpd=TRUE)
+qtplot(fit, percentiles=c(5,50,90,99), main="Quantiles",
+ xlim=c(15,90), las=1, ylab="BMI", lwd=2, lcol=4)
+
+# Density plot
+ygrid = seq(15, 43, len=100) # BMI ranges
+par(mfrow=c(1,1), lwd=2)
+a = deplot(fit, x0=20, y=ygrid,
+ main="Density functions at Age = 20, 42 and 55", xlab="BMI")
+a
+a = deplot(fit, x0=42, y=ygrid, add=TRUE, lty=2, col=2)
+a = deplot(fit, x0=55, y=ygrid, add=TRUE, lty=4, col=4, Attach=TRUE)
+a@post$deplot # Contains density function values
+
+
diff --git a/demo/vgam.R b/demo/vgam.R
new file mode 100755
index 0000000..6058bc8
--- /dev/null
+++ b/demo/vgam.R
@@ -0,0 +1,20 @@
+# Demo for vgam
+
+if(dev.cur() <= 1) get(getOption("device"))()
+
+opar <- par(ask = interactive() &&
+ (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+
+data(hunua)
+fit.h = vgam(agaaus ~ s(altitude), binomialff, hunua)
+plot(fit.h, se=TRUE, lcol="blue", scol="red", llwd=2, slwd=2, las=1)
+
+attach(hunua)
+n = nrow(hunua)
+o = order(altitude)
+plot(altitude[o], fitted(fit.h)[o], type="l", ylim=0:1,
+ lwd=2, col="blue", las=1)
+points(altitude, agaaus + (runif(n)-0.5)/30, col="red")
+detach(hunua)
+
+
diff --git a/demo/zipoisson.R b/demo/zipoisson.R
new file mode 100755
index 0000000..7b1f46c
--- /dev/null
+++ b/demo/zipoisson.R
@@ -0,0 +1,20 @@
+# Demo for Zero Inflated Poisson
+
+set.seed(111)
+n <- 1000
+phi <- 0.35 # Proportion that are zero by definition
+lambda <- 4 # Poisson parameter
+y <- ifelse(runif(n) < phi, 0, rpois(n, lambda))
+stem(y)
+
+fit <- vglm(y ~ 1, family=zipoisson, trace=TRUE, crit="c" )
+true.mean <- (1-phi)*lambda
+true.mean
+fitted(fit)[1:5,]
+fit@misc$prob0 # The estimate of P(Y=0)
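+
+# A small sanity check (a sketch, reusing phi and lambda from above):
+# under this model, P(Y=0) = phi + (1-phi)*exp(-lambda).
+phi + (1-phi)*exp(-lambda) # True P(Y=0)
+mean(y == 0) # Empirical proportion of zeros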
+
+coef(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
+
+
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
new file mode 100644
index 0000000..9cd267f
--- /dev/null
+++ b/man/AA.Aa.aa.Rd
@@ -0,0 +1,64 @@
+\name{AA.Aa.aa}
+\alias{AA.Aa.aa}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The AA-Aa-aa Blood Group System }
+\description{
+ Estimates the parameter of the
+ AA-Aa-aa blood group system.
+}
+\usage{
+AA.Aa.aa(link = "logit", init.pA = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to \code{pA}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.pA}{ Optional initial value for \code{pA}. }
+}
+\details{
+ This one parameter model involves a probability called \code{pA}.
+ The probability of getting a count in the first column of the
+ input (an AA) is \code{pA*pA}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Weir, B. S. (1996)
+\emph{Genetic Data Analysis II: Methods for Discrete Population
+ Genetic Data},
+Sunderland, MA: Sinauer Associates, Inc.
+}
+\author{ T. W. Yee }
+\note{
+The input can be a 3-column matrix of counts, where the columns
+are AA, Aa and aa (in order).
+Alternatively, the input can be a 3-column matrix of
+proportions (so each row adds to 1) and the \code{weights}
+argument is used to specify the total number of counts for each row.
+}
+\seealso{
+\code{\link{AB.Ab.aB.ab}},
+\code{\link{AB.Ab.aB.ab2}},
+\code{\link{ABO}},
+\code{\link{G1G2G3}},
+\code{\link{MNSs}}.
+}
+\examples{
+y = cbind(53, 95, 38)
+fit = vglm(y ~ 1, AA.Aa.aa(link="logit"), trace=TRUE)
+fit = vglm(y ~ 1, AA.Aa.aa(link="probit"), trace=TRUE)
+fit = vglm(y ~ 1, AA.Aa.aa(link="cloglog", init.p=0.9), trace=TRUE)
+fit = vglm(y ~ 1, AA.Aa.aa(link="identity"), trace=TRUE)
+rbind(y, sum(y)*fitted(fit))
+Coef(fit) # Estimated pA
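+# A sketch of the relationship stated in Details: the fitted
+# probability of an AA count should equal pA^2.
+pA = Coef(fit)
+c(pA^2, fitted(fit)[1,1]) # These two should agree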
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
new file mode 100644
index 0000000..314c875
--- /dev/null
+++ b/man/AB.Ab.aB.ab.Rd
@@ -0,0 +1,64 @@
+\name{AB.Ab.aB.ab}
+\alias{AB.Ab.aB.ab}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The AB-Ab-aB-ab Blood Group System }
+\description{
+ Estimates the parameter of the
+ AB-Ab-aB-ab blood group system.
+}
+\usage{
+AB.Ab.aB.ab(link = "logit", init.p = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to \code{p}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.p}{ Optional initial value for \code{p}. }
+}
+\details{
+ This one parameter model involves a probability called \code{p}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Lange, K. (2002)
+\emph{Mathematical and Statistical Methods for Genetic Analysis},
+2nd ed. New York: Springer-Verlag.
+
+}
+\author{ T. W. Yee }
+\note{
+The input can be a 4-column matrix of counts, where the columns
+are AB, Ab, aB and ab
+(in order).
+Alternatively, the input can be a 4-column matrix of
+proportions (so each row adds to 1) and the \code{weights}
+argument is used to specify the total number of counts for each row.
+}
+
+\seealso{
+\code{\link{AA.Aa.aa}},
+\code{\link{AB.Ab.aB.ab2}},
+\code{\link{ABO}},
+\code{\link{G1G2G3}},
+\code{\link{MNSs}}.
+}
+
+\examples{
+y = cbind(1997, 906, 904, 32) # Data from Fisher (1925)
+fit = vglm(y ~ 1, AB.Ab.aB.ab(link="identity", init.p=0.9), trace=TRUE)
+fit = vglm(y ~ 1, AB.Ab.aB.ab, trace=TRUE)
+rbind(y, sum(y)*fitted(fit))
+Coef(fit) # Estimated p
+p = sqrt(4*(fitted(fit)[,4]))
+p*p
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/AB.Ab.aB.ab2.Rd b/man/AB.Ab.aB.ab2.Rd
new file mode 100644
index 0000000..cd1827d
--- /dev/null
+++ b/man/AB.Ab.aB.ab2.Rd
@@ -0,0 +1,69 @@
+\name{AB.Ab.aB.ab2}
+\alias{AB.Ab.aB.ab2}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The AB-Ab-aB-ab2 Blood Group System }
+\description{
+ Estimates the parameter of the
+  AB-Ab-aB-ab2 blood group system.
+}
+\usage{
+AB.Ab.aB.ab2(link = "logit", init.p = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to \code{p}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.p}{ Optional initial value for \code{p}. }
+}
+\details{
+ This one parameter model involves a probability called \code{p}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Elandt-Johnson, R. C. (1971)
+\emph{Probability Models and Statistical Methods in Genetics},
+New York: Wiley.
+}
+\author{ T. W. Yee }
+\note{
+The input can be a 4-column matrix of counts.
+Alternatively, the input can be a 4-column matrix of
+proportions (so each row adds to 1) and the \code{weights}
+argument is used to specify the total number of counts for each row.
+
+}
+
+\section{Warning}{
+There may be a bug in the \code{deriv} and \code{weight} slot of the
+family function.
+
+}
+\seealso{
+\code{\link{AA.Aa.aa}},
+\code{\link{AB.Ab.aB.ab}},
+\code{\link{ABO}},
+\code{\link{G1G2G3}},
+\code{\link{MNSs}}.
+}
+
+\examples{
+# See Elandt-Johnson, pp.430,427
+# Estimated variance is approx 0.0021
+y = cbind(68, 11, 13, 21)
+fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=logit), trace=TRUE, crit="coef")
+fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=probit), trace=TRUE, crit="coef")
+fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=identity), trace=TRUE, crit="coef")
+fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=cloglog), trace=TRUE, crit="coef")
+Coef(fit) # Estimated p
+rbind(y, sum(y)*fitted(fit))
+diag(vcov(fit))^0.5
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/ABO.Rd b/man/ABO.Rd
new file mode 100644
index 0000000..c30294a
--- /dev/null
+++ b/man/ABO.Rd
@@ -0,0 +1,70 @@
+\name{ABO}
+\alias{ABO}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The ABO Blood Group System }
+\description{
+ Estimates the two independent parameters of the
+  ABO blood group system.
+}
+\usage{
+ABO(link = "logit", ir = NULL, ip = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to \code{p} and \code{q}.
+ See \code{\link{Links}} for more choices.
+
+ }
+  \item{ir, ip}{ Optional initial values for \code{r} and \code{p}.
+ A \code{NULL} value means values are computed internally. }
+}
+\details{
+ The parameters \code{p} and \code{q} are probabilities, so that
+ \code{r=1-p-q} is the third probability.
+ The probabilities \code{p} and \code{r} correspond to
+ A and O respectively, so that \code{q} is the probability for B.
+ It is easier to make use of initial values for \code{r} than for
+ \code{q}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+ Lange, K. (2002)
+ \emph{Mathematical and Statistical Methods for Genetic Analysis},
+ 2nd ed. New York: Springer-Verlag.
+
+}
+\author{ T. W. Yee }
+\note{
+ The input can be a 4-column matrix of counts, where the columns
+ are A, B, AB, O (in order).
+ Alternatively, the input can be a 4-column matrix of
+ proportions (so each row adds to 1) and the \code{weights}
+ argument is used to specify the total number of counts for each row.
+
+}
+
+\seealso{
+ \code{\link{AA.Aa.aa}},
+ \code{\link{AB.Ab.aB.ab}},
+ \code{\link{AB.Ab.aB.ab2}},
+ \code{\link{G1G2G3}},
+ \code{\link{MNSs}}.
+}
+\examples{
+y = cbind(A=725, B=258, AB=72, O=1073) # Order matters, not the name
+fit = vglm(y ~ 1, ABO(link=logit), trace=TRUE, cri="coef")
+fit = vglm(y ~ 1, ABO(link=probit), trace=TRUE, cri="coef")
+fit = vglm(y ~ 1, ABO(link=cloglog), trace=TRUE, cri="coef")
+fit = vglm(y ~ 1, ABO(link=identity), trace=TRUE, cri="coef")
+coef(fit, matrix=TRUE)
+Coef(fit) # Estimated p and q
+rbind(y, sum(y)*fitted(fit))
+sqrt(diag(vcov(fit)))
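+
+# A hedged sketch (standard Hardy-Weinberg algebra for the ABO system,
+# not stated on this page): phenotype frequencies are
+# A = p^2 + 2pr, B = q^2 + 2qr, AB = 2pq, O = r^2, with r = 1-p-q.
+pq = Coef(fit)
+p = pq[1]; q = pq[2]; r = 1 - p - q
+c(A=p^2 + 2*p*r, B=q^2 + 2*q*r, AB=2*p*q, O=r^2) # Compare with fitted(fit)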
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/BratUC.Rd b/man/BratUC.Rd
new file mode 100644
index 0000000..7d1dad4
--- /dev/null
+++ b/man/BratUC.Rd
@@ -0,0 +1,77 @@
+\name{Brat}
+\alias{Brat}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Inputting Data to fit a Bradley Terry Model }
+\description{
+ Takes in a square matrix of counts and outputs
+ them in a form that is accessible to the \code{\link{brat}}
+ and \code{\link{bratt}} family functions.
+}
+\usage{
+Brat(mat, ties=0*mat, string=c(" > "," == "))
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{mat}{ Matrix of counts,
+which is considered \eqn{M} by \eqn{M} in dimension when
+there are ties, and \eqn{M+1} by \eqn{M+1}
+when there are no ties.
+The rows are winners and the columns are losers, e.g.,
+the 2-1 element is how many times Competitor 2 has beaten
+Competitor 1.
+The matrices are best labelled with the competitors' names.
+}
+ \item{ties}{ Matrix of counts. This should be the same
+dimension as \code{mat}. By default, there are no ties.
+The matrix must be symmetric, and the diagonal should contain
+\code{NA}s.
+}
+ \item{string}{ Character.
+The matrices are labelled with the first value of the descriptor, e.g.,
+\code{"NZ > Oz"} `means' NZ beats Australia in rugby.
+Suggested alternatives include \code{" beats "} or \code{" wins against "}.
+The second value is used to handle ties.
+
+}
+}
+\details{
+ In the \pkg{VGAM} package
+ it is necessary for each matrix to be represented as a single
+row of data by \code{\link{brat}} and \code{\link{bratt}}.
+Hence the non-diagonal elements of the \eqn{M+1} by \eqn{M+1} matrix
+are concatenated into \eqn{M(M+1)} values (no ties), while
+if there are ties, the non-diagonal elements of the \eqn{M} by \eqn{M} matrix
+are concatenated into \eqn{M(M-1)} values.
+
+}
+\value{
+ A matrix with 1 row and either \eqn{M(M+1)} or \eqn{M(M-1)} columns.
+}
+\references{
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+}
+\author{ T. W. Yee }
+\note{
+This is a data preprocessing function for
+\code{\link{brat}} and \code{\link{bratt}}.
+
+Yet to do: merge \code{InverseBrat} into \code{brat}.
+}
+\seealso{
+\code{\link{brat}},
+\code{\link{bratt}},
+\code{InverseBrat}.
+}
+\examples{
+journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
+m = matrix(c( NA, 33, 320, 284, 730, NA, 813, 276,
+ 498, 68, NA, 325, 221, 17, 142, NA), 4,4)
+dimnames(m) = list(winner = journal, loser = journal)
+Brat(m)
+vglm(Brat(m) ~ 1, brat, trace=TRUE)
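+
+# A sketch checking the dimension arithmetic in Details: there are no
+# ties, so the 4 by 4 matrix has M+1 = 4, i.e., M = 3, and Brat()
+# returns M(M+1) = 12 columns.
+ncol(Brat(m)) # Should be 12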
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/Coef.Rd b/man/Coef.Rd
new file mode 100644
index 0000000..5647df4
--- /dev/null
+++ b/man/Coef.Rd
@@ -0,0 +1,79 @@
+\name{Coef}
+\alias{Coef}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Computes Model Coefficients and Quantities }
+\description{
+ \code{Coef} is a generic function which computes model
+ coefficients from objects returned by modelling functions.
+ It is an auxiliary function to \code{\link[stats]{coef}}
+ that enables extra capabilities for some specific models.
+}
+\usage{
+Coef(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the computation of other
+ types of model coefficients or quantities is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model.
+ }
+}
+\details{
+ This function can often be useful for \code{\link{vglm}}
+ objects with just an intercept term in the RHS of
+  the formula, e.g., \code{y ~ 1}. In that case this function
+  typically applies the inverse link functions to the parameters.
+ See the example below.
+
+ For reduced-rank VGLMs, this function can return the
+ \bold{A}, \bold{C} matrices, etc.
+
+ For quadratic and additive ordination models, this function
+  can return ecologically meaningful quantities such as
+  tolerances, optima and maxima.
+
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+
+\section{Warning }{
+ This function may not work for \emph{all} \pkg{VGAM}
+ family functions. You should check your results on some
+ artificial data before applying it to models fitted to
+ real data.
+}
+
+\seealso{
+ \code{\link[stats]{coef}},
+ \code{\link{Coef.vlm}},
+ \code{\link{Coef.rrvglm}},
+ \code{\link{Coef.qrrvglm}}.
+}
+\examples{
+set.seed(123)
+nn = 1000
+y = rbeta(nn, shape1=1, shape2=3) # Original scale
+# parameters are estimated on a log scale:
+fit = vglm(y ~ 1, betaff, tr=TRUE, crit="c") # intercept-only model
+coef(fit, matrix=TRUE) # log scale
+Coef(fit) # On the original scale
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/Coef.qrrvglm-class.Rd b/man/Coef.qrrvglm-class.Rd
new file mode 100644
index 0000000..8caec11
--- /dev/null
+++ b/man/Coef.qrrvglm-class.Rd
@@ -0,0 +1,123 @@
+\name{Coef.qrrvglm-class}
+\docType{class}
+\alias{Coef.qrrvglm-class}
+\title{Class ``Coef.qrrvglm'' }
+\description{
+ The most pertinent matrices and other quantities pertaining to a
+ QRR-VGLM (CQO model).
+
+}
+\section{Objects from the Class}{
+Objects can be created by calls of the form \code{Coef(object,
+...)} where \code{object} is an object of class \code{"qrrvglm"}
+(created by \code{\link{cqo}}).
+
+In this document, \eqn{R} is the \emph{rank}, \eqn{M} is the number of
+linear predictors and \eqn{n} is the number of observations.
+
+}
+\section{Slots}{
+ \describe{
+ \item{\code{A}:}{Of class \code{"matrix"}, \bold{A}, which are the
+ linear `coefficients' of the matrix of latent variables.
+ It is \eqn{M} by \eqn{R}. }
+ \item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}.
+ These correspond to terms of the argument \code{Norrr}. }
+ \item{\code{C}:}{Of class \code{"matrix"}, \bold{C}, the
+ canonical coefficients. It has \eqn{R} columns. }
+ \item{\code{Constrained}:}{Logical. Whether the model is
+ a constrained ordination model. }
+ \item{\code{D}:}{Of class \code{"array"},
+ \code{D[,,j]} is an order-\code{Rank} matrix, for
+ \code{j} = 1,\dots,\eqn{M}.
+ Ideally, these are negative-definite in order to make the response
+ curves/surfaces bell-shaped.
+ }
+ \item{\code{Rank}:}{The rank (dimension, number of latent variables)
+ of the RR-VGLM. Called \eqn{R}. }
+ \item{\code{lv}:}{\eqn{n} by \eqn{R} matrix
+ of latent variable values. }
+ \item{\code{lvOrder}:}{Of class \code{"matrix"}, the permutation
+ returned when the function
+ \code{\link{order}} is applied to each column of \code{lv}.
+ This enables each column of \code{lv} to be easily sorted.
+ }
+ \item{\code{Maximum}:}{Of class \code{"numeric"}, the
+ \eqn{M} maximum fitted values. That is, the fitted values
+ at the optima for \code{Norrr = ~ 1} models.
+ If \code{Norrr} is not \code{~ 1} then these will be \code{NA}s. }
+ \item{\code{NOS}:}{Number of species.}
+ \item{\code{Optimum}:}{Of class \code{"matrix"}, the values
+ of the latent variables where the optima are.
+ If the curves are not bell-shaped, then the value will
+ be \code{NA} or \code{NaN}.}
+ \item{\code{OptimumOrder}:}{Of class \code{"matrix"}, the permutation
+ returned when the function
+ \code{\link{order}} is applied to each column of \code{Optimum}.
+ This enables each row of \code{Optimum} to be easily sorted.
+ }
+% \item{\code{Diagonal}:}{Vector of logicals: are the
+% \code{D[,,j]} diagonal? }
+ \item{\code{bellshaped}:}{Vector of logicals: is each
+ response curve/surface bell-shaped? }
+ \item{\code{dispersion}:}{Dispersion parameter(s). }
+ \item{\code{Dzero}:}{Vector of logicals, is each of the
+ response curves linear in the latent variable(s)?
+ It will be if and only if
+ \code{D[,,j]} equals \bold{O}, for
+    \code{j} = 1,\dots,\eqn{M}. }
+ \item{\code{Tolerance}:}{Object of class \code{"array"},
+ \code{Tolerance[,,j]} is an order-\code{Rank} matrix, for
+ \code{j} = 1,\dots,\eqn{M}, being the matrix of
+ tolerances (squared if on the diagonal).
+ These are denoted by \bold{T} in Yee (2004).
+ Ideally, these are positive-definite in order to make the response
+ curves/surfaces bell-shaped.
+The tolerance matrices satisfy
+\eqn{T_s = -\frac12 D_s^{-1}}{T_s = -0.5 * D_s^(-1)}.
+
+ }
+ }
+}
+
+%\section{Methods}{
+%No methods defined with class "Coef.qrrvglm" in the signature.
+%}
+\references{
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+}
+\author{ Thomas W. Yee }
+%\note{ ~~further notes~~ }
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link{Coef.qrrvglm}},
+ \code{\link{cqo}},
+% \code{qrrvglm-class},
+ \code{print.Coef.qrrvglm}.
+}
+
+\examples{
+x1 = rnorm(n <- 100)
+x2 = rnorm(n)
+x3 = rnorm(n)
+lv1 = 0 + x2 - 2*x3
+lambda1 = exp(3 - 0.5 * (lv1-0)^2)
+lambda2 = exp(2 - 0.5 * (lv1-1)^2)
+lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2)
+y1 = rpois(n, lambda1)
+y2 = rpois(n, lambda2)
+y3 = rpois(n, lambda3)
+yy = cbind(y1,y2,y3)
+p1 = cqo(yy ~ x1 + x2 + x3, fam=poissonff, trace=FALSE)
+\dontrun{
+lvplot(p1, y=TRUE, lcol=1:3, pch=1:3, pcol=1:3)
+}
+print(Coef(p1), digits=3)
+}
+\keyword{classes}
diff --git a/man/Coef.qrrvglm.Rd b/man/Coef.qrrvglm.Rd
new file mode 100644
index 0000000..5849177
--- /dev/null
+++ b/man/Coef.qrrvglm.Rd
@@ -0,0 +1,131 @@
+\name{Coef.qrrvglm}
+\alias{Coef.qrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Returns Important Matrices etc. of a QO Object }
+\description{
+ This methods function returns important matrices etc. of a
+ QO object.
+}
+\usage{
+Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A CQO or UQO object. The former has class \code{"qrrvglm"}. }
+ \item{varlvI}{
+ Logical indicating whether to scale the site scores (latent variables)
+ to have variance-covariance matrix equal to the rank-\eqn{R} identity
+ matrix. All models have uncorrelated site scores (latent variables),
+ and this option stretches or shrinks the ordination axes if \code{TRUE}.
+ See below for further details.
+
+ }
+ \item{reference}{
+ Integer or character.
+ Specifies the \emph{reference species}. By default, the reference
+ species is found by searching sequentially starting from the first
+ species until a positive-definite tolerance matrix is found. Then
+ this tolerance matrix is transformed to the identity matrix. Then
+    the site scores (latent variables) are made uncorrelated.
+ See below for further details.
+
+% If \code{EqualTolerances=FALSE}, then transformations occur so that
+% the reference species has a tolerance matrix equal to the rank-\eqn{R}
+% identity matrix.
+
+ }
+ \item{\dots}{ Currently unused. }
+}
+\details{
+
+ If \code{ITolerances=TRUE} or \code{EqualTolerances=TRUE} (and its
+ estimated tolerance matrix is positive-definite) then all species'
+ tolerances are unity by transformation or by definition, and the spread
+  of the site scores can be compared to them. Conversely, if one wishes
+  to compare the tolerances with the site score variability, then setting
+ \code{varlvI=TRUE} is more appropriate.
+
+ For rank-2 QRR-VGLMs, one of the species can be chosen so that the
+ angle of its major axis and minor axis is zero, i.e., parallel to
+ the ordination axes. This means the effect on the latent vars is
+ independent on that species, and that its tolerance matrix is diagonal.
+ The argument \code{reference} allows one to choose which is the reference
+ species, which must have a positive-definite tolerance matrix, i.e.,
+ is bell-shaped. If \code{reference} is not specified, then the code will
+ try to choose some reference species starting from the first species.
+ Although the \code{reference} argument could possibly be offered as
+ an option when fitting the model, it is currently available after
+ fitting the model, e.g., in the functions \code{\link{Coef.qrrvglm}} and
+ \code{\link{lvplot.qrrvglm}}.
+
+}
+\value{
+ The \bold{A}, \bold{B1}, \bold{C}, \bold{T}, \bold{D} matrices/arrays
+ are returned, along with other slots. For UQO, \bold{C} is undefined.
+ The returned object has class \code{"Coef.qrrvglm"}
+ (see \code{\link{Coef.qrrvglm-class}}).
+
+}
+\references{
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+\note{
+Consider an equal-tolerances Poisson/binomial CQO model with \code{Norrr = ~ 1}.
+For \eqn{R=1} it has about \eqn{2S+p_2}{2*S+p2} parameters.
+For \eqn{R=2} it has about \eqn{3S+2 p_2}{3*S+2*p_2} parameters.
+Here, \eqn{S} is the number of species, and \eqn{p_2=p-1}{p2=p-1} is
+the number of environmental variables making up the latent variable.
+For an unequal-tolerances Poisson/binomial CQO model with
+\code{Norrr = ~ 1}, it has about \eqn{3S -1 +p_2}{3*S-1+p2} parameters
+for \eqn{R=1}, and about \eqn{6S -3 +2p_2}{6*S -3 +2*p2} parameters
+for \eqn{R=2}.
+Since the total number of data points is \eqn{nS}{n*S}, where
+\eqn{n} is the number of sites, it pays to divide the number
+of data points by the number of parameters to get some idea
+about how much information the parameters contain.
+
+}
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+\seealso{
+\code{\link{cqo}},
+\code{\link{Coef.qrrvglm-class}},
+\code{print.Coef.qrrvglm},
+\code{\link{lvplot.qrrvglm}}.
+}
+
+\examples{
+\dontrun{
+set.seed(123)
+n = 100
+x1 = rnorm(n)
+x2 = rnorm(n)
+x3 = rnorm(n)
+lv1 = 0 + x2 - 2*x3
+lambda1 = exp(3 - 0.5 * (lv1-0)^2)
+lambda2 = exp(2 - 0.5 * (lv1-1)^2)
+lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2) # Unequal tolerances
+y1 = rpois(n, lambda1)
+y2 = rpois(n, lambda2)
+y3 = rpois(n, lambda3)
+set.seed(111)
+p1 = cqo(cbind(y1,y2,y3) ~ x1 + x2 + x3, poissonff, trace=FALSE)
+lvplot(p1, y=TRUE, lcol=1:3, pch=1:3, pcol=1:3)
+Coef(p1)
+print(Coef(p1), digits=3)
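+
+# Worked arithmetic for the Note above (a sketch): S = 3 species,
+# p2 = 3 environmental variables, rank R = 1. An equal-tolerances
+# model then has about 2*S + p2 parameters for n*S data points.
+S = 3; p2 = 3
+2*S + p2 # Approximate number of parameters
+n * S # Total number of data points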
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/Coef.rrvglm-class.Rd b/man/Coef.rrvglm-class.Rd
new file mode 100644
index 0000000..513f4fc
--- /dev/null
+++ b/man/Coef.rrvglm-class.Rd
@@ -0,0 +1,70 @@
+\name{Coef.rrvglm-class}
+\docType{class}
+\alias{Coef.rrvglm-class}
+\title{Class ``Coef.rrvglm'' }
+\description{ The most pertinent matrices and other quantities
+pertaining to a RR-VGLM. }
+\section{Objects from the Class}{
+Objects can be created by calls of the form
+\code{Coef(object, ...)} where \code{object} is an object
+of class \code{rrvglm} (see \code{\link{rrvglm-class}}).
+
+In this document, \eqn{M} is the number of linear predictors
+and \eqn{n} is the number of observations.
+}
+\section{Slots}{
+ \describe{
+ \item{\code{A}:}{Of class \code{"matrix"}, \bold{A}. }
+ \item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}. }
+ \item{\code{C}:}{Of class \code{"matrix"}, \bold{C}. }
+ \item{\code{Rank}:}{The rank of the RR-VGLM. }
+ \item{\code{colx1.index}:}{Index of the columns of the
+ \code{"vlm"}-type model matrix corresponding to the variables
+ in \bold{x1}. These correspond to \bold{B1}.
+ }
+ \item{\code{colx2.index}:}{
+ Index of the columns of the
+ \code{"vlm"}-type model matrix corresponding to the variables
+ in \bold{x2}. These correspond to the reduced-rank regression.
+ }
+ \item{\code{Atilde}:}{Object of class \code{"matrix"}, the
+ \bold{A} matrix with the corner rows removed. Thus each of the
+ elements have been estimated. This matrix is returned only
+ if corner constraints were used.
+ }
+}
+}
+
+%\section{Methods}{
+%No methods defined with class "Coef.rrvglm" in the signature.
+%}
+
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+}
+\author{ Thomas W. Yee }
+%\note{ ~~further notes~~ }
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+\code{\link{Coef.rrvglm}},
+\code{\link{rrvglm}},
+\code{\link{rrvglm-class}},
+\code{print.Coef.rrvglm}.
+}
+
+\examples{
+# Rank-1 stereotype model of Anderson (1984)
+data(pneumo)
+n = nrow(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time), x1=runif(n), x2=runif(n))
+fit = rrvglm(cbind(normal,mild,severe) ~ let + x1 + x2, multinomial, pneumo)
+coef(fit, mat=TRUE)
+Coef(fit)
+print(Coef(fit), digits=3)
+}
+\keyword{classes}
diff --git a/man/Coef.rrvglm.Rd b/man/Coef.rrvglm.Rd
new file mode 100644
index 0000000..43aed7c
--- /dev/null
+++ b/man/Coef.rrvglm.Rd
@@ -0,0 +1,54 @@
+\name{Coef.rrvglm}
+\alias{Coef.rrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Returns Important Matrices etc. of a RR-VGLM Object }
+\description{
+ This methods function returns important matrices etc. of a
+ RR-VGLM object.
+}
+\usage{
+Coef.rrvglm(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object of class \code{"rrvglm"}. }
+ \item{\dots}{ Currently unused. }
+}
+\details{
+ The \bold{A}, \bold{B1}, \bold{C} matrices are returned,
+ along with other slots.
+ See \code{\link{rrvglm}} for details about RR-VGLMs.
+}
+\value{
+ An object of class \code{"Coef.rrvglm"}
+(see \code{\link{Coef.rrvglm-class}}).
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+}
+\author{ Thomas W. Yee }
+\note{ This function is an alternative to \code{coef.rrvglm}. }
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+\seealso{
+ \code{\link{Coef.rrvglm-class}},
+ \code{print.Coef.rrvglm},
+ \code{\link{rrvglm}}.
+}
+
+\examples{
+# Rank-1 stereotype model of Anderson (1984)
+data(pneumo)
+n = nrow(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time), x1=runif(n), x2=runif(n))
+fit = rrvglm(cbind(normal,mild,severe) ~ let + x1 + x2, multinomial, pneumo)
+coef(fit, matrix=TRUE)
+Coef(fit)
+print(Coef(fit), digits=3)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/Coef.vlm.Rd b/man/Coef.vlm.Rd
new file mode 100644
index 0000000..6259355
--- /dev/null
+++ b/man/Coef.vlm.Rd
@@ -0,0 +1,66 @@
+\name{Coef.vlm}
+\alias{Coef.vlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Extract Model Coefficients for VLM Objects }
+\description{
+ Amongst other things, this function applies inverse
+ link functions to the parameters of intercept-only
+ VGLMs.
+}
+\usage{
+Coef.vlm(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A fitted model. }
+ \item{\dots}{ Arguments which may be passed into
+ \code{\link[stats]{coef}}.
+ }
+}
+\details{
+ Most \pkg{VGAM} family functions apply a link function to
+ the parameters, e.g., positive parameters often have a log
+ link, and parameters between 0 and 1 have a logit link.
+ This function can back-transform the parameter estimates to
+ the original scale.
+}
+\value{
+ For intercept-only models (e.g., formula is \code{y ~ 1})
+ the back-transformed parameter estimates can be returned.
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+}
+\author{ Thomas W. Yee }
+
+%\note{ ~~further notes~~ }
+
+\section{Warning }{
+ This function may not work for \emph{all} \pkg{VGAM}
+ family functions. You should check your results on some
+ artificial data before applying it to models fitted to
+ real data.
+}
+
+\seealso{
+ \code{\link{Coef}},
+ \code{\link[stats]{coef}}.
+}
+
+\examples{
+set.seed(123)
+nn = 1000
+y = rbeta(nn, shape1=1, shape2=3)
+# parameters are estimated on a log scale
+fit = vglm(y ~ 1, betaff, trace=TRUE, crit="c") # intercept-only model
+coef(fit, matrix=TRUE) # log scale
+Coef(fit) # On the original scale
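+# A minimal hand check (an assumption, not upstream code: betaff uses log
+# links for both shape parameters here), so back-transforming by hand
+# should agree:
+exp(coef(fit))  # Should roughly equal Coef(fit)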
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/DagumUC.Rd b/man/DagumUC.Rd
new file mode 100644
index 0000000..2cf2ba8
--- /dev/null
+++ b/man/DagumUC.Rd
@@ -0,0 +1,58 @@
+\name{Dagum}
+\alias{Dagum}
+\alias{ddagum}
+\alias{pdagum}
+\alias{qdagum}
+\alias{rdagum}
+\title{The Dagum Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Dagum distribution with shape parameters \code{a}
+ and \code{p.arg}, and scale parameter \code{scale}.
+}
+\usage{
+ddagum(x, a, scale, p.arg)
+pdagum(q, a, scale, p.arg)
+qdagum(p, a, scale, p.arg)
+rdagum(n, a, scale, p.arg)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{a, p.arg}{shape parameters.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{ddagum} gives the density,
+ \code{pdagum} gives the distribution function,
+ \code{qdagum} gives the quantile function, and
+ \code{rdagum} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{dagum}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The Dagum distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{dagum}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rdagum(n=3000, 4, 6, 2)
+fit = vglm(y ~ 1, dagum(init.a=2.1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
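+# A hedged extra check (same assumed parameter values as above): qdagum()
+# and pdagum() should be inverses of each other.
+probs = seq(0.1, 0.9, by=0.2)
+max(abs(pdagum(qdagum(probs, 4, 6, 2), 4, 6, 2) - probs))  # Should be ~0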
+}
+\keyword{distribution}
diff --git a/man/FiskUC.Rd b/man/FiskUC.Rd
new file mode 100644
index 0000000..1804833
--- /dev/null
+++ b/man/FiskUC.Rd
@@ -0,0 +1,58 @@
+\name{Fisk}
+\alias{Fisk}
+\alias{dfisk}
+\alias{pfisk}
+\alias{qfisk}
+\alias{rfisk}
+\title{The Fisk Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Fisk distribution with shape parameter \code{a}
+ and scale parameter \code{scale}.
+}
+\usage{
+dfisk(x, a, scale)
+pfisk(q, a, scale)
+qfisk(p, a, scale)
+rfisk(n, a, scale)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{a}{shape parameter.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{dfisk} gives the density,
+ \code{pfisk} gives the distribution function,
+ \code{qfisk} gives the quantile function, and
+ \code{rfisk} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{fisk}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The Fisk distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{fisk}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rfisk(n=1000, 4, 6)
+fit = vglm(y ~ 1, fisk, trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
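+# A hedged special-case check (an assumption based on the Note: the Fisk
+# distribution is the Dagum distribution with p.arg=1):
+dfisk(2, 4, 6) - ddagum(2, 4, 6, p.arg=1)  # Should be 0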
+}
+\keyword{distribution}
diff --git a/man/G1G2G3.Rd b/man/G1G2G3.Rd
new file mode 100644
index 0000000..4f52dcf
--- /dev/null
+++ b/man/G1G2G3.Rd
@@ -0,0 +1,75 @@
+\name{G1G2G3}
+\alias{G1G2G3}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The G1G2G3 Blood Group System }
+\description{
+ Estimates the three independent parameters of
+ the G1G2G3 blood group system.
+}
+\usage{
+G1G2G3(link = "logit", ip1 = NULL, ip2 = NULL, iF = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to \code{p1}, \code{p2} and \code{f}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ip1, ip2, iF}{
+ Optional initial value for \code{p1}, \code{p2} and \code{f}.
+
+ }
+}
+\details{
+ The parameters \code{p1} and \code{p2} are probabilities, so that
+ \code{p3=1-p1-p2} is the third probability.
+ The parameter \code{f} is the third independent parameter.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Lange, K. (2002)
+\emph{Mathematical and Statistical Methods for Genetic Analysis},
+2nd ed. New York: Springer-Verlag.
+
+}
+\author{ T. W. Yee }
+\note{
+The input can be a 6-column matrix of counts,
+with columns corresponding to
+\code{G_1/G_1},
+\code{G_1/G_2},
+\code{G_1/G_3},
+\code{G_2/G_2},
+\code{G_2/G_3},
+\code{G_3/G_3} (in order).
+Alternatively, the input can be a 6-column matrix of
+proportions (so each row adds to 1) and the \code{weights}
+argument is used to specify the total number of counts for each row.
+
+}
+
+\seealso{
+\code{\link{AA.Aa.aa}},
+\code{\link{AB.Ab.aB.ab}},
+\code{\link{AB.Ab.aB.ab2}},
+\code{\link{ABO}},
+\code{\link{MNSs}}.
+}
+\examples{
+y = cbind(108, 196, 429, 143, 513, 559)
+fit = vglm(y ~ 1, G1G2G3(link=probit), trace=TRUE, crit="coef")
+fit = vglm(y ~ 1, G1G2G3(link=logit, .3, .3, .02), trace=TRUE, crit="coef")
+fit = vglm(y ~ 1, G1G2G3(link="identity"), trace=TRUE)
+Coef(fit) # Estimated p1, p2 and f
+rbind(y, sum(y)*fitted(fit))
+sqrt(diag(vcov(fit)))
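+# A hedged sketch of the proportions-plus-weights input described in the
+# Note (assumed equivalent to the counts input above):
+fit2 = vglm(y/sum(y) ~ 1, G1G2G3(link="identity"), weights=sum(y))
+Coef(fit2)  # Should essentially equal Coef(fit)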
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
new file mode 100644
index 0000000..8c97b52
--- /dev/null
+++ b/man/Inv.gaussian.Rd
@@ -0,0 +1,59 @@
+\name{Inv.gaussian}
+\alias{Inv.gaussian}
+\alias{dinv.gaussian}
+\alias{pinv.gaussian}
+\title{The Inverse Gaussian Distribution}
+\description{
+ Density and distribution function
+ for the inverse Gaussian distribution with parameters
+ \code{mu} and \code{lambda}.
+}
+\usage{
+dinv.gaussian(x, mu, lambda)
+pinv.gaussian(q, mu, lambda)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+%%\item{p}{vector of probabilities.}
+%%\item{n}{number of observations. If \code{length(n) > 1}, the length
+%% is taken to be the number required.}
+ \item{mu}{the mean parameter.}
+ \item{lambda}{the \eqn{\lambda}{lambda} parameter.}
+}
+\value{
+ \code{dinv.gaussian} gives the density,
+ \code{pinv.gaussian} gives the distribution function.
+% \code{qinv.gaussian} gives the quantile function, and
+% \code{rinv.gaussian} generates random deviates.
+}
+\references{
+Johnson, N. L., Kotz, S. and Balakrishnan, N. (1994)
+\emph{Continuous Univariate Distributions},
+2nd edition,
+Volume 1,
+New York: Wiley.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{inv.gaussianff}}, the \pkg{VGAM} family function
+ for estimating both parameters by maximum likelihood estimation,
+ for the formula of the probability density function.
+}
+\note{
+ Currently \code{qinv.gaussian} and \code{rinv.gaussian} are unavailable.
+}
+\seealso{
+ \code{\link{inv.gaussianff}}.
+}
+\examples{
+\dontrun{
+x = seq(-0.05, 4, len=300)
+plot(x, dinv.gaussian(x, mu=1, lambda=1), type="l", col="blue", las=1,
+ main="blue is density, red is cumulative distribution function")
+abline(h=0, col="blue", lty=2)
+lines(x, pinv.gaussian(x, mu=1, lambda=1), type="l", col="red")
+}
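+# A hedged numerical check (an assumption, not upstream code): the cdf
+# should match the numerically integrated density.
+integrate(function(x) dinv.gaussian(x, mu=1, lambda=1), 0, 2)$value -
+ pinv.gaussian(2, mu=1, lambda=1)  # Should be ~0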
+}
+\keyword{distribution}
+
+
diff --git a/man/InvlomaxUC.Rd b/man/InvlomaxUC.Rd
new file mode 100644
index 0000000..9866379
--- /dev/null
+++ b/man/InvlomaxUC.Rd
@@ -0,0 +1,59 @@
+\name{Invlomax}
+\alias{Invlomax}
+\alias{dinvlomax}
+\alias{pinvlomax}
+\alias{qinvlomax}
+\alias{rinvlomax}
+\title{The Inverse Lomax Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the inverse Lomax distribution with shape parameter
+ \code{p.arg} and scale parameter \code{scale}.
+
+}
+\usage{
+dinvlomax(x, scale, p.arg)
+pinvlomax(q, scale, p.arg)
+qinvlomax(p, scale, p.arg)
+rinvlomax(n, scale, p.arg)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{p.arg}{shape parameter.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{dinvlomax} gives the density,
+ \code{pinvlomax} gives the distribution function,
+ \code{qinvlomax} gives the quantile function, and
+ \code{rinvlomax} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{invlomax}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The inverse Lomax distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{invlomax}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rinvlomax(n=1000, 6, 2)
+fit = vglm(y ~ 1, invlomax, trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
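+# A hedged special-case check (an assumption based on the Note: the
+# inverse Lomax distribution is the Dagum distribution with a=1):
+dinvlomax(2, 6, 2) - ddagum(2, a=1, scale=6, p.arg=2)  # Should be 0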
+}
+\keyword{distribution}
diff --git a/man/InvparalogisticUC.Rd b/man/InvparalogisticUC.Rd
new file mode 100644
index 0000000..67c3167
--- /dev/null
+++ b/man/InvparalogisticUC.Rd
@@ -0,0 +1,59 @@
+\name{Invparalogistic}
+\alias{Invparalogistic}
+\alias{dinvparalogistic}
+\alias{pinvparalogistic}
+\alias{qinvparalogistic}
+\alias{rinvparalogistic}
+\title{The Inverse Paralogistic Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the inverse paralogistic distribution with
+ shape parameter \code{a} and scale parameter \code{scale}.
+
+}
+\usage{
+dinvparalogistic(x, a, scale)
+pinvparalogistic(q, a, scale)
+qinvparalogistic(p, a, scale)
+rinvparalogistic(n, a, scale)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{a}{shape parameter.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{dinvparalogistic} gives the density,
+ \code{pinvparalogistic} gives the distribution function,
+ \code{qinvparalogistic} gives the quantile function, and
+ \code{rinvparalogistic} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{invparalogistic}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The inverse paralogistic distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{invparalogistic}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rinvparalogistic(n=3000, 4, 6)
+fit = vglm(y ~ 1, invparalogistic(init.a=2.1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
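+# A hedged special-case check (an assumption based on the Note: the inverse
+# paralogistic distribution is the Dagum distribution with p.arg=a):
+dinvparalogistic(2, 4, 6) - ddagum(2, a=4, scale=6, p.arg=4)  # Should be 0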
+}
+\keyword{distribution}
diff --git a/man/Links.Rd b/man/Links.Rd
new file mode 100644
index 0000000..b6c0590
--- /dev/null
+++ b/man/Links.Rd
@@ -0,0 +1,182 @@
+\name{Links}
+\alias{Links}
+\alias{TypicalVGAMlinkFunction}
+\title{Link functions for VGLM/VGAM/etc. families}
+\description{
+ The \pkg{VGAM} package provides a number of (parameter) link functions
+ which are described in general here. Collectively they offer the user
+ considerable flexibility for modelling data.
+
+}
+\usage{
+TypicalVGAMlinkFunction(theta, earg=list(), inverse=FALSE,
+ deriv=0, short=TRUE, tag=FALSE)
+}
+\arguments{
+ All \pkg{VGAM} link functions have the same argument list as given
+ above. In the following we have \eqn{\eta=g(\theta)}{eta=g(theta)}
+ where \eqn{g} is the link function, \eqn{\theta}{theta} is the parameter
+ and \eqn{\eta}{eta} is the linear/additive predictor.
+
+ \item{theta}{
+ Numeric or character.
+ Actually this can be \eqn{\theta}{theta} (default) or \eqn{\eta}{eta},
+ depending on the other arguments.
+ If \code{theta} is character then \code{inverse} and
+ \code{deriv} are ignored.
+
+ }
+ \item{earg}{
+ List.
+ Extra argument allowing for additional information, specific to the
+ link function. For example, for \code{\link{logoff}}, this will
+ contain the offset value. The argument \code{earg} is
+ always a list with \emph{named} components. See each specific link
+ function to find the component names for the list.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse link value
+ \eqn{\theta}{theta} is returned, hence the argument
+ \code{theta} is really \eqn{\eta}{eta}.
+
+ }
+ \item{deriv}{
+ Integer. Either 0, 1, or 2 specifying the order of the derivative.
+
+ }
+ \item{short, tag}{
+ Logical.
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+ Used only if \code{theta} is character, and gives the formula
+ for the link in character form.
+ If \code{tag=TRUE} then the result contains a little more information.
+
+ }
+}
+\value{
+ Returns one of:
+ the link function value or its first or second derivative,
+ the inverse link or its first or second derivative,
+ or a character description of the link.
+
+ Here are the general details.
+ If \code{inverse=FALSE} and \code{deriv=0} (default) then the ordinary link
+ function \eqn{\eta=g(\theta)}{eta=g(theta)} is returned.
+ If \code{inverse=FALSE} and \code{deriv=1} then it is
+ \eqn{d\theta / d\eta}{d theta / d eta}
+ \emph{as a function of} \eqn{\theta}{theta}.
+ If \code{inverse=FALSE} and \code{deriv=2} then it is
+ \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2}
+ \emph{as a function of} \eqn{\theta}{theta}.
+
+ If \code{inverse=TRUE} and \code{deriv=0} then the inverse
+ link function is returned, hence \code{theta} is really
+ \eqn{\eta}{eta}.
+ If \code{inverse=TRUE} and \code{deriv} is positive then the
+ \emph{reciprocal} of the same link function with
+ \code{(theta=theta, earg=earg, inverse=FALSE, deriv=deriv)}
+ is returned.
+
+}
+\details{
+ The following is a brief enumeration of all \pkg{VGAM} link functions.
+
+ For parameters lying between 0 and 1 (e.g., probabilities):
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}},
+ \code{\link{loglog}},
+ \code{fsqrt},
+ \code{\link{logc}},
+ \code{\link{golf}},
+ \code{\link{polf}},
+ \code{\link{nbolf}}.
+
+ For positive parameters (i.e., greater than 0):
+ \code{\link{loge}},
+ \code{\link{nloge}},
+ \code{powl}.
+
+ For parameters greater than 1:
+ \code{\link{loglog}}.
+
+ For parameters between \eqn{-1} and \eqn{1}:
+ \code{\link{fisherz}},
+ \code{\link{rhobit}}.
+
+ For parameters between \eqn{A} and \eqn{B}:
+ \code{\link{elogit}},
+ \code{\link{logoff}} (\eqn{B=\infty}{B=Inf}).
+
+ For unrestricted parameters (i.e., any value):
+ \code{\link{identity}},
+ \code{\link{nidentity}},
+ \code{\link{reciprocal}},
+ \code{\link{nreciprocal}}.
+
+% Other links:
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+
+\seealso{
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ \code{\link{cao}},
+ \code{\link{uqo}}.
+
+}
+\author{T. W. Yee}
+\note{
+ From October 2006 onwards,
+ all \pkg{VGAM} family functions will only
+ contain one default value for each link argument rather than giving a
+ vector of choices.
+ For example, rather than
+ \code{binomialff(link=c("logit", "probit", "cloglog",
+ "cauchit", "identity"), ...)}
+ it is now
+ \code{binomialff(link="logit", ...)}.
+ No checking will be done to see if the user's choice is reasonable.
+ This means that the user can write his/her own \pkg{VGAM} link function
+ and use it within any \pkg{VGAM} family function.
+ Altogether this provides greater flexibility.
+ The downside is that the user must specify the \emph{full} name of the
+ link function, by either assigning the link argument the full name as
+ a character string, or just the name itself. See the examples below.
+
+}
+\examples{
+logit("a")
+logit("a", short=FALSE)
+logit("a", short=FALSE, tag=TRUE)
+
+logoff(2:5, earg=list(offset=1)) # Same as log(2:5 + 1)
+powl(2:5, earg=list(power=2)) # Same as (2:5)^2
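+# A hedged numerical check of the Value section (assumption: the logit
+# link follows the conventions above): with inverse=TRUE and deriv=1 the
+# reciprocal of the deriv=1 value is returned.
+p = 0.3
+logit(p, deriv=1) * logit(p, inverse=TRUE, deriv=1)  # Should be 1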
+
+data(hunua)
+fit1 = vgam(agaaus ~ altitude, binomialff(link=cloglog), hunua) # ok
+fit2 = vgam(agaaus ~ altitude, binomialff(link="cloglog"), hunua) # ok
+
+\dontrun{
+# This no longer works since "clog" is not a valid VGAM link function:
+fit3 = vgam(agaaus ~ altitude, binomialff(link="clog"), hunua) # not ok
+
+par(mfrow=c(2,2))
+p = seq(0.01, 0.99, len=200)
+x = seq(-4, 4, len=200)
+plot(p, logit(p), type="l", col="blue")
+plot(x, logit(x, inverse=TRUE), type="l", col="blue")
+plot(p, logit(p, deriv=1), type="l", col="blue") # reciprocal!
+plot(p, logit(p, deriv=2), type="l", col="blue") # reciprocal!
+}
+}
+\keyword{models}
+
diff --git a/man/LomaxUC.Rd b/man/LomaxUC.Rd
new file mode 100644
index 0000000..a1f11f6
--- /dev/null
+++ b/man/LomaxUC.Rd
@@ -0,0 +1,58 @@
+\name{Lomax}
+\alias{Lomax}
+\alias{dlomax}
+\alias{plomax}
+\alias{qlomax}
+\alias{rlomax}
+\title{The Lomax Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Lomax distribution with scale parameter \code{scale}
+ and shape parameter \code{q.arg}.
+}
+\usage{
+dlomax(x, scale, q.arg)
+plomax(q, scale, q.arg)
+qlomax(p, scale, q.arg)
+rlomax(n, scale, q.arg)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{q.arg}{shape parameter.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{dlomax} gives the density,
+ \code{plomax} gives the distribution function,
+ \code{qlomax} gives the quantile function, and
+ \code{rlomax} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{lomax}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The Lomax distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{lomax}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rlomax(n=2000, 6, 2)
+fit = vglm(y ~ 1, lomax(init.q=2.1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
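+# A hedged special-case check (an assumption based on the Note: the Lomax
+# distribution is the Singh-Maddala distribution with a=1):
+dlomax(2, 6, 2) - dsinmad(2, a=1, scale=6, q.arg=2)  # Should be 0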
+}
+\keyword{distribution}
diff --git a/man/MNSs.Rd b/man/MNSs.Rd
new file mode 100644
index 0000000..8d2d0e3
--- /dev/null
+++ b/man/MNSs.Rd
@@ -0,0 +1,72 @@
+\name{MNSs}
+\alias{MNSs}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The MNSs Blood Group System }
+\description{
+ Estimates the three independent parameters of
+ the MNSs blood group system.
+}
+\usage{
+MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to the three parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{imS, ims, inS}{
+ Optional initial value for \code{mS}, \code{ms}
+ and \code{nS} respectively.
+ A \code{NULL} means they are computed internally.
+
+ }
+}
+\details{
+ There are three independent
+ parameters: \code{m_S}, \code{m_s}, \code{n_S}, say, so that
+ \code{n_s = 1 - m_S - m_s - n_S}.
+ We let the eta vector (transposed) be
+ \code{(g(m_S), g(m_s), g(n_S))} where \code{g} is the
+ link function.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+ Elandt-Johnson, R. C. (1971)
+ \emph{Probability Models and Statistical Methods in Genetics},
+ New York: Wiley.
+
+}
+\author{ T. W. Yee }
+\note{
+ The input can be a 6-column matrix of counts, where the columns are
+ MS, Ms, MNS, MNs, NS, Ns (in order).
+ Alternatively, the input can be a 6-column matrix of
+ proportions (so each row adds to 1) and the \code{weights}
+ argument is used to specify the total number of counts for each row.
+
+}
+\seealso{
+ \code{\link{AA.Aa.aa}},
+ \code{\link{AB.Ab.aB.ab}},
+ \code{\link{AB.Ab.aB.ab2}},
+ \code{\link{ABO}},
+ \code{\link{G1G2G3}}.
+}
+\examples{
+# Only the column order matters, not the column names:
+y = cbind(MS=295, Ms=107, MNS=379, MNs=322, NS=102, Ns=214)
+fit = vglm(y ~ 1, MNSs("logit", .25, .28, .08), trace=TRUE)
+fit = vglm(y ~ 1, MNSs(link=logit), trace=TRUE, cri="coef")
+Coef(fit)
+rbind(y, sum(y)*fitted(fit))
+diag(vcov(fit))^0.5
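+# A hedged follow-up (assuming Coef(fit) returns the estimates of m_S,
+# m_s and n_S): the fourth probability follows from the Details.
+1 - sum(Coef(fit))  # Estimated n_s = 1 - m_S - m_s - n_S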
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/Max.Rd b/man/Max.Rd
new file mode 100644
index 0000000..b26a993
--- /dev/null
+++ b/man/Max.Rd
@@ -0,0 +1,88 @@
+\name{Max}
+\alias{Max}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Maxima }
+\description{
+ Generic function for the \emph{maxima} (maximums) of a model.
+}
+\usage{
+Max(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the computation or
+ extraction of
+ a maximum (or maxima) is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model. Sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+ }
+}
+\details{
+ Different models can define a maximum in different ways.
+ Many models have no such notion or definition.
+
+ Maxima occur in quadratic and additive ordination,
+ e.g., CQO or UQO or CAO.
+ For these models the maximum is the fitted value at the
+ optimum. For quadratic ordination models there is a formula
+ for the optimum but for additive ordination models the
+ optimum must be searched for numerically. If it occurs
+ on the boundary, then the optimum is undefined. For
+ a valid optimum, the fitted value at the optimum is the maximum.
+
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+%\section{Warning }{
+%}
+
+\seealso{
+ \code{Max.qrrvglm},
+ \code{\link{Tol}},
+ \code{\link{Opt}}.
+}
+
+\examples{
+\dontrun{
+data(hspider)
+set.seed(111) # This leads to the global solution
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ Bestof = 2,
+ fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+
+Max(p1)
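+# A rough hedged check (an assumption: only valid when each species'
+# optimum lies inside the sampled latent-variable range): fitted values
+# should not exceed the maxima.
+apply(fitted(p1), 2, max) / Max(p1)  # Should be at most 1, roughly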
+
+index = 1:ncol(p1@y)
+persp(p1, col=index, las=1, lwd=2)
+abline(h=Max(p1), lty=2, col=index)
+
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/MaxwellUC.Rd b/man/MaxwellUC.Rd
new file mode 100644
index 0000000..16aa4ba
--- /dev/null
+++ b/man/MaxwellUC.Rd
@@ -0,0 +1,70 @@
+\name{Maxwell}
+\alias{Maxwell}
+\alias{dmaxwell}
+\alias{pmaxwell}
+\alias{qmaxwell}
+\alias{rmaxwell}
+\title{The Maxwell Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Maxwell distribution.
+
+}
+\usage{
+dmaxwell(x, a)
+pmaxwell(q, a)
+qmaxwell(p, a)
+rmaxwell(n, a)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. A single positive integer. }
+ \item{a}{the parameter.}
+}
+\value{
+ \code{dmaxwell} gives the density,
+ \code{pmaxwell} gives the distribution function,
+ \code{qmaxwell} gives the quantile function, and
+ \code{rmaxwell} generates random deviates.
+}
+\references{
+ Balakrishnan, N. and Nevzorov, V. B. (2003)
+ \emph{A Primer on Statistical Distributions}.
+ Hoboken, New Jersey: Wiley.
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{maxwell}}, the \pkg{VGAM} family function
+ for estimating the parameter \eqn{a} by maximum likelihood estimation,
+ for the formula of the probability density function.
+}
+\note{
+ The Maxwell distribution is related to the Rayleigh distribution.
+}
+\seealso{
+ \code{\link{maxwell}},
+ \code{\link{Rayleigh}},
+ \code{\link{rayleigh}}.
+}
+\examples{
+\dontrun{
+a = 3
+x = seq(-0.5, 3, len=100)
+plot(x, dmaxwell(x, a=a), type="l", col="blue", las=1, ylab="",
+ main="blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles")
+abline(h=0, col="blue", lty=2)
+lines(x, pmaxwell(x, a=a), type="l", col="red")
+probs = seq(0.1, 0.9, by=0.1)
+Q = qmaxwell(probs, a=a)
+lines(Q, dmaxwell(Q, a), col="purple", lty=3, type="h")
+lines(Q, pmaxwell(Q, a), col="purple", lty=3, type="h")
+abline(h=probs, col="purple", lty=3)
+pmaxwell(Q, a) - probs # Should be all zero
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/Opt.Rd b/man/Opt.Rd
new file mode 100644
index 0000000..26376bf
--- /dev/null
+++ b/man/Opt.Rd
@@ -0,0 +1,90 @@
+\name{Opt}
+\alias{Opt}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Optima }
+\description{
+ Generic function for the \emph{optima} (or optimums) of a model.
+}
+\usage{
+Opt(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the computation or
+ extraction of an optimum (or optima) is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model. Sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+ }
+}
+\details{
+ Different models can define an optimum in different ways.
+ Many models have no such notion or definition.
+
+ Optima occur in quadratic and additive ordination,
+ e.g., CQO or UQO or CAO.
+ For these models the optimum is the value of the latent
+ variable where the maximum occurs, i.e., where the fitted value
+ achieves its highest value.
+ For quadratic ordination models there is a formula
+ for the optimum but for additive ordination models the
+ optimum must be searched for numerically. If it occurs
+ on the boundary, then the optimum is undefined.
+ At an optimum, the fitted value of the response is
+ called the \emph{maximum}.
+
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+In ordination, the optimum of a species is sometimes
+called the \emph{species score}.
+}
+%\section{Warning }{
+%}
+
+\seealso{
+ \code{Opt.qrrvglm},
+ \code{\link{Max}},
+ \code{\link{Tol}}.
+}
+
+\examples{
+data(hspider)
+set.seed(111) # This leads to the global solution
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ Bestof = 2,
+ fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+Opt(p1)
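+# A hedged follow-up (assumptions: rank-1 fit; lv() returns the site
+# scores): the site score with the largest fitted value for species 1
+# should lie near that species' optimum.
+lv(p1)[which.max(fitted(p1)[, 1])]  # Compare with Opt(p1)[1]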
+
+\dontrun{
+index = 1:ncol(p1@y)
+persp(p1, col=index, las=1, lwd=2, main="Vertical lines at the optima")
+abline(v=Opt(p1), lty=2, col=index)
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/ParalogisticUC.Rd b/man/ParalogisticUC.Rd
new file mode 100644
index 0000000..75ea652
--- /dev/null
+++ b/man/ParalogisticUC.Rd
@@ -0,0 +1,58 @@
+\name{Paralogistic}
+\alias{Paralogistic}
+\alias{dparalogistic}
+\alias{pparalogistic}
+\alias{qparalogistic}
+\alias{rparalogistic}
+\title{The Paralogistic Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the paralogistic distribution with shape parameter \code{a}
+ and scale parameter \code{scale}.
+}
+\usage{
+dparalogistic(x, a, scale)
+pparalogistic(q, a, scale)
+qparalogistic(p, a, scale)
+rparalogistic(n, a, scale)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{a}{shape parameter.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{dparalogistic} gives the density,
+ \code{pparalogistic} gives the distribution function,
+ \code{qparalogistic} gives the quantile function, and
+ \code{rparalogistic} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{paralogistic}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The paralogistic distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{paralogistic}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rparalogistic(n=3000, 4, 6)
+fit = vglm(y ~ 1, paralogistic(init.a=2.1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
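+# A hedged special-case check (an assumption based on the Note: the
+# paralogistic distribution is the Singh-Maddala distribution with q.arg=a):
+dparalogistic(2, 4, 6) - dsinmad(2, a=4, scale=6, q.arg=4)  # Should be 0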
+}
+\keyword{distribution}
diff --git a/man/Pareto.Rd b/man/Pareto.Rd
new file mode 100644
index 0000000..642b0ad
--- /dev/null
+++ b/man/Pareto.Rd
@@ -0,0 +1,68 @@
+\name{Pareto}
+\alias{Pareto}
+\alias{dpareto}
+\alias{ppareto}
+\alias{qpareto}
+\alias{rpareto}
+\title{The Pareto Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Pareto(I) distribution with parameters
+ \code{location} and \code{shape}.
+
+}
+\usage{
+dpareto(x, location, shape)
+ppareto(q, location, shape)
+qpareto(p, location, shape)
+rpareto(n, location, shape)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{location, shape}{the \eqn{\alpha}{alpha} and \eqn{k} parameters.}
+}
+\value{
+ \code{dpareto} gives the density,
+ \code{ppareto} gives the distribution function,
+ \code{qpareto} gives the quantile function, and
+ \code{rpareto} generates random deviates.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{pareto1}}, the \pkg{VGAM} family function
+ for estimating the parameter \eqn{k} by maximum likelihood estimation,
+ for the formula of the probability density function and the
+ range restrictions imposed on the parameters.
+}
+%%\note{
+%% The Pareto distribution is
+%%}
+\seealso{
+ \code{\link{pareto1}},
+ \code{\link{ParetoIV}}.
+
+}
+\examples{
+alpha = 3; k = exp(1); x = seq(2.8, 8, len=300)
+\dontrun{
+plot(x, dpareto(x, location=alpha, shape=k), type="l",
+ main="Pareto density split into 10 equal areas")
+abline(h=0, col="blue", lty=2)
+qq = qpareto(seq(0.1,0.9,by=0.1),location=alpha,shape=k)
+lines(qq, dpareto(qq, loc=alpha, shape=k), col="purple", lty=3, type="h")
+}
+pp = seq(0.1,0.9,by=0.1)
+qq = qpareto(pp, location=alpha, shape=k)
+ppareto(qq, location=alpha, shape=k)
+qpareto(ppareto(qq,loc=alpha,shape=k),loc=alpha,shape=k) - qq # Should be 0
+}
+\keyword{distribution}
+
+
diff --git a/man/ParetoIVUC.Rd b/man/ParetoIVUC.Rd
new file mode 100644
index 0000000..7a1c04d
--- /dev/null
+++ b/man/ParetoIVUC.Rd
@@ -0,0 +1,104 @@
+\name{ParetoIV}
+\alias{ParetoIV}
+\alias{dparetoIV}
+\alias{pparetoIV}
+\alias{qparetoIV}
+\alias{rparetoIV}
+\alias{ParetoIII}
+\alias{dparetoIII}
+\alias{pparetoIII}
+\alias{qparetoIII}
+\alias{rparetoIII}
+\alias{ParetoII}
+\alias{dparetoII}
+\alias{pparetoII}
+\alias{qparetoII}
+\alias{rparetoII}
+\alias{ParetoI}
+\alias{dparetoI}
+\alias{pparetoI}
+\alias{qparetoI}
+\alias{rparetoI}
+\title{The Pareto(IV/III/II) Distributions}
+\description{
+ Density, distribution function, quantile function and random generation
+ for the Pareto(IV/III/II) distributions.
+
+}
+\usage{
+dparetoIV(x, location=0, scale=1, inequality=1, shape=1)
+pparetoIV(q, location=0, scale=1, inequality=1, shape=1)
+qparetoIV(p, location=0, scale=1, inequality=1, shape=1)
+rparetoIV(n, location=0, scale=1, inequality=1, shape=1)
+dparetoIII(x, location=0, scale=1, inequality=1)
+pparetoIII(q, location=0, scale=1, inequality=1)
+qparetoIII(p, location=0, scale=1, inequality=1)
+rparetoIII(n, location=0, scale=1, inequality=1)
+dparetoII(x, location=0, scale=1, shape=1)
+pparetoII(q, location=0, scale=1, shape=1)
+qparetoII(p, location=0, scale=1, shape=1)
+rparetoII(n, location=0, scale=1, shape=1)
+dparetoI(x, scale=1, shape=1)
+pparetoI(q, scale=1, shape=1)
+qparetoI(p, scale=1, shape=1)
+rparetoI(n, scale=1, shape=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles. }
+ \item{p}{vector of probabilities. }
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{location}{the location parameter. }
+ \item{scale, shape, inequality}{the (positive) scale,
+ inequality and shape parameters. }
+}
+\value{
+ Functions beginning with the letter \code{d} give the density,
+ functions beginning with the letter \code{p} give the distribution function,
+ functions beginning with the letter \code{q} give the quantile function, and
+ functions beginning with the letter \code{r} generate random deviates.
+}
+\references{
+Brazauskas, V. (2003)
+Information matrix for Pareto(IV), Burr, and related
+distributions.
+\emph{Communications in Statistics: Theory and Methods},
+\bold{32}, 315--325.
+
+Arnold, B. C. (1983)
+\emph{Pareto Distributions}.
+Fairland, Maryland: International Cooperative Publishing House.
+
+}
+\author{ T. W. Yee }
+\details{
+ For the formulas and other details
+ see \code{\link{paretoIV}}.
+
+}
+\note{
+ The functions \code{[dpqr]paretoI} are the same as \code{[dpqr]pareto1}
+ except for a slight change in notation: \eqn{s=k} and
+ \eqn{b=\alpha}{b=alpha}; see \code{\link{Pareto}}.
+
+}
+\seealso{
+ \code{\link{paretoIV}},
+ \code{\link{Pareto}}.
+}
+\examples{
+\dontrun{
+x = seq(-0.2, 4, by=0.01)
+loc = 0; Scale = 1; ineq = 1; shape = 1.0;
+plot(x, dparetoIV(x, loc, Scale, ineq, shape), type="l", col="blue",
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", ylim=0:1, las=1, ylab="")
+abline(h=0, col="blue", lty=2)
+Q = qparetoIV(seq(0.05,0.95,by=0.05), loc, Scale, ineq, shape)
+lines(Q, dparetoIV(Q, loc, Scale, ineq, shape), col="purple", lty=3, type="h")
+lines(x, pparetoIV(x, loc, Scale, ineq, shape), col="red")
+abline(h=0, lty=2)
+}
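+# A hedged check of the Note (an assumed correspondence: shape s=k and
+# scale b=alpha, matching [dpqr]pareto):
+dparetoI(3, scale=2, shape=1.5) - dpareto(3, location=2, shape=1.5)  # ~0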
+}
+\keyword{distribution}
+
+
diff --git a/man/RayleighUC.Rd b/man/RayleighUC.Rd
new file mode 100644
index 0000000..0f7ba58
--- /dev/null
+++ b/man/RayleighUC.Rd
@@ -0,0 +1,65 @@
+\name{Rayleigh}
+\alias{Rayleigh}
+\alias{drayleigh}
+\alias{prayleigh}
+\alias{qrayleigh}
+\alias{rrayleigh}
+\title{The Rayleigh Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Rayleigh distribution with parameter
+ \code{a}.
+}
+\usage{
+drayleigh(x, a)
+prayleigh(q, a)
+qrayleigh(p, a)
+rrayleigh(n, a)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{a}{the parameter \eqn{a}.}
+}
+\value{
+ \code{drayleigh} gives the density,
+ \code{prayleigh} gives the distribution function,
+ \code{qrayleigh} gives the quantile function, and
+ \code{rrayleigh} generates random deviates.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{rayleigh}}, the \pkg{VGAM} family function
+ for estimating the parameter \eqn{a} by maximum likelihood estimation,
+ for the formula of the probability density function and range restrictions
+ on the parameter \eqn{a}.
+}
+\note{
+ The Rayleigh distribution is related to the Maxwell distribution.
+}
+\seealso{
+ \code{\link{rayleigh}},
+ \code{\link{maxwell}}.
+}
+\examples{
+\dontrun{
+a = 2
+x = seq(-1, 8, by=0.1)
+plot(x, drayleigh(x, a=a), type="l", ylim=c(0,1), las=1, ylab="",
+ main="Rayleigh density divided into 10 equal areas; red=cdf")
+abline(h=0, col="blue", lty=2)
+qq = qrayleigh(seq(0.1,0.9,by=0.1),a=a)
+lines(qq, drayleigh(qq, a=a), col="purple", lty=3, type="h")
+lines(x, prayleigh(x, a=a), col="red")
+}
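+# A hedged inversion check (an assumption, not upstream code):
+probs = seq(0.1, 0.9, by=0.2)
+max(abs(prayleigh(qrayleigh(probs, a=2), a=2) - probs))  # Should be ~0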
+}
+\keyword{distribution}
+
+
diff --git a/man/SinmadUC.Rd b/man/SinmadUC.Rd
new file mode 100644
index 0000000..c105a9a
--- /dev/null
+++ b/man/SinmadUC.Rd
@@ -0,0 +1,58 @@
+\name{Sinmad}
+\alias{Sinmad}
+\alias{dsinmad}
+\alias{psinmad}
+\alias{qsinmad}
+\alias{rsinmad}
+\title{The Singh-Maddala Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Singh-Maddala distribution with shape parameters \code{a}
+ and \code{q.arg}, and scale parameter \code{scale}.
+}
+\usage{
+dsinmad(x, a, scale, q.arg)
+psinmad(q, a, scale, q.arg)
+qsinmad(p, a, scale, q.arg)
+rsinmad(n, a, scale, q.arg)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. If \code{length(n) > 1}, the length
+ is taken to be the number required.}
+ \item{a, q.arg}{shape parameters.}
+ \item{scale}{scale parameter.}
+}
+\value{
+ \code{dsinmad} gives the density,
+ \code{psinmad} gives the distribution function,
+ \code{qsinmad} gives the quantile function, and
+ \code{rsinmad} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{sinmad}}, which is the \pkg{VGAM} family function
+ for estimating the parameters by maximum likelihood estimation.
+}
+\note{
+ The Singh-Maddala distribution is a special case of the 4-parameter
+ generalized beta II distribution.
+}
+\seealso{
+ \code{\link{sinmad}},
+ \code{\link{genbetaII}}.
+}
+\examples{
+y = rsinmad(n=3000, 4, 6, 2)
+fit = vglm(y ~ 1, sinmad(init.a=2.1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
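+# A hedged special-case check (an assumption: the Singh-Maddala
+# distribution with q.arg=1 is the Fisk distribution):
+dsinmad(2, 4, 6, q.arg=1) - dfisk(2, 4, 6)  # Should be 0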
+}
+\keyword{distribution}
diff --git a/man/Tol.Rd b/man/Tol.Rd
new file mode 100644
index 0000000..830181e
--- /dev/null
+++ b/man/Tol.Rd
@@ -0,0 +1,96 @@
+\name{Tol}
+\alias{Tol}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Tolerances }
+\description{
+ Generic function for the \emph{tolerances} of a model.
+}
+\usage{
+Tol(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the computation or
+ extraction of a tolerance or tolerances is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model. Sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+ }
+}
+\details{
+ Different models can define a tolerance in different ways.
+ Many models have no such notion or definition.
+
+ Tolerances occur in quadratic ordination, i.e., CQO or UQO.
+ They have ecological meaning because a high tolerance
+ for a species means the species can survive over a large
+ environmental range (a eurycous species), whereas a
+ small tolerance means the species' niche is small
+ (a stenoecous species).
+ Mathematically, the tolerance is like the variance of
+ a normal distribution.
+
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+
+
+\note{
+ Tolerances are undefined for `linear' and additive
+ ordination models.
+ They are well-defined for quadratic ordination models.
+}
+\section{Warning }{
+ There is a direct inverse relationship between the scaling of
+ the latent variables (site scores) and the tolerances.
+ One normalization is for the latent variables to have unit
+ variance.
+ Another normalization is for all the tolerances to be unity.
+ These two normalizations cannot simultaneously hold in general.
+ For rank-\emph{R>1} models it becomes more complicated because
+ the latent variables are also constrained to be uncorrelated.
+ argument when fitting quadratic ordination models is whether
+ \code{EqualTolerances} is \code{TRUE} or \code{FALSE}.
+ See Yee (2004) for details.
+
+}
+
+\seealso{
+ \code{Tol.qrrvglm},
+ \code{\link{Max}},
+ \code{\link{Opt}}.
+}
+
+\examples{
+data(hspider)
+set.seed(111) # This leads to the global solution
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ Bestof = 2,
+ fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+
+Tol(p1)
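+# A hedged summary (assumptions about the shapes of the returned objects
+# for this rank-1 fit): one optimum, maximum and tolerance per species.
+cbind(Optimum=c(Opt(p1)), Maximum=c(Max(p1)), Tolerance=c(Tol(p1)))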
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
new file mode 100644
index 0000000..3787e12
--- /dev/null
+++ b/man/VGAM-package.Rd
@@ -0,0 +1,444 @@
+\name{VGAM-package}
+\alias{VGAM-package}
+\alias{VGAM}
+\docType{package}
+\title{
+Vector Generalized Linear and Additive Models
+}
+\description{
+Vector generalized linear and additive models, and associated models
+(Reduced-Rank VGLMs, Quadratic RR-VGLMs, Reduced-Rank VGAMs). This package
+fits many models and distributions by maximum likelihood estimation (MLE)
+or penalized MLE. Also fits constrained ordination models in ecology.
+
+}
+\details{
+\tabular{ll}{
+Package: \tab VGAM\cr
+Version: \tab 0.7-1\cr
+Date: \tab 2006-10-24\cr
+Depends: \tab R (>= 2.4.0), splines, methods, stats, stats4\cr
+License: \tab GPL version 2\cr
+URL: \tab http://www.stat.auckland.ac.nz/~yee/VGAM\cr
+LazyLoad: \tab yes\cr
+LazyData: \tab yes\cr
+Packaged: \tab Tue Oct 24 15:36:03 2006; yee\cr
+Built: \tab R 2.4.0; i686-pc-linux-gnu; 2006-10-24 15:44:11; unix\cr
+}
+
+Index:
+\preformatted{
+AA.Aa.aa The AA-Aa-aa Blood Group System
+AB.Ab.aB.ab The AB-Ab-aB-ab Blood Group System
+AB.Ab.aB.ab2 The AB-Ab-aB-ab2 Blood Group System
+ABO The ABO Blood Group System
+Benini The Benini Distribution
+Betabin The Beta-Binomial Distribution
+Betageom The Beta-Geometric Distribution
+Betanorm The Beta-Normal Distribution
+Bisa The Birnbaum-Saunders Distribution
+Brat Inputting Data to fit a Bradley Terry Model
+Coef Computes Model Coefficients and Quantities
+Coef.qrrvglm Returns Important Matrices etc. of a QO Object
+Coef.qrrvglm-class Class "Coef.qrrvglm"
+Coef.rrvglm Returns Important Matrices etc. of an RR-VGLM
+ Object
+Coef.rrvglm-class Class "Coef.rrvglm"
+Coef.vlm Extract Model Coefficients for VLM Objects
+Dagum The Dagum Distribution
+Fisk The Fisk Distribution
+Frank Frank's Bivariate Distribution
+Frechet The Frechet Distribution
+G1G2G3 The G1G2G3 Blood Group System
+Hzeta Haight's Zeta Function
+Inv.gaussian The Inverse Gaussian Distribution
+Invlomax The Inverse Lomax Distribution
+Invparalogistic The Inverse Paralogistic Distribution
+Links Link functions for VGLM/VGAM/etc. families
+Lino The Generalized Beta Distribution (Libby and
+ Novick, 1982)
+Log Logarithmic Distribution
+Lomax The Lomax Distribution
+MNSs The MNSs Blood Group System
+Max Maxima
+Maxwell The Maxwell Distribution
+Nakagami Nakagami Distribution
+Opt Maxima
+Paralogistic The Paralogistic Distribution
+Pareto The Pareto Distribution
+ParetoIV The Pareto(IV/III/II) Distributions
+Polono The Poisson Lognormal Distribution
+Posbinom Positive-Binomial Distribution
+Posnorm The Positive-Normal Distribution
+Pospois Positive-Poisson Distribution
+Rayleigh The Rayleigh Distribution
+Sinmad The Singh-Maddala Distribution
+Tikuv A Short-tailed Symmetric Distribution
+Tol Tolerances
+Tpareto The Truncated Pareto Distribution
+Zeta The Zeta Distribution
+Zibinom Zero-Inflated Binomial Distribution
+Zipf The Zipf Distribution
+Zipois Zero-Inflated Poisson Distribution
+acat Ordinal Regression with Adjacent Categories
+ Probabilities
+auuc Auckland University Undergraduate Counts
+benini Benini Distribution Family Function
+betaII Beta Distribution of the Second Kind
+betabin.ab Beta-binomial Distribution Family Function
+betabinomial Beta-binomial Distribution Family Function
+betaff The Two-parameter Beta Distribution Family
+ Function
+betageometric Beta-geometric Distribution Family Function
+betaprime The Beta-Prime Distribution
+bilogis4 Bivariate Logistic Distribution
+bilogistic4 Bivariate Logistic Distribution Family Function
+binom2.or Bivariate Logistic Regression
+binom2.rho Bivariate Probit Model
+binomialff Binomial Family Function
+bisa Birnbaum-Saunders Distribution Family Function
+bminz Body Mass Index of New Zealand Adults
+brat Bradley Terry Model
+bratt Bradley Terry Model With Ties
+calibrate Model Calibrations
+calibrate.qrrvglm Calibration for CQO, UQO and CAO models
+calibrate.qrrvglm.control
+ Control function for CQO/UQO/CAO calibration
+cao Fitting Constrained Additive Ordination (CAO)
+cao.control Control Function for RR-VGAMs (CAO)
+cauchit Cauchit Link Function
+cauchy1 Cauchy Distribution Family Function
+ccoef Extract Model Constrained/Canonical
+ Coefficients
+cdf.lmscreg Cumulative Distribution Function for LMS
+ Quantile Regression
+cexpon Censored Exponential Distribution
+cgo Redirects the user to cqo
+cgumbel Censored Gumbel Distribution
+chest Chest Pain in NZ Adults
+chisq Chi-squared Distribution
+clo Redirects the user to rrvglm
+cloglog Complementary Log-log Link Function
+cnormal1 Censored Normal Distribution
+coalminers Breathlessness and Wheeze Amongst Coalminers
+constraints Constraint Matrices
+cqo Fitting Constrained Quadratic Ordination (CQO)
+cratio Ordinal Regression with Continuation Ratios
+cumulative Ordinal Regression with Cumulative
+ Probabilities
+dagum Dagum Distribution Family Function
+dcnormal1 Univariate Normal Distribution with Double
+ Censoring
+deplot.lmscreg Density Plot for LMS Quantile Regression
+dgumbel The Gumbel Distribution
+dirichlet Fitting a Dirichlet Distribution
+dirmul.old Fitting a Dirichlet-Multinomial Distribution
+dirmultinomial Fitting a Dirichlet-Multinomial Distribution
+dlaplace The Laplace Distribution
+enzyme Enzyme data
+erf Error Function
+erlang Erlang Distribution
+expexp Exponentiated Exponential Distribution
+expexp1 Exponentiated Exponential Distribution
+exponential Exponential Distribution
+fff F Distribution Family Function
+fill Creates a Matrix of Appropriate Dimension
+fisherz Fisher's Z Link Function
+fisk Fisk Distribution family function
+fitted.vlm Fitted Values of a VLM object
+frank Frank's Bivariate Distribution Family Function
+frechet2 Frechet Distribution Family Function
+freund61 Freund's (1961) Bivariate Extension of the
+ Exponential Distribution
+gamma1 1-parameter Gamma Distribution
+gamma2 2-parameter Gamma Distribution
+gamma2.ab 2-parameter Gamma Distribution
+gammahyp Gamma Hyperbola Bivariate Distribution
+garma GARMA (Generalized Autoregressive
+ Moving-Average) Models
+gaussianff Gaussian (normal) Family Function
+genbetaII Generalized Beta Distribution of the Second
+ Kind
+genpoisson Generalized Poisson distribution
+geometric Geometric Distribution
+get.smart Retrieve One Component of ".smart.prediction"
+get.smart.prediction Retrieves ".smart.prediction"
+gev Generalized Extreme Value Distribution Family
+ Function
+gevUC The Generalized Extreme Value Distribution
+gew General Electric and Westinghouse Data
+ggamma Generalized Gamma distribution family function
+ggammaUC The Generalized Gamma Distribution
+golf Gamma-Ordinal Link Function
+gpd Generalized Pareto Distribution Family Function
+gpdUC The Generalized Pareto Distribution
+grc Fitting Goodman's RC Association Model
+gumbel Gumbel Distribution Family Function
+guplot Gumbel Plot
+hspider Hunting Spider Data
+hunua Hunua Ranges data
+hyper Hypergeometric Family Function
+hzeta Haight's Zeta Family Function
+iam Index from Array to Matrix
+identity Identity Link Function
+inv.gaussianff Inverse Gaussian Distribution Family Function
+invlomax Inverse Lomax Distribution Family Function
+invparalogistic Inverse Paralogistic Distribution Family
+ Function
+is.smart Test For a Smart Object
+leipnik Leipnik Distribution Family Function
+lerch Lerch Phi Function
+levy Levy Distribution Family Function
+lgammaUC The Log-Gamma Distribution
+lgammaff Log-gamma Distribution Family Function
+lino Generalized Beta Distribution Family Function
+lirat Low-iron Rat Teratology Data
+lms.bcg LMS Quantile Regression with a Box-Cox
+ transformation to a Gamma Distribution
+lms.bcn LMS Quantile Regression with a Box-Cox
+ Transformation to Normality
+lms.yjn LMS Quantile Regression with a Yeo-Johnson
+ Transformation to Normality
+logc Complementary-log Link Function
+loge Log link function
+logff Logarithmic Distribution
+logistic Logistic Distribution Family Function
+logit Logit Link Function
+loglinb2 Loglinear Model for Two Binary Responses
+loglinb3 Loglinear Model for Three Binary Responses
+loglog Log-log Link Function
+lognormal Lognormal Distribution
+logoff Log link function with an offset
+lomax Lomax Distribution Family Function
+lv Latent Variables
+lvplot Latent Variable Plot
+lvplot.qrrvglm Latent Variable Plot for QO models
+lvplot.rrvglm Latent Variable Plot for RR-VGLMs
+maxwell Maxwell Distribution Family Function
+mccullagh89 McCullagh (1989) Distribution Family Function
+mckaygamma2 McKay's Bivariate Gamma Distribution
+meplot Mean Excess Plot
+micmen Michaelis-Menten Model
+mix2normal1 Mixture of Two Univariate Normal Distributions
+mix2poisson Mixture of Two Poisson Distributions
+model.framevlm Construct the Model Frame of a VLM Object
+model.matrixvlm Construct the Design Matrix of a VLM Object
+multinomial Multinomial Logit Model
+nakagami Nakagami Distribution Family Function
+nbolf Negative Binomial-Ordinal Link Function
+negbinomial Negative Binomial Distribution Family Function
+normal1 Univariate normal distribution
+notdocumentedyet Undocumented and Internally Used Functions and
+ Classes
+nzc Chinese Population in New Zealand 1867-2001
+oxtemp Oxford Temperature Data
+paralogistic Paralogistic Distribution Family Function
+pareto1 Pareto and Truncated Pareto Distribution Family
+ Functions
+paretoIV Pareto(IV/III/II) Distribution Family Functions
+persp.qrrvglm Perspective plot for QRR-VGLMs
+plotdeplot.lmscreg Density Plot for LMS Quantile Regression
+plotqrrvglm Model Diagnostic Plots for QRR-VGLMs
+plotqtplot.lmscreg Quantile Plot for LMS Quantile Regression
+plotvgam Default VGAM Plotting
+plotvgam.control Control Function for plotvgam()
+pneumo Pneumoconiosis amongst a group of coalminers
+poissonff Poisson Family Function
+polf Poisson-Ordinal Link Function
+posbinomial Positive Binomial Distribution Family Function
+posnegbinomial Positive Negative Binomial Distribution Family
+ Function
+posnormal1 Positive Normal Distribution Family Function
+pospoisson Positive Poisson Distribution Family Function
+predict.vglm Predict Method for a VGLM fit
+prentice74 Prentice (1974) Log-gamma Distribution
+probit Probit Link Function
+put.smart Adds a List to the End of the List
+ ".smart.prediction"
+qrrvglm.control Control function for QRR-VGLMs (CQO)
+qtplot.gumbel Quantile Plot for Gumbel Regression
+qtplot.lmscreg Quantile Plot for LMS Quantile Regression
+quasibinomialff Quasi-Binomial Family Function
+quasipoissonff Quasi-Poisson Family Function
+rayleigh Rayleigh Distribution Family Function
+rcqo Constrained Quadratic Ordination
+rdiric The Dirichlet distribution
+recexp1 Upper Record Values from a 1-parameter
+ Exponential Distribution
+reciprocal Reciprocal link function
+recnormal1 Upper Record Values from a Univariate Normal
+ Distribution
+rhobit Rhobit Link Function
+rig Reciprocal Inverse Gaussian distribution
+rlplot.egev Return Level Plot for GEV Fits
+rposnegbin Positive-negative binomial distribution random
+ variates
+rrar Nested reduced-rank autoregressive models for
+ multiple time series
+rrvglm Fitting Reduced-Rank Vector Generalized Linear
+ Models (RR-VGLMs)
+rrvglm-class Class "rrvglm"
+rrvglm.control Control function for rrvglm
+rrvglm.optim.control Control function for rrvglm() calling optim()
+s Defining smooths in VGAM formulae
+setup.smart Smart Prediction Setup
+simplex Simplex distribution
+sinmad Singh-Maddala Distribution Family Function
+skewnormal1 Univariate Skew-Normal Distribution Family
+ Function
+smart.expression S Expression for Smart Functions
+smart.mode.is Determine What Mode the Smart Prediction is In
+smartpred Smart Prediction
+snorm Skew-Normal Distribution
+sratio Ordinal Regression with Stopping Ratios
+studentt Student t Distribution
+tikuv Short-tailed Symmetric Distribution Family
+ Function
+tobit Tobit Model
+trplot Trajectory Plot
+trplot.qrrvglm Trajectory plot for QRR-VGLMs
+uqo Fitting Unconstrained Quadratic Ordination
+ (UQO)
+uqo.control Control Function for UQO models
+usagrain USA grain prices
+venice Venice Maximum Sea Levels
+vgam Fitting Vector Generalized Additive Models
+vgam-class Class "vgam"
+vgam.control Control function for vgam
+vglm Fitting Vector Generalized Linear Models
+vglm-class Class "vglm"
+vglm.control Control function for vglm
+vglmff-class Class "vglmff"
+vonmises von Mises Distribution Family Function
+vsmooth.spline Vector cubic smoothing spline
+waitakere Waitakere Ranges data
+wald Wald Distribution Family Function
+weibull Weibull Distribution Family Function
+weightsvglm Prior and Working Weights of a VGLM fit
+wrapup.smart Cleans Up After Smart Prediction
+yeo.johnson Yeo-Johnson Transformation
+yip88 Zero-Inflated Poisson Distribution (Yip (1988)
+ algorithm)
+zanegbinomial Zero-Altered Negative Binomial Distribution
+zapoisson Zero-Altered Poisson Distribution
+zero The zero Argument in VGAM Family Functions
+zeta Riemann's Zeta Function
+zetaff Zeta Distribution Family Function
+zibinomial Zero-Inflated Binomial Distribution Family
+ Function
+zipf Zipf Distribution Family Function
+zipoisson Zero-Inflated Poisson Distribution Family
+ Function
+}
+
+%~~ An overview of how to use the package, including the most important ~~
+%~~ functions ~~
+
+
+}
+\author{
+Thomas W. Yee <t.yee@auckland.ac.nz>
+
+Maintainer: Thomas Yee <t.yee@auckland.ac.nz>
+}
+\references{
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+
+\keyword{ package }
+\keyword{models}
+\keyword{regression}
+\examples{
+# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+(fit = vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo))
+fit@y # Sample proportions
+weights(fit, type="prior") # Number of observations
+coef(fit, matrix=TRUE)
+constraints(fit) # Constraint matrices
+
+
+# Fit a two species GAM simultaneously
+data(hunua)
+fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude),
+ fam = binomialff(mv=TRUE), hunua)
+coef(fit2, mat=TRUE) # Not really interpretable
+\dontrun{
+plot(fit2, se=TRUE, overlay=TRUE, lcol=1:2, scol=1:2)
+attach(hunua)
+o = order(altitude)
+matplot(altitude[o], fitted(fit2)[o,], type="l", lwd=2, las=1,
+ xlab="Altitude (m)", ylab="Probability of presence",
+ main="Two plant species' response curves", ylim=c(0,.8))
+rug(altitude)
+detach(hunua)
+}
+
+
+
+# LMS quantile regression
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz, tr=TRUE)
+predict(fit)[1:3,]
+fitted(fit)[1:3,]
+bminz[1:3,]
+# Person 1 is near the lower quartile of BMI amongst people his age
+cdf(fit)[1:3]
+
+\dontrun{
+# Quantile plot
+par(bty="l", mar=c(5,4,4,3)+0.1, xpd=TRUE)
+qtplot(fit, percentiles=c(5,50,90,99), main="Quantiles",
+ xlim=c(15,90), las=1, ylab="BMI", lwd=2, lcol=4)
+
+# Density plot
+ygrid = seq(15, 43, len=100) # BMI ranges
+par(mfrow=c(1,1), lwd=2)
+a = deplot(fit, x0=20, y=ygrid, xlab="BMI", col="black",
+ main="Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
+a
+a = deplot(fit, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
+a = deplot(fit, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
+a@post$deplot # Contains density function values
+}
+
+
+# GEV distribution for extremes
+data(oxtemp)
+(fit = vglm(maxtemp ~ 1, egev, data=oxtemp, trace=TRUE))
+fitted(fit)[1:3,]
+coef(fit, mat=TRUE)
+Coef(fit)
+vcov(fit)
+vcov(fit, untransform=TRUE)
+sqrt(diag(vcov(fit))) # Approximate standard errors
+\dontrun{ rlplot(fit) }
+}
+
+
diff --git a/man/acat.Rd b/man/acat.Rd
new file mode 100644
index 0000000..81dc9ae
--- /dev/null
+++ b/man/acat.Rd
@@ -0,0 +1,113 @@
+\name{acat}
+\alias{acat}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Ordinal Regression with Adjacent Categories Probabilities }
+\description{
+ Fits an adjacent categories regression model to a (preferably
+ ordered) factor response.
+}
+\usage{
+acat(link = "loge", parallel = FALSE, reverse = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, the response \eqn{Y} is assumed to be a factor
+ with ordered values \eqn{1,2,\ldots,M+1}, so that
+ \eqn{M} is the number of linear/additive predictors
+ \eqn{\eta_j}{eta_j}.
+
+ \item{link}{
+ Link function applied to the ratios of the
+ adjacent categories probabilities.
+ See \code{\link{Links}} for more choices.
+
+ }
+
+ \item{parallel}{
+ A logical, or formula specifying which terms have
+ equal/unequal coefficients.
+
+ }
+ \item{reverse}{
+ Logical.
+ By default, the linear/additive predictors used are
+ \eqn{\eta_j = \log(P[Y=j+1]/P[Y=j])}{eta_j = log(P[Y=j+1]/P[Y=j])}
+ for \eqn{j=1,\ldots,M}.
+ If \code{reverse} is \code{TRUE}, then
+ \eqn{\eta_j = \log(P[Y=j]/P[Y=j+1])}{eta_j=log(P[Y=j]/P[Y=j+1])}
+ will be used.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+
+ }
+}
+\details{
+ By default, the log link is used because the ratio of two probabilities
+ is positive.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+Simonoff, J. S. (2003)
+\emph{Analyzing Categorical Data},
+New York: Springer-Verlag.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be either a matrix of counts (with row sums that are
+ all positive), or a factor. In both cases, the \code{y} slot returned
+ by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix of counts.
+
+ For a nominal (unordered) factor response, the multinomial logit model
+ (\code{\link{multinomial}}) is more appropriate.
+
+ Here is an example of the usage of the \code{parallel} argument.
+ If there are covariates \code{x1}, \code{x2} and \code{x3}, then
+ \code{parallel = TRUE ~ x1 + x2 -1} and \code{parallel = FALSE ~
+ x3} are equivalent. This would constrain the regression coefficients
+ for \code{x1} and \code{x2} to be equal; those of the intercepts and
+ \code{x3} would be different.
+
+}
+\section{Warning }{
+ No check is made to verify that the response is ordinal.
+}
+
+\seealso{
+ \code{\link{cumulative}},
+ \code{\link{cratio}},
+ \code{\link{sratio}},
+ \code{\link{multinomial}},
+ \code{\link{pneumo}}.
+}
+\examples{
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+(fit = vglm(cbind(normal,mild,severe) ~ let, acat, pneumo))
+coef(fit, matrix=TRUE)
+constraints(fit)
+model.matrix(fit)
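+
+# Illustrative check (a sketch, not from the original documentation):
+# with the default reverse=FALSE, the first linear predictor should equal
+# log(P[Y=2]/P[Y=1]); the columns of fitted(fit) are assumed to follow
+# the response categories (normal, mild, severe).
+cbind(predict(fit)[1:3, 1],
+      log(fitted(fit)[1:3, 2] / fitted(fit)[1:3, 1]))  # Columns should agree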
+}
+\keyword{models}
+\keyword{regression}
+
+%pneumo$let = log(pneumo$exposure.time)
diff --git a/man/auuc.Rd b/man/auuc.Rd
new file mode 100644
index 0000000..2c1a7f5
--- /dev/null
+++ b/man/auuc.Rd
@@ -0,0 +1,39 @@
+\name{auuc}
+\alias{auuc}
+\docType{data}
+\title{ Auckland University Undergraduate Counts}
+\description{
+Undergraduate student enrolments
+at the University of Auckland in 1990.
+}
+\usage{data(auuc)}
+\format{
+ A data frame with 4 observations on the following 5 variables.
+ \describe{
+ \item{Commerce}{a numeric vector of counts.}
+ \item{Arts}{a numeric vector of counts.}
+ \item{SciEng}{a numeric vector of counts.}
+ \item{Law}{a numeric vector of counts.}
+ \item{Medicine}{a numeric vector of counts.}
+ }
+}
+\details{
+Each student is cross-classified by college
+(Science and Engineering have been combined) and by
+the socio-economic status (SES) of their father (1 = highest,
+down to 4 = lowest).
+}
+\source{
+ Dr Tony Morrison.
+}
+\references{
+ Wild, C. J. and Seber, G. A. F. (2000)
+\emph{Chance Encounters: A First Course in Data Analysis and Inference},
+New York: Wiley.
+}
+\examples{
+data(auuc)
+round(fitted(grc(auuc)))
+round(fitted(grc(auuc, Rank=2)))
+}
+\keyword{datasets}
diff --git a/man/benini.Rd b/man/benini.Rd
new file mode 100644
index 0000000..eb9c251
--- /dev/null
+++ b/man/benini.Rd
@@ -0,0 +1,93 @@
+\name{benini}
+\alias{benini}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Benini Distribution Family Function }
+\description{
+ Estimation of the shape parameter of the Benini distribution by
+ maximum likelihood.
+
+}
+\usage{
+benini(y0=stop("argument \"y0\" must be specified"),
+ lshape="loge", ishape=NULL, method.init=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{y0}{ Positive scale parameter.
+ }
+ \item{lshape}{
+ Parameter link function applied to the parameter \eqn{b},
+ which is the shape parameter.
+ See \code{\link{Links}} for more choices.
+ A log link is the default because \eqn{b} is positive.
+
+ }
+ \item{ishape}{
+ Optional initial value for the shape parameter.
+ The default is to compute the value internally.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1} or \code{2} which
+ specifies the initialization method. If failure to converge occurs
+ try the other value, or else specify a value for \code{ishape}.
+
+ }
+}
+\details{
+ The Benini distribution
+ has a probability density function that can be written
+ \deqn{f(y) = 2 b \exp(-b[(\log(y/y_0))^2]) \log(y/y_0) / y }{%
+ f(y) = 2*b*exp(-b * [(log(y/y0))^2]) * log(y/y0) / y}
+ for \eqn{y_0>0}, \eqn{y_0<y}{y0<y}, and \eqn{b>0}.
+ The cumulative distribution function for \eqn{Y} is
+ \deqn{F(y) = 1 - \exp(-b[(\log(y/y_0))^2]).}{%
+ F(y) = 1 - exp(-b * [(log(y/y0))^2]). }
+ Here, Newton-Raphson and Fisher scoring coincide.
+
+ On fitting, the \code{extra} slot has a component called \code{y0} which
+ contains the value of the \code{y0} argument.
+
+}
+\section{Warning}{
+ The mean of \eqn{Y}, which is returned as the fitted values,
+ may be incorrect.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+\author{ T. W. Yee }
+\note{
+ Yet to do: the 2-parameter Benini distribution estimates \eqn{y_0}{y0}
+ as well, and the 3-parameter Benini distribution estimates another
+ shape parameter \eqn{a}{a} too.
+
+}
+\seealso{
+ \code{\link{Benini}}.
+}
+\examples{
+y = rbenini(n <- 3000, y0=1, shape=exp(2))
+fit = vglm(y ~ 1, benini(y0=1), trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit)
+fit@extra$y0
+
+# Apparent discrepancy:
+fitted(fit)[1:5]
+mean(y)
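+
+# Sketch: check the Details CDF formula F(y) = 1 - exp(-b*log(y/y0)^2)
+# against pbenini() at the true values used above (b=exp(2), y0=1).
+b = exp(2); yy = 2
+c(pbenini(yy, shape=b, y0=1), 1 - exp(-b * log(yy/1)^2))  # Should match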
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
new file mode 100644
index 0000000..00d607d
--- /dev/null
+++ b/man/beniniUC.Rd
@@ -0,0 +1,71 @@
+\name{Benini}
+\alias{Benini}
+\alias{dbenini}
+\alias{pbenini}
+\alias{qbenini}
+\alias{rbenini}
+\title{The Benini Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Benini distribution with shape parameter
+ \code{shape} and scale parameter \code{y0}.
+}
+\usage{
+dbenini(x, shape, y0)
+pbenini(q, shape, y0)
+qbenini(p, shape, y0)
+rbenini(n, shape, y0)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{shape}{the shape parameter \eqn{b}.}
+ \item{y0}{the scale parameter \eqn{y_0}{y0}.}
+}
+\value{
+ \code{dbenini} gives the density,
+ \code{pbenini} gives the distribution function,
+ \code{qbenini} gives the quantile function, and
+ \code{rbenini} generates random deviates.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{benini}}, the \pkg{VGAM} family function
+ for estimating the parameter \eqn{b} by maximum likelihood estimation,
+ for the formula of the probability density function and other details.
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{benini}}.
+}
+\examples{
+\dontrun{
+y0 = 1
+shape = exp(1)
+x = seq(-0.0, 4, len=101)
+plot(x, dbenini(x, y0=y0,shape=shape), type="l", col="blue", las=1,
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles", ylim=0:1,
+ ylab="")
+abline(h=0, col="blue", lty=2)
+lines(x, pbenini(x, y0=y0,shape=shape), col="red")
+probs = seq(0.1, 0.9, by=0.1)
+Q = qbenini(probs, y0=y0,shape=shape)
+lines(Q, dbenini(Q, y0=y0,shape=shape), col="purple", lty=3, type="h")
+pbenini(Q, y0=y0,shape=shape) - probs # Should be all zero
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/betaII.Rd b/man/betaII.Rd
new file mode 100644
index 0000000..b241c03
--- /dev/null
+++ b/man/betaII.Rd
@@ -0,0 +1,94 @@
+\name{betaII}
+\alias{betaII}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Beta Distribution of the Second Kind }
+\description{
+ Maximum likelihood estimation of the 3-parameter
+ beta II distribution.
+}
+\usage{
+betaII(link.scale = "loge", link.p = "loge", link.q = "loge",
+ init.scale = NULL, init.p = 1, init.q = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.scale, link.p, link.q}{
+ Parameter link functions applied to the
+ (positive) parameters \code{scale}, \code{p} and \code{q}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.scale, init.p, init.q}{
+ Optional initial values for \code{scale}, \code{p} and \code{q}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2,3\} which correspond to
+ \code{scale}, \code{p}, \code{q}, respectively.
+
+ }
+}
+\details{
+ The 3-parameter beta II is the 4-parameter
+ \emph{generalized} beta II distribution with shape parameter \eqn{a=1}.
+ It is also known as the Pearson VI distribution.
+ Other distributions which are special cases of the 3-parameter beta II include
+ the Lomax (\eqn{p=1}) and inverse Lomax (\eqn{q=1}).
+ More details can be found in Kleiber and Kotz (2003).
+
+The beta II distribution has density
+ \deqn{f(y) = y^{p-1} / [b^p B(p,q) \{1 + y/b\}^{p+q}]}{%
+ f(y) = y^(p-1) / [b^p B(p,q) (1 + y/b)^(p+q)]}
+ for \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and the others are shape parameters.
+The mean is
+ \deqn{E(Y) = b \, \Gamma(p + 1) \, \Gamma(q - 1) / (\Gamma(p) \, \Gamma(q))}{%
+ E(Y) = b gamma(p + 1) gamma(q - 1) / ( gamma(p) gamma(q))}
+provided \eqn{q > 1}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ If the self-starting initial values fail, try experimenting with
+ the initial value arguments, especially those whose default value
+ is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{betaff}},
+ \code{\link{genbetaII}},
+ \code{\link{dagum}},
+ \code{\link{sinmad}},
+ \code{\link{fisk}},
+ \code{\link{invlomax}},
+ \code{\link{lomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rsinmad(n=2000, a=1, 6, 2) # Not genuine data!
+fit = vglm(y ~ 1, betaII, trace=TRUE)
+fit = vglm(y ~ 1, betaII(init.p=0.7, init.q=0.7), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
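+
+# Sketch: compare the sample mean with the Details formula
+# E(Y) = b*gamma(p+1)*gamma(q-1)/(gamma(p)*gamma(q)); the ordering of
+# Coef(fit) as (scale, p, q) is an assumption here.
+cc = Coef(fit)
+cc[1] * gamma(cc[2]+1) * gamma(cc[3]-1) / (gamma(cc[2]) * gamma(cc[3]))
+mean(y)  # Should be roughly comparable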
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/betabin.ab.Rd b/man/betabin.ab.Rd
new file mode 100644
index 0000000..22f15af
--- /dev/null
+++ b/man/betabin.ab.Rd
@@ -0,0 +1,217 @@
+\name{betabin.ab}
+\alias{betabin.ab}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Beta-binomial Distribution Family Function }
+\description{
+ Fits a beta-binomial distribution by maximum likelihood estimation.
+ The two parameters here are the shape parameters of the underlying
+ beta distribution.
+
+}
+\usage{
+betabin.ab(link.shape12 = "loge", i1 = 1, i2 = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.shape12}{
+ Link function applied to both (positive) shape parameters
+ of the beta distribution.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{i1, i2}{
+ Initial values for the shape parameters.
+ The first must be positive, and is recycled to the necessary length.
+ The second is optional.
+ If a failure to converge occurs, try assigning a different value
+ to \code{i1} and/or using \code{i2}.
+
+ }
+ \item{zero}{
+ An integer specifying which linear/additive predictor is to be modelled
+ as an intercept only. If assigned, the single value should be either
+ \code{1} or \code{2}. The default is to model both shape parameters
+ as functions of the covariates. If a failure to converge occurs,
+ try \code{zero=2}.
+
+ }
+}
+\details{
+ There are several parameterizations of the beta-binomial distribution.
+ This family function directly models the two shape parameters of the
+ associated beta distribution rather than the probability of
+ success (however, see \bold{Note} below).
+ The model can be written
+ \eqn{T|P=p \sim Binomial(N,p)}{T|P=p ~ Binomial(N,p)}
+ where \eqn{P} has a beta distribution with shape parameters
+ \eqn{\alpha}{alpha} and \eqn{\beta}{beta}. Here,
+ \eqn{N} is the number of trials (e.g., litter size),
+ \eqn{T=NY} is the number of successes, and
+ \eqn{p} is the probability of a success (e.g., a malformation).
+ That is, \eqn{Y} is the \emph{proportion} of successes. Like
+ \code{\link{binomialff}}, the fitted values are the
+ estimated probability
+ of success (i.e., \eqn{E[Y]} and not \eqn{E[T]})
+ and the prior weights \eqn{N} are attached separately on the
+ object in a slot.
+
+ The probability function is
+ \deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
+ {B(\alpha, \beta)}}{%
+ P(T=t) = choose(N,t) B(alpha+t, beta+N-t) / B(alpha, beta)}
+ where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the beta function
+ with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
+ Recall \eqn{Y = T/N} is the real response being modelled.
+
+ The default model is \eqn{\eta_1 = \log(\alpha)}{eta1 = log(alpha)}
+ and \eqn{\eta_2 = \log(\beta)}{eta2 = log(beta)} because both
+ parameters are positive.
+ The mean (of \eqn{Y}) is
+ \eqn{p = \mu = \alpha / (\alpha + \beta)}{p = mu = alpha / (alpha + beta)}
+ and the variance (of \eqn{Y}) is
+ \eqn{\mu(1-\mu)(1+(N-1)\rho)/N}{mu(1-mu)(1+(N-1)rho)/N}.
+ Here, the correlation \eqn{\rho}{rho} is given by
+ \eqn{1/(1 + \alpha + \beta)}{1/(1 + alpha + beta)}
+ and is the correlation between the \eqn{N} individuals
+ within a litter. A \emph{litter effect} is typically reflected by a
+ positive value of \eqn{\rho}{rho}. It is known as the
+ \emph{over-dispersion parameter}.
+
+ This family function uses Fisher scoring. The two diagonal
+ elements of the second-order expected
+ derivatives with respect to \eqn{\alpha}{alpha} and
+ \eqn{\beta}{beta} are computed numerically, which may
+ fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
+ \eqn{N} or else take a long time.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}.
+
+ Suppose \code{fit} is a fitted beta-binomial model. Then
+ \code{fit@y} contains the sample proportions \eqn{y},
+ \code{fitted(fit)} returns estimates of \eqn{E(Y)}, and
+ \code{weights(fit, type="prior")} returns the number
+ of trials \eqn{N}.
+
+}
+\references{
+ Moore, D. F. and Tsiatis, A. (1991)
+ Robust estimation of the variance in moment methods for
+ extra-binomial and extra-Poisson variation.
+ \emph{Biometrics},
+ \bold{47}, 383--401.
+
+ Prentice, R. L. (1986)
+ Binary regression using an extended beta-binomial distribution,
+ with discussion of correlation induced by
+ covariate measurement errors.
+ \emph{Journal of the American Statistical Association},
+ \bold{81}, 321--327.
+}
+
+\author{ T. W. Yee }
+\note{
+ This function processes the input in the same way
+ as \code{\link{binomialff}}. But it does not handle
+ the case \eqn{N=1} very well because there are two
+ parameters to estimate, not one, for each row of the input.
+ Cases where \eqn{N=1} can be omitted via the
+ \code{subset} argument of \code{\link{vglm}}.
+
+ Although the two linear/additive predictors given
+ above are in terms of \eqn{\alpha}{alpha} and \eqn{\beta}{beta},
+ basic algebra shows that the default amounts to
+ fitting a logit link to the probability of success; subtracting
+ the second linear/additive predictor from the first gives that
+ logistic regression linear/additive predictor.
+ That is, \eqn{logit(p) = \eta_1 - \eta_2}{logit(p) = eta1 - eta2}.
+ This is illustrated in one of the examples below.
+
+ The \emph{extended} beta-binomial distribution of Prentice (1986)
+ is currently not implemented in the \pkg{VGAM} package as it has
+ range-restrictions for the correlation parameter that are currently
+ too difficult to handle in this package.
+
+}
+\section{Warning }{
+ This family function is prone to numerical difficulties
+ due to the expected information matrices not being positive-definite
+ or ill-conditioned over some regions of the parameter space.
+ If problems occur try setting \code{i1} to be some other
+ positive value, using \code{i2} and/or setting \code{zero=2}.
+
+ This family function may be renamed in the future.
+
+}
+\seealso{
+ \code{\link{betabinomial}},
+ \code{\link{Betabin}},
+ \code{\link{binomialff}},
+ \code{\link{betaff}},
+ \code{\link{dirmultinomial}},
+ \code{\link{lirat}}.
+
+}
+\examples{
+# Example 1
+N = 10; s1=exp(1); s2=exp(2)
+y = rbetabin.ab(n=100, size=N, shape1=s1, shape2=s2)
+fit = vglm(cbind(y,N-y) ~ 1, betabin.ab, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+fit@misc$rho[1:4] # The correlation parameter
+cbind(fit@y, weights(fit, type="prior"))[1:5,]
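+
+# Sketch: the reported correlation should equal 1/(1 + alpha + beta);
+# Coef(fit) is assumed to return (shape1, shape2) on the natural scale.
+1 / (1 + Coef(fit)[1] + Coef(fit)[2])
+fit@misc$rho[1]  # Should agree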
+
+
+# Example 2
+data(lirat)
+fit = vglm(cbind(R,N-R) ~ 1, betabin.ab, data=lirat,
+ trace=TRUE, subset=N>1)
+coef(fit, matrix=TRUE)
+Coef(fit)
+fit@misc$rho # The correlation parameter
+t(fitted(fit))
+t(fit@y)
+t(weights(fit, type="prior"))
+# A loge link for the 2 shape parameters results in
+# a logistic regression:
+all.equal(c(fitted(fit)),
+          c(logit(predict(fit)[,1] - predict(fit)[,2], inverse=TRUE)))
+
+
+# Example 3, which is more complicated
+lirat = transform(lirat, fgrp = factor(grp))
+summary(lirat) # Only 5 litters in group 3
+fit2 = vglm(cbind(R,N-R) ~ fgrp + hb, betabin.ab(zero=2),
+ data=lirat, trace=TRUE, subset=N>1)
+coef(fit2, matrix=TRUE)
+Coef(fit2)
+coef(fit2, matrix=TRUE)[,1] - coef(fit2, matrix=TRUE)[,2] # logit(p)
+\dontrun{
+plot(lirat$hb[lirat$N>1], fit2@misc$rho,
+ xlab="Hemoglobin", ylab="Estimated rho",
+ pch=as.character(lirat$grp[lirat$N>1]),
+ col=lirat$grp[lirat$N>1])
+}
+\dontrun{
+data(lirat)
+attach(lirat)
+# cf. Figure 3 of Moore and Tsiatis (1991)
+plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
+ xlab="Hemoglobin level", ylab="Proportion Dead",
+ main="Fitted values (lines)")
+detach(lirat)
+
+smalldf = lirat[lirat$N>1,]
+for(gp in 1:4) {
+ xx = smalldf$hb[smalldf$grp==gp]
+ yy = fitted(fit2)[smalldf$grp==gp]
+ o = order(xx)
+ lines(xx[o], yy[o], col=gp)
+}
+}
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/betabinUC.Rd b/man/betabinUC.Rd
new file mode 100644
index 0000000..438cd58
--- /dev/null
+++ b/man/betabinUC.Rd
@@ -0,0 +1,112 @@
+\name{Betabin}
+\alias{Betabin}
+\alias{dbetabin}
+\alias{pbetabin}
+%\alias{qbetabin}
+\alias{rbetabin}
+\alias{dbetabin.ab}
+\alias{pbetabin.ab}
+%\alias{qbetabin.ab}
+\alias{rbetabin.ab}
+\title{The Beta-Binomial Distribution}
+\description{
+ Density, distribution function, and random
+ generation for the beta-binomial distribution.
+
+}
+\usage{
+dbetabin(x, size, prob, rho, log=FALSE)
+pbetabin(q, size, prob, rho, log.p=FALSE)
+rbetabin(n, size, prob, rho)
+dbetabin.ab(x, size, shape1, shape2, log=FALSE)
+pbetabin.ab(q, size, shape1, shape2, log.p=FALSE)
+rbetabin.ab(n, size, shape1, shape2)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+% \item{p}{vector of probabilities.}
+ \item{size}{number of trials.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{prob}{
+ the probability of success \eqn{\mu}{mu}.
+ Must be in the unit open interval \eqn{(0,1)}.
+
+ }
+ \item{rho}{
+ the correlation parameter \eqn{\rho}{rho}.
+ Must be in the unit open interval \eqn{(0,1)}.
+
+ }
+ \item{shape1, shape2}{
+ the two (positive) shape parameters of the standard
+ beta distribution. They are called \code{a} and \code{b} in
+ \code{\link[base:Special]{beta}} respectively.
+
+ }
+ \item{log, log.p}{
+ Logical.
+ If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
+
+ }
+}
+\value{
+ \code{dbetabin} and \code{dbetabin.ab} give the density,
+ \code{pbetabin} and \code{pbetabin.ab} give the distribution function, and
+% \code{qbetabin} and \code{qbetabin.ab} gives the quantile function, and
+ \code{rbetabin} and \code{rbetabin.ab} generate random deviates.
+
+}
+\author{ T. W. Yee }
+\details{
+ The beta-binomial distribution is a binomial distribution whose
+ probability of success is not constant but is generated from a
+ beta distribution with parameters \code{shape1} and \code{shape2}.
+ Note that the mean of this beta distribution is
+ \code{mu=shape1/(shape1+shape2)}, which is therefore the mean
+ probability of success.
+
+ See \code{\link{betabinomial}} and \code{\link{betabin.ab}},
+ the \pkg{VGAM} family functions for
+ estimating the parameters, for the formula of the probability density
+ function and other details.
+
+}
+\note{
+ \code{pbetabin} and \code{pbetabin.ab} can be particularly slow.
+ The functions here without the \code{.ab} suffix call those
+ that have it.
+ The simple transformations
+ \eqn{\mu=\alpha / (\alpha + \beta)}{mu=alpha/(alpha+beta)} and
+ \eqn{\rho=1/(1 + \alpha + \beta)}{rho=1/(1+alpha+beta)} are used,
+ where \eqn{\alpha}{alpha} and \eqn{\beta}{beta} are the two
+ shape parameters.
+
+}
+\seealso{
+ \code{\link{betabinomial}},
+ \code{\link{betabin.ab}}.
+
+}
+\examples{
+\dontrun{
+N = 9; x = 0:N; s1=2; s2=3
+dy = dbetabin.ab(x, size=N, shape1=s1, shape2=s2)
+plot(x, dy, type="h", col="red", ylim=c(0,0.25), ylab="Probability",
+ main=paste("Beta-binomial (size=",N,", shape1=",s1,
+ ", shape2=",s2,")", sep=""))
+lines(x+0.1, dbinom(x, size=N, prob=s1/(s1+s2)), type="h", col="blue")
+sum(dy*x) # Check expected values are equal
+sum(dbinom(x, size=N, prob=s1/(s1+s2))*x)
+cumsum(dy) - pbetabin.ab(x, N, shape1=s1, shape2=s2)
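+
+# Sketch: dbetabin() with (prob, rho) should match dbetabin.ab() after
+# the transformations given in the Note above.
+mu = s1/(s1+s2); rho = 1/(1+s1+s2)
+max(abs(dbetabin(x, size=N, prob=mu, rho=rho) - dy))  # Should be ~0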
+
+y = rbetabin.ab(n=10000, size=N, shape1=s1, shape2=s2)
+ty = table(y)
+lines(as.numeric(names(ty))+0.2, ty/sum(ty), type="h", col="green")
+legend(5, 0.25, leg=c("beta-binomial","binomial", "random generated"),
+ col=c("red","blue","green"), lty=1)
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
new file mode 100644
index 0000000..ce1f93d
--- /dev/null
+++ b/man/betabinomial.Rd
@@ -0,0 +1,200 @@
+\name{betabinomial}
+\alias{betabinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Beta-binomial Distribution Family Function }
+\description{
+ Fits a beta-binomial distribution by maximum likelihood estimation.
+ The two parameters here are the mean and correlation coefficient.
+
+}
+\usage{
+betabinomial(lmu="logit", lrho="logit", irho=0.5, zero=2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, lrho}{
+ Link functions applied to the two parameters.
+ See \code{\link{Links}} for more choices.
+ The defaults ensure the parameters remain in \eqn{(0,1)}.
+
+ }
+ \item{irho}{
+ Optional initial value for the correlation parameter.
+ If given, it must be in \eqn{(0,1)}, and is recycled to the necessary
+ length. Assign this argument a value if a convergence failure occurs.
+ Setting \code{irho=NULL} means an initial value is obtained internally,
+ though this can give unsatisfactory results.
+
+ }
+ \item{zero}{
+ An integer specifying which
+ linear/additive predictor is to be modelled as an intercept only.
+ If assigned, the single value should be either \code{1} or \code{2}.
+ The default is to have a single correlation parameter.
+ To model both parameters as functions of the covariates assign
+ \code{zero=NULL}.
+
+ }
+}
+\details{
+ There are several parameterizations of the beta-binomial distribution.
+ This family function directly models the mean (i.e., the
+ probability of success) and the correlation parameter.
+ The model can be written
+ \eqn{T|P=p \sim Binomial(N,p)}{T|P=p ~ Binomial(N,p)}
+ where \eqn{P} has a beta distribution with shape parameters
+ \eqn{\alpha}{alpha} and \eqn{\beta}{beta}. Here,
+ \eqn{N} is the number of trials (e.g., litter size),
+ \eqn{T=NY} is the number of successes, and
+ \eqn{p} is the probability of a success (e.g., a malformation).
+ That is, \eqn{Y} is the \emph{proportion} of successes. Like
+ \code{\link{binomialff}}, the fitted values are the
+ estimated probability
+ of success (i.e., \eqn{E[Y]} and not \eqn{E[T]})
+ and the prior weights \eqn{N} are attached separately on the
+ object in a slot.
+
+ The probability function is
+ \deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
+ {B(\alpha, \beta)}}{%
+ P(T=t) = choose(N,t) B(alpha+t, beta+N-t) / B(alpha, beta)}
+ where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the beta function
+ with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
+ Recall \eqn{Y = T/N} is the real response being modelled.
+
+ The default model is \eqn{\eta_1 = logit(\mu)}{eta1 =logit(mu)}
+ and \eqn{\eta_2 = logit(\rho)}{eta2 = logit(rho)} because both
+ parameters lie between 0 and 1.
+ The mean (of \eqn{Y}) is
+ \eqn{p = \mu = \alpha / (\alpha + \beta)}{p = mu = alpha / (alpha + beta)}
+ and the variance (of \eqn{Y}) is
+ \eqn{\mu(1-\mu)(1+(N-1)\rho)/N}{mu(1-mu)(1+(N-1)rho)/N}.
+ Here, the correlation \eqn{\rho}{rho} is given by
+ \eqn{1/(1 + \alpha + \beta)}{1/(1 + alpha + beta)}
+ and is the correlation between the \eqn{N} individuals
+ within a litter. A \emph{litter effect} is typically reflected by a
+ positive value of \eqn{\rho}{rho}. It is known as the
+ \emph{over-dispersion parameter}.
+
+ This family function uses Fisher scoring.
+ Elements of the second-order expected
+ derivatives with respect to \eqn{\alpha}{alpha} and
+ \eqn{\beta}{beta} are computed numerically, which may
+ fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
+ \eqn{N} or else take a long time.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}.
+
+ Suppose \code{fit} is a fitted beta-binomial model. Then
+ \code{fit@y} contains the sample proportions \eqn{y},
+ \code{fitted(fit)} returns estimates of \eqn{E(Y)}, and
+ \code{weights(fit, type="prior")} returns the number
+ of trials \eqn{N}.
+
+}
+\references{
+ Moore, D. F. and Tsiatis, A. (1991)
+ Robust estimation of the variance in moment methods for
+ extra-binomial and extra-Poisson variation.
+ \emph{Biometrics},
+ \bold{47}, 383--401.
+
+ Prentice, R. L. (1986)
+ Binary regression using an extended beta-binomial distribution,
+ with discussion of correlation induced by
+ covariate measurement errors.
+ \emph{Journal of the American Statistical Association},
+ \bold{81}, 321--327.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ This function processes the input in the same way
+ as \code{\link{binomialff}}. But it does not handle
+ the case \eqn{N=1} very well because there are two
+ parameters to estimate, not one, for each row of the input.
+ Cases where \eqn{N=1} can be omitted via the
+ \code{subset} argument of \code{\link{vglm}}.
+
+ The \emph{extended} beta-binomial distribution of Prentice (1986)
+ is currently not implemented in the \pkg{VGAM} package as it has
+ range-restrictions for the correlation parameter that are currently
+ too difficult to handle in this package.
+
+}
+\section{Warning }{
+ This family function is prone to numerical difficulties
+ due to the expected information matrices not being positive-definite
+ or ill-conditioned over some regions of the parameter space.
+ If problems occur try setting \code{irho} to some other
+ value, or else use the \code{etastart} argument of
+ \code{\link{vglm}}, etc.
+
+}
+\seealso{
+ \code{\link{betabin.ab}},
+ \code{\link{Betabin}},
+ \code{\link{binomialff}},
+ \code{\link{betaff}},
+ \code{\link{dirmultinomial}},
+ \code{\link{lirat}}.
+
+}
+\examples{
+# Example 1
+N = 10; mu = 0.5; rho = 0.8
+y = rbetabin(n=100, size=N, prob=mu, rho=rho)
+fit = vglm(cbind(y,N-y) ~ 1, betabinomial, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+cbind(fit@y, weights(fit, type="prior"))[1:5,]
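+
+# Sketch: the empirical variance of Y = T/N should be close to the
+# Details formula mu*(1-mu)*(1+(N-1)*rho)/N at the true values above.
+var(y/N)
+mu * (1-mu) * (1 + (N-1)*rho) / N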
+
+
+# Example 2
+data(lirat)
+fit = vglm(cbind(R,N-R) ~ 1, betabinomial, data=lirat,
+ trace=TRUE, subset=N>1)
+coef(fit, matrix=TRUE)
+Coef(fit)
+t(fitted(fit))
+t(fit@y)
+t(weights(fit, type="prior"))
+
+
+# Example 3, which is more complicated
+lirat = transform(lirat, fgrp = factor(grp))
+summary(lirat) # Only 5 litters in group 3
+fit2 = vglm(cbind(R,N-R) ~ fgrp + hb, betabinomial(zero=2),
+ data=lirat, trace=TRUE, subset=N>1)
+coef(fit2, matrix=TRUE)
+\dontrun{
+plot(lirat$hb[lirat$N>1], fit2@misc$rho,
+ xlab="Hemoglobin", ylab="Estimated rho",
+ pch=as.character(lirat$grp[lirat$N>1]),
+ col=lirat$grp[lirat$N>1])
+}
+\dontrun{
+data(lirat)
+attach(lirat)
+# cf. Figure 3 of Moore and Tsiatis (1991)
+plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
+ xlab="Hemoglobin level", ylab="Proportion Dead",
+ main="Fitted values (lines)")
+detach(lirat)
+
+smalldf = lirat[lirat$N>1,]
+for(gp in 1:4) {
+ xx = smalldf$hb[smalldf$grp==gp]
+ yy = fitted(fit2)[smalldf$grp==gp]
+ o = order(xx)
+ lines(xx[o], yy[o], col=gp)
+}
+}
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/betaff.Rd b/man/betaff.Rd
new file mode 100644
index 0000000..6278870
--- /dev/null
+++ b/man/betaff.Rd
@@ -0,0 +1,140 @@
+\name{betaff}
+\alias{betaff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The Two-parameter Beta Distribution Family Function }
+\description{
+ Estimation of the shape parameters of the
+ two-parameter Beta distribution.
+
+}
+\usage{
+betaff(link = "loge", i1 = NULL, i2 = NULL, trim = 0.05,
+ A = 0, B = 1, earg=list(), zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the two shape parameters.
+ See \code{\link{Links}} for more choices.
+ A log link (default) ensures that the parameters are positive.
+
+ }
+ \item{i1, i2}{
+ Initial value for the first and second shape parameters respectively.
+ A \code{NULL} value means it is obtained in the \code{initialize} slot.
+
+ }
+ \item{trim}{
+ An argument which is fed into \code{mean()}; it is the fraction (0
+ to 0.5) of observations to be trimmed from each end of the response
+ \code{y} before the mean is computed. This is used when computing
+ initial values, and guards against outliers.
+
+ }
+ \item{A, B}{
+ Lower and upper limits of the distribution.
+ The defaults correspond to the \emph{standard beta distribution}
+ where the response lies between 0 and 1.
+
+ }
+ \item{earg}{
+ List. Extra argument associated with \code{link}
+ containing any extra information.
+ See \code{\link{Links}} for general information about \pkg{VGAM} link
+ functions.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ If used, the value must be from the set \{1,2\} which correspond to
+ the first and second shape parameters respectively.
+
+ }
+}
+\details{
+ The two-parameter Beta distribution is given by
+ \eqn{f(y) =}
+ \deqn{(y-A)^{shape1-1} \times (B-y)^{shape2-1} / [Beta(shape1,shape2)
+ \times (B-A)^{shape1+shape2-1}]}{%
+ (y-A)^(shape1-1) * (B-y)^(shape2-1) / [Beta(shape1,shape2) *
+ (B-A)^(shape1+shape2-1)]}
+ for \eqn{A < y < B}, and \eqn{Beta(.,.)} is the beta function
+ (see \code{\link[base:Special]{beta}}).
+ The shape parameters are positive, and
+ here, the limits \eqn{A} and \eqn{B} are known.
+ The mean of \eqn{Y} is \eqn{E(Y) = A + (B-A) \times shape1 /
+ (shape1 + shape2)}{E(Y) = A + (B-A) * shape1 /
+ (shape1 + shape2)}, and these are the fitted values of the object.
+
+ For the standard beta distribution the variance of \eqn{Y} is
+ \eqn{shape1 \times shape2 / [(1+shape1+shape2) \times (shape1+shape2)^2]}{
+ shape1 * shape2 / ((1+shape1+shape2) * (shape1+shape2)^2)}.
+ If \eqn{\sigma^2= 1 / (1+shape1+shape2)}
+ then the variance of \eqn{Y} can be written
+ \eqn{\sigma^2 \mu (1-\mu)}{mu*(1-mu)*sigma^2} where
+ \eqn{\mu=shape1 / (shape1 + shape2)}{mu=shape1 / (shape1 + shape2)}
+ is the mean of \eqn{Y}.
+
+ If \eqn{A} and \eqn{B} are unknown, then the \pkg{VGAM} family function
+ \code{beta4()} can be used to estimate these too.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+ Chapter 25 of:
+ \emph{Continuous Univariate Distributions},
+ 2nd edition, Volume 2, New York: Wiley.
+
+ Gupta, A. K. and Nadarajah, S. (2004)
+ \emph{Handbook of Beta Distribution and Its Applications},
+ NY: Marcel Dekker, Inc.
+
+%Evans, M., Hastings, N. and Peacock, B. (2000)
+%\emph{Statistical Distributions},
+%New York: Wiley-Interscience, Third edition.
+
+ Documentation accompanying the \pkg{VGAM} package at
+ \url{http://www.stat.auckland.ac.nz/~yee}
+ contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response must have values in the interval (\eqn{A}, \eqn{B}).
+
+}
+
+\seealso{
+ \code{\link[stats:Beta]{Beta}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{betabin.ab}},
+ \code{\link{betageometric}},
+ \code{\link{betaprime}},
+ \code{\link{rbetageom}},
+ \code{\link{rbetanorm}},
+ \code{beta4}.
+}
+\examples{
+y = rbeta(n=1000, shape1=1, shape2=3)
+fit = vglm(y ~ 1, betaff(link="identity"), trace = TRUE, crit="c")
+fit = vglm(y ~ 1, betaff, trace = TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit) # Useful for intercept-only models
+
+Y = 5 + 8 * y # From 5 to 13, not 0 to 1
+fit = vglm(Y ~ 1, betaff(A=5, B=13), trace = TRUE)
+Coef(fit)
+fitted(fit)[1:4,]
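+
+# Sketch: E(Y) = A + (B-A)*shape1/(shape1+shape2); Coef(fit) is assumed
+# to return the two shape parameters in order.
+cc = Coef(fit)
+5 + 8 * cc[1] / (cc[1] + cc[2])
+mean(Y)  # Should be close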
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/betageomUC.Rd b/man/betageomUC.Rd
new file mode 100644
index 0000000..52dcbb0
--- /dev/null
+++ b/man/betageomUC.Rd
@@ -0,0 +1,76 @@
+\name{Betageom}
+\alias{Betageom}
+\alias{dbetageom}
+\alias{pbetageom}
+%\alias{qbetageom}
+\alias{rbetageom}
+\title{The Beta-Geometric Distribution}
+\description{
+ Density, distribution function, and random
+ generation for the beta-geometric distribution.
+
+}
+\usage{
+dbetageom(x, shape1, shape2, log=FALSE)
+pbetageom(q, shape1, shape2, log.p=FALSE)
+rbetageom(n, shape1, shape2)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+% \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{shape1, shape2}{
+ the two (positive) shape parameters of the standard
+ beta distribution. They are called \code{a} and \code{b} in
+ \code{\link[base:Special]{beta}} respectively.
+
+ }
+ \item{log, log.p}{
+ Logical.
+ If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
+
+ }
+}
+\value{
+ \code{dbetageom} gives the density,
+ \code{pbetageom} gives the distribution function, and
+% \code{qbetageom} gives the quantile function, and
+ \code{rbetageom} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ The beta-geometric distribution is a geometric distribution whose
+ probability of success is not constant but is generated from a
+ beta distribution with parameters \code{shape1} and \code{shape2}.
+ Note that the mean of this beta distribution is
+ \code{shape1/(shape1+shape2)}, which is therefore the mean
+ of the probability of success.
+
+
+% See zz code{link{betageomzz}}, the \pkg{VGAM} family function
+% for estimating the parameters,
+% for the formula of the probability density function and other details.
+
+}
+\note{
+ \code{pbetageom} can be particularly slow.
+}
+\seealso{
+ \code{\link{geometric}},
+ \code{\link{betaff}},
+ \code{\link[stats:Beta]{Beta}}.
+}
+\examples{
+\dontrun{
+shape1 = 1; shape2 = 2; y = 0:30
+proby = dbetageom(y, shape1, shape2, log=FALSE)
+plot(y, proby, type="h", col="blue", ylab="P[Y=y]",
+ main=paste("Y ~ Beta-geometric(shape1=",shape1,", shape2=",shape2,")",
+ sep=""))
+sum(proby)
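+
+# Sketch: dbetageom should equal the beta mixture of geometric pmfs,
+# i.e., the integral of dbeta(p)*p*(1-p)^y over (0,1); numerical check.
+mix = sapply(y, function(yy)
+    integrate(function(p) dbeta(p, shape1, shape2) * p * (1-p)^yy,
+              lower=0, upper=1)$value)
+max(abs(mix - proby))  # Should be near 0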
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
new file mode 100644
index 0000000..cd84b18
--- /dev/null
+++ b/man/betageometric.Rd
@@ -0,0 +1,123 @@
+\name{betageometric}
+\alias{betageometric}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Beta-geometric Distribution Family Function }
+\description{
+ Maximum likelihood estimation for the beta-geometric distribution.
+
+}
+\usage{
+betageometric(lprob="logit", lshape="loge",
+ iprob = NULL, ishape = 0.1,
+ moreSummation=c(2,100), tolerance=1.0e-10, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lprob, lshape}{
+ Parameter link functions applied to the
+ parameters \eqn{p}{prob} and \eqn{\phi}{phi}
+ (called \code{prob} and \code{shape} below).
+ The former lies in the unit interval and the latter is positive.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iprob, ishape}{
+ Numeric.
+ Initial values for the two parameters.
+ A \code{NULL} means a value is computed internally.
+
+ }
+ \item{moreSummation}{
+ Integer, of length 2.
+ When computing the expected information matrix a series summation from
+ 0 to \code{moreSummation[1]*max(y)+moreSummation[2]} is made, in which the
+ upper limit is an approximation to infinity.
+ Here, \code{y} is the response.
+
+ }
+ \item{tolerance}{
+ Positive numeric.
+ When all terms are less than this then the series is deemed to have
+ converged.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ If used, the value must be from the set \{1,2\}.
+
+ }
+}
+\details{
+ A random variable \eqn{Y} has a 2-parameter beta-geometric distribution
+ if \eqn{P(Y=y) = p (1-p)^y}{P(Y=y) = prob * (1-prob)^y}
+ for \eqn{y=0,1,2,\ldots}{y=0,1,2,...} where
+ \eqn{p}{prob} is generated from a standard beta distribution with
+ shape parameters \code{shape1} and \code{shape2}.
+ The parameterization here is to focus on the parameters
+ \eqn{p}{prob} and
+ \eqn{\phi = 1/(shape1+shape2)}{phi = 1/(shape1+shape2)},
+ where \eqn{\phi}{phi} is \code{shape}.
+ The default link functions for these ensure that the appropriate range
+ of the parameters is maintained.
+ The mean of \eqn{Y} is
+ \eqn{E(Y) = shape2 / (shape1-1) = (1-p) / (p-\phi)}{E(Y) =
+ shape2 / (shape1-1) = (1-prob) / (prob-phi)}.
+
+ The geometric distribution is a special case of the beta-geometric
+ distribution with \eqn{\phi=0}{phi=0} (see \code{\link{geometric}}).
+ However, fitting data from a geometric distribution may result in
+ numerical problems because the estimate of \eqn{\log(\phi)}{log(phi)}
+ will 'converge' to \code{-Inf}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Paul, S. R. (2005)
+ Testing goodness of fit of the geometric distribution:
+ an application to human fecundability data.
+ \emph{Journal of Modern Applied Statistical Methods}, \bold{4}, 425--433.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ The first iteration may be very slow;
+ if practical, it is best for the \code{weights} argument of
+ \code{\link{vglm}} etc. to be used rather than inputting a very
+ long vector as the response, i.e., \code{vglm(y ~ 1, ..., weights=wts)}
+ is to be preferred over \code{vglm(rep(y, wts) ~ 1, ...)}.
+ If convergence problems occur try inputting some values of argument
+ \code{ishape}.
+
+ If an intercept-only model is fitted then the \code{misc} slot of the
+ fitted object has list components \code{shape1} and \code{shape2}.
+
+}
+
+\seealso{
+ \code{\link{geometric}},
+ \code{\link{betaff}},
+ \code{\link{rbetageom}}.
+
+}
+\examples{
+y = 0:11; wts = c(227,123,72,42,21,31,11,14,6,4,7,28)
+fit = vglm(y ~ 1, fam=betageometric, weight=wts, trace=TRUE)
+fitg = vglm(y ~ 1, fam= geometric, weight=wts, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+diag(vcov(fit, untrans=TRUE))^0.5
+fit@misc$shape1
+fit@misc$shape2
+# Very strong evidence of a beta-geometric:
+1-pchisq(2*(logLik(fit)-logLik(fitg)), df=1)
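+
+# Sketch: compare the weighted sample mean with the Details formula
+# E(Y) = (1-prob)/(prob-phi); the ordering of Coef(fit) as
+# (prob, shape) is an assumption here.
+cc = Coef(fit)
+(1 - cc[1]) / (cc[1] - cc[2])
+weighted.mean(y, wts)  # Should be close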
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/betanormUC.Rd b/man/betanormUC.Rd
new file mode 100644
index 0000000..e080c25
--- /dev/null
+++ b/man/betanormUC.Rd
@@ -0,0 +1,86 @@
+\name{Betanorm}
+\alias{Betanorm}
+\alias{dbetanorm}
+\alias{pbetanorm}
+\alias{qbetanorm}
+\alias{rbetanorm}
+\title{The Beta-Normal Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the univariate beta-normal distribution.
+
+}
+\usage{
+dbetanorm(x, shape1, shape2, mean=0, sd=1, log.arg=FALSE)
+pbetanorm(q, shape1, shape2, mean=0, sd=1, lower.tail=TRUE, log.p=FALSE)
+qbetanorm(p, shape1, shape2, mean=0, sd=1)
+rbetanorm(n, shape1, shape2, mean=0, sd=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{shape1, shape2}{
+ the two (positive) shape parameters of the standard
+ beta distribution. They are called \code{a} and \code{b} in
+ \code{\link[base:Special]{beta}}.
+
+ }
+ \item{mean, sd}{
+ the mean and standard deviation of the univariate
+ normal distribution.
+
+ }
+ \item{log.arg, log.p}{
+ Logical.
+ If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
+
+ }
+ \item{lower.tail}{
+ Logical. If \code{TRUE} (the default) then the lower tail (the
+ distribution function) is returned; if \code{FALSE}, the upper
+ tail, i.e., one minus the usual answer, is returned.
+
+ }
+}
+\value{
+ \code{dbetanorm} gives the density,
+ \code{pbetanorm} gives the distribution function,
+ \code{qbetanorm} gives the quantile function, and
+ \code{rbetanorm} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ The function \code{betanormal1}, the \pkg{VGAM} family function
+ for estimating the parameters,
+ has not yet been written.
+
+% for the formula of the probability density function and other details.
+}
+%\note{
+%}
+%\seealso{
+% zz code{link{betanormal1}}.
+%}
+\examples{
+\dontrun{
+shape1 = 0.1; shape2 = 4; m = 1
+x = seq(-10, 2, len=501)
+plot(x, dbetanorm(x, shape1, shape2, m=m), type="l", ylim=0:1, las=1,
+ ylab=paste("betanorm(",shape1,", ",shape2,", m=",m, ", sd=1)", sep=""),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles", col="blue")
+lines(x, pbetanorm(x, shape1, shape2, m=m), col="red")
+abline(h=0)
+probs = seq(0.1, 0.9, by=0.1)
+Q = qbetanorm(probs, shape1, shape2, m=m)
+lines(Q, dbetanorm(Q, shape1, shape2, m=m), col="purple",
+ lty=3, type="h")
+lines(Q, pbetanorm(Q, shape1, shape2, m=m), col="purple", lty=3, type="h")
+abline(h=probs, col="purple", lty=3)
+pbetanorm(Q, shape1, shape2, m=m) - probs # Should be all 0
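+
+# Sketch: the beta-normal density is assumed here to follow the standard
+# beta-generated construction dbeta(pnorm(x), a, b) * dnorm(x); if that
+# holds, the difference below is ~0.
+max(abs(dbetanorm(x, shape1, shape2, m=m) -
+        dbeta(pnorm(x, mean=m), shape1, shape2) * dnorm(x, mean=m)))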
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/betaprime.Rd b/man/betaprime.Rd
new file mode 100644
index 0000000..29b36ca
--- /dev/null
+++ b/man/betaprime.Rd
@@ -0,0 +1,115 @@
+\name{betaprime}
+\alias{betaprime}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The Beta-Prime Distribution }
+\description{
+ Estimation of the two shape parameters of the beta-prime distribution
+ by maximum likelihood.
+
+}
+\usage{
+betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the two (positive) shape parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{i1, i2}{
+ Initial values for the first and second shape parameters.
+ A \code{NULL} value means it is obtained in the \code{initialize} slot.
+ Note that \code{i2} is obtained using \code{i1}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The value must be from the set
+ \{1,2\}, corresponding to \code{shape1} and \code{shape2}
+ respectively. If \code{zero=NULL} then both parameters are modelled
+ with the explanatory variables.
+
+ }
+}
+
+%% what is the mean if shape2 < 1?
+
+\details{
+ The beta-prime distribution is given by
+ \deqn{f(y) = y^{shape1-1} (1+y)^{-shape1-shape2} /
+ B(shape1,shape2)}{%
+ f(y) = y^(shape1-1) * (1+y)^(-shape1-shape2) /
+ B(shape1,shape2) }
+ for \eqn{y > 0}.
+ The shape parameters are positive, and
+ here, \eqn{B} is the beta function.
+ The mean of \eqn{Y} is \eqn{shape1 / (shape2-1)} provided \eqn{shape2>1}.
+
+ If \eqn{Y} has a \eqn{Beta(shape1,shape2)} distribution then
+ \eqn{Y/(1-Y)} and \eqn{(1-Y)/Y} have a \eqn{Betaprime(shape1,shape2)}
+ and \eqn{Betaprime(shape2,shape1)} distribution respectively.
+ Also, if \eqn{Y_1}{Y1} has a \eqn{gamma(shape1)} distribution
+ and \eqn{Y_2}{Y2} has a \eqn{gamma(shape2)} distribution
+ then \eqn{Y_1/Y_2}{Y1/Y2} has a \eqn{Betaprime(shape1,shape2)}
+ distribution.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+
+%% zz not sure about the JKB reference.
+\references{
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+Chapter 25 of:
+\emph{Continuous Univariate Distributions},
+2nd edition,
+Volume 2,
+New York: Wiley.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response must have positive values only.
+
+ The beta-prime distribution is also known as the
+ \emph{beta distribution of the second kind} or the
+ \emph{inverted beta distribution}.
+
+}
+
+\seealso{
+ \code{\link{betaff}}.
+}
+\examples{
+yb = rbeta(n <- 1000, shape1=exp(1), shape2=exp(3))
+y1 = (1-yb)/yb
+y2 = yb/(1-yb)
+y3 = rgamma(n, exp(3)) / rgamma(n, exp(2))
+
+fit1 = vglm(y1 ~ 1, betaprime, trace=TRUE)
+coef(fit1, matrix=TRUE)
+
+fit2 = vglm(y2 ~ 1, betaprime, trace=TRUE)
+coef(fit2, matrix=TRUE)
+
+fit3 = vglm(y3 ~ 1, betaprime, trace=TRUE)
+coef(fit3, matrix=TRUE)
+
+# Compare the fitted values
+mean(y3)
+fitted(fit3)[1:5]
+Coef(fit3) # Useful for intercept-only models
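+
+# Sketch: E(Y) = shape1/(shape2-1) from Details; Coef(fit3) is assumed
+# to return (shape1, shape2) in order.
+cc = Coef(fit3)
+cc[1] / (cc[2] - 1)  # Compare with mean(y3) above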
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/bilogis4UC.Rd b/man/bilogis4UC.Rd
new file mode 100644
index 0000000..1b2b905
--- /dev/null
+++ b/man/bilogis4UC.Rd
@@ -0,0 +1,70 @@
+\name{bilogis4}
+\alias{bilogis4}
+\alias{dbilogis4}
+\alias{pbilogis4}
+\alias{rbilogis4}
+\title{Bivariate Logistic Distribution}
+\description{
+ Density, distribution function, and random generation
+ for the 4-parameter bivariate logistic distribution.
+
+}
+\usage{
+dbilogis4(x1, x2, loc1=0, scale1=1, loc2=0, scale2=1)
+pbilogis4(q1, q2, loc1=0, scale1=1, loc2=0, scale2=1)
+rbilogis4(n, loc1=0, scale1=1, loc2=0, scale2=1)
+}
+\arguments{
+ \item{x1, x2, q1, q2}{vector of quantiles.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{loc1, loc2}{the location parameters \eqn{l_1}{l1} and \eqn{l_2}{l2}.}
+ \item{scale1, scale2}{the scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.}
+}
+\value{
+ \code{dbilogis4} gives the density,
+ \code{pbilogis4} gives the distribution function, and
+ \code{rbilogis4} generates random deviates (a two-column matrix).
+}
+\references{
+
+Gumbel, E. J. (1961)
+Bivariate logistic distributions.
+\emph{Journal of the American Statistical Association},
+\bold{56}, 335--349.
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{bilogistic4}}, the \pkg{VGAM} family function for
+ estimating the four parameters by maximum likelihood estimation, for
+ the formula of the cumulative distribution function and other details.
+
+}
+%\note{
+%}
+\seealso{
+ \code{\link{bilogistic4}}.
+}
+\examples{
+\dontrun{
+par(mfrow=c(1,3))
+n = 2000
+ymat = rbilogis4(n, loc1=5, loc2=7, scale2=exp(1))
+myxlim = c(-2,15)
+myylim = c(-10,30)
+plot(ymat, xlim=myxlim, ylim=myylim)
+
+N = 100
+x1 = seq(myxlim[1], myxlim[2], len=N)
+x2 = seq(myylim[1], myylim[2], len=N)
+ox = expand.grid(x1, x2)
+z = dbilogis4(ox[,1], ox[,2], loc1=5, loc2=7, scale2=exp(1))
+contour(x1, x2, matrix(z, N, N), main="density")
+z = pbilogis4(ox[,1], ox[,2], loc1=5, loc2=7, scale2=exp(1))
+contour(x1, x2, matrix(z, N, N), main="cdf")
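+
+# Sketch: the first marginal of the bivariate logistic is standard
+# logistic with location loc1=5 and scale1=1; overlay its CDF.
+plot(ecdf(ymat[,1]), main="Marginal of Y1 vs logistic CDF")
+lines(x1, plogis(x1, location=5, scale=1), col="orange")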
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/bilogistic4.Rd b/man/bilogistic4.Rd
new file mode 100644
index 0000000..a8cbc8a
--- /dev/null
+++ b/man/bilogistic4.Rd
@@ -0,0 +1,128 @@
+\name{bilogistic4}
+\alias{bilogistic4}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bivariate Logistic Distribution Family Function }
+\description{
+ Estimates the four parameters of the bivariate logistic distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+bilogistic4(llocation="identity", lscale="loge",
+ iloc1=NULL, iscale1=NULL, iloc2=NULL, iscale2=NULL,
+ method.init=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation}{
+ Link function applied to both location parameters
+ \eqn{l_1}{l1} and \eqn{l_2}{l2}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lscale}{
+ Parameter link function applied to both
+ (positive) scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iloc1, iloc2}{ Initial values for the location parameters.
+ By default, initial values are chosen internally using
+ \code{method.init}. Assigning values here will override
+ the argument \code{method.init}. }
+ \item{iscale1, iscale2}{ Initial values for the scale parameters.
+ By default, initial values are chosen internally using
+ \code{method.init}. Assigning values here will override
+ the argument \code{method.init}. }
+ \item{method.init}{ An integer with value \code{1} or \code{2} which
+ specifies the initialization method. If failure to converge occurs
+ try the other value. }
+ \item{zero}{ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The default is none of them.
+ If used, choose values from the set \{1,2,3,4\}.
+ }
+}
+\details{
+ The four-parameter bivariate logistic distribution
+ has a density that can be written as
+ \deqn{f(y_1,y_2;l_1,s_1,l_2,s_2) = 2 \frac{\exp[-(y_1-l_1)/s_1 -
+ (y_2-l_2)/s_2]}{
+ s_1 s_2 \left( 1 + \exp[-(y_1-l_1)/s_1] + \exp[-(y_2-l_2)/s_2]
+ \right)^3}}{%
+ f(y1,y2;l1,s1,l2,s2) = 2 * exp[-(y1-l1)/s1 - (y2-l2)/s2] /
+ [s1 * s2 * ( 1 + exp[-(y1-l1)/s1] + exp[-(y2-l2)/s2] )^3]
+ }
+ where \eqn{s_1>0}{s1>0} and \eqn{s_2>0}{s2>0} are the scale parameters,
+ and \eqn{l_1}{l1} and \eqn{l_2}{l2} are the location parameters.
+ Each of the two responses is unbounded, i.e.,
+ \eqn{-\infty<y_j<\infty}{-Inf<y_j<Inf}.
+ The mean of \eqn{Y_1}{Y1} is \eqn{l_1}{l1} etc.
+ The fitted values are returned in a 2-column matrix.
+ The cumulative distribution function is
+ \deqn{F(y_1,y_2;l_1,s_1,l_2,s_2) =
+ \left( 1 + \exp[-(y_1-l_1)/s_1] + \exp[-(y_2-l_2)/s_2]
+ \right)^{-1}}{%
+ F(y1,y2;l1,s1,l2,s2) = 1 / (1 + exp[-(y1-l1)/s1] + exp[-(y2-l2)/s2])
+ }
+ The marginal distribution of \eqn{Y_1}{Y1} is
+ \deqn{P(Y_1 \leq y_1) = F(y_1;l_1,s_1) =
+ \left( 1 + \exp[-(y_1-l_1)/s_1] \right)^{-1} .}{%
+ P(Y1 <= y1) = F(y1;l1,s1) = 1 / (1 + exp[-(y1-l1)/s1]).
+ }
+
+ By default, \eqn{\eta_1=l_1}{eta1=l1},
+ \eqn{\eta_2=\log(s_1)}{eta2=log(s1)},
+ \eqn{\eta_3=l_2}{eta3=l2},
+ \eqn{\eta_4=\log(s_2)}{eta4=log(s2)} are the linear/additive
+ predictors.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+}
+\references{
+Gumbel, E. J. (1961)
+Bivariate logistic distributions.
+\emph{Journal of the American Statistical Association},
+\bold{56}, 335--349.
+
+Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. M. (2005)
+\emph{Extreme Value and Related Models with Applications in
+ Engineering and Science},
+Hoboken, N.J.: Wiley-Interscience.
+
+}
+\author{ T. W. Yee }
+\note{
+ This family function uses the BFGS quasi-Newton update formula for the
+ working weight matrices. Consequently the estimated variance-covariance
+ matrix may be inaccurate or simply wrong! The standard errors must
+ therefore be treated with caution; these are computed in functions such
+ as \code{vcov()} and \code{summary()}.
+
+}
+%\section{Warning }{
+%}
+
+\seealso{
+ \code{\link{logistic}},
+ \code{\link{rbilogis4}}.
+}
+\examples{
+ymat = rbilogis4(n <- 1000, loc1=5, loc2=7, scale2=exp(1))
+\dontrun{plot(ymat)}
+fit = vglm(ymat ~ 1, fam=bilogistic4, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+fitted(fit)[1:4,]
+vcov(fit)
+weights(fit, type="w")[1:4,]
+summary(fit)
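+
+# Sketch: Details give P(Y1 <= y1) = 1/(1 + exp[-(y1-l1)/s1]), i.e., a
+# logistic marginal; compare empirical and model-based marginal CDFs at
+# y1=6 (the ordering of Coef(fit) as (loc1, scale1, loc2, scale2) is an
+# assumption here).
+cc = Coef(fit)
+mean(ymat[,1] <= 6)                     # Empirical
+plogis(6, location=cc[1], scale=cc[2])  # Model-based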
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
new file mode 100644
index 0000000..d20c27c
--- /dev/null
+++ b/man/binom2.or.Rd
@@ -0,0 +1,168 @@
+\name{binom2.or}
+\alias{binom2.or}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bivariate Logistic Regression }
+\description{
+ Fits a Palmgren (bivariate logistic regression) model to two binary
+ responses. More generally, a bivariate logistic/probit/cloglog/cauchit
+ model can be fitted.
+
+}
+\usage{
+binom2.or(lp = "logit", lp1 = lp, lp2 = lp, lor = "loge",
+ zero = 3, exchangeable = FALSE, tol = 0.001)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lp}{
+ Link function applied to the two marginal probabilities.
+ See \code{\link{Links}} for more choices.
+ See the note below.
+
+ }
+ \item{lp1}{
+ Link function applied to the first of the two marginal probabilities.
+
+ }
+ \item{lp2}{
+ Link function applied to the second of the two marginal probabilities.
+
+ }
+ \item{lor}{
+ Link function applied to the odds ratio.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{zero}{
+ Which linear/additive predictor is modelled as an intercept only? A
+ \code{NULL} means none.
+
+ }
+ \item{exchangeable}{
+ Logical. If \code{TRUE}, the two marginal probabilities are constrained
+ to be equal.
+
+ }
+ \item{tol}{
+ Tolerance for testing independence. Should be some
+ small positive numerical value.
+
+ }
+}
+\details{
+ Known also as the \emph{Palmgren model}, the bivariate logistic model is
+ a full-likelihood based model defined as two logistic regressions plus
+ \code{log(OR) = eta3} where \code{eta3} is the third linear/additive
+ predictor relating the odds ratio to explanatory variables.
+ Explicitly, the default model is
+ \deqn{logit[P(Y_j=1)] = \eta_j,\ \ \ j=1,2}{%
+ logit[P(Y_j=1)] = eta_j,\ \ \ j=1,2}
+ for the marginals, and
+ \deqn{\log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = \eta_3,}{%
+ log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = eta_3,}
+ specifies the dependency between the two responses. Here, the responses
+ equal 1 for a success and 0 for a failure, and the odds ratio is often
+ written \eqn{\psi=p_{00}p_{11}/(p_{10}p_{01})}{psi=p00 p11 / (p10 p01)}.
+ The model is fitted by maximum likelihood estimation since the full
+ likelihood is specified. The two binary responses are independent
+ if and only if the odds ratio is unity, or equivalently, the log odds
+ ratio is zero.
+
+ The default models \eqn{\eta_3}{eta3} as a single parameter only,
+ i.e., an intercept-only model, but this can be circumvented by setting
+ \code{zero=NULL} to model the odds ratio as a function of all the
+ explanatory variables.
+ The function \code{binom2.or} can handle \code{\link{probit}},
+ \code{\link{cloglog}} and \code{\link{cauchit}} links as well, so is
+ quite general. In fact, the two marginal probabilities can each have
+ a different link function. A similar model is the \emph{bivariate
+ probit model} (\code{\link{binom2.rho}}), which is based on a standard
+ bivariate normal distribution, but the bivariate probit model is less
+ interpretable and flexible.
+
+ The \code{exchangeable} argument should be used when the error structure
+ is exchangeable, e.g., with eyes or ears data.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+ When fitted, the \code{fitted.values} slot of the object contains the
+ four joint probabilities, labelled as \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0),
+ (0,1), (1,0), (1,1), respectively.
+ These estimated probabilities should be extracted with the \code{fitted}
+ generic function.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+ le Cessie, S. and van Houwelingen, J. C. (1994)
+ Logistic regression for correlated binary data.
+ \emph{Applied Statistics},
+ \bold{43}, 95--108.
+
+ Palmgren, J. (1989)
+ \emph{Regression Models for Bivariate Binary Responses}.
+ Technical Report no. 101, Department of Biostatistics,
+ University of Washington, Seattle.
+
+ Documentation accompanying the \pkg{VGAM} package at
+ \url{http://www.stat.auckland.ac.nz/~yee}
+ contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be either a 4-column matrix of counts (whose
+ columns correspond to \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0),
+ (1,1) respectively), or a two-column matrix where each column has two
+ distinct values.
+
+ By default, a constant odds ratio is fitted because \code{zero=3}.
+ Set \code{zero=NULL} if you want the odds ratio to be modelled as a
+ function of the explanatory variables; however, numerical problems
+ are more likely to occur.
+
+ The argument \code{lp}, which is actually redundant, is used for
+ convenience and for upward compatibility: specifying \code{lp} only
+ means the link function will be applied to \code{lp1} and \code{lp2}.
+ Users who want a different link function for each of the two marginal
+ probabilities should use the \code{lp1} and \code{lp2} arguments,
+ and the argument \code{lp} is then ignored. It doesn't make sense
+ to specify \code{exchangeable=TRUE} and have different link functions
+ for the two marginal probabilities.
+
+}
+\seealso{
+ \code{\link{binom2.rho}},
+ \code{\link{loglinb2}},
+ \code{\link{coalminers}},
+ \code{\link{binomialff}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}}.
+}
+\examples{
+# Fit the model in Table 6.7 in McCullagh and Nelder (1989)
+data(coalminers)
+coalminers = transform(coalminers, Age = (age - 42) / 5)
+fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero=NULL), coalminers)
+fitted(fit)
+summary(fit)
+coef(fit, matrix=TRUE)
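+
+# Illustrative check (column order assumed as documented in Value:
+# (0,0), (0,1), (1,0), (1,1)): the log odds ratio computed from the
+# fitted joint probabilities should equal the third linear predictor.
+pj = fitted(fit)
+max(abs(log(pj[,1]*pj[,4] / (pj[,2]*pj[,3])) - predict(fit)[,3])) # Should be ~0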
+\dontrun{
+attach(coalminers)
+matplot(Age, fitted(fit), type="l", las=1, xlab="(age - 42) / 5",
+ main=paste("B=Breathlessness, W=Wheeze; 1=(B=0,W=0),",
+ "2=(B=0,W=1), 3=(B=1,W=0), 4=(B=1,W=1)"))
+matpoints(Age, fit@y, col=1:4)
+detach(coalminers)
+}
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
new file mode 100644
index 0000000..0bbfe82
--- /dev/null
+++ b/man/binom2.rho.Rd
@@ -0,0 +1,107 @@
+\name{binom2.rho}
+\alias{binom2.rho}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bivariate Probit Model }
+\description{
+ Fits a bivariate probit model to two binary responses.
+}
+\usage{
+binom2.rho(lrho = "rhobit", init.rho = 0.4, zero = 3, exchangeable = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lrho}{
+ Link function applied to the \eqn{\rho}{rho} association parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.rho}{
+ Initial value for \eqn{\rho}{rho}.
+ This should lie between \eqn{-1} and \eqn{1}.
+
+ }
+ \item{zero}{
+ Which linear/additive predictor is modelled as an
+ intercept only? A \code{NULL} means none.
+
+ }
+ \item{exchangeable}{
+ Logical.
+ If \code{TRUE}, the two marginal probabilities are constrained to
+ be equal.
+
+ }
+}
+\details{
+ The \emph{bivariate probit model} was one of the earliest regression
+ models to handle two binary responses jointly. It has a probit
+ link for each of the two marginal probabilities, and models the
+ association between the responses by the \eqn{\rho}{rho} parameter
+ of a standard bivariate normal distribution (with zero means and
+ unit variances). One can think of the joint probabilities being
+ \eqn{\Phi(\eta_1,\eta_2;\rho)}{Phi(eta1,eta2;rho)} where \eqn{\Phi}{Phi}
+ is the cumulative distribution function of a standard bivariate normal
+ distribution with correlation parameter \eqn{\rho}{rho}.
+
+ The bivariate probit model should not be confused with a \emph{bivariate
+ logit model} with a probit link (see \code{\link{binom2.or}}).
+ The latter uses the odds ratio to quantify the association. Actually,
+ the bivariate logit model is recommended over the bivariate probit
+ model because the odds ratio is a more natural way of measuring the
+ association between two binary responses.
+
+ }
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+ When fitted, the \code{fitted.values} slot of the object contains the
+ four joint probabilities, labelled as
+ \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively.
+
+}
+
+\references{
+Ashford, J. R. and Sowden, R. R. (1970)
+ Multi-variate probit analysis.
+ \emph{Biometrics}, \bold{26}, 535--546.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be either a 4-column matrix of counts (whose
+ columns correspond to \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0),
+ (1,1) respectively), or a two-column matrix where each column has two
+ distinct values.
+
+ By default, a constant \eqn{\rho}{rho} is fitted because \code{zero=3}.
+ Set \code{zero=NULL} if you want the \eqn{\rho}{rho} parameter to
+ be modelled as a function of the explanatory variables. The value
+ \eqn{\rho}{rho} lies in the interval \eqn{(-1,1)}{(-1,1)}, therefore
+ a \code{\link{rhobit}} link is the default.
+
+ If convergence problems occur, try setting \code{init.rho} to some
+ value, e.g., a negative number such as \eqn{-0.5}{-0.5}.
+}
+\seealso{
+ \code{\link{binom2.or}},
+ \code{\link{loglinb2}},
+ \code{\link{coalminers}},
+ \code{\link{binomialff}},
+ \code{\link{rhobit}},
+ \code{\link{fisherz}}.
+}
+\examples{
+data(coalminers)
+coalminers = transform(coalminers, Age = (age - 42) / 5)
+fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.rho, coalminers)
+summary(fit)
+coef(fit, matrix=TRUE)
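+
+# Illustrative sanity check (not part of the original example): the four
+# fitted joint probabilities should sum to unity at each covariate value.
+range(rowSums(fitted(fit))) # Should both be 1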
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
new file mode 100644
index 0000000..785c670
--- /dev/null
+++ b/man/binomialff.Rd
@@ -0,0 +1,163 @@
+\name{binomialff}
+%\alias{binomial}
+\alias{binomialff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Binomial Family Function }
+\description{
+ Family function for fitting generalized linear models to binomial
+ responses, where the dispersion parameter may be known or unknown.
+
+}
+\usage{
+binomialff(link = "logit", dispersion = 1, mv = FALSE, onedpar = !mv,
+ parallel = FALSE, earg = NULL, zero = NULL)
+
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ The notation \eqn{M} is used to denote the number of linear/additive
+ predictors.
+
+ \item{link}{
+ Link function. See \code{\link{Links}} for more choices.
+
+ }
+ \item{dispersion}{
+ Dispersion parameter. By default the dispersion parameter is known
+ (equal to unity), so maximum likelihood is used to estimate the model.
+ However, the user can specify
+ \code{dispersion = 0} to have it estimated, or else specify a known
+ positive value (or values if \code{mv} is \code{TRUE}).
+
+ }
+ \item{mv}{
+ Multivariate response? If \code{TRUE}, then the response is interpreted
+ as \eqn{M} binary responses, where \eqn{M} is the number of columns
+ of the response matrix. In this case, the response matrix should have
+ zero/one values only.
+
+ If \code{FALSE} and the response is a (2-column) matrix, then the number
+ of successes is given in the first column, and the second column is
+ the number of failures.
+
+ }
+ \item{onedpar}{
+ One dispersion parameter? If \code{mv}, then a separate dispersion
+ parameter will be computed for each response (column), by default.
+ Setting \code{onedpar=TRUE} will pool them so that there is only one
+ dispersion parameter to be estimated.
+
+ }
+ \item{parallel}{
+ A logical or formula. Used only if \code{mv} is \code{TRUE}. This
+ argument allows for the parallelism assumption whereby the regression
+ coefficients for a variable are constrained to be equal over the \eqn{M}
+ linear/additive predictors.
+
+ }
+ \item{earg}{
+ Extra argument optionally used by the link function.
+ See \code{\link{Links}} for more information.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The values must be from the set
+ \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
+ matrix response.
+
+ }
+}
+\details{
+ If the dispersion parameter is unknown, then the resulting estimate
+ is not fully a maximum likelihood estimate (see pp. 124--128 of McCullagh
+ and Nelder, 1989).
+
+ A dispersion parameter that is less/greater than unity corresponds to
+ under-/over-dispersion relative to the binomial model. Over-dispersion
+ is more common in practice.
+
+ Setting \code{mv=TRUE} is necessary when fitting a Quadratic RR-VGLM
+ (see \code{\link{cqo}}) because the response is a matrix of \eqn{M}
+ columns (e.g., one column per species). Then there will be \eqn{M}
+ dispersion parameters (one per column of the response matrix).
+
+ When used with \code{\link{cqo}} and \code{\link{cao}}, it may be
+ preferable to use the \code{\link{cloglog}} link.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ and \code{\link{cao}}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ If \code{mv} is \code{FALSE} (default), then the response can be of one
+ of three formats: a factor (first level taken as success), a vector of
+ proportions of success, or a 2-column matrix (first column = successes)
+ of counts. The argument \code{weights} in the modelling function can
+ also be specified. In particular, for a general vector of proportions,
+ you will need to specify \code{weights} because the number of trials
+ is needed.
+
+ If \code{mv} is \code{TRUE}, then the matrix response can only be of
+ one format: a matrix of 1's and 0's (1=success).
+
+ The call \code{binomialff(dispersion=0, ...)} is equivalent to
+ \code{quasibinomialff(...)}. The latter was written so that R users
+ of \code{quasibinomial()} would only need to add a ``\code{ff}''
+ to the end of the family function name.
+
+ Regardless of whether the dispersion parameter is to be estimated or
+ not, its value can be seen from the output from the \code{summary()}
+ of the object.
+
+% With the introduction of name spaces for the \pkg{VGAM} package,
+% \code{"ff"} can be dropped for this family function.
+
+}
+\seealso{
+ \code{\link{quasibinomialff}},
+ \code{\link{Links}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ \code{\link{cao}},
+ \code{\link{zibinomial}},
+ \code{\link[stats:Binomial]{binomial}}.
+}
+\section{Warning }{
+ With a multivariate response, assigning a known dispersion parameter
+ for \emph{each} response is not handled well yet. Currently, only
+ a single known dispersion parameter is handled well.
+
+}
+
+\examples{
+quasibinomialff()
+quasibinomialff(link="probit")
+
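+# Minimal sketch (hypothetical data) of the proportions-plus-weights
+# response format described in the Note: the number of trials is
+# supplied through the weights argument.
+set.seed(1)
+ndata = data.frame(x = runif(40), size = 50)
+ndata = transform(ndata, prop = rbinom(40, size=size, prob=plogis(-1+2*x)) / size)
+fit.wt = vglm(prop ~ x, binomialff, data=ndata, weights=size)
+coef(fit.wt, matrix=TRUE)
+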
+data(hunua)
+fit = vgam(agaaus ~ poly(altitude, 2), binomialff(link=cloglog), hunua)
+\dontrun{
+attach(hunua)
+plot(altitude, agaaus, col="blue", ylab="P(agaaus=1)",
+ main="Presence/absence of Agathis australis", las=1)
+o = order(altitude)
+lines(altitude[o], fitted(fit)[o], col="red", lwd=2)
+detach(hunua)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/biplot-methods.Rd b/man/biplot-methods.Rd
new file mode 100644
index 0000000..76dcabf
--- /dev/null
+++ b/man/biplot-methods.Rd
@@ -0,0 +1,39 @@
+\name{biplot-methods}
+\docType{methods}
+\alias{biplot,rrvglm-method}
+\alias{biplot,qrrvglm-method}
+\title{ Biplot of Constrained Regression Models }
+\description{
+ \code{biplot} is a generic function applied to RR-VGLMs and QRR-VGLMs, etc.
+ Only rank-1 and rank-2 models of these classes are handled.
+ For RR-VGLMs, the plot shows the second latent variable scores against
+ the first latent variable scores.
+
+}
+%\usage{
+% \S4method{biplot}{cao,Coef.cao}(object, ...)
+%}
+
+\section{Methods}{
+\describe{
+
+\item{x}{
+ The object from which the latent variables are
+ extracted and/or plotted.
+
+}
+}
+}
+\note{
+ See \code{\link{lvplot}}, which is closely related to biplots.
+
+}
+
+\keyword{methods}
+\keyword{classes}
+%\keyword{ ~~ other possible keyword(s)}
+\keyword{models}
+\keyword{regression}
+
+
+
diff --git a/man/bisa.Rd b/man/bisa.Rd
new file mode 100644
index 0000000..2300428
--- /dev/null
+++ b/man/bisa.Rd
@@ -0,0 +1,140 @@
+\name{bisa}
+\alias{bisa}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Birnbaum-Saunders Distribution Family Function }
+\description{
+ Estimates the shape and scale parameters of the
+ Birnbaum-Saunders distribution by maximum likelihood estimation.
+
+}
+\usage{
+bisa(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = 1, method.init = 1,
+ fsmax=9001, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lscale, lshape}{
+ Parameter link functions applied to the shape and scale parameters
+ (\eqn{a} and \eqn{b} below).
+ See \code{\link{Links}} for more choices.
+ A log link is the default for both because they are positive.
+
+ }
+ \item{iscale, ishape}{
+ Initial values for \eqn{a} and \eqn{b}.
+ A \code{NULL} means an initial value is chosen internally using
+ \code{method.init}.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1} or \code{2} which
+ specifies the initialization method. If failure to converge occurs
+ try the other value, or else specify a value for
+ \code{ishape} and/or \code{iscale}.
+
+ }
+ \item{fsmax}{
+ Integer. If the formula is intercept-only, or if the number of
+ observations \eqn{n} is less than \code{fsmax}, then Fisher scoring is
+ used (recommended); otherwise a BFGS quasi-Newton update formula for the
+ working weight matrices is used.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The default is none of them.
+ If used, choose one value from the set \{1,2\}.
+
+ }
+}
+\details{
+ The (two-parameter) Birnbaum-Saunders distribution
+ has a cumulative distribution function that can be written as
+ \deqn{F(y;a,b) = \Phi[ \xi(y/b)/a] }{%
+ F(y;a,b) = pnorm[xi(y/b)/a] }
+ where \eqn{\Phi(\cdot)}{pnorm()} is the
+ cumulative distribution function of a standard normal
+ (see \code{\link[stats]{pnorm}}),
+ \eqn{\xi(t) = \sqrt{t} - 1 / \sqrt{t}}{xi(t) = t^(0.5) - t^(-0.5)},
+ \eqn{y > 0},
+ \eqn{a>0} is the shape parameter, and
+ \eqn{b>0} is the scale parameter.
+ The mean of \eqn{Y} (which is the fitted value) is
+ \eqn{b(1 + a^2/2)}{b*(1 + a*a/2)},
+ and the variance is
+ \eqn{a^2 b^2 (1 + \frac{5}{4}a^2)}{a^2 b^2 (1 + (5/4)*a^2)}.
+ By default, \eqn{\eta_1=\log(a)}{eta1=log(a)} and
+ \eqn{\eta_2=\log(b)}{eta2=log(b)} for this family function.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+
+Birnbaum, Z. W. and Saunders, S. C. (1969).
+A new family of life distributions.
+\emph{Journal of Applied Probability}, \bold{6}, 319--327.
+
+Birnbaum, Z. W. and Saunders, S. C. (1969).
+Estimation for a family of life distributions with applications to fatigue.
+\emph{Journal of Applied Probability}, \bold{6}, 328--347.
+
+Engelhardt, M. and Bain, L. J. and Wright, F. T. (1981).
+Inferences on the parameters of the Birnbaum-Saunders fatigue
+life distribution based on maximum likelihood estimation.
+\emph{Technometrics}, \bold{23}, 251--256.
+
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+\emph{Continuous Univariate Distributions},
+2nd edition,
+Volume 2,
+New York: Wiley.
+
+
+}
+\author{ T. W. Yee }
+\note{
+ If the formula is intercept-only or \eqn{n} is sufficiently small,
+ then this family function implements Fisher scoring. This involves
+ computing an integral numerically.
+ Fisher scoring is generally recommended here, provided the integrals
+ can be computed successfully and do not take too long.
+
+ For \eqn{n} large and non-intercept-only formulas the BFGS quasi-Newton
+ update formula for the working weight matrices is used by default.
+ This is more numerically fraught.
+ Additionally, the estimated variance-covariance matrix may be inaccurate
+ or simply wrong! The standard errors must therefore be treated with
+ caution; these are computed in functions such as \code{vcov()} and
+ \code{summary()}.
+
+}
+%\section{Warning }{
+%}
+
+\seealso{
+ \code{\link{pbisa}},
+ \code{\link{inv.gaussianff}}.
+
+}
+\examples{
+y = rbisa(n=1000, shape=exp(-0.5), scale=exp(0.5))
+fit1 = vglm(y ~ 1, bisa, trace=TRUE)
+coef(fit1, matrix=TRUE)
+mean(y)
+fitted(fit1)[1:4]
+
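+# Illustrative check (assuming, as in the plot below, that Coef(fit1)
+# returns the shape then the scale): the fitted value should match the
+# mean formula b*(1 + a^2/2) given in Details.
+a = Coef(fit1)[1]; b = Coef(fit1)[2]
+b * (1 + a^2/2) - fitted(fit1)[1] # Should be near zero
+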
+\dontrun{hist(y, prob=TRUE)
+x = seq(0, max(y), len=200)
+lines(x, dbisa(x, Coef(fit1)[1], Coef(fit1)[2]), col="red")
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
new file mode 100644
index 0000000..85ca26b
--- /dev/null
+++ b/man/bisaUC.Rd
@@ -0,0 +1,81 @@
+\name{Bisa}
+\alias{Bisa}
+\alias{dbisa}
+\alias{pbisa}
+\alias{qbisa}
+\alias{rbisa}
+\title{The Birnbaum-Saunders Distribution}
+\description{
+ Density, distribution function, quantile function, and random
+ generation for the Birnbaum-Saunders distribution.
+
+}
+\usage{
+dbisa(x, shape, scale=1, log=FALSE)
+pbisa(q, shape, scale=1)
+qbisa(p, shape, scale=1)
+rbisa(n, shape, scale=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{shape, scale}{
+ the (positive) shape and scale parameters.
+
+ }
+ \item{log}{
+ Logical.
+ If \code{TRUE} then the logarithm of the density is returned.
+
+ }
+}
+\value{
+ \code{dbisa} gives the density,
+ \code{pbisa} gives the distribution function,
+ \code{qbisa} gives the quantile function, and
+ \code{rbisa} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ The Birnbaum-Saunders distribution
+ is a distribution which is used in survival analysis.
+ See \code{\link{bisa}}, the \pkg{VGAM} family function
+ for estimating the parameters,
+ for more details.
+
+}
+%\note{
+%}
+\seealso{
+ \code{\link{bisa}}.
+}
+\examples{
+\dontrun{
+x = seq(0, 6, len=400)
+plot(x, dbisa(x, shape=1), type="l", col="blue", ylab="Density", lwd=2,
+ main="X ~ Birnbaum-Saunders(shape, scale=1)", ylim=c(0,1.3), lty=3)
+lines(x, dbisa(x, shape=2), col="red", lty=2, lwd=2)
+lines(x, dbisa(x, shape=0.5), col="green", lty=1, lwd=2)
+legend(x=3, y=0.9, legend=paste("shape =",c(0.5,1,2)),
+ col=c("green","blue","red"), lty=1:3, lwd=2)
+
+shape = 1
+x = seq(0.0, 4, len=401)
+plot(x, dbisa(x, shape=shape), type="l", col="blue", las=1, ylab="",
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles", ylim=0:1)
+abline(h=0, col="blue", lty=2)
+lines(x, pbisa(x, shape=shape), col="red")
+probs = seq(0.1, 0.9, by=0.1)
+Q = qbisa(probs, shape=shape)
+lines(Q, dbisa(Q, shape=shape), col="purple", lty=3, type="h")
+pbisa(Q, shape=shape) - probs # Should be all zero
+abline(h=probs, col="purple", lty=3)
+lines(Q, pbisa(Q, shape), col="purple", lty=3, type="h")
+}
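+
+# Illustrative check (using the mean formula scale*(1 + shape^2/2)
+# documented in bisa()): compare a simulated mean with the formula.
+set.seed(1)
+mean(rbisa(n=10000, shape=1)) - 1.5 # Should be near zero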
+}
+\keyword{distribution}
+
+
diff --git a/man/bminz.Rd b/man/bminz.Rd
new file mode 100644
index 0000000..b7b5d77
--- /dev/null
+++ b/man/bminz.Rd
@@ -0,0 +1,48 @@
+\name{bminz}
+\alias{bminz}
+\docType{data}
+\title{ Body Mass Index of New Zealand Adults }
+\description{
+ The body mass indexes and ages from an approximate random
+ sample of 700 New Zealand adults.
+}
+\usage{data(bminz)}
+\format{
+ A data frame with 700 observations on the following 2 variables.
+ \describe{
+ \item{age}{a numeric vector; their age (years). }
+ \item{BMI}{a numeric vector; their body mass index, which is
+ their weight divided by the square of their height (kg /
+ \eqn{m^2}{m^2} ).}
+ }
+}
+\details{
+ They are a random sample from the Fletcher Challenge/Auckland Heart and
+ Health survey conducted in the early 1990s.
+
+ There are some outliers in the data set.
+
+ A variable \code{gender} would be useful, and may be added later.
+}
+\source{
+ Clinical Trials Research Unit, University of Auckland, New Zealand.
+}
+\references{
+MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
+Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
+Fletcher Challenge-University of Auckland Heart &
+Health Study: design and baseline findings.
+\emph{New Zealand Medical Journal},
+\bold{108}, 499--502.
+
+}
+\examples{
+\dontrun{
+data(bminz)
+attach(bminz); plot(age, BMI, col="blue"); detach(bminz)
+
+fit = vgam(BMI ~ s(age, df=c(2,4,2)), fam=lms.yjn, data=bminz, tr=TRUE)
+qtplot(fit, pcol="blue", tcol="brown", lcol="brown")
+}
+}
+\keyword{datasets}
diff --git a/man/brat.Rd b/man/brat.Rd
new file mode 100644
index 0000000..5df3a16
--- /dev/null
+++ b/man/brat.Rd
@@ -0,0 +1,124 @@
+\name{brat}
+\alias{brat}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bradley Terry Model }
+\description{
+ Fits a Bradley Terry model (intercept-only model) by maximum
+ likelihood estimation.
+}
+\usage{
+brat(refgp = "last", refvalue = 1, init.alpha = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{refgp}{ Integer whose value must be from the set
+ \{1,\ldots,\eqn{M+1}\},
+ where there are \eqn{M+1} competitors. The default value indicates
+ that the last competitor is used; in general, do not input a
+ character string.
+ }
+ \item{refvalue}{ Numeric. A positive value for the reference group. }
+ \item{init.alpha}{ Initial values for the \eqn{\alpha}{alpha}s.
+ These are recycled to the appropriate length. }
+}
+\details{
+ The Bradley Terry model involves \eqn{M+1} competitors who either
+win or lose against each other (no draws/ties are allowed in this
+implementation; see \code{\link{bratt}} if there are ties).
+The probability that Competitor \eqn{i} beats Competitor \eqn{j} is
+\eqn{\alpha_i / (\alpha_i+\alpha_j)}{alpha_i / (alpha_i + alpha_j)},
+where all the \eqn{\alpha}{alpha}s are positive.
+Loosely, the \eqn{\alpha}{alpha}s can be thought of as the
+competitors' `abilities'.
+For identifiability, one of the \eqn{\alpha_i}{alpha_i} is set to a
+known value \code{refvalue}, e.g., 1.
+By default, this function chooses the last competitor to
+have this reference value.
+The data can be represented in the form of an
+\eqn{M+1} by \eqn{M+1} matrix of counts,
+where winners are the rows and losers are the columns.
+However, this is not the way the data should be inputted (see below).
+
+Excluding the reference value/group, this function chooses
+\eqn{\log(\alpha_j)}{log(alpha_j)} as the \eqn{M} linear predictors.
+The log link ensures that the \eqn{\alpha}{alpha}s are positive.
+
+The Bradley Terry model can be fitted by logistic regression, but this
+approach is not taken here. The Bradley Terry model can be fitted
+with covariates, e.g., a home advantage variable, but unfortunately,
+this lies outside the VGLM theoretical framework and therefore cannot
+be handled with this code.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}.
+}
+\references{
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+Stigler, S. (1994)
+Citation patterns in the journals of statistics and probability.
+\emph{Statistical Science},
+\bold{9}, 94--108.
+
+The \code{BradleyTerry} package has more comprehensive capabilities
+than this function.
+}
+\author{ T. W. Yee }
+\note{
+The function \code{\link{Brat}} is useful for coercing an \eqn{M+1}
+by \eqn{M+1} matrix of counts into a one-row matrix suitable for
+\code{brat}.
+Diagonal elements are skipped, and the usual S order of elements, as in
+\code{c(a.matrix)}, is used. There should be no missing
+values apart from the diagonal elements of the square matrix.
+The matrix should have winners as the rows, and losers
+as the columns.
+In general, the response should be a 1-row matrix with \eqn{M(M+1)} columns.
+
+Only an intercept model is recommended with \code{brat}.
+It does not really make sense to include covariates because of the
+limited VGLM framework.
+
+Notationally, note that the \pkg{VGAM} family function \code{\link{brat}}
+has \eqn{M+1} contestants, while \code{bratt} has \eqn{M} contestants.
+
+}
+\section{Warning }{
+Presently, the residuals are wrong, and the prior
+weights are not handled correctly.
+Ideally, the total number of counts should be the
+prior weights, after the response has been converted to
+proportions. This would make it similar to family
+functions such as \code{\link{multinomial}} and
+\code{\link{binomialff}}.
+}
+
+\seealso{
+\code{\link{bratt}},
+\code{\link{Brat}},
+\code{\link{multinomial}},
+\code{\link{binomialff}}.
+}
+\examples{
+# citation statistics: being cited is a 'win'; citing is a 'loss'
+journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
+m = matrix(c( NA, 33, 320, 284,
+ 730, NA, 813, 276,
+ 498, 68, NA, 325,
+ 221, 17, 142, NA), 4,4)
+dimnames(m) = list(winner = journal, loser = journal)
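+
+# Illustrative peek (not part of the original example): Brat() flattens
+# the square matrix of counts into a 1-row response with M*(M+1) = 12 columns.
+dim(Brat(m)) # Should be 1 and 12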
+fit = vglm(Brat(m) ~ 1, brat(refgp=1), trace=TRUE)
+fit = vglm(Brat(m) ~ 1, brat(refgp=1), trace=TRUE, cri="c")
+summary(fit)
+c(0, coef(fit)) # log-abilities (in order of "journal")
+c(1, Coef(fit)) # abilities (in order of "journal")
+fitted(fit) # probabilities of winning in awkward form
+(check = InverseBrat(fitted(fit))) # probabilities of winning
+check + t(check) # Should be 1's in the off-diagonals
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/bratt.Rd b/man/bratt.Rd
new file mode 100644
index 0000000..e3b51bd
--- /dev/null
+++ b/man/bratt.Rd
@@ -0,0 +1,137 @@
+\name{bratt}
+\alias{bratt}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Bradley Terry Model With Ties }
+\description{
+ Fits a Bradley Terry model with ties (intercept-only model) by maximum
+likelihood estimation.
+}
+\usage{
+bratt(refgp = "last", refvalue = 1, init.alpha = 1, i0 = 0.01)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{refgp}{ Integer whose value must be from the set
+ \{1,\ldots,\eqn{M}\},
+ where there are \eqn{M} competitors. The default value indicates
+ that the last competitor is used; in general, do not input a
+ character string.
+ }
+ \item{refvalue}{ Numeric. A positive value for the reference group. }
+ \item{init.alpha}{ Initial values for the \eqn{\alpha}{alpha}s.
+ These are recycled to the appropriate length. }
+ \item{i0}{ Initial value for \eqn{\alpha_0}{alpha_0}.
+ If convergence fails, try another positive value. }
+}
+\details{
+ There are several models that extend the ordinary Bradley Terry model
+ to handle ties. This family function implements one of these models.
+ It involves \eqn{M} competitors who either
+ win or lose or tie against each other.
+ (If there are no draws/ties then use \code{\link{brat}}).
+The probability that Competitor \eqn{i} beats Competitor \eqn{j} is
+\eqn{\alpha_i / (\alpha_i+\alpha_j+\alpha_0)}{alpha_i / (alpha_i +
+alpha_j + alpha_0)},
+where all the \eqn{\alpha}{alpha}s are positive.
+ The probability that Competitor \eqn{i} ties with Competitor \eqn{j} is
+\eqn{\alpha_0 / (\alpha_i+\alpha_j+\alpha_0)}{alpha_0 / (alpha_i +
+alpha_j + alpha_0)}.
+ Loosely, the \eqn{\alpha}{alpha}s can be thought of as the
+competitors' `abilities', and \eqn{\alpha_0}{alpha_0} is an
+added parameter to model ties.
+For identifiability, one of the \eqn{\alpha_i}{alpha_i} is set to a
+known value \code{refvalue}, e.g., 1.
+By default, this function chooses the last competitor to
+have this reference value.
+The data can be represented in the form of an
+\eqn{M} by \eqn{M} matrix of counts,
+where winners are the rows and losers are the columns.
+However, this is not the way the data should be inputted (see below).
+
+Excluding the reference value/group, this function chooses
+\eqn{\log(\alpha_j)}{log(alpha_j)} as the first \eqn{M-1} linear predictors.
+The log link ensures that the \eqn{\alpha}{alpha}s are positive.
+The last linear predictor is \eqn{\log(\alpha_0)}{log(alpha_0)}.
+
+The Bradley Terry model can be fitted with covariates, e.g., a home
+advantage variable, but unfortunately, this lies outside the VGLM
+theoretical framework and therefore cannot be handled with this code.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}.
+}
+\references{
+
+ Torsney, B. (2004)
+ Fitting Bradley Terry models using a multiplicative algorithm.
+ In: Antoch, J. (ed.)
+ \emph{Proceedings in Computational Statistics COMPSTAT 2004},
+ Physica-Verlag: Heidelberg. Pages 513--526.
+
+}
+\author{ T. W. Yee }
+\note{
+The function \code{\link{Brat}} is useful for coercing an \eqn{M}
+by \eqn{M} matrix of counts into a one-row matrix suitable for
+\code{bratt}.
+Diagonal elements are skipped, and the usual S order of elements, as in
+\code{c(a.matrix)}, is used. There should be no missing
+values apart from the diagonal elements of the square matrix.
+The matrix should have winners as the rows, and losers
+as the columns.
+In general, the response should be a matrix with \eqn{M(M-1)} columns.
+
+Also, a symmetric matrix of ties should be passed into
+\code{\link{Brat}}. The diagonal of this matrix should be all
+\code{NA}s.
+
+Only an intercept model is recommended with \code{bratt}.
+It does not really make sense to include covariates because of the
+limited VGLM framework.
+
+Notationally, note that the \pkg{VGAM} family function \code{\link{brat}}
+has \eqn{M+1} contestants, while \code{bratt} has \eqn{M} contestants.
+
+
+}
+
+\seealso{
+\code{\link{brat}},
+\code{\link{Brat}},
+\code{\link{binomialff}}.
+}
+\examples{
+# citation statistics: being cited is a 'win'; citing is a 'loss'
+journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
+m = matrix(c( NA, 33, 320, 284,
+ 730, NA, 813, 276,
+ 498, 68, NA, 325,
+ 221, 17, 142, NA), 4,4)
+dimnames(m) = list(winner = journal, loser = journal)
+
+# Add some ties. This is fictional data.
+ties = 5 + 0*m
+ties[2,1] = ties[1,2] = 9
+
+# Now fit the model
+fit = vglm(Brat(m, ties) ~ 1, bratt(refgp=1), trace=TRUE)
+fit = vglm(Brat(m, ties) ~ 1, bratt(refgp=1), trace=TRUE, cri="c")
+
+summary(fit)
+c(0, coef(fit)) # log-abilities (in order of "journal"); last is log(alpha0)
+c(1, Coef(fit)) # abilities (in order of "journal"); last is alpha0
+
+fit@misc$alpha # alpha_1,...,alpha_M
+fit@misc$alpha0 # alpha_0
+
+fitted(fit) # probabilities of winning and tying, in awkward form
+predict(fit)
+(check = InverseBrat(fitted(fit))) # probabilities of winning
+qprob = attr(fitted(fit), "probtie") # probabilities of a tie
+qprobmat = InverseBrat(c(qprob), NCo=nrow(ties)) # probabilities of a tie
+check + t(check) + qprobmat # Should be 1's in the off-diagonals
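+
+# Illustrative check (assuming winners index the rows of the matrix
+# returned by InverseBrat()): reconstruct P(1 beats 2) from the abilities.
+alpha = fit@misc$alpha; alpha0 = fit@misc$alpha0
+alpha[1] / (alpha[1] + alpha[2] + alpha0) - check[1,2] # Should be near zero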
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/calibrate-methods.Rd b/man/calibrate-methods.Rd
new file mode 100644
index 0000000..5146fb1
--- /dev/null
+++ b/man/calibrate-methods.Rd
@@ -0,0 +1,36 @@
+\name{calibrate-methods}
+\docType{methods}
+\alias{calibrate,rrvglm-method}
+\alias{calibrate,Coef.qrrvglm-method}
+\title{ Calibration for Constrained Regression Models }
+\description{
+ \code{calibrate} is a generic function applied to QRR-VGLMs and
+ RR-VGAMs etc.
+
+}
+%\usage{
+% \S4method{calibrate}{cao,Coef.cao}(object, ...)
+%}
+
+\section{Methods}{
+\describe{
+
+\item{object}{
+ The object from which the calibration is performed.
+
+}
+}
+}
+%\note{
+% See \code{\link{lvplot}} which is very much related to biplots.
+%
+%}
+
+\keyword{methods}
+\keyword{classes}
+%\keyword{ ~~ other possible keyword(s)}
+\keyword{models}
+\keyword{regression}
+
+
+
diff --git a/man/calibrate.Rd b/man/calibrate.Rd
new file mode 100644
index 0000000..895bbeb
--- /dev/null
+++ b/man/calibrate.Rd
@@ -0,0 +1,81 @@
+\name{calibrate}
+\alias{calibrate}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Model Calibrations }
+\description{
+ \code{calibrate} is a generic function used to produce calibrations
+ from various model fitting functions. The function invokes
+ particular `methods' which depend on the `class' of the first
+ argument.
+
+}
+\usage{
+calibrate(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which a calibration is desired. }
+ \item{\dots}{ Additional arguments affecting the calibration produced.
+ Usually the most important argument in \code{\dots} is
+ \code{newdata} which, for \code{calibrate}, contains new
+ \emph{response} data, \bold{Y}, say. }
+}
+\details{
+ Given a regression model with explanatory variables \bold{X} and
+ response \bold{Y},
+ calibration involves estimating \bold{X} from \bold{Y} using the
+ regression model.
+ It can be loosely thought of as the opposite of \code{\link{predict}}
+ (which takes an \bold{X} and returns a \bold{Y}).
+
+}
+\value{
+ In general, given a new response \bold{Y}, the explanatory
+ variables \bold{X} are returned.
+ However, for constrained ordination models such as CQO and CAO models,
+ it is usually not possible to return \bold{X}, so the latent
+ variables are returned instead (they are
+ linear combinations of the \bold{X}).
+ See the specific \code{calibrate} methods functions to see
+ what they return.
+}
+%\references{
+%}
+\author{ T. W. Yee }
+\note{
+ This function was not called \code{predictx} because of the
+ inability of constrained ordination models to return \bold{X};
+ they can only return the latent variable values (site scores) instead.
+}
+
+
+\seealso{
+ \code{\link{predict}},
+ \code{\link{calibrate.qrrvglm}}.
+}
+
+\examples{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(123)
+p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1,
+ df1.nl = c(Zoraspin=2, 1.9),
+ Bestof = 3, Crow1positive = TRUE)
+
+siteNos = 1:2 # Calibrate these sites
+cp1 = calibrate(p1, new=data.frame(p1@y[siteNos,]), trace=TRUE)
+
+\dontrun{
+# Graphically compare the actual site scores with their calibrated values
+persp(p1, main="Solid=actual, dashed=calibrated site scores",
+ label=TRUE, col="blue", las=1)
+abline(v=lv(p1)[siteNos], lty=1, col=1:length(siteNos)) # actual site scores
+abline(v=cp1, lty=2, col=1:length(siteNos)) # calibrated values
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
new file mode 100644
index 0000000..591007e
--- /dev/null
+++ b/man/calibrate.qrrvglm.Rd
@@ -0,0 +1,140 @@
+\name{calibrate.qrrvglm}
+\alias{calibrate.qrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Calibration for CQO, UQO and CAO models }
+\description{
+ Performs maximum likelihood calibration for constrained and
+ unconstrained quadratic and additive ordination models (CQO and CAO
+ models are better known as QRR-VGLMs and RR-VGAMs respectively).
+
+}
+\usage{
+calibrate.qrrvglm(object, newdata = NULL,
+ type=c("lv","predictors","response","vcov","all3or4"),
+ initial.vals = NULL, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ The fitted CQO/CAO model.
+ }
+ \item{newdata}{ A data frame with new response data
+ (usually new species data).
+ The default is to use the original data used to fit the model;
+ however, the calibration may take a long time to compute
+ because the computations are expensive.
+ }
+ \item{type}{ What type of result is to be returned.
+ The first choice is the calibrated latent variables or site scores;
+ these are always computed.
+ The \code{"predictors"} are the linear/quadratic or additive
+ predictors evaluated at the calibrated latent variables or site
+ scores.
+ The \code{"response"} are the fitted means evaluated at the
+ calibrated latent variables or site scores.
+ The \code{"vcov"} are the estimated variance-covariance
+ matrices of the
+ calibrated latent variables or site scores.
+ The \code{"all3or4"} is for all of them, i.e., all \code{type}s.
+ For CAO models,
+ \code{"vcov"} is unavailable, so all 3 are returned.
+ For CQO models,
+ \code{"vcov"} is available, so all 4 are returned.
+ }
+ \item{initial.vals}{ Initial values for the search.
+ For rank-1 models, this should be a vector of length
+ \code{nrow(newdata)}, and for rank-2 models
+ this should be a two-column matrix with the number of rows equalling
+ the number of rows in \code{newdata}.
+ The default is a grid defined by arguments in
+ \code{\link{calibrate.qrrvglm.control}}.
+ }
+ \item{\dots}{ Arguments that are fed into
+ \code{\link{calibrate.qrrvglm.control}}. }
+}
+\details{
+ Given a fitted regression CQO/CAO model,
+ maximum likelihood calibration is theoretically easy and elegant.
+ However, the method assumes that all species are
+ independent, which is not really true in practice.
+ More details and references are given in Yee (2005).
+
+ The function \code{\link[stats]{optim}} is used to search for
+ the maximum likelihood solution. Good initial values are
+ needed, and \code{\link{calibrate.qrrvglm.control}}
+ allows the user some control over the choice of these.
+}
+\value{
+ The argument \code{type} determines what is returned.
+ If \code{type="all3or4"} then all the \code{type} values are returned
+ in a list, with the following components.
+ Each component has length \code{nrow(newdata)}.
+
+ \item{lv}{Calibrated latent variables or site scores. }
+ \item{predictors }{linear/quadratic or additive predictors.
+ For example, for Poisson families, this will be on a log scale,
+ and for binomial families, this will be on a logit scale.}
+ \item{response}{Fitted values of the response, evaluated at the
+ calibrated latent variables or site scores.}
+ \item{vcov}{Estimated variance-covariance matrix of the
+ calibrated latent variables or site scores.
+ Actually, these are stored in an array whose last dimension
+ is \code{nrow(newdata)}.
+ }
+}
+\references{
+Yee, T. W. (2005)
+On constrained and unconstrained
+quadratic ordination.
+\emph{Manuscript in preparation}.
+
+ter Braak, C. J. F. 1995.
+Calibration. In:
+\emph{Data Analysis in Community and Landscape Ecology}
+by Jongman, R. H. G., ter Braak, C. J. F. and
+van Tongeren, O. F. R. (Eds.)
+Cambridge University Press,
+Cambridge.
+
+}
+\author{T. W. Yee}
+\note{
+ Despite the name of this function, UQO and CAO models are handled
+ as well.
+}
+\section{Warning }{
+ This function is computationally expensive.
+ Setting \code{trace=TRUE} to get a running log is a good idea.
+}
+
+\seealso{
+ \code{\link{calibrate.qrrvglm.control}},
+ \code{\link{calibrate}},
+ \code{\link{cqo}},
+ \code{\link{uqo}},
+ \code{\link{cao}}.
+}
+\examples{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardize the environmental variables
+set.seed(123)
+p1 = cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1,
+ IToler = TRUE, Crow1positive = TRUE)
+
+siteNos = 1:2 # Calibrate these sites
+cp1 = calibrate(p1, new=data.frame(p1@y[siteNos,]), trace=TRUE)
+
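+# Illustrative use of the type argument (see Value above): request all
+# four components in one call.
+cp1.all = calibrate(p1, new=data.frame(p1@y[siteNos,]), type="all3or4")
+names(cp1.all) # Should include "lv", "predictors", "response", "vcov"
+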
+\dontrun{
+# Graphically compare the actual site scores with their calibrated values
+persp(p1, main="Site scores: solid=actual, dashed=calibrated",
+ label=TRUE, col="blue", las=1)
+abline(v=lv(p1)[siteNos], lty=1, col=1:length(siteNos)) # actual site scores
+abline(v=cp1, lty=2, col=1:length(siteNos)) # calibrated values
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
new file mode 100644
index 0000000..625fd5f
--- /dev/null
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -0,0 +1,106 @@
+\name{calibrate.qrrvglm.control}
+\alias{calibrate.qrrvglm.control}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Control function for CQO/UQO/CAO calibration }
+\description{
+ Algorithmic constants and parameters for running
+ \code{\link{calibrate.qrrvglm}} are set using this function.
+
+}
+\usage{
+calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
+ gridSize = if (Rank == 1) 9 else 5,
+ varlvI = FALSE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ The fitted CQO/UQO/CAO model. The user should ignore this argument.
+
+ }
+ \item{trace}{
+ Logical indicating if output should be produced for each iteration. It
+ is a good idea to set this argument to \code{TRUE} since the
+ computations are expensive.
+
+ }
+\item{Method.optim}{
+ Character. Fed into the \code{method} argument of
+ \code{\link[stats]{optim}}.
+
+ }
+\item{gridSize}{
+ Numeric, recycled to length \code{Rank}. Controls the resolution
+ of the grid used for initial values. For each latent variable,
+ an equally spaced grid of length \code{gridSize} is cast from the
+ smallest site score to the largest site score. Then the likelihood
+ function is evaluated on the grid, and the best fit is chosen as the
+ initial value. Thus increasing the value of \code{gridSize} increases
+ the chance of obtaining the global solution; however, the computing
+ time increases proportionately.
+
+ }
+ \item{varlvI}{
+ Logical. For CQO objects only, this argument is fed into
+ \code{\link{Coef.qrrvglm}}.
+
+ }
+ \item{\dots}{
+ Avoids an error message for extraneous arguments.
+
+ }
+}
+\details{
+ Most CQO/CAO users will only need to make use of \code{trace}
+ and \code{gridSize}. These arguments should be used inside their
+ call to \code{\link{calibrate.qrrvglm}}, not this function
+ directly.
+ }
+\value{
+ A list with the following components.
+ \item{trace }{Numeric (even though the input can be logical). }
+ \item{gridSize }{Positive integer. }
+ \item{varlvI }{Logical.}
+}
+\references{
+Yee, T. W. (2005)
+On constrained and unconstrained quadratic ordination.
+\emph{Manuscript in preparation}.
+
+}
+\author{T. W. Yee}
+\note{
+ Despite the name of this function, UQO and CAO models are handled
+ as well.
+
+}
+
+\seealso{
+ \code{\link{calibrate.qrrvglm}},
+ \code{\link{Coef.qrrvglm}}.
+}
+\examples{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Needed when ITol = TRUE
+set.seed(123)
+p1 = cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
+ Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, ITol = TRUE)
+sort(p1@misc$deviance.Bestof) # A history of all the iterations
+
+siteNos = 1:2 # Calibrate these sites
+cp1 = calibrate(p1, new=data.frame(p1@y[siteNos,]), trace=TRUE)
+
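+# Illustrative sketch: control arguments such as gridSize are passed
+# through calibrate() into this function; a finer grid costs more time.
+cp1.fine = calibrate(p1, new=data.frame(p1@y[siteNos,]), gridSize=17, trace=TRUE)
+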
+\dontrun{
+# Graphically compare the actual site scores with their calibrated values
+persp(p1, main="Site scores: solid=actual, dashed=calibrated",
+ label=TRUE, col="blue", las=1)
+abline(v=lv(p1)[siteNos], lty=1, col=1:length(siteNos)) # actual site scores
+abline(v=cp1, lty=2, col=1:length(siteNos)) # calibrated values
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cao.Rd b/man/cao.Rd
new file mode 100644
index 0000000..8503e83
--- /dev/null
+++ b/man/cao.Rd
@@ -0,0 +1,311 @@
+\name{cao}
+\alias{cao}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Fitting Constrained Additive Ordination (CAO) }
+\description{
+ A constrained additive ordination (CAO) model is fitted using the
+ \emph{reduced-rank vector generalized additive model} (RR-VGAM)
+ framework.
+
+}
+\usage{
+cao(formula, family, data = list(),
+ weights = NULL, subset = NULL, na.action = na.fail,
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ control = cao.control(...), offset = NULL,
+ method = "cao.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL, constraints = NULL,
+ extra = NULL, qr.arg = FALSE, smart = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ The arguments of \code{cao} are a mixture of those from
+ \code{\link{vgam}} and \code{\link{cqo}}, but with some extras
+ in \code{\link{cao.control}}. Currently, not all of the following
+ arguments work properly.
+
+ \item{formula}{
+ a symbolic description of the model to be fit. The RHS of the
+ formula is used to construct the latent variables, upon which the
+ smooths are applied. All the variables in the formula are used
+ for the construction of latent variables except for those specified
+ by the argument \code{Norrr}, which is itself a formula. The LHS
+ of the formula contains the response variables, which should be a
+ matrix with each column being a response (species).
+
+ }
+ \item{family}{
+ a function of class \code{"vglmff"} describing what statistical
+ model is to be fitted.
+ See \code{\link{cqo}} for a list of those presently implemented.
+
+ }
+
+ \item{data}{
+ an optional data frame containing the variables in the model.
+ By default the variables are taken from \code{environment(formula)},
+ typically the environment from which \code{cao} is called.
+
+ }
+ \item{weights}{
+ an optional vector or matrix of (prior) weights to be used in the
+ fitting process. For \code{cao}, this argument currently should
+ not be used.
+
+ }
+ \item{subset}{
+ an optional logical vector specifying a subset of observations to
+ be used in the fitting process.
+
+ }
+ \item{na.action}{
+ a function which indicates what should happen when the data contain
+ \code{NA}s. The default is set by the \code{na.action} setting of
+ \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
+ The ``factory-fresh'' default is \code{na.omit}.
+
+ }
+ \item{etastart}{
+ starting values for the linear predictors. It is a \eqn{M}-column
+ matrix. If \eqn{M=1} then it may be a vector. For \code{cao},
+ this argument currently should not be used.
+
+ }
+ \item{mustart}{
+ starting values for the fitted values. It can be a vector or a
+ matrix. Some family functions do not make use of this argument.
+ For \code{cao}, this argument currently should not be used.
+
+ }
+ \item{coefstart}{
+ starting values for the coefficient vector. For \code{cao}, this
+ argument currently should not be used.
+
+ }
+ \item{control}{
+ a list of parameters for controlling the fitting process.
+ See \code{\link{cao.control}} for details.
+
+ }
+ \item{offset}{
+ a vector or \eqn{M}-column matrix of offset values. These are
+ \emph{a priori} known and are added to the linear predictors during
+ fitting. For \code{cao}, this argument currently should not be used.
+
+ }
+ \item{method}{
+ the method to be used in fitting the model. The default
+ (and presently only) method \code{cao.fit} uses iteratively
+ reweighted least squares (IRLS) within FORTRAN code called from
+ \code{\link[stats]{optim}}.
+
+ }
+ \item{model}{
+ a logical value indicating whether the \emph{model frame} should be
+ assigned in the \code{model} slot.
+
+ }
+ \item{x.arg, y.arg}{
+ logical values indicating whether the model matrix and response
+ vector/matrix used in the fitting process should be assigned in the
+ \code{x} and \code{y} slots. Note the model matrix is the linear
+ model (LM) matrix.
+
+ }
+ \item{contrasts}{
+ an optional list. See the \code{contrasts.arg} of
+ \code{\link{model.matrix.default}}.
+
+ }
+ \item{constraints}{
+ an optional list of constraint matrices. For \code{cao}, this
+ argument currently should not be used. The components of the list
+ must be named with the term it corresponds to (and it must match
+ in character format). Each constraint matrix must have \eqn{M}
+ rows, and be of full-column rank. By default, constraint matrices
+ are the \eqn{M} by \eqn{M} identity matrix unless arguments in the
+ family function itself override these values. If \code{constraints}
+ is used it must contain \emph{all} the terms; an incomplete list is
+ not accepted.
+
+ }
+ \item{extra}{
+ an optional list with any extra information that might be needed by
+ the family function. For \code{cao}, this argument currently should
+ not be used.
+
+ }
+ \item{qr.arg}{
+ For \code{cao}, this argument currently should not be used.
+
+ }
+ \item{smart}{
+ logical value indicating whether smart prediction
+ (\code{\link{smartpred}}) will be used.
+
+ }
+ \item{\dots}{
+ further arguments passed into \code{\link{cao.control}}.
+
+ }
+}
+\details{
+ CAO can be loosely thought of as the result of fitting generalized
+ additive models (GAMs) to several responses (e.g., species) against
+ a very small number of latent variables. Each latent variable is a
+ linear combination of the explanatory variables; the coefficients
+ \bold{C} (called \eqn{C} below) are called \emph{constrained
+ coefficients} or \emph{canonical coefficients}, and are interpreted as
+ weights or loadings. The \bold{C} are estimated by maximum likelihood
+ estimation. It is often a good idea to apply \code{\link[base]{scale}}
+ to each explanatory variable first.
+
+ For each response (e.g., species), each latent variable is smoothed
+ by a cubic smoothing spline, thus CAO is data-driven. If each smooth
+ were a quadratic then CAO would simplify to \emph{constrained quadratic
+ ordination} (CQO; formerly called \emph{canonical Gaussian ordination}
+ or CGO).
+ If each smooth were linear then CAO would simplify to \emph{constrained
+ linear ordination} (CLO). CLO can theoretically be fitted with
+ \code{cao} by specifying \code{df1.nl=0}, however it is more efficient
+ to use \code{\link{rrvglm}}.
+
+ Currently, only \code{Rank=1} is implemented, and only
+ \code{Norrr = ~1} models are handled.
+
+% Poisson and binary responses are implemented (viz.,
+% \code{\link{poissonff}}, \code{\link{binomialff}}), and
+% dispersion parameters for these must be assumed known. Hence using
+% \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}} will
+% currently fail. Also, currently, only \code{Norrr = ~ 1} models are
+% handled.
+
+ With binomial data, the default formula is
+ \deqn{logit(P[Y_s=1]) = \eta_s = f_s(\nu), \ \ \ s=1,2,\ldots,S}{%
+ logit(P[Y_s=1]) = eta_s = f_s(nu), s=1,2,...,S}
+ where \eqn{x_2}{x_2} is a vector of environmental variables, and
+ \eqn{\nu=C^T x_2}{nu=C^T x_2} is an \eqn{R}-vector of latent variables.
+ The \eqn{\eta_s}{eta_s} is an additive predictor for species \eqn{s},
+ and it models the probabilities of presence as an additive model on
+ the logit scale. The matrix \eqn{C} is estimated from the data, as
+ well as the smooth functions \eqn{f_s}. The argument \code{Norrr = ~
+ 1} specifies that the vector \eqn{x_1}{x_1}, defined for RR-VGLMs
+ and QRR-VGLMs, is simply a 1 for an intercept.
+ Here, the intercept in the model is absorbed into the functions.
+ A \code{\link{cloglog}} link may be preferable over a
+ \code{\link{logit}} link.
+
+ With Poisson count data, the formula is
+ \deqn{\log(E[Y_s]) = \eta_s = f_s(\nu)}{%
+ log(E[Y_s]) = eta_s = f_s(nu)}
+ which models the mean response as an additive model on the log scale.
+
+ The fitted latent variables (site scores) are scaled to have
+ unit variance. The concept of a tolerance is undefined for
+ CAO models, but the optima and maxima are defined. The generic
+ functions \code{\link{Max}} and \code{\link{Opt}} should work for
+ CAO objects, but note that if the maximum occurs at the boundary then
+ \code{\link{Max}} will return a \code{NA}. Inference for CAO models
+ is currently undeveloped.
+
+}
+\value{
+ An object of class \code{"cao"}
+ (this may change to \code{"rrvgam"} in the future).
+ Several generic functions can be applied to the object, e.g.,
+ \code{\link{Coef}}, \code{\link{ccoef}}, \code{\link{lvplot}},
+ \code{\link{summary}}.
+
+}
+
+\references{
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{T. W. Yee}
+\note{
+ CAO models are computationally expensive; therefore setting \code{trace
+ = TRUE} is a good idea, as is running the model on a simple random
+ sample of the data set first.
+
+ Sometimes the IRLS algorithm does not converge within the FORTRAN
+ code. This results in warnings being issued. In particular, if an
+ error code of 3 is issued, then this indicates the IRLS algorithm has
+ not converged. One possible remedy is to increase or decrease
+ the nonlinear degrees of freedom so that the curves become more or
+ less flexible, respectively.
+
+}
+\section{Warning }{
+ CAO models present a difficult optimization problem, partly because the
+ log-likelihood function contains many local solutions. To obtain the
+ (global) solution the user is advised to try many initial values. This
+ can be done by setting \code{Bestof} to some appropriate value (see
+ \code{\link{cao.control}}). Trying many initial values becomes
+ progressively more important as the nonlinear degrees of freedom of
+ the smooths increase. Use \code{\link[base:Random]{set.seed}} to make your
+ results reproducible.
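+
+ A minimal sketch of this strategy, reusing the spider data from the
+ example below (the \code{Bestof} value is illustrative only):
+ \preformatted{
+    set.seed(123)  # Make the random initial values reproducible
+    fit = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
+              WaterCon + BareSand + FallTwig +
+              CoveMoss + CoveHerb + ReflLux,
+              family = poissonff, data = hspider, Bestof = 20)
+    sort(fit@misc$deviance.Bestof)  # Spread of the 20 solutions
+ }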
+
+% The code is a little fragile at this stage, so the function might
+% hang/lock up in the microsoft Windows version.
+
+}
+
+\seealso{
+ \code{\link{cao.control}},
+ \code{Coef.cao},
+ \code{\link{cqo}},
+ \code{\link{lv}},
+ \code{\link{Opt}},
+ \code{\link{Max}},
+ \code{persp.cao},
+ \code{\link{poissonff}},
+ \code{\link{binomialff}},
+ \code{\link{negbinomial}},
+ \code{\link{gamma2}},
+ \code{\link{gaussianff}},
+ \code{\link[base:Random]{set.seed}},
+ \code{gam}.
+}
+
+\examples{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(149)
+ap1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1,
+ df1.nl = c(Pardpull=2.7, 2.5),
+ Bestof = 7, Crow1positive = FALSE)
+sort(ap1@misc$deviance.Bestof) # A history of all the iterations
+
+Coef(ap1)
+ccoef(ap1)
+
+\dontrun{
+par(mfrow=c(2,2))
+plot(ap1) # All the curves are unimodal; some quite symmetric
+
+par(mfrow=c(1,1), las=1)
+index = 1:ncol(ap1@y)
+lvplot(ap1, lcol=index, pcol=index, y=TRUE)
+
+trplot(ap1, label=TRUE, col=index)
+abline(a=0, b=1, lty=2)
+
+trplot(ap1, label=TRUE, col="blue", log="xy", whichSp=c(1,3))
+abline(a=0, b=1, lty=2)
+
+persp(ap1, col=index, lwd=2, label=TRUE)
+abline(v=Opt(ap1), lty=2, col=index)
+abline(h=Max(ap1), lty=2, col=index)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
new file mode 100644
index 0000000..d88fe04
--- /dev/null
+++ b/man/cao.control.Rd
@@ -0,0 +1,348 @@
+\name{cao.control}
+\alias{cao.control}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Control Function for RR-VGAMs (CAO) }
+\description{
+ Algorithmic constants and parameters for a constrained additive
+ ordination (CAO), by fitting a \emph{reduced-rank vector generalized
+ additive model} (RR-VGAM), are set using this function.
+ This is the control function for \code{\link{cao}}.
+
+}
+\usage{
+cao.control(Rank=1, all.knots = FALSE,
+ criterion="deviance",
+ Cinit=NULL,
+ Crow1positive=TRUE,
+ epsilon = 1.0e-05,
+ Etamat.colmax = 10,
+ GradientFunction=FALSE,
+ iKvector = 0.1,
+ iShape = 0.1,
+ Norrr = ~ 1,
+ SmallNo = 5.0e-13,
+ Use.Init.Poisson.QO=TRUE,
+ Bestof = if(length(Cinit)) 1 else 10,
+ maxitl = 40,
+ method.init = 1,
+ bf.epsilon = 1.0e-7,
+ bf.maxit = 40,
+ Maxit.optim = 250,
+ optim.maxit = 20,
+ SD.sitescores = 1.0,
+ SD.Cinit = 0.02,
+ trace = TRUE,
+ df1.nl = 2.5, df2.nl = 2.5,
+ spar1 = 0, spar2 = 0, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ Many of these arguments are identical to \code{\link{qrrvglm.control}}.
+ Here, \eqn{R} is the \code{Rank}, \eqn{M} is the number
+ of additive predictors, and \eqn{S} is the number of responses
+ (species).
+ Thus \eqn{M=S} for binomial and Poisson responses, and
+ \eqn{M=2S} for the negative binomial and 2-parameter gamma distributions.
+
+ \item{Rank}{
+ The numerical rank \eqn{R} of the model, i.e., the number of latent
+ variables. Currently only \code{Rank=1} is implemented.
+
+ }
+ \item{all.knots}{
+ Logical indicating if all distinct points of the smoothing variables
+ are to be used as knots. Assigning the value \code{FALSE} means
+ fewer knots are chosen when the number of distinct points is large,
+ meaning less computational expense. See \code{\link{vgam.control}}
+ for details.
+
+ }
+ \item{criterion}{
+ Convergence criterion. Currently, only one is supported: the deviance
+ is minimized.
+
+ }
+ \item{Cinit}{
+ Optional initial \bold{C} matrix which may speed up convergence.
+
+ }
+ \item{Crow1positive}{
+ Logical vector of length \code{Rank} (recycled if necessary): are
+ the elements of the first row of \bold{C} positive? For example,
+ if \code{Rank} is 4, then specifying \code{Crow1positive=c(FALSE,
+ TRUE)} will force \bold{C[1,1]} and \bold{C[1,3]} to be negative,
+ and \bold{C[1,2]} and \bold{C[1,4]} to be positive.
+
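+ A minimal base-R sketch of this recycling (purely illustrative):
+ \preformatted{
+    rep(c(FALSE, TRUE), length.out = 4)  # FALSE TRUE FALSE TRUE
+ }
+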
+ }
+ \item{epsilon}{
+ Positive numeric. Used to test for convergence for GLMs fitted in
+ FORTRAN. Larger values mean a loosening of the convergence criterion.
+
+% Used only if \code{FastAlgorithm} is \code{TRUE}.
+
+ }
+ \item{Etamat.colmax}{
+ Positive integer, no smaller than \code{Rank}. Controls the amount
+ of memory used by \code{.Init.Poisson.QO()}. It is the maximum
+ number of columns allowed for the pseudo-response and its weights.
+ In general, the larger the value, the better the initial value.
+ Used only if \code{Use.Init.Poisson.QO=TRUE}.
+
+ }
+
+% \item{FastAlgorithm}{
+% Logical.
+% Whether compiled code is used.
+% For \code{\link{cao}} this must be \code{TRUE}.
+%
+% }
+
+\item{GradientFunction}{
+ Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr}
+ is used or not, i.e., to compute gradient values. Used only if
+ \code{FastAlgorithm} is \code{TRUE}. Currently, this argument must
+ be set to \code{FALSE}.
+
+}
+ \item{iKvector, iShape}{
+ See \code{\link{qrrvglm.control}}.
+
+}
+
+% \item{Hstep}{ Positive value. Used as the step size in the
+% finite difference approximation to the derivatives by
+% \code{\link[stats]{optim}}.
+% Used only if \code{GradientFunction} is \code{TRUE}.
+%
+% }
+
+% \item{Kinit}{
+% Initial values for the index parameters \code{k} in the negative
+% binomial distribution (one per species). In general, a smaller number
+% is preferred over a larger number. The vector is recycled to the
+% number of responses (species).
+
+%}
+
+ \item{Norrr}{
+ Formula giving terms that are \emph{not} to be included in the
+ reduced-rank regression (or formation of the latent variables).
+ The default is to omit the intercept term from the latent variables.
+ Currently, only \code{Norrr = ~ 1} is implemented.
+
+ }
+% \item{Parscale}{
+% Numerical and positive-valued vector of length \bold{C}
+% (recycled if necessary). Passed into \code{optim(...,
+% control=list(parscale=Parscale))}; the elements of \bold{C} become
+% \bold{C} / \code{Parscale}. Setting \code{ITolerances=TRUE} results
+% in line searches that are very large, therefore \bold{C} has to be
+% scaled accordingly to avoid large step sizes.
+
+% }
+
+ \item{SmallNo}{
+ Positive numeric between \code{.Machine$double.eps} and \code{0.0001}.
+ Used to avoid under- or over-flow in the IRLS algorithm.
+
+% Used only if \code{FastAlgorithm} is \code{TRUE}.
+
+ }
+ \item{Use.Init.Poisson.QO }{
+ Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO} is
+ used to obtain initial values for the canonical coefficients \bold{C}.
+ If \code{FALSE} then random numbers are used instead.
+
+ }
+
+ \item{Bestof}{
+ Integer. The best of \code{Bestof} models fitted is returned. This
+ argument helps guard against local solutions by (hopefully) finding
+ the global solution from many fits. The argument works only when
+ the function generates its own initial value for \bold{C}, i.e.,
+ when \bold{C} are \emph{not} passed in as initial values.
+ The default is only a convenient minimal number and users are urged
+ to increase this value.
+
+ }
+ \item{maxitl}{
+ Positive integer. Maximum number of
+ Newton-Raphson/Fisher-scoring/local-scoring iterations allowed.
+
+ }
+ \item{method.init}{
+ See \code{\link{qrrvglm.control}}.
+
+ }
+ \item{bf.epsilon}{
+ Positive numeric. Tolerance used by the modified vector backfitting
+ algorithm for testing convergence.
+
+ }
+ \item{bf.maxit}{
+ Positive integer.
+ Number of backfitting iterations allowed in the compiled code.
+ }
+ \item{Maxit.optim}{
+ Positive integer.
+ Number of iterations given to the function \code{\link[stats]{optim}}
+ at each of the \code{optim.maxit} iterations.
+
+ }
+ \item{optim.maxit}{
+ Positive integer.
+ Number of times \code{\link[stats]{optim}} is invoked.
+
+% At iteration \code{i}, the \code{i}th value of \code{Maxit.optim}
+% is fed into \code{\link[stats]{optim}}.
+
+ }
+% \item{se.fit}{
+% Logical indicating whether approximate
+% pointwise standard errors are to be saved on the object.
+% Currently this argument must have the value \code{FALSE}.
+
+% }
+ \item{SD.sitescores}{
+ Numeric. Standard deviation of the
+ initial values of the site scores, which are generated from
+ a normal distribution.
+ Used when \code{Use.Init.Poisson.QO} is \code{FALSE}.
+
+ }
+ \item{SD.Cinit}{
+ Standard deviation of the initial values for the elements
+ of \bold{C}.
+ These are normally distributed with mean zero.
+ This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
+
+ }
+ \item{trace}{
+ Logical indicating if output should be produced for each
+ iteration. Having the value \code{TRUE} is a good idea for large
+ data sets.
+
+ }
+ \item{df1.nl, df2.nl}{
+ Numeric and non-negative, recycled to length \eqn{S}.
+ Nonlinear degrees
+ of freedom for smooths of the first and second latent variables.
+ A value of 0 means the smooth is linear. Roughly, a value between
+ 1.0 and 2.0 often has the approximate flexibility of a quadratic.
+ The user should not assign too large a value to this argument, e.g.,
+ the value 4.0 is probably too high. The argument \code{df1.nl} is
+ ignored if \code{spar1} is assigned a positive value or values. Ditto
+ for \code{df2.nl}.
+
+ }
+ \item{spar1, spar2}{
+ Numeric and non-negative, recycled to length \eqn{S}.
+ Smoothing parameters of the
+ smooths of the first and second latent variables. The larger the value, the
+ more smooth (less wiggly) the fitted curves. These arguments are an
+ alternative to specifying \code{df1.nl} and \code{df2.nl}. A value
+ 0 (the default) for \code{spar1} means that \code{df1.nl} is used.
+ Ditto for \code{spar2}.
+ The values are on a scaled version of the latent variables.
+ See Green and Silverman (1994) for more information.
+
+ }
+ \item{\dots}{ Ignored at present. }
+}
+\details{
+ Allowing the smooths too much flexibility means the CAO optimization
+ problem becomes more difficult to solve. This is because the number
+ of local solutions increases as the nonlinearity of the smooths
+ increases. In situations of high nonlinearity, many initial values
+ should be used, so that \code{Bestof} should be assigned a larger
+ value. In general, there should be a reasonable value of \code{df1.nl}
+ somewhere between 0 and about 3 for most data sets.
+
+}
+\value{
+ A list with the components corresponding to its arguments, after some
+ basic error checking.
+
+}
+\references{
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+Green, P. J. and Silverman, B. W. (1994)
+\emph{Nonparametric Regression and Generalized Linear Models: A
+ Roughness Penalty Approach},
+London: Chapman & Hall.
+
+}
+\author{T. W. Yee}
+\note{
+ The argument \code{df1.nl} can be specified in the format \code{c(spp1=2,
+ spp2=3, 2.5)}, say, meaning the default value is 2.5, and two species
+ have alternative values.
+
+ If \code{spar1=0} and \code{df1.nl=0} then this represents fitting
+ linear functions (CLO). Currently, this is handled in the awkward
+ manner of setting \code{df1.nl} to be a small positive value, so that
+ the smooth is almost linear but not quite.
+ A proper fix to this special case should be made in the near future.
+
+}
+\seealso{
+ \code{\link{cao}}.
+}
+
+\examples{\dontrun{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(123)
+ap1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider,
+ df1.nl = c(Zoraspin=2.3, 2.1),
+ Bestof = 10, Crow1positive = FALSE)
+sort(ap1@misc$deviance.Bestof) # A history of all the iterations
+
+Coef(ap1)
+
+par(mfrow=c(2,3)) # All or most of the curves are unimodal; some are
+plot(ap1, lcol="blue") # quite symmetric. Hence a CQO model should be ok
+
+par(mfrow=c(1,1), las=1)
+index = 1:ncol(ap1@y) # lvplot is jagged because only 28 sites
+lvplot(ap1, lcol=index, pcol=index, y=TRUE)
+
+trplot(ap1, label=TRUE, col=index)
+abline(a=0, b=1, lty=2)
+
+persp(ap1, label=TRUE, col=1:4)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
+
+
+
+%cao.control(Rank = 1, all.knots = FALSE,
+% criterion = "deviance", Cinit = NULL,
+% Crow1positive = TRUE, epsilon = 1e-05,
+% Etamat.colmax = 10,
+%% FastAlgorithm = TRUE,
+%% is.loaded(symbol.For("cqo2f")),
+%% GradientFunction = FALSE,
+% iKvector = 0.1,
+% iShape = 0.1,
+% Norrr = ~1,
+%% Parscale = 1,
+% SmallNo = 5e-13,
+% Use.Init.Poisson.QO = TRUE,
+% Bestof = if(length(Cinit)) 1 else 10, maxitl = 40,
+% bf.epsilon = 1.0e-7, bf.maxit = 40,
+% Maxit.optim = 250, optim.maxit = 20,
+%% se.fit = FALSE,
+% SD.sitescores = 1,
+% SD.Cinit = 0.02, trace = TRUE,
+%% df1.nl = 2.5, spar1 = 0, ...)
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
new file mode 100644
index 0000000..9a31d6f
--- /dev/null
+++ b/man/cauchit.Rd
@@ -0,0 +1,161 @@
+\name{cauchit}
+\alias{cauchit}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Cauchit Link Function }
+\description{
+ Computes the cauchit (tangent) link transformation, including its
+ inverse and the first two derivatives.
+
+}
+\usage{
+cauchit(theta, earg = list(bvalue= .Machine$double.eps),
+ inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+ }
+ \item{earg}{
+ List. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by 1 minus the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a \code{\link{vglmff-class}}
+ object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ This is an alternative link function for parameters that
+ lie in the unit interval.
+ This type of link bears the same relation to the Cauchy distribution
+ as the probit link bears to the Gaussian. One characteristic of this
+ link function is that its tails are heavier than those of the other
+ links (see the examples below).
+
+ Numerical values of \code{theta} close to 0 or 1 or out of range result
+ in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The arguments
+ \code{short} and \code{tag} are used only if \code{theta} is character.
+
+}
+\value{
+ For \code{deriv = 0}, the tangent of \code{theta}, i.e.,
+ \code{tan(pi * (theta-0.5))} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{0.5 + atan(theta)/pi}.
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of
+ \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE}
+ then it returns the reciprocal.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is close to 1 or 0.
+ One way of overcoming this is to use \code{earg}.
+
+ As mentioned above,
+ in terms of the threshold approach with cumulative probabilities for
+ an ordinal response this link function corresponds to the
+ Cauchy distribution (see \code{\link{cauchy1}}).
+
+}
+
+\seealso{
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{loge}},
+ \code{\link{cauchy1}}.
+ }
+\examples{
+p = seq(0.01, 0.99, by=0.01)
+cauchit(p)
+max(abs(cauchit(cauchit(p), inverse=TRUE) - p)) # Should be 0
+
+p = c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by=0.01))
+cauchit(p) # Has no NAs
+
+\dontrun{
+par(mfrow=c(2,2))
+y = seq(-4, 4, length=100)
+
+for(d in 0:1) {
+ matplot(p, cbind(logit(p, deriv=d), probit(p, deriv=d)),
+ type="n", col="purple", ylab="transformation",
+ lwd=2, las=1, main=if(d==0) "Some probability link functions"
+ else "First derivative")
+ lines(p, logit(p, deriv=d), col="limegreen", lwd=2)
+ lines(p, probit(p, deriv=d), col="purple", lwd=2)
+ lines(p, cloglog(p, deriv=d), col="chocolate", lwd=2)
+ lines(p, cauchit(p, deriv=d), col="tan", lwd=2)
+ if(d==0) {
+ abline(v=0.5, h=0, lty="dashed")
+ legend(0, 4.5, c("logit", "probit", "cloglog", "cauchit"),
+ col=c("limegreen","purple","chocolate", "tan"), lwd=2)
+ } else
+ abline(v=0.5, lty="dashed")
+}
+
+for(d in 0) {
+ matplot(y, cbind(logit(y, deriv=d, inverse=TRUE),
+ probit(y, deriv=d, inverse=TRUE)),
+ type="n", col="purple", xlab="transformation", ylab="p",
+ main=if(d==0) "Some inverse probability link functions"
+ else "First derivative", lwd=2, las=1)
+ lines(y, logit(y, deriv=d, inverse=TRUE), col="limegreen", lwd=2)
+ lines(y, probit(y, deriv=d, inverse=TRUE), col="purple", lwd=2)
+ lines(y, cloglog(y, deriv=d, inverse=TRUE), col="chocolate", lwd=2)
+ lines(y, cauchit(y, deriv=d, inverse=TRUE), col="tan", lwd=2)
+ if(d==0) {
+ abline(h=0.5, v=0, lty="dashed")
+ legend(-4, 1, c("logit", "probit", "cloglog", "cauchit"),
+ col=c("limegreen","purple","chocolate", "tan"), lwd=2)
+ }
+}
+}
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
+%plot(y, logit(y, inverse=TRUE), type="l", col="limegreen",
+% xlab="transformation", ylab="p",
+% lwd=2, las=1, main="Some inverse probability link functions")
+%lines(y, probit(y, inverse=TRUE), col="purple", lwd=2)
+%lines(y, cloglog(y, inverse=TRUE), col="chocolate", lwd=2)
+%abline(h=0.5, v=0, lty="dashed")
+
+
+
+
diff --git a/man/cauchy1.Rd b/man/cauchy1.Rd
new file mode 100644
index 0000000..230b5a0
--- /dev/null
+++ b/man/cauchy1.Rd
@@ -0,0 +1,92 @@
+\name{cauchy1}
+\alias{cauchy1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Cauchy Distribution Family Function }
+\description{
+ Estimates the location parameter of the Cauchy distribution by maximum
+ likelihood estimation.
+
+}
+\usage{
+cauchy1(scale.arg=1, llocation="identity",
+ ilocation=NULL, method.init=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{scale.arg}{
+ Known (positive) scale parameter, called \eqn{s}{s} below.
+
+ }
+ \item{llocation}{
+ Parameter link function for the \eqn{a}{a} location parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ilocation}{
+ Optional initial value for \eqn{a}{a}.
+ By default, an initial value is chosen internally.
+
+ }
+ \item{method.init}{
+ Integer, either 1, 2 or 3. Initialization method; three algorithms
+ are implemented. Choose another value if convergence fails, or use
+ \code{ilocation}.
+
+ }
+}
+\details{
+ The Cauchy distribution has a density function
+ \deqn{f(y;a,s) = \left\{ \pi s [1 + ((y-a)/s)^2] \right\}^{-1} }{%
+ f(y;a,s) = 1 / [pi * s * [1 + ((y-a)/s)^2]] }
+ where \eqn{y} and \eqn{a} are real and finite,
+ and \eqn{s>0}{s>0}.
+ The distribution is symmetric about \eqn{a} and has a heavy tail.
+ Its median and mode are \eqn{a}, but the mean does not exist, therefore
+ the fitted values are all \code{NA}s.
+ Fisher scoring is used.
+
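+ A quick numerical check of the density above against
+ \code{\link[stats]{dcauchy}} (the parameter values are illustrative
+ only):
+ \preformatted{
+    a = 2; s = 0.5; y = 1.7
+    1 / (pi * s * (1 + ((y-a)/s)^2))  # Same as dcauchy(y, a, s)
+ }
+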
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+}
+\author{ T. W. Yee }
+\note{
+ Good initial values are needed. It pays to try a wide range
+ of initial values via the \code{ilocation} and \code{method.init}
+ arguments.
+
+}
+
+\seealso{
+ \code{\link[stats:Cauchy]{Cauchy}},
+ \code{\link{cauchit}}.
+}
+\examples{
+set.seed(123)
+n = 500
+x = runif(n)
+
+y = rcauchy(n, loc=1+5*x, scale=.4)
+fit = vglm(y ~ x, cauchy1(scale=0.4), trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+
+y = rcauchy(n, loc=exp(1+0.5*x), scale=.4)
+\dontrun{hist(y)}
+fit = vglm(y ~ x, cauchy1(scale=0.4, lloc="loge"), trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+fitted(fit)[1:4]
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/ccoef-methods.Rd b/man/ccoef-methods.Rd
new file mode 100644
index 0000000..537ff8f
--- /dev/null
+++ b/man/ccoef-methods.Rd
@@ -0,0 +1,44 @@
+\name{ccoef-methods}
+\docType{methods}
+%\alias{ccoef,ANY-method}
+\alias{ccoef-method}
+\alias{ccoef,cao-method}
+\alias{ccoef,Coef.cao-method}
+\alias{ccoef,rrvglm-method}
+\alias{ccoef,qrrvglm-method}
+\alias{ccoef,Coef.rrvglm-method}
+\alias{ccoef,Coef.qrrvglm-method}
+%
+% This does not work:
+%\alias{ccoef,cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm-method}
+%
+\title{ Constrained (Canonical) Coefficients }
+\description{
+ \code{ccoef} is a generic function used to return the constrained
+ (canonical) coefficients of a constrained ordination model.
+ The function invokes particular methods which depend on the class of
+ the first argument.
+
+}
+%\usage{
+% \S4method{ccoef}{cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm}(object, ...)
+%}
+
+\section{Methods}{
+\describe{
+
+\item{object}{
+ The object from which the constrained coefficients are
+ extracted.
+
+}
+}
+}
+\keyword{methods}
+\keyword{classes}
+%\keyword{ ~~ other possible keyword(s)}
+\keyword{models}
+\keyword{regression}
+
+
+
diff --git a/man/ccoef.Rd b/man/ccoef.Rd
new file mode 100644
index 0000000..4a5df12
--- /dev/null
+++ b/man/ccoef.Rd
@@ -0,0 +1,92 @@
+\name{ccoef}
+\alias{ccoef}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Extract Model Constrained/Canonical Coefficients }
+\description{
+ \code{ccoef} is a generic function which extracts the constrained
+ (canonical) coefficients from objects returned by certain modelling
+ functions.
+
+}
+\usage{
+ccoef(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the extraction of canonical
+ coefficients is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model.
+ }
+}
+\details{
+ For constrained quadratic and additive ordination models, the
+ \emph{canonical coefficients} are the elements of the \bold{C} matrix
+ used to form the latent variables. They are highly interpretable in
+ ecology, and can be regarded as weights or loadings.
+
+ They are also applicable for reduced-rank VGLMs.
+
+}
+\value{
+ The value returned depends specifically on the methods function invoked.
+
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+
+\section{Warning }{
+ For QO models, there is a direct inverse relationship between the
+ scaling of the latent variables (site scores) and the tolerances.
+ One normalization is for the latent variables to have unit variance.
+ Another normalization is for all the species' tolerances to be
+ unity (provided \code{EqualTolerances} is \code{TRUE}). These two
+ normalizations cannot simultaneously hold in general. For rank
+ \eqn{R} models with \eqn{R>1} it becomes more complicated because
+ the latent variables are also uncorrelated. An important argument when
+ fitting quadratic ordination models is whether \code{EqualTolerances}
+ is \code{TRUE} or \code{FALSE}. See Yee (2004) for details.
+
+}
+
+\seealso{
+ \code{\link{ccoef-method}},
+ \code{ccoef.qrrvglm},
+ \code{ccoef.cao},
+ \code{\link[stats]{coef}}.
+
+}
+\examples{
+data(hspider)
+set.seed(111) # This leads to the global solution
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+ccoef(p1)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cdf.lmscreg.Rd b/man/cdf.lmscreg.Rd
new file mode 100644
index 0000000..9c21276
--- /dev/null
+++ b/man/cdf.lmscreg.Rd
@@ -0,0 +1,76 @@
+\name{cdf.lmscreg}
+\alias{cdf.lmscreg}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Cumulative Distribution Function for LMS Quantile Regression }
+\description{
+ Computes the cumulative distribution function (CDF) for observations,
+ based on an LMS quantile regression.
+}
+\usage{
+cdf.lmscreg(object, newdata = NULL, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A \pkg{VGAM} quantile regression model, i.e.,
+ an object produced by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}} with a family function beginning with
+ \code{"lms."}.
+ }
+ \item{newdata}{ Data frame where the predictions are
+ to be made. If missing, the original data is used.
+ }
+ \item{\dots}{ Parameters which are passed into functions such as
+ \code{cdf.lms.yjn}.
+ }
+}
+\details{
+ The CDFs returned here are values lying in [0,1] giving the relative
+ probabilities associated with the quantiles in \code{newdata}.
+ For example, a value near 0.75 means it is close to the upper quartile
+ of the distribution.
+}
+\value{
+ A vector of CDF values lying in [0,1].
+}
+\references{
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The data are treated like quantiles, and the percentiles
+ are returned. The opposite is performed by
+ \code{\link{qtplot.lmscreg}}.
+
+ The CDF values of the model have been placed in
+ \code{@post$cdf} when the model was fitted.
+
+}
+
+\seealso{
+\code{\link{deplot.lmscreg}},
+\code{\link{qtplot.lmscreg}},
+\code{\link{lms.bcn}},
+\code{\link{lms.bcg}},
+\code{\link{lms.yjn}}.
+}
+\examples{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+fit@post$cdf[1:5]
+cdf(fit)[1:5] # Same
+fit@y[1:5]
+fitted(fit)[1:5,]
+
+cdf(fit, data.frame(age=c(31.5,39), BMI=c(28.4,24)))
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cexpon.Rd b/man/cexpon.Rd
new file mode 100644
index 0000000..8b51555
--- /dev/null
+++ b/man/cexpon.Rd
@@ -0,0 +1,88 @@
+\name{cexpon}
+\alias{cexpon}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Censored Exponential Distribution }
+\description{
+ Maximum likelihood estimation for the exponential distribution with
+ left and right censoring.
+}
+\usage{
+cexpon(link = "loge", location = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Character.
+ Parameter link function applied to the positive parameter \eqn{rate}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{location}{
+ Numeric of length 1, the known location parameter, \eqn{A}, say.
+
+ }
+}
+\details{
+ The family function assumes the response \eqn{Y} has density
+ \deqn{f(y) = \lambda \exp(-\lambda (y-A))}{%
+ f(y) = rate * exp(-rate * (y-A)) }
+ for \eqn{y > A}, where \eqn{A} is the known location parameter.
+ By default, \eqn{A=0}.
+ Then \eqn{E(Y) = A + 1/ \lambda}{E(Y) = A + 1/rate}
+ (returned as the fitted values) and
+ \eqn{Var(Y) = 1/ \lambda^2}{Var(Y) = 1/rate^2}.
+
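+ A quick simulation check of these moments (parameter values
+ illustrative only):
+ \preformatted{
+    A = 0; rate = 2
+    y = A + rexp(100000, rate = rate)
+    c(mean(y), A + 1/rate)   # Should be close
+    c(var(y), 1/rate^2)      # Should be close
+ }
+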
+ The data may be left-censored so that the true value would be less than
+ the observed value, or right-censored so that the true value would be
+ greater than the observed value. To indicate which type of censoring,
+ input \code{extra = list(leftcensored = vec1, rightcensored = vec2)}
+ where \code{vec1} and \code{vec2} are logical vectors the same length
+ as the response.
+ If the two components of this list are missing then
+ all the logical values are taken to be \code{FALSE}.
+ The fitted object has these two components stored in the \code{extra}
+ slot.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+
+\author{ T. W. Yee }
+\note{
+ This function was adapted from \code{\link{exponential}}, which should
+ be used when there are no censored observations.
+
+ The fitted object has a component called \code{"location"} stored in the
+ \code{extra} slot which contains the value of the location parameter.
+
+}
+\seealso{
+ \code{\link{exponential}}.
+}
+
+\examples{
+n = 100
+lambda = exp(-0.1)
+ystar = rexp(n, rate=lambda)
+L = 2 # Lower censoring point
+U = 3 # Upper censoring point
+y = pmax(L, ystar) # left censoring
+y = pmin(U, y) # right censoring
+\dontrun{hist(y)}
+extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
+fit = vglm(y ~ 1, cexpon, trace=TRUE, extra=extra)
+coef(fit, matrix=TRUE)
+Coef(fit)
+fit@extra
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cgo.Rd b/man/cgo.Rd
new file mode 100644
index 0000000..c29fdc0
--- /dev/null
+++ b/man/cgo.Rd
@@ -0,0 +1,58 @@
+\name{cgo}
+\alias{cgo}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Redirects the user to cqo }
+\description{
+ Redirects the user to the function \code{\link{cqo}}.
+}
+\usage{
+cgo(...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{\dots}{ Ignored. }
+}
+\details{
+The former function \code{cgo} has been renamed \code{\link{cqo}}
+because CGO (for \emph{canonical Gaussian ordination}) is a confusing
+and inaccurate name.
+CQO (for \emph{constrained quadratic ordination}) is better.
+This new nomenclature is described in Yee (2006).
+}
+\value{
+ Nothing is returned; an error message is issued.
+}
+\references{
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{Thomas W. Yee}
+
+\section{Warning }{
+
+Therefore, the code in Yee (2004) will not run without changing the
+\code{"g"} to a \code{"q"}.
+
+}
+
+\seealso{
+ \code{\link{cqo}}.
+}
+\examples{
+
+\dontrun{
+cgo()
+}
+
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cgumbel.Rd b/man/cgumbel.Rd
new file mode 100644
index 0000000..7c4f645
--- /dev/null
+++ b/man/cgumbel.Rd
@@ -0,0 +1,136 @@
+\name{cgumbel}
+\alias{cgumbel}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Censored Gumbel Distribution }
+\description{
+ Maximum likelihood estimation of the 2-parameter Gumbel distribution
+ when there are censored observations.
+ A matrix response is not allowed.
+
+}
+\usage{
+cgumbel(llocation="identity", lscale = "loge",
+ elocation = list(), escale = list(), iscale=NULL,
+ mean=TRUE, percentiles=NULL, zero=2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation, lscale}{
+ Character.
+ Parameter link functions for the location and
+ (positive) \eqn{scale} parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{elocation, escale}{
+ Extra argument for the respective links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{iscale}{
+ Numeric and positive.
+ Initial value for \eqn{scale}. Recycled to the appropriate length.
+ In general, a larger value is better than a smaller value.
+ The default is to choose the value internally.
+
+ }
+ \item{mean}{
+ Logical. Return the mean? If \code{TRUE} then the mean is returned;
+ otherwise the percentiles given by the \code{percentiles} argument
+ are returned.
+
+ }
+ \item{percentiles}{
+ Numeric with values between 0 and 100.
+ If \code{mean=FALSE} then the fitted values are percentiles which must
+ be specified by this argument.
+
+ }
+
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The value (possibly values) must be
+ from the set \{1,2\} corresponding respectively to \eqn{location} and
+ \eqn{scale}. If \code{zero=NULL} then all linear/additive predictors
+ are modelled as a linear combination of the explanatory variables.
+ The default is to model the \eqn{scale} parameter as an intercept only.
+
+ }
+}
+\details{
+ This \pkg{VGAM} family function is like \code{\link{gumbel}}
+ but handles observations
+ that are left-censored (so that the true value would be less than
+ the observed value) or right-censored (so that the true value would be
+ greater than the observed value). To indicate which type of censoring,
+ input \code{extra = list(leftcensored = vec1, rightcensored = vec2)}
+ where \code{vec1} and \code{vec2} are logical vectors the same length
+ as the response.
+ If the two components of this list are missing then the logical
+ values are taken to be \code{FALSE}. The fitted object has these two
+ components stored in the \code{extra} slot.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Coles, S. (2001)
+\emph{An Introduction to Statistical Modeling of Extreme Values}.
+London: Springer-Verlag.
+
+}
+\author{ T. W. Yee }
+\section{Warning}{
+ Numerical problems may occur if the amount of censoring is excessive.
+
+}
+
+\note{
+ See \code{\link{gumbel}} for details about the Gumbel distribution.
+ The initial values are computed as if all the observations were
+ uncensored, and therefore could be improved upon.
+
+}
+
+\seealso{
+ \code{\link{gumbel}},
+ \code{\link{egumbel}},
+ \code{\link{rgumbel}},
+ \code{\link{guplot}},
+ \code{\link{gev}},
+ \code{\link{venice}}.
+}
+
+\examples{
+# Example 1
+data(venice)
+ystar = venice[["r1"]] # Use the first order statistic as the response
+n = length(ystar)
+L = runif(n, 100, 104) # Lower censoring points
+U = runif(n, 130, 135) # Upper censoring points
+y = pmax(L, ystar) # Left censored
+y = pmin(U, y) # Right censored
+extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
+fit = vglm(y ~ scale(year), data=venice, trace=TRUE, extra=extra,
+ cgumbel(mean=FALSE, perc=c(5,25,50,75,95)))
+coef(fit, matrix=TRUE)
+fitted(fit)[1:5,]
+fit@extra
+
+# Example 2: simulated data
+n = 1000
+ystar = rgumbel(n, loc=1, scale=exp(0.5)) # The uncensored data
+L = runif(n, -1, 1) # Lower censoring points
+U = runif(n, 2, 5) # Upper censoring points
+y = pmax(L, ystar) # Left censored
+y = pmin(U, y) # Right censored
+\dontrun{par(mfrow=c(1,2)); hist(ystar); hist(y);}
+extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
+fit = vglm(y ~ 1, trace=TRUE, extra=extra, cgumbel)
+coef(fit, matrix=TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/chest.Rd b/man/chest.Rd
new file mode 100644
index 0000000..bb79118
--- /dev/null
+++ b/man/chest.Rd
@@ -0,0 +1,43 @@
+\name{chest}
+\alias{chest}
+\docType{data}
+\title{ Chest Pain in NZ Adults }
+\description{
+ Presence/absence of chest pain in 10186 New Zealand adults.
+}
+\usage{data(chest)}
+\format{
+ A data frame with 73 rows and the following 5 variables.
+ \describe{
+ \item{age}{a numeric vector; age (years).}
+ \item{nolnor}{a numeric vector of counts; no pain on LHS or RHS.}
+ \item{nolr}{a numeric vector of counts; no pain on LHS but pain on RHS.}
+ \item{lnor}{a numeric vector of counts; no pain on RHS but pain on LHS.}
+ \item{lr}{a numeric vector of counts; pain on LHS and RHS of chest.}
+ }
+}
+\details{
+ Each adult was asked their age and whether they experienced any
+ pain or discomfort in their chest over the last six months.
+ If yes, they indicated whether it
+ was on the LHS and/or RHS of their chest.
+
+}
+\source{
+ MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
+ Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
+ Fletcher Challenge-University of Auckland Heart &
+ Health Study: design and baseline findings.
+ \emph{New Zealand Medical Journal},
+ \bold{108}, 499--502.
+}
+\examples{
+data(chest)
+fit = vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4,3)),
+ binom2.or(exchan=TRUE, zero=NULL), data = chest)
+coef(fit, matrix=TRUE)
+\dontrun{
+plot(fit, which.cf=2, se=TRUE)
+}
+}
+\keyword{datasets}
diff --git a/man/chisq.Rd b/man/chisq.Rd
new file mode 100644
index 0000000..003c1a2
--- /dev/null
+++ b/man/chisq.Rd
@@ -0,0 +1,55 @@
+\name{chisq}
+\alias{chisq}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Chi-squared Distribution }
+\description{
+ Maximum likelihood estimation of the degrees of freedom for
+ a chi-squared distribution.
+}
+\usage{
+chisq(link = "loge")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function.
+ See \code{\link{Links}} for more choices.
+
+ }
+}
+\details{
+ The degrees of freedom is treated as a parameter to be estimated,
+ and as a real (not integer) quantity.
+ Since it is positive, a log link is used by default.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+
+\author{ T. W. Yee }
+\note{
+There may be convergence problems if the degrees of freedom
+is very large.
+}
+
+\seealso{
+ \code{\link[stats]{Chisquare}},
+ \code{\link{normal1}}.
+}
+\examples{
+y = rchisq(n=200, df=exp(2))
+fit = vglm(y ~ 1, chisq)
+coef(fit, matrix=TRUE)
+Coef(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/clo.Rd b/man/clo.Rd
new file mode 100644
index 0000000..feda2fc
--- /dev/null
+++ b/man/clo.Rd
@@ -0,0 +1,61 @@
+\name{clo}
+\alias{clo}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Redirects the user to rrvglm }
+\description{
+ Redirects the user to the function \code{\link{rrvglm}}.
+}
+\usage{
+clo(...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{\dots}{ Ignored. }
+}
+\details{
+CLO stands for \emph{constrained linear ordination}, and
+is fitted with a statistical class of models called
+\emph{reduced-rank vector generalized linear models}
+(RR-VGLMs). It allows for generalized reduced-rank regression
+in that response types such as Poisson counts and presence/absence
+data can be handled.
+
+Currently in the \pkg{VGAM} package, \code{\link{rrvglm}} is
+used to fit RR-VGLMs. However, the author's opinion is that
+linear responses to a latent variable (composite environmental
+gradient) are not as common as unimodal responses, so
+\code{\link{cqo}} is often more appropriate.
+
+The new CLO/CQO/CAO nomenclature is described in Yee (2006).
+}
+\value{
+ Nothing is returned; an error message is issued.
+}
+\references{
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+}
+\author{Thomas W. Yee}
+
+\seealso{
+ \code{\link{rrvglm}},
+ \code{\link{cqo}}.
+}
+\examples{
+
+\dontrun{
+clo()
+}
+
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
new file mode 100644
index 0000000..7d086f2
--- /dev/null
+++ b/man/cloglog.Rd
@@ -0,0 +1,138 @@
+\name{cloglog}
+\alias{cloglog}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Complementary Log-log Link Function }
+\description{
+ Computes the complementary log-log transformation,
+ including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+cloglog(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by 1 minus the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The complementary log-log link function is commonly used for parameters
+ that lie in the unit interval. Numerical values of \code{theta}
+ close to 0 or 1 or out of range result in \code{Inf}, \code{-Inf},
+ \code{NA} or \code{NaN}. The arguments \code{short} and \code{tag}
+ are used only if \code{theta} is character.
+
+}
+\value{
+ For \code{deriv = 0}, the complementary log-log of \code{theta},
+ i.e., \code{log(-log(1 - theta))} when \code{inverse = FALSE}, and if
+ \code{inverse = TRUE} then \code{1-exp(-exp(theta))}.
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is close to 1 or 0.
+ One way of overcoming this is to use \code{earg}.
+
+ With constrained ordination (e.g., \code{\link{cqo}} and
+ \code{\link{cao}}) used with \code{\link{binomialff}}, a complementary
+ log-log link function is preferred over the default \code{\link{logit}}
+ link for a good reason: see the example below.
+
+ In terms of the threshold approach with cumulative probabilities for
+ an ordinal response this link function corresponds to the extreme
+ value distribution.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cauchit}}.
+
+}
+\examples{
+p = seq(0.01, 0.99, by=0.01)
+cloglog(p)
+max(abs(cloglog(cloglog(p), inverse=TRUE) - p)) # Should be 0
+
+p = c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by=0.01))
+cloglog(p) # Has NAs
+cloglog(p, earg=list(bvalue= .Machine$double.eps)) # Has no NAs
+
+\dontrun{
+plot(p, logit(p), type="l", col="limegreen", ylab="transformation",
+ lwd=2, las=1, main="Some probability link functions")
+lines(p, probit(p), col="purple", lwd=2)
+lines(p, cloglog(p), col="chocolate", lwd=2)
+lines(p, cauchit(p), col="tan", lwd=2)
+abline(v=0.5, h=0, lty="dashed")
+legend(0.1, 4, c("logit", "probit", "cloglog", "cauchit"),
+ col=c("limegreen","purple","chocolate", "tan"), lwd=2)
+}
+
+# This example shows that a cloglog link is preferred over the logit
+n = 500; p = 5; S = 3; Rank = 1 # Species packing model:
+mydata = rcqo(n, p, S, EqualTol=TRUE, ESOpt=TRUE, EqualMax=TRUE,
+ family="binomial", hiabundance=5, seed=123, Rank=Rank)
+fitc = cqo(attr(mydata, "formula"), ITol=TRUE, data=mydata,
+ fam=binomialff(mv=TRUE, link="cloglog"), Rank=Rank)
+fitl = cqo(attr(mydata, "formula"), ITol=TRUE, data=mydata,
+ fam=binomialff(mv=TRUE, link="logit"), Rank=Rank)
+
+# Compare the fitted models (cols 1 and 3) with the truth (col 2)
+cbind(ccoef(fitc), attr(mydata, "ccoefficients"), ccoef(fitl))
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/cnormal1.Rd b/man/cnormal1.Rd
new file mode 100644
index 0000000..5ac6981
--- /dev/null
+++ b/man/cnormal1.Rd
@@ -0,0 +1,93 @@
+\name{cnormal1}
+\alias{cnormal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Censored Normal Distribution }
+\description{
+ Maximum likelihood estimation for the normal distribution with
+ left and right censoring.
+}
+\usage{
+cnormal1(lmu="identity", lsd="loge", imethod=1, zero=2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, lsd}{
+ Parameter link functions applied to the mean and
+ standard deviation parameters.
+ See \code{\link{Links}} for more choices.
+ The standard deviation is a positive quantity, therefore a log link
+ is the default.
+
+ }
+ \item{imethod}{
+ Initialization method: either 1 or 2. This specifies
+ two methods for obtaining initial values for the parameters.
+
+ }
+ \item{zero}{
+ An integer vector containing the value 1 or 2; the mean or
+ standard deviation, respectively, is then modelled
+ as an intercept only.
+ Setting \code{zero=NULL} means both linear/additive predictors
+ are modelled as functions of the explanatory variables.
+
+ }
+}
+\details{
+ This function is like \code{\link{normal1}} but handles observations
+ that are left-censored (so that the true value would be less than
+ the observed value) or right-censored (so that the true value would be
+ greater than the observed value). To indicate which type of censoring,
+ input \code{extra = list(leftcensored = vec1, rightcensored = vec2)}
+ where \code{vec1} and \code{vec2} are logical vectors the same length
+ as the response.
+ If the two components of this list are missing then
+ the logical values are taken to be \code{FALSE}.
+ The fitted object has these two components stored in the \code{extra}
+ slot.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+%\references{
+
+%}
+
+\author{ T. W. Yee }
+\note{
+ This function was adapted from \code{\link{tobit}}.
+ The Tobit model is a special case of this \pkg{VGAM}
+ family function because the observations have a common
+ lower censoring point and upper censoring point.
+ If there are no censored observations then \code{\link{normal1}}
+ is recommended instead.
+
+}
+\seealso{
+ \code{\link{tobit}},
+ \code{\link{normal1}},
+ \code{\link{dcnormal1}}.
+}
+
+\examples{
+n = 1000
+x = runif(n)
+ystar = rnorm(n, mean=100 + 15 * x, sd=exp(3)) # True values
+\dontrun{hist(ystar)}
+L = runif(n, 80, 90) # Lower censoring points
+U = runif(n, 130, 140) # Upper censoring points
+y = pmax(L, ystar) # Left censored
+y = pmin(U, y) # Right censored
+\dontrun{hist(y)}
+extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
+fit = vglm(y ~ x, cnormal1(zero=2), trace=TRUE, extra=extra)
+coef(fit, matrix=TRUE)
+Coef(fit)
+names(fit@extra)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/coalminers.Rd b/man/coalminers.Rd
new file mode 100644
index 0000000..5092b6d
--- /dev/null
+++ b/man/coalminers.Rd
@@ -0,0 +1,39 @@
+\name{coalminers}
+\alias{coalminers}
+\docType{data}
+\title{ Breathlessness and Wheeze Amongst Coalminers }
+\description{
+ Coalminers who are smokers without radiological pneumoconiosis,
+ classified by age, breathlessness and wheeze.
+}
+\usage{data(coalminers)}
+\format{
+ A data frame with 9 age groups with the following 5 columns.
+ \describe{
+ \item{BW}{Counts with breathlessness and wheeze. }
+ \item{BnW}{Counts with breathlessness but no wheeze. }
+ \item{nBW}{Counts with no breathlessness but wheeze. }
+ \item{nBnW}{Counts with neither breathlessness nor wheeze. }
+ \item{age}{Age of the coal miners (actually, the
+ midpoints of the 5-year category ranges). }
+ }
+}
+\details{
+ The data were published in Ashford and Sowden (1970).
+ A more recent analysis is McCullagh and Nelder (1989, Section 6.6).
+}
+\source{
+ Ashford, J. R. and Sowden, R. R. (1970)
+ Multi-variate probit analysis.
+ \emph{Biometrics}, \bold{26}, 535--546.
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}. 2nd ed. London: Chapman & Hall.
+
+}
+\examples{
+data(coalminers)
+str(coalminers)
+}
+\keyword{datasets}
diff --git a/man/constraints.Rd b/man/constraints.Rd
new file mode 100644
index 0000000..b0e1e03
--- /dev/null
+++ b/man/constraints.Rd
@@ -0,0 +1,109 @@
+\name{constraints}
+\alias{constraints}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Constraint Matrices }
+\description{
+ Returns the \emph{constraint matrices} of objects in the
+ \pkg{VGAM} package.
+
+}
+\usage{
+constraints(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ Some \pkg{VGAM} object, for example, having
+ class \code{\link{vglmff-class}}.
+ }
+ \item{\dots}{ Other possible arguments. }
+}
+\details{
+ Constraint matrices describe the relationship between the
+ coefficients/component functions of a particular explanatory
+ variable across the linear/additive predictors in VGLM/VGAM
+ models. For example, they may be all different (the constraint
+ matrix is the identity matrix) or all the same (the constraint
+ matrix has one column and has unit values).
+
+ VGLMs and VGAMs have constraint matrices which are \emph{known}.
+ The class of RR-VGLMs have constraint matrices which are
+ \emph{unknown} and are to be estimated.
+}
+\value{
+ This extractor function returns a list comprising
+ constraint matrices---one for each column of the
+ LM model matrix, and in that order.
+ The list is labelled with the variable names.
+ Each constraint matrix has \eqn{M} rows, where
+ \eqn{M} is the number of linear/additive predictors,
+ and whose rank is equal to the number of columns.
+ A model with no constraints at all has an order
+ \eqn{M} identity matrix as each variable's
+ constraint matrix.
+}
+\author{T. W. Yee }
+\note{
+ In all \pkg{VGAM} family functions \code{zero=NULL} means
+ none of the linear/additive predictors are modelled as
+ intercepts-only.
+ Other arguments found in certain \pkg{VGAM} family functions
+ which affect constraint matrices include
+ \code{parallel} and \code{exchangeable}.
+
+ The \code{constraints} argument in \code{\link{vglm}}
+ and \code{\link{vgam}} allows constraint matrices to
+ be supplied. If so, then \code{constraints(fit)} should
+ return the same as the input, as in the sketch below.
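+
+ A hedged sketch of supplying constraints (the data frame \code{mydf}
+ and the binary responses \code{y1}, \code{y2} are hypothetical):
+ \preformatted{
+    # Two binary responses via binomialff(mv=TRUE), so M = 2.
+    # Force x2 to have a common effect on both linear predictors:
+    clist = list("(Intercept)" = diag(2), x2 = rbind(1, 1))
+    fit2 = vglm(cbind(y1, y2) ~ x2, binomialff(mv=TRUE),
+                data = mydf, constraints = clist)
+    constraints(fit2)  # Should return clist
+ }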
+}
+
+\references{
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+\url{http://www.stat.auckland.ac.nz/~yee} contains additional
+information.
+}
+
+\section{Warning }{The \code{xij} argument changes things,
+ and this has not been fully resolved yet.
+}
+
+\seealso{
+ VGLMs are described in \code{\link{vglm-class}};
+ RR-VGLMs are described in \code{\link{rrvglm-class}}.
+
+ Arguments such as \code{zero} and \code{parallel}
+ found in many \pkg{VGAM}
+ family functions are a way of creating/modifying constraint
+ matrices conveniently, e.g., see \code{\link{zero}}.
+}
+\examples{
+# Fit the proportional odds model
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+fit = vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo)
+fit
+coef(fit, matrix=TRUE)
+constraints(fit) # Parallel assumption results in this
+
+# Fit a rank-1 stereotype (RR-multinomial logit) model
+data(car.all)
+fit = rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank=1)
+constraints(fit) # All except the first are the A matrix
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cqo.Rd b/man/cqo.Rd
new file mode 100644
index 0000000..1bb884b
--- /dev/null
+++ b/man/cqo.Rd
@@ -0,0 +1,479 @@
+\name{cqo}
+\alias{cqo}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Fitting Constrained Quadratic Ordination (CQO)}
+\description{
+ A \emph{constrained quadratic ordination} (CQO; formerly called
+ \emph{canonical Gaussian ordination} or CGO) model is fitted using
+ the \emph{quadratic reduced-rank vector generalized linear model}
+ (QRR-VGLM) framework.
+
+}
+\usage{
+cqo(formula, family, data = list(), weights = NULL, subset = NULL,
+ na.action = na.fail, etastart = NULL, mustart = NULL,
+ coefstart = NULL, control = qrrvglm.control(...), offset = NULL,
+ method = "cqo.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL, constraints = NULL, extra = NULL,
+ smart = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In this documentation, \eqn{M} is the number of linear predictors
+ and \eqn{S} is the number of responses (species). Then
+ \eqn{M=S} for Poisson and binomial species data,
+ and \eqn{M=2S} for negative binomial and gamma distributed species data.
+
+ \item{formula}{ a symbolic description of the model to be fit.
+ The RHS of the formula is applied to each linear predictor. Different
+ variables in each linear predictor can be chosen by specifying
+ constraint matrices.
+
+ }
+ \item{family}{
+ a function of class \code{"vglmff"} describing what statistical model
+ is to be fitted.
+ Currently the following families are supported:
+ \code{\link{poissonff}},
+ \code{\link{binomialff}}
+ (\code{\link{logit}} and \code{\link{cloglog}} links available),
+ \code{\link{negbinomial}},
+ \code{\link{gamma2}},
+ \code{\link{gaussianff}}.
+ Sometimes special arguments are required for \code{cqo()}, e.g.,
+ \code{binomialff(mv=TRUE)}.
+ Also, \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}}
+ may or may not work.
+
+% \code{negbinomial(deviance=TRUE)},
+% \code{gamma2(deviance=TRUE)}.
+
+ }
+ \item{data}{
+ an optional data frame containing the variables in the model.
+ By default the variables are taken from \code{environment(formula)},
+ typically the environment from which \code{cqo} is called.
+
+ }
+ \item{weights}{ an optional vector or matrix of (prior) weights
+ to be used in the fitting process.
+ Currently, this argument should not be used.
+
+ }
+ \item{subset}{
+ an optional logical vector specifying a subset of
+ observations to be used in the fitting process.
+
+ }
+ \item{na.action}{
+ a function which indicates what should happen when the data contain
+ \code{NA}s. The default is set by the \code{na.action} setting of
+ \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
+ The ``factory-fresh'' default is \code{na.omit}.
+
+ }
+ \item{etastart}{
+ starting values for the linear predictors.
+ It is a \eqn{M}-column matrix.
+ If \eqn{M=1} then it may be a vector.
+ Currently, this argument probably should not be used.
+
+ }
+ \item{mustart}{
+ starting values for the
+ fitted values. It can be a vector or a matrix.
+ Some family functions do not make use of this argument.
+ Currently, this argument probably should not be used.
+
+ }
+ \item{coefstart}{
+ starting values for the
+ coefficient vector.
+ Currently, this argument probably should not be used.
+
+ }
+ \item{control}{
+ a list of parameters for controlling the fitting process.
+ See \code{\link{qrrvglm.control}} for details.
+
+ }
+ \item{offset}{
+ This argument must not be used.
+
+% especially when \code{ITolerances=TRUE}.
+% a vector or \eqn{M}-column matrix of offset values.
+% These are \emph{a priori} known and are
+% added to the linear predictors during fitting.
+
+ }
+ \item{method}{
+ the method to be used in fitting the model.
+ The default (and presently only) method \code{cqo.fit}
+ uses \emph{iteratively reweighted least squares} (IRLS).
+
+ }
+ \item{model}{
+ a logical value indicating whether the \emph{model frame}
+ should be assigned in the \code{model} slot.
+
+ }
+ \item{x.arg, y.arg}{
+ logical values indicating whether
+ the model matrix and response matrix used in the fitting
+ process should be assigned in the \code{x} and \code{y} slots.
+ Note the model matrix is the LM model matrix.
+
+% ; to get the VGLM
+% model matrix type \code{model.matrix(vglmfit)} where
+% \code{vglmfit} is a \code{vglm} object.
+
+ }
+ \item{contrasts}{
+ an optional list. See the \code{contrasts.arg}
+ of \code{model.matrix.default}.
+
+ }
+ \item{constraints}{
+ an optional list of constraint matrices.
+ The components of the list must be named with the term it corresponds
+ to (and it must match in character format).
+ Each constraint matrix must have \eqn{M} rows, and be of full-column
+ rank. By default, constraint matrices are the \eqn{M} by \eqn{M}
+ identity
+ matrix unless arguments in the family function itself override these values.
+ If \code{constraints} is used it must contain \emph{all} the
+ terms; an incomplete list is not accepted.
+ Constraint matrices for \eqn{x_2}{x_2} variables are taken as the
+ identity matrix.
+
+ }
+ \item{extra}{
+ an optional list with any extra information that might be needed
+ by the family function.
+
+ }
+% \item{qr.arg}{ logical value indicating whether
+% the slot \code{qr}, which returns the QR decomposition of the
+% VLM model matrix, is returned on the object.
+% }
+ \item{smart}{
+ logical value indicating whether smart prediction
+ (\code{\link{smartpred}}) will be used.
+
+ }
+ \item{\dots}{
+ further arguments passed into \code{\link{qrrvglm.control}}.
+
+ }
+}
+\details{
+ QRR-VGLMs or \emph{constrained quadratic ordination} (CQO) models
+ are estimated here by maximum likelihood estimation. Optimal linear
+ combinations of the environmental variables are computed, called
+ \emph{latent variables} (these appear as \code{lv} for \eqn{R=1}
+ else \code{lv1}, \code{lv2}, etc. in the output). Here, \eqn{R}
+ is the \emph{rank} or the number of ordination axes. Each species'
+ response is then a regression of these latent variables using quadratic
+ polynomials on a transformed scale (e.g., log for Poisson counts, logit
+ for presence/absence responses). The solution is obtained iteratively
+ in order to maximize the log-likelihood function, or equivalently,
+ minimize the deviance.
+
+ The central formula (for Poisson and binomial species data) is
+ given by
+ \deqn{\eta = B_1^T x_1 + A \nu +
+ \sum_{m=1}^M (\nu^T D_m \nu) e_m}{%
+ eta = B_1^T x_1 + A nu +
+ sum_{m=1}^M (nu^T D_m nu) e_m}
+ where \eqn{x_1}{x_1} is a vector (usually just a 1 for an intercept),
+ \eqn{x_2}{x_2} is a vector of environmental variables, \eqn{\nu=C^T
+ x_2}{nu=C^T x_2} is an \eqn{R}-vector of latent variables, and
+ \eqn{e_m} is a vector of zeros except for a one in the \eqn{m}th
+ position.
+ Here, \eqn{\eta}{eta} is a vector of linear/additive predictors,
+ e.g., the \eqn{m}th element is \eqn{\eta_m = \log(E[Y_m])}{eta_m =
+ log(E[Y_m])} for the \eqn{m}th species. The matrices \eqn{B_1},
+ \eqn{A}, \eqn{C} and \eqn{D_m} are estimated from the data, i.e.,
+ they contain the regression coefficients. The tolerance matrices
+ satisfy \eqn{T_s = -\frac12 D_s^{-1}}{T_s = -0.5 D_s^(-1)}.
+ Many important CQO details are directly related to arguments
+ in \code{\link{qrrvglm.control}}, e.g., the argument \code{Norrr}
+ specifies which variables comprise \eqn{x_1}{x_1}.
+
+ Theoretically, the four most popular \pkg{VGAM} family functions
+ to be used with \code{cqo} correspond to the Poisson, binomial,
+ normal, and negative binomial distributions. The latter is a
+ 2-parameter model. All of these are implemented, as well as the
+ 2-parameter gamma. The Poisson is or should be catered for by
+ \code{\link{quasipoissonff}} and \code{\link{poissonff}}, and the
+ binomial by \code{\link{quasibinomialff}} and \code{\link{binomialff}}.
+ Those beginning with \code{"quasi"} have dispersion parameters that
+ are estimated for each species.
+
+ %the negative binomial by \code{\link{negbinomial}}, and the normal by
+ %\code{gaussianff}.
+
+ %For overdispersed Poisson data, using \code{\link{quasipoissonff}} is
+ %strongly recommended over \code{\link{negbinomial}}; the latter is
+ %\emph{very} sensitive to departures from the model assumptions.
+
+ For initial values, the function \code{.Init.Poisson.QO} should
+ work reasonably well if the data is Poisson with species having equal
+ tolerances. It can be quite good on binary data too. Otherwise the
+ \code{Cinit} argument in \code{\link{qrrvglm.control}} can be used.
+
+ %(and negative binomial)
+
+ It is possible to relax the quadratic form to an additive model. The
+ result is a data-driven approach rather than a model-driven approach,
+ so that CQO is extended to \emph{constrained additive ordination}
+ (CAO) when \eqn{R=1}. See \code{\link{cao}} for more details.
+
+}
+\value{
+ An object of class \code{"qrrvglm"}.
+ Note that the slot \code{misc} has a list component called
+ \code{deviance.Bestof} which gives the history of deviances over all
+ the iterations.
+
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+ter Braak, C. J. F. and Prentice, I. C. (1988)
+A theory of gradient analysis.
+\emph{Advances in Ecological Research},
+\bold{18}, 271--317.
+
+%Yee, T. W. (2005)
+%On constrained and unconstrained
+%quadratic ordination.
+%\emph{Manuscript in preparation}.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{Thomas W. Yee}
+
+\note{
+ By default, a rank-1 equal-tolerances QRR-VGLM model is fitted
+ (see \code{\link{qrrvglm.control}} for the default control
+ parameters).
+ The latent variables are always transformed so that they
+ are uncorrelated.
+ By default, the argument \code{trace} is \code{TRUE} meaning a running
+ log is printed out while the computations are taking place. This is
+ because the algorithm is computationally expensive, therefore users
+ might think that their computers have frozen if \code{trace=FALSE}!
+
+ The argument \code{Bestof} in \code{\link{qrrvglm.control}} controls
+ the number of models fitted (each uses different starting values) to
+ the data. This argument is important because convergence may be to a
+ \emph{local} solution rather than the \emph{global} solution. Using
+ more starting values increases the chances of finding the global
+ solution. Always plot an ordination diagram (use the generic function
+ \code{\link{lvplot}}) and see if it looks sensible. Local solutions
+ arise because the optimization problem is highly nonlinear, and this is
+ particularly true for CAO.
+
+ %Convergence of QRR-VGLMs can be difficult, especially for binary
+ %data. If this is so, then setting \code{ITolerances=TRUE} or
+ %\code{EqualTolerances=TRUE} may help, especially when the number of sites,
+ %\eqn{n}, is small.
+
+ %If the negative binomial family function \code{\link{negbinomial}} is
+ %used for \code{cqo} then set \code{negbinomial(deviance=TRUE)}
+ %is necessary. This means to minimize the deviance, which the fast
+ %algorithm can handle.
+
+ Many of the arguments applicable to \code{cqo} are common to
+ \code{\link{vglm}} and \code{\link{rrvglm.control}}.
+ The most important arguments are
+ \code{Rank},
+ \code{Norrr},
+ \code{Bestof},
+ \code{ITolerances},
+ \code{EqualTolerances},
+ \code{isdlv}, and
+ \code{MUXfactor}.
+
+ When fitting a 2-parameter model such as the negative binomial
+ or gamma, it pays to set \code{EqualTolerances=TRUE} and
+ \code{ITolerances=FALSE}. This is because numerical problems can
+ occur when fitting the model far away from the global solution when
+ \code{ITolerances=TRUE}. Setting the two arguments as described will
+ slow down the computation considerably, however it is numerically
+ more stable.
+
+ In Example 1 below, an unequal-tolerances rank-1 QRR-VGLM is fitted to the
+ hunting spiders dataset.
+ In Example 2 below, an equal-tolerances rank-2 QRR-VGLM is fitted to the
+ hunting spiders dataset.
+ The numerical difficulties encountered in fitting the rank-2 model
+ suggest that a rank-1 model is probably preferable.
+ In Example 3 below, constrained binary quadratic ordination (in old
+ nomenclature, constrained Gaussian logit ordination) is fitted to some
+ simulated data coming from a species packing model.
+ With multivariate binary responses, one must use \code{mv=TRUE} to
+ indicate that the response (matrix) is multivariate. Otherwise, it is
+ interpreted as a single binary response variable.
+
+ Sometime in the future, this function might handle input of the form
+ \code{cqo(x, y)}, where \code{x} and \code{y} are matrices containing
+ the environmental and species data respectively.
+
+}
+\section{Warning }{
+ Local solutions are not uncommon when fitting CQO models. To increase
+ the chances of obtaining the global solution, increase the value
+ of the argument \code{Bestof} in \code{\link{qrrvglm.control}}.
+ For reproducibility of the results, it pays to set a different
+ random number seed before calling \code{cqo} (the function
+ \code{\link[base:Random]{set.seed}} does this). The function \code{cqo}
+ chooses initial values for \bold{C} using \code{.Init.Poisson.QO()}
+ if \code{Use.Init.Poisson.QO=TRUE}, else random numbers.
+
+ Unless \code{ITolerances=TRUE} or \code{EqualTolerances=FALSE},
+ CQO is computationally expensive. It pays to keep the rank down to 1
+ or 2. If \code{EqualTolerances=TRUE} and \code{ITolerances=FALSE} then
+ the cost grows quickly with the number of species and sites (in terms of
+ memory requirements and time). The data needs to conform quite closely
+ to the statistical model, and the environmental range of the data should
+ be wide in order for the quadratics to fit the data well (bell-shaped
+ response surfaces). If not, RR-VGLMs will be more appropriate because
+ the response is linear on the transformed scale (e.g., log or logit)
+ and the ordination is called \emph{constrained linear ordination} or CLO.
+
+ Like many regression models, CQO is sensitive to outliers (in the
+ environmental and species data), sparse data, high leverage points,
+ multicollinearity etc. For these reasons, it is necessary to examine
+ the data carefully for these features and take corrective action
+ (e.g., omitting certain species, sites, environmental variables from
+ the analysis, transforming certain environmental variables, etc.).
+ Any optimum lying outside the convex hull of the site scores should not
+ be trusted. It is recommended to fit a CAO first; then, after any
+ transformations etc., a CQO can possibly be fitted.
+
+ For binary data, it is necessary to have `enough' data. In general,
+ the number of sites \eqn{n} ought to be much larger than the number of
+ species \eqn{S}, e.g., at least 100 sites for two species. Compared
+ to count (Poisson) data, numerical problems occur more frequently
+ with presence/absence (binary) data. For example, if \code{Rank=1}
+ and if the response data for each species is a string of all absences,
+ then all presences, then all absences (when enumerated along the latent
+ variable), then infinite parameter estimates will occur. In general,
+ setting \code{ITolerances=TRUE} may help.
+
+ This function was formerly called \code{cgo}. It has been renamed to
+ reinforce a new nomenclature described in Yee (2006).
+
+}
+
+\seealso{
+ \code{\link{qrrvglm.control}},
+ \code{\link{Coef.qrrvglm}},
+ \code{\link{rcqo}},
+ \code{\link{cao}},
+ \code{\link{uqo}},
+ \code{\link{rrvglm}},
+% \code{\link{rrvglm.control}},
+ \code{\link{poissonff}},
+ \code{\link{binomialff}},
+ \code{\link{negbinomial}},
+ \code{\link{gamma2}},
+ \code{\link{lvplot.qrrvglm}},
+ \code{\link{persp.qrrvglm}},
+ \code{\link{trplot.qrrvglm}},
+% \code{\link{vcovqrrvglm}},
+ \code{\link{vglm}},
+ \code{\link[base:Random]{set.seed}},
+ \code{\link{hspider}}.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\examples{
+# Example 1; Fit an unequal tolerances model to the hunting spiders data
+data(hspider)
+hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=FALSE)
+sort(p1@misc$deviance.Bestof) # A history of all the iterations
+if(deviance(p1) > 1177) stop("suboptimal fit obtained")
+
+\dontrun{
+S = ncol(p1@y) # Number of species
+clr = (1:(S+1))[-7] # omits yellow
+lvplot(p1, y=TRUE, lcol=clr, pch=1:S, pcol=clr, las=1) # ordination diagram
+legend("topright", leg=dimnames(p1@y)[[2]], col=clr,
+ pch=1:S, merge=TRUE, bty="n", lty=1:S, lwd=2)
+}
+(cp = Coef(p1))
+
+(a = cp@lv[cp@lvOrder]) # The ordered site scores along the gradient
+# Names of the ordered sites along the gradient:
+rownames(cp@lv)[cp@lvOrder]
+(a = (cp@Optimum)[,cp@OptimumOrder]) # The ordered optima along the gradient
+a = a[!is.na(a)] # Delete the species that is not unimodal
+names(a) # Names of the ordered optima along the gradient
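+
+# A sketch (not in the original example; slot names assumed from the
+# "Coef.qrrvglm" class): the estimated A, C and tolerance matrices of
+# the QRR-VGLM formula in the details section can be inspected directly
+cp@A # Estimated A matrix
+cp@C # Estimated C matrix (canonical coefficients)
+cp@Tolerance # Estimated tolerance matrices, one per species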
+
+\dontrun{
+trplot(p1, whichSpecies=1:3, log="xy", type="b", lty=1, lwd=2,
+ col=c("blue","red","green"), label=TRUE) -> ii # trajectory plot
+legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
+ lwd=2, lty=1, col=c("blue","red","green"))
+abline(a=0, b=1, lty="dashed")
+
+S = ncol(p1@y) # Number of species
+clr = (1:(S+1))[-7] # omits yellow
+persp(p1, col=clr, label=TRUE, las=1) # perspective plot
+}
+
+# Example 2: A rank-2 equal tolerances CQO model with Poisson data
+# This example is numerically fraught.
+set.seed(555)
+p2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam=poissonff, data=hspider, Crow1positive=FALSE,
+# ITol=FALSE, EqualTol=TRUE,
+ Rank=2, Bestof=1, isdlv=c(2.1,0.9))
+sort(p2@misc$deviance.Bestof) # A history of all the iterations
+if(deviance(p2) > 1127) stop("suboptimal fit obtained")
+\dontrun{
+lvplot(p2, ellips=FALSE, label=TRUE, xlim=c(-3,4),
+ C=TRUE, Ccol="brown", sites=TRUE, scol="grey",
+ pcol="blue", pch="+", chull=TRUE, ccol="grey")
+}
+
+
+# Example 3: species packing model with presence/absence data
+n = 200; p = 5; S = 5
+mydata = rcqo(n, p, S, fam="binomial", hiabundance=4,
+ EqualTol=TRUE, ESOpt=TRUE, EqualMax=TRUE)
+myform = attr(mydata, "formula")
+b1 = cqo(myform, fam=binomialff(mv=TRUE, link="cloglog"), data=mydata)
+sort(b1@misc$deviance.Bestof) # A history of all the iterations
+\dontrun{
+lvplot(b1, y=TRUE, lcol=1:S, pch=1:S, pcol=1:S, las=1)
+}
+Coef(b1)
+
+# Compare the fitted model with the 'truth'
+cbind(truth=attr(mydata, "ccoefficients"), fitted=ccoef(b1))
+}
+\keyword{models}
+\keyword{regression}
+
+%legend("topright", x=1, y=135, leg=dimnames(p1@y)[[2]], col=clr,
+% pch=1:S, merge=TRUE, bty="n", lty=1:S, lwd=2)
diff --git a/man/cratio.Rd b/man/cratio.Rd
new file mode 100644
index 0000000..8fc3a7a
--- /dev/null
+++ b/man/cratio.Rd
@@ -0,0 +1,124 @@
+\name{cratio}
+\alias{cratio}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Ordinal Regression with Continuation Ratios }
+\description{
+ Fits a continuation ratio logit/probit/cloglog/cauchit/...
+ regression model to an ordered (preferably) factor response.
+}
+\usage{
+cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, the response \eqn{Y} is assumed to be a factor
+ with ordered values \eqn{1,2,\dots,M+1}, so that
+ \eqn{M} is the number of linear/additive predictors
+ \eqn{\eta_j}{eta_j}.
+
+ \item{link}{
+ Link function applied to the \eqn{M} continuation ratio probabilities.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{parallel}{
+ A logical, or formula specifying which terms have
+ equal/unequal coefficients.
+
+ }
+ \item{reverse}{
+ Logical.
+ By default, the continuation ratios used are
+ \eqn{\eta_j = logit(P[Y>j|Y \geq j])}{eta_j = logit(P[Y>j|Y>=j])} for
+ \eqn{j=1,\dots,M}.
+ If \code{reverse} is \code{TRUE}, then
+ \eqn{\eta_j = logit(P[Y<j+1|Y\leq j+1])}{eta_j=logit(P[Y<j+1|Y<=j+1])}
+ will be used.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+ The default value means none are modelled as intercept-only terms.
+
+ }
+}
+\details{
+ There are a number of definitions for the \emph{continuation ratio}
+ in the literature. To make life easier, in the \pkg{VGAM} package,
+ we use \emph{continuation} ratios and \emph{stopping} ratios
+ (see \code{\link{sratio}}).
+ Stopping ratios deal with quantities such as
+ \code{logit(P[Y=j|Y>=j])}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+Simonoff, J. S. (2003)
+\emph{Analyzing Categorical Data},
+New York: Springer-Verlag.
+
+McCullagh, P. and Nelder, J. A. (1989)
+\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be either a matrix of counts (with row sums that
+ are all positive), or a factor. In both cases, the \code{y} slot
+ returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
+ of counts.
+
+ For a nominal (unordered) factor response, the multinomial
+ logit model (\code{\link{multinomial}}) is more appropriate.
+
+ Here is an example of the usage of the \code{parallel} argument.
+ If there are covariates \code{x1}, \code{x2} and \code{x3}, then
+ \code{parallel = TRUE ~ x1 + x2 -1} and
+ \code{parallel = FALSE ~ x3} are equivalent. This would constrain
+ the regression coefficients for \code{x1} and \code{x2} to be
+ equal; those of the intercepts and \code{x3} would be different.
+
+
+}
+\section{Warning }{
+ No check is made to verify that the response is ordinal.
+}
+
+\seealso{
+ \code{\link{sratio}},
+ \code{\link{acat}},
+ \code{\link{cumulative}},
+ \code{\link{multinomial}},
+ \code{\link{pneumo}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}}.
+}
+
+\examples{
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+(fit = vglm(cbind(normal,mild,severe) ~ let, cratio(parallel=TRUE), pneumo))
+coef(fit, matrix=TRUE)
+constraints(fit)
+predict(fit)
+predict(fit, untransform=TRUE)
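+
+# A sketch (not in the original example): with the default reverse=FALSE,
+# the two continuation ratios are P[Y>1|Y>=1] and P[Y>2|Y>=2]; they can be
+# reconstructed from the fitted category probabilities and should agree
+# with predict(fit, untransform=TRUE) above
+pfit = fitted(fit)
+cbind(1 - pfit[,1], pfit[,3] / (pfit[,2] + pfit[,3]))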
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
new file mode 100644
index 0000000..2d75b27
--- /dev/null
+++ b/man/cumulative.Rd
@@ -0,0 +1,205 @@
+\name{cumulative}
+\alias{cumulative}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Ordinal Regression with Cumulative Probabilities }
+\description{
+ Fits a cumulative logit/probit/cloglog/cauchit/...
+ regression model to an ordered (preferably) factor response.
+
+}
+\usage{
+cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
+ earg = list(), mv = FALSE, intercept.apply = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, the response \eqn{Y} is assumed to be a factor
+ with ordered values \eqn{1,2,\dots,M+1}, so that
+ \eqn{M} is the number of linear/additive predictors
+ \eqn{\eta_j}{eta_j}.
+
+ \item{link}{
+ Link function applied to the \eqn{M} cumulative probabilities.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{parallel}{
+ A logical, or formula specifying which terms have
+ equal/unequal coefficients.
+
+ }
+ \item{reverse}{
+ Logical.
+ By default, the cumulative probabilities used are
+ \eqn{P(Y\leq 1)}{P(Y<=1)}, \eqn{P(Y\leq 2)}{P(Y<=2)},
+ \dots, \eqn{P(Y\leq M)}{P(Y<=M)}.
+ If \code{reverse} is \code{TRUE}, then
+ \eqn{P(Y\geq 2)}{P(Y>=2)}, \eqn{P(Y\geq 3)}{P(Y>=3)}, \dots,
+ \eqn{P(Y\geq M+1)}{P(Y>=M+1)} will be used.
+
+ This should be set to \code{TRUE} for \code{link=}
+ \code{\link{golf}},
+ \code{\link{polf}},
+ \code{\link{nbolf}}.
+ For these links the cutpoints must be an increasing sequence;
+ if \code{reverse=FALSE} then the cutpoints must be a decreasing sequence.
+
+ }
+ \item{earg}{
+ List. Extra argument for the link function.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{mv}{
+ Logical.
+ Multivariate response? If \code{TRUE} then the input should be
+ a matrix with values \eqn{1,2,\dots,L}, where \eqn{L} is the
+ number of levels.
+ Each column of the matrix is a response, i.e., a multivariate response.
+ A suitable matrix can be obtained from \code{Cut}.
+
+ }
+ \item{intercept.apply}{
+ Logical.
+ Whether the \code{parallel} argument should be applied to the intercept term.
+ This should be set to \code{TRUE} for \code{link=}
+ \code{\link{golf}},
+ \code{\link{polf}},
+ \code{\link{nbolf}}.
+
+ }
+}
+\details{
+ By default, the non-parallel cumulative logit model is fitted, i.e.,
+ \deqn{\eta_j = logit(P[Y \leq j])}{%
+ eta_j = logit(P[Y<=j])}
+ where \eqn{j=1,2,\dots,M} and
+ the \eqn{\eta_j}{eta_j} are not constrained to be parallel.
+ This is also known as the \emph{non-proportional odds model}.
+ If the logit link is replaced by a complementary log-log link
+ (\code{\link{cloglog}}) then
+ this is known as the \emph{proportional-hazards model}.
+
+ In almost all the literature, the constraint matrices associated
+ with this family of models are known. For example, setting
+ \code{parallel=TRUE} will make all constraint matrices (except for
+ the intercept) equal to a vector of \eqn{M} 1's.
+ If the constraint matrices are equal, unknown and to be estimated, then
+ this can be achieved by fitting the model as a
+ reduced-rank vector generalized
+ linear model (RR-VGLM; see \code{\link{rrvglm}}).
+ Currently, reduced-rank vector generalized additive models
+ (RR-VGAMs) have not been implemented here.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+Dobson, A. J. (2001)
+\emph{An Introduction to Generalized Linear Models},
+2nd ed. Boca Raton: Chapman & Hall/CRC Press.
+
+McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+Simonoff, J. S. (2003)
+\emph{Analyzing Categorical Data},
+New York: Springer-Verlag.
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be either a matrix of counts (with row sums that
+ are all positive), or a factor. In both cases, the \code{y} slot
+ returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
+ of counts.
+
+ For a nominal (unordered) factor response, the multinomial
+ logit model (\code{\link{multinomial}}) is more appropriate.
+
+ With the logit link, setting \code{parallel=TRUE} will fit a
+ proportional odds model. Note that the \code{TRUE} here does
+ not apply to the intercept term.
+ In practice, the validity of the proportional odds
+ assumption needs to be checked, e.g., by a likelihood ratio test.
+ If acceptable on the data,
+ then numerical problems are less likely to occur during the fitting,
+ and there are fewer parameters. Numerical problems occur when
+ the linear/additive predictors cross, which results in probabilities
+ outside of (0,1); setting \code{parallel=TRUE} will help avoid
+ this problem.
+
+ Here is an example of the usage of the \code{parallel} argument.
+ If there are covariates \code{x1}, \code{x2} and \code{x3}, then
+ \code{parallel = TRUE ~ x1 + x2 -1} and
+ \code{parallel = FALSE ~ x3} are equivalent. This would constrain
+ the regression coefficients for \code{x1} and \code{x2} to be
+ equal; those of the intercepts and \code{x3} would be different.
+
+ In the future, this family function may be renamed to
+ ``\code{cups}'' (for \bold{cu}mulative \bold{p}robabilitie\bold{s})
+ or ``\code{cute}'' (for \bold{cu}mulative probabili\bold{t}i\bold{e}s).
+
+% Please let me know if you strongly agree or disagree about this.
+
+}
+\section{Warning }{
+ No check is made to verify that the response is ordinal.
+
+}
+
+\seealso{
+ \code{\link{acat}},
+ \code{\link{cratio}},
+ \code{\link{sratio}},
+ \code{\link{multinomial}},
+ \code{\link{pneumo}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}},
+ \code{\link{golf}},
+ \code{\link{polf}},
+ \code{\link{nbolf}}.
+
+}
+\examples{
+# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+(fit = vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo))
+fit@y # Sample proportions
+weights(fit, type="prior") # Number of observations
+coef(fit, matrix=TRUE)
+constraints(fit) # Constraint matrices
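+
+# A sketch (not in the original example, assuming all-logit links): with
+# reverse=TRUE the linear predictors are logit P(Y>=2) and logit P(Y>=3),
+# so they can be rebuilt from the fitted category probabilities
+pfit = fitted(fit)
+logit(cbind(pfit[,2] + pfit[,3], pfit[,3]))[1:3,]
+predict(fit)[1:3,] # Should agree with the above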
+
+# Check that the model is linear in let
+fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df=2),
+ cumulative(reverse=TRUE), pneumo)
+\dontrun{
+plot(fit2, se=TRUE, overlay=TRUE, lcol=1:2, scol=1:2)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+% pneumo$let = log(pneumo$exposure.time)
diff --git a/man/dagum.Rd b/man/dagum.Rd
new file mode 100644
index 0000000..6113cd1
--- /dev/null
+++ b/man/dagum.Rd
@@ -0,0 +1,109 @@
+\name{dagum}
+\alias{dagum}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Dagum Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 3-parameter
+ Dagum distribution.
+}
+\usage{
+dagum(link.a = "loge", link.scale = "loge", link.p = "loge",
+ init.a = NULL, init.scale = NULL, init.p = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.a, link.scale, link.p}{
+ Parameter link functions applied to the
+ (positive) parameters \code{a}, \code{scale}, and \code{p}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.a, init.scale, init.p}{
+ Optional initial values for \code{a}, \code{scale}, and \code{p}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2,3\} which correspond to
+ \code{a}, \code{scale}, \code{p}, respectively.
+
+ }
+}
+\details{
+ The 3-parameter Dagum distribution is the 4-parameter
+ generalized beta II distribution with shape parameter \eqn{q=1}.
+It is known under various other names, such as the Burr III, inverse Burr,
+beta-K, and 3-parameter kappa distribution.
+It can be considered a generalized log-logistic distribution.
+Some distributions which are special cases of the 3-parameter Dagum
+are the inverse Lomax (\eqn{a=1}), Fisk (\eqn{p=1}),
+and the inverse paralogistic (\eqn{a=p}).
+ More details can be found in Kleiber and Kotz (2003).
+
+
+The Dagum distribution has a cumulative distribution function
+ \deqn{F(y) = [1 + (y/b)^{-a}]^{-p}}{%
+ F(y) = [1 + (y/b)^(-a)]^(-p)}
+which leads to a probability density function
+ \deqn{f(y) = ap y^{ap-1} / [b^{ap} \{1 + (y/b)^a\}^{p+1}]}{%
+ f(y) = ap y^(ap-1) / [b^(ap) (1 + (y/b)^a)^(p+1)]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and the others are shape parameters.
+The mean is
+ \deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(1 - 1/a) / \Gamma(p)}{%
+ E(Y) = b gamma(p + 1/a) gamma(1 - 1/a) / gamma(p)}
+provided \eqn{-ap < 1 < a}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+From Kleiber and Kotz (2003), the MLE is rather sensitive to isolated
+observations located sufficiently far from the majority of the data.
+Reliable estimation of the scale parameter requires \eqn{n>7000},
+while estimates for \eqn{a} and \eqn{p} can be considered unbiased for
+\eqn{n>2000} or 3000.
+
+}
+
+\seealso{
+ \code{\link{Dagum}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{sinmad}},
+ \code{\link{fisk}},
+ \code{\link{invlomax}},
+ \code{\link{lomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rdagum(n=3000, 4, 6, 2)
+fit = vglm(y ~ 1, dagum, trace=TRUE)
+fit = vglm(y ~ 1, dagum(init.a=2.1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
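+
+# A small check (a sketch, not in the original example): compare the
+# sample mean with the theoretical mean from the details section,
+# E(Y) = b gamma(p + 1/a) gamma(1 - 1/a) / gamma(p), with a=4, b=6, p=2
+a = 4; b = 6; p = 2
+c(theoretical = b * gamma(p + 1/a) * gamma(1 - 1/a) / gamma(p),
+ sample = mean(y))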
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/dcnormal1.Rd b/man/dcnormal1.Rd
new file mode 100644
index 0000000..83ac6bd
--- /dev/null
+++ b/man/dcnormal1.Rd
@@ -0,0 +1,98 @@
+\name{dcnormal1}
+\alias{dcnormal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Univariate Normal Distribution with Double Censoring }
+\description{
+ Maximum likelihood estimation of the two parameters of a univariate
+ normal distribution when there is double censoring.
+
+}
+\usage{
+dcnormal1(r1 = 0, r2 = 0, link.sd = "loge", isd = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{r1, r2}{
+ Integers. Number of smallest and largest values censored, respectively.
+
+ }
+ \item{link.sd}{
+ Parameter link function applied to the standard deviation.
+ See \code{\link{Links}} for more choices.
+ Being a positive quantity, a log link is the default.
+
+ }
+ \item{isd}{
+ Numeric. Initial value for the standard deviation.
+ The default value \code{NULL} means an initial value is
+ obtained internally from the data.
+
+ }
+ \item{zero}{
+ An integer with value 1 or 2. If given, the mean or the standard
+ deviation, respectively, is modelled as an intercept only. Usually, setting
+ \code{zero=2} will be used, if used at all. The default value
+ \code{NULL} means both linear/additive predictors are modelled as
+ functions of the explanatory variables.
+
+ }
+}
+\details{
+ This family function uses the Fisher information matrix given in Harter
+ and Moore (1966). The matrix is not diagonal if either \code{r1}
+ or \code{r2} are positive.
+
+ By default, the mean is the first linear/additive predictor and the
+ log of the standard deviation is the second linear/additive predictor.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Harter, H. L. and Moore, A. H. (1966) Iterative maximum-likelihood
+ estimation of the parameters of normal populations from singly and
+ doubly censored samples. \emph{Biometrika}, \bold{53}, 205--213.
+}
+\author{ T. W. Yee }
+\note{
+ This family function only handles a vector or one-column matrix
+ response. The \code{weights} argument, if used, is interpreted as
+ frequencies; therefore it must be a vector with positive integer values.
+
+ With no censoring at all (the default), it is better (and equivalent)
+ to use \code{\link{normal1}}.
+
+}
+
+\seealso{
+\code{\link{normal1}},
+\code{\link{tobit}}.
+}
+\examples{\dontrun{# Repeat the simulations described in Harter and Moore (1966)
+SIMS = 100 # Number of simulations (change this to 1000)
+mu.save = sd.save = rep(NA, len=SIMS)
+r1 = 0; r2 = 4; n = 20
+for(sim in 1:SIMS) {
+ y = sort(rnorm(n))
+ y = y[(1+r1):(n-r2)] # Delete r1 smallest and r2 largest
+ fit = vglm(y ~ 1, dcnormal1(r1=r1, r2=r2))
+ mu.save[sim] = predict(fit)[1,1]
+ sd.save[sim] = exp(predict(fit)[1,2]) # Assumes a log link and ~ 1
+}
+# Now look at the results
+c(mean(mu.save), mean(sd.save)) # Should be c(0,1)
+c(sd(mu.save), sd(sd.save))
+}
+
+# Data from Sarhan and Greenberg (1962); MLEs are mu=9.2606, sd=1.3754
+strontium90 = c(8.2, 8.4, 9.1, 9.8, 9.9)
+fit = vglm(strontium90 ~ 1, dcnormal1(r1=2, r2=3, isd=6), trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/deplot.lmscreg.Rd b/man/deplot.lmscreg.Rd
new file mode 100644
index 0000000..2e00748
--- /dev/null
+++ b/man/deplot.lmscreg.Rd
@@ -0,0 +1,89 @@
+\name{deplot.lmscreg}
+\alias{deplot.lmscreg}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Density Plot for LMS Quantile Regression }
+\description{
+ Plots a probability density function
+ associated with a LMS quantile regression.
+}
+\usage{
+deplot.lmscreg(object, newdata = NULL,
+ x0, y.arg, plot.it = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A \pkg{VGAM} quantile regression model, i.e.,
+ an object produced by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}} with a family function beginning with
+ \code{"lms."}, e.g., \code{\link{lms.yjn}}.
+ }
+ \item{newdata}{ Optional data frame containing secondary variables such
+ as sex. It should have a maximum of one row.
+ The default is to use the original data.
+ }
+ \item{x0}{ Numeric. The value of the primary variable at which to
+ make the `slice'.
+ }
+ \item{y.arg}{ Numerical vector. The values of the response variable
+ at which to evaluate the density. This should be a grid that is fine
+ enough to ensure the plotted curves are smooth. }
+ \item{plot.it}{ Logical. Plot it? If \code{FALSE} no plot will
+ be done. }
+ \item{\dots}{ Graphical parameters that are passed into
+ \code{\link{plotdeplot.lmscreg}}.
+ }
+}
+\details{
+ This function calls, e.g., \code{deplot.lms.yjn} in order to compute
+ the density function.
+}
+\value{
+ The original \code{object} but with a list
+ placed in the slot \code{post}, called
+ \code{@post$deplot}. The list has components
+ \item{newdata }{ The argument \code{newdata} above, or a one-row
+ data frame constructed out of the \code{x0} argument. }
+ \item{y}{ The argument \code{y.arg} above. }
+ \item{density}{ Vector of the density function values evaluated at \code{y.arg}. }
+}
+\references{
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ \code{\link{plotdeplot.lmscreg}} actually does the plotting.
+}
+\seealso{
+\code{\link{plotdeplot.lmscreg}},
+\code{\link{qtplot.lmscreg}},
+\code{\link{lms.bcn}},
+\code{\link{lms.bcg}},
+\code{\link{lms.yjn}}.
+}
+
+\examples{\dontrun{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+ygrid = seq(15, 43, by=0.25)
+deplot(fit, x0=20, y=ygrid, xlab="BMI", col="green", llwd=2,
+ main="BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
+deplot(fit, x0=40, y=ygrid, add=TRUE, col="blue", llwd=2)
+deplot(fit, x0=60, y=ygrid, add=TRUE, col="red", llwd=2) -> a
+
+names(a@post$deplot)
+a@post$deplot$newdata
+a@post$deplot$y[1:5]
+a@post$deplot$density[1:5]
+}
+}
+\keyword{hplot}
+\keyword{models}
+\keyword{regression}
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
new file mode 100644
index 0000000..e3d7cc0
--- /dev/null
+++ b/man/dirichlet.Rd
@@ -0,0 +1,119 @@
+\name{dirichlet}
+\alias{dirichlet}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Fitting a Dirichlet Distribution }
+\description{
+ Fits a Dirichlet distribution to a matrix of compositions.
+
+}
+\usage{
+dirichlet(link = "loge", zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, the response is assumed to be an \eqn{M}-column
+ matrix with positive values and whose rows each sum to unity. Such
+ data can be thought of as compositional data.
+ There are \eqn{M} linear/additive predictors \eqn{\eta_j}{eta_j}.
+
+ \item{link}{
+ Link function applied to each of the \eqn{M} (positive) shape
+ parameters \eqn{\alpha_j}{alpha_j}.
+ See \code{\link{Links}} for more choices.
+ The default gives \eqn{\eta_j=\log(\alpha_j)}{eta_j=log(alpha_j)}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The default is none of them.
+ If used, choose values from the set \eqn{\{1,2,\ldots,M\}}.
+
+ }
+}
+\details{
+ The Dirichlet distribution is commonly used to model compositional
+ data, including applications in genetics.
+ Suppose \eqn{(Y_1,\ldots,Y_{M})^T}{(Y_1,\ldots,Y_M)^T} is
+ the response. Then it has a Dirichlet distribution if
+ \eqn{(Y_1,\ldots,Y_{M-1})^T}{(Y_1,\ldots,Y_{M-1})^T} has density
+ \deqn{\frac{\Gamma(\alpha_{+})}
+ {\prod_{j=1}^{M} \Gamma(\alpha_{j})}
+ \prod_{j=1}^{M} y_j^{\alpha_{j} -1}}{%
+ (Gamma(alpha_+) / prod_{j=1}^M gamma(alpha_j))
+ prod_{j=1}^M y_j^(alpha_j -1)}
+ where \eqn{\alpha_+=\alpha_1+\cdots+\alpha_M}{alpha_+= alpha_1 + \dots + alpha_M},
+ \eqn{\alpha_j > 0}{alpha_j > 0},
+ and the density is defined on the unit simplex
+ \deqn{\Delta_{M} = \left\{
+ (y_1,\ldots,y_{M})^T :
+ y_1 > 0, \ldots, y_{M} > 0,
+ \sum_{j=1}^{M} y_j = 1 \right\}. }{%
+ Delta_M = {
+ (y_1,\ldots,y_M)^T :
+ y_1 > 0, \dots, y_M > 0,
+ \sum_{j=1}^M y_j = 1 }.
+ }
+ One has \eqn{E(Y_j) = \alpha_j / \alpha_{+}}{E(Y_j) = alpha_j /
+ alpha_{+}}, which are returned as the fitted values.
+ For this distribution Fisher scoring corresponds to Newton-Raphson.
+
+ The Dirichlet distribution can be motivated by considering the random variables
+ \eqn{(G_1,\ldots,G_{M})^T}{(G_1,\ldots,G_M)^T} which are each independent
+ and identically distributed as a gamma distribution with density
+ \eqn{f(g_j)=g_j^{\alpha_j - 1} e^{-g_j} / \Gamma(\alpha_j)}{f(g_j)=
+ g_j^(alpha_j - 1) e^(-g_j) / gamma(alpha_j)}.
+ Then the Dirichlet distribution arises when
+ \eqn{Y_j=G_j / (G_1 + \cdots + G_M)}{Y_j = G_j / (G_1 + ... + G_M)}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+ When fitted, the \code{fitted.values} slot of the object contains the
+ \eqn{M}-column matrix of means.
+
+}
+\references{
+Lange, K. (2002)
+\emph{Mathematical and Statistical Methods for Genetic Analysis},
+2nd ed. New York: Springer-Verlag.
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+3rd ed. New York: Wiley-Interscience.
+
+%Documentation accompanying the \pkg{VGAM} package at
+%\url{http://www.stat.auckland.ac.nz/~yee}
+%contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be a matrix of positive values whose rows
+ each sum to unity. Similar to this is count data, where probably a
+ multinomial logit model (\code{\link{multinomial}}) may be appropriate.
+ Another similar distribution to the Dirichlet is the
+ Dirichlet-multinomial (see \code{\link{dirmultinomial}}).
+
+}
+
+\seealso{
+ \code{\link{rdiric}},
+ \code{\link{dirmultinomial}},
+ \code{\link{multinomial}}.
+
+}
+\examples{
+y = rdiric(n=1000, shape=c(3, 1, 4))
+fit = vglm(y ~ 1, dirichlet, trace = TRUE, crit="c")
+Coef(fit)
+coef(fit, matrix=TRUE)
+fitted(fit)[1:2,]
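+
+# A sketch (not in the original example): the fitted values are
+# alpha_j/alpha_+, and the gamma-variate construction in the details
+# section gives another way of generating Dirichlet variates
+alpha = Coef(fit) # Assuming this returns the estimated shape parameters
+alpha / sum(alpha) # Should be close to a row of fitted(fit)
+gmat = matrix(rgamma(3000, shape=rep(c(3, 1, 4), each=1000)), 1000, 3)
+yy = gmat / rowSums(gmat) # Rows are Dirichlet(3, 1, 4) variates
+colMeans(yy) # Should be close to c(3, 1, 4)/8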
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/dirmul.old.Rd b/man/dirmul.old.Rd
new file mode 100644
index 0000000..3859480
--- /dev/null
+++ b/man/dirmul.old.Rd
@@ -0,0 +1,146 @@
+\name{dirmul.old}
+\alias{dirmul.old}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Fitting a Dirichlet-Multinomial Distribution }
+\description{
+ Fits a Dirichlet-multinomial distribution to a matrix of
+ non-negative integers.
+}
+\usage{
+dirmul.old(link = "loge", init.alpha = 0.01, parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to each of the \eqn{M} (positive)
+ shape parameters \eqn{\alpha_j}{alpha_j} for \eqn{j=1,\ldots,M}.
+ See \code{\link{Links}} for more choices.
+ Here, \eqn{M} is the number of columns of the response matrix.
+
+ }
+ \item{init.alpha}{
+ Numeric vector. Initial values for the
+ \code{alpha} vector. Must be positive.
+ Recycled to length \eqn{M}.
+
+ }
+ \item{parallel}{
+ A logical, or formula specifying which terms have equal/unequal
+ coefficients.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The values must be from the set
+ \{1,2,\ldots,\eqn{M}\}.
+
+ }
+}
+% formula is p.49 of Lange 2002.
+\details{
+ The Dirichlet-multinomial distribution, which is somewhat similar to a
+ Dirichlet distribution, has probability function
+ \deqn{P(Y_1=y_1,\ldots,Y_M=y_M) =
+ {2y_{*} \choose {y_1,\ldots,y_M}}
+ \frac{\Gamma(\alpha_{+})}{\Gamma(2y_{*}+\alpha_{+})}
+ \prod_{j=1}^M \frac{\Gamma(y_j+\alpha_{j})}{\Gamma(\alpha_{j})}}{%
+ P(Y_1=y_1,\ldots,Y_M=y_M) = C_{y_1,\ldots,y_M}^{2y_{*}}
+ Gamma(alpha_+) / Gamma( 2y_* + alpha_+)
+ prod_{j=1}^M [ Gamma( y_j+ alpha_j) / Gamma( alpha_j)]}
+ for \eqn{\alpha_j > 0}{alpha_j > 0},
+ \eqn{\alpha_+ = \alpha_1 + \cdots + \alpha_M}{alpha_+ = alpha_1 + \cdots + alpha_M},
+ and \eqn{2y_{*} = y_1 + \cdots + y_M}{2y_* = y_1 + \cdots + y_M}.
+ Here, \eqn{a \choose b}{C_b^a} means ``\eqn{a} choose \eqn{b}'' and
+ refers to combinations (see \code{\link[base]{choose}}).
+The (posterior) mean is
+ \deqn{E(Y_j) = (y_j + \alpha_j) / (2y_{*} + \alpha_{+})}{%
+ E(Y_j) = (y_j + alpha_j) / (2y_{*} + alpha_+)}
+ for \eqn{j=1,\ldots,M}{j=1,\ldots,M}, and these are returned as
+ the fitted values in an \eqn{M}-column matrix.
+
+
+% One situation that arises for the Dirichlet-multinomial distribution
+% is a locus with M codominant alleles. If in a sample of y_* people,
+% allele i appears y_j times, then the maximum likelihood estimate of
+% the ith allele frequency is y_j / (2y_*).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Lange, K. (2002)
+\emph{Mathematical and Statistical Methods for Genetic Analysis},
+2nd ed. New York: Springer-Verlag.
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+3rd ed. New York: Wiley-Interscience.
+
+Paul, S. R., Balasooriya, U. and Banerjee, T. (2005)
+Fisher information matrix of the Dirichlet-multinomial distribution.
+\emph{Biometrical Journal}, \bold{47}, 230--236.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ The response should be a matrix of non-negative values.
+ Convergence seems to slow down if there are zero values.
+ Currently, initial values can be improved upon.
+
+ This function is almost defunct and may be withdrawn soon.
+ Use \code{\link{dirmultinomial}} instead.
+
+}
+
+\seealso{
+ \code{\link{dirmultinomial}},
+ \code{\link{dirichlet}},
+ \code{\link{betabin.ab}},
+ \code{\link{multinomial}}.
+}
+
+\examples{
+# Data from p.50 of Lange (2002)
+alleleCounts = c(2, 84, 59, 41, 53, 131, 2, 0,
+ 0, 50, 137, 78, 54, 51, 0, 0,
+ 0, 80, 128, 26, 55, 95, 0, 0,
+ 0, 16, 40, 8, 68, 14, 7, 1)
+dim(alleleCounts) = c(8, 4)
+alleleCounts = data.frame(t(alleleCounts))
+dimnames(alleleCounts) = list(c("White","Black","Chicano","Asian"),
+ paste("Allele", 5:12, sep=""))
+
+set.seed(123) # @initialize uses random numbers
+fit = vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
+ Allele10,Allele11,Allele12) ~ 1, dirmul.old,
+ trace=TRUE, crit="c", data=alleleCounts)
+
+(sfit = summary(fit))
+vcov(sfit)
+round(eta2theta(coef(fit), fit@misc$link), dig=2) # not preferred
+round(Coef(fit), dig=2) # preferred
+round(t(fitted(fit)), dig=4) # 2nd row of Table 3.5 of Lange (2002)
+coef(fit, matrix=TRUE)
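+
+# A sketch (not in the original example, assuming Coef(fit) returns the
+# estimated alpha vector): reproduce the first row of fitted values from
+# the posterior mean formula in the details section
+alpha = Coef(fit)
+y1 = unlist(alleleCounts[1, ])
+(y1 + alpha) / (sum(y1) + sum(alpha))
+fitted(fit)[1, ] # Should agree with the above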
+
+
+pfit = vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
+ Allele10,Allele11,Allele12) ~ 1,
+ dirmul.old(parallel=TRUE), trace=TRUE,
+ data=alleleCounts)
+round(eta2theta(coef(pfit), pfit@misc$link), dig=2) # not preferred
+round(Coef(pfit), dig=2) # preferred
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
new file mode 100644
index 0000000..6ff1949
--- /dev/null
+++ b/man/dirmultinomial.Rd
@@ -0,0 +1,182 @@
+\name{dirmultinomial}
+\alias{dirmultinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Fitting a Dirichlet-Multinomial Distribution }
+\description{
+ Fits a Dirichlet-multinomial distribution to a matrix response.
+
+}
+\usage{
+dirmultinomial(lphi = "logit", iphi = 0.10, parallel = FALSE, zero = "M")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lphi}{
+ Link function applied to the \eqn{\phi}{phi}
+ parameter, which lies in the open unit interval \eqn{(0,1)}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iphi}{
+ Numeric. Initial value for \eqn{\phi}{phi}.
+ Must be in the open unit interval \eqn{(0,1)}.
+ If a failure to converge occurs try assigning this argument a different
+ value.
+
+ }
+ \item{parallel}{
+ A logical (formula not allowed here) indicating whether the
+ probabilities \eqn{\pi_1,\ldots,\pi_{M-1}}{pi_1,\ldots,pi_{M-1}}
+ are to be equal via equal coefficients.
+ Note \eqn{\pi_M}{pi_M} will generally be different from the
+ other probabilities.
+ Setting \code{parallel=TRUE} will only work if you also set
+ \code{zero=NULL} because of interference between these arguments
+ (with respect to the intercept term).
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \eqn{\{1,2,\ldots,M\}}.
+ If the character \code{"M"} is given then this means the numerical
+ value \eqn{M}, which corresponds to the linear/additive predictor
+ associated with \eqn{\phi}{phi}.
+ Setting \code{zero=NULL} means none of the values from
+ the set \eqn{\{1,2,\ldots,M\}}.
+
+ }
+}
+\details{
+ The Dirichlet-multinomial distribution
+ arises from a multinomial distribution where
+ the probability parameters are not constant but are generated from a
+ multivariate distribution called the Dirichlet distribution.
+ The Dirichlet-multinomial distribution has probability function
+ \deqn{P(Y_1=y_1,\ldots,Y_M=y_M) =
+ {N_{*} \choose {y_1,\ldots,y_M}}
+ \frac{
+ \prod_{j=1}^{M}
+ \prod_{r=1}^{y_{j}}
+ (\pi_j (1-\phi) + (r-1)\phi)}{
+ \prod_{r=1}^{N_{*}}
+ (1-\phi + (r-1)\phi)}}{%
+ P(Y_1=y_1,\ldots,Y_M=y_M) = C_{y_1,\ldots,y_M}^{N_{*}}
+ prod_{j=1}^{M}
+ prod_{r=1}^{y_{j}}
+ (pi_j (1-phi) + (r-1)phi) /
+ prod_{r=1}^{N_{*}}
+ (1-phi + (r-1)phi)}
+ where \eqn{\phi}{phi} is the \emph{over-dispersion} parameter
+ and \eqn{N_{*} = y_1+\cdots+y_M}{N_* = y_1+\cdots+y_M}.
+ Here, \eqn{a \choose b}{C_b^a} means ``\eqn{a} choose \eqn{b}'' and
+ refers to combinations (see \code{\link[base]{choose}}).
+ The above formula applies to each row of the matrix response.
+ In this \pkg{VGAM} family function the first \eqn{M-1} linear/additive
+ predictors correspond to the first \eqn{M-1} probabilities via
+ \deqn{\eta_j = \log(P[Y=j]/ P[Y=M]) = \log(\pi_j/\pi_M)}{%
+ eta_j = log(P[Y=j]/ P[Y=M]) = log(pi_j/pi_M)}
+ where \eqn{\eta_j}{eta_j} is the \eqn{j}th linear/additive predictor
+ (\eqn{\eta_M=0}{eta_M=0} by definition for \eqn{P[Y=M]} but not for
+ \eqn{\phi}{phi})
+ and
+ \eqn{j=1,\ldots,M-1}.
+ The \eqn{M}th linear/additive predictor corresponds to \code{lphi}
+ applied to \eqn{\phi}{phi}.
+
+ Note that \eqn{E(Y_j) = N_* \pi_j}{E(Y_j) = N_* pi_j} but
+ the probabilities (returned as the fitted values)
+ \eqn{\pi_j}{pi_j} are bundled together as an \eqn{M}-column matrix.
+ The quantities \eqn{N_*} are returned as the prior weights.
+
+ The beta-binomial distribution is a special case of
+ the Dirichlet-multinomial distribution when \eqn{M=2};
+ see \code{\link{betabinomial}}. It is easy to show that
+ the first shape parameter of the beta distribution is
+ \eqn{shape1=\pi(1/\phi-1)}{shape1=pi*(1/phi-1)} and the second shape
+ parameter is \eqn{shape2=(1-\pi)(1/\phi-1)}{shape2=(1-pi)*(1/phi-1)}.
+ Also, \eqn{\phi=1/(1+shape1+shape2)}{phi=1/(1+shape1+shape2)}, which
+ is known as the \emph{intra-cluster correlation} coefficient.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+ If the model is an intercept-only model then \code{@misc} (which is a
+ list) has a component called \code{shape} which is a vector with the
+ \eqn{M} values \eqn{\pi_j(1/\phi-1)}{pi_j * (1/phi-1)}.
+
+% zz not sure: These are the shape parameters of the underlying
+% Dirichlet distribution.
+
+}
+\references{
+
+Paul, S. R., Balasooriya, U. and Banerjee, T. (2005)
+Fisher information matrix of the Dirichlet-multinomial distribution.
+\emph{Biometrical Journal}, \bold{47}, 230--236.
+
+}
+\author{ Thomas W. Yee }
+
+\section{Warning }{
+ This \pkg{VGAM} family function is prone to numerical problems,
+ especially when there are covariates.
+
+}
+\note{
+ The response can be a matrix of non-negative integers, or else
+ a matrix of sample proportions and the total number of counts in
+ each row specified using the \code{weights} argument.
+ This dual input option is similar to \code{\link{multinomial}}.
+
+ To fit a `parallel' model with the \eqn{\phi}{phi} parameter
+ being an intercept-only you will need to use the \code{constraints}
+ argument.
+
+ Currently, Fisher scoring is implemented. To compute the expected
+ information matrix a \code{for} loop is used; this may be very slow
+ when the counts are large.
+ Additionally, convergence may be slower than usual due to round-off
+ error when computing the expected information matrices.
+
+}
+
+\seealso{
+ \code{\link{dirmul.old}},
+ \code{\link{betabinomial}},
+ \code{\link{betabin.ab}},
+ \code{\link{dirichlet}},
+ \code{\link{multinomial}}.
+}
+
+\examples{
+n = 10
+M = 5
+y = round(matrix(runif(n*M)*10, n, M)) # Integer counts
+fit = vglm(y ~ 1, dirmultinomial, trace=TRUE)
+fitted(fit)[1:2,]
+fit@y # Sample proportions
+weights(fit, type="prior", matrix=FALSE) # Total counts per row
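+
+# A sketch (not in the original example): for this intercept-only fit,
+# the M values pi_j*(1/phi - 1) described in the value section are
+# available in the misc slot
+fit@misc$shape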
+
+x = runif(n)
+fit = vglm(y ~ x, dirmultinomial, trace=TRUE)
+\dontrun{
+Coef(fit) # This does not work
+}
+coef(fit, matrix=TRUE)
+(sfit = summary(fit))
+vcov(sfit)
+}
+\keyword{models}
+\keyword{regression}
+
+
+% zz \eqn{\alpha_j = P[Y=j] \times (1/\phi - 1)}{alpha_j = P[Y=j] *
+% (1/phi - 1)} are the shape parameters,
+% for \eqn{j=1,\ldots,M}.
+
+% Currently, initial values can be improved upon.
diff --git a/man/dzeta.Rd b/man/dzeta.Rd
new file mode 100644
index 0000000..0bd5835
--- /dev/null
+++ b/man/dzeta.Rd
@@ -0,0 +1,70 @@
+\name{Zeta}
+\alias{Zeta}
+\alias{dzeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{The Zeta Distribution }
+\description{
+ Density for the zeta distribution.
+}
+% zz p is not a good argument name, esp. with qzeta(p, p)
+\usage{
+dzeta(x, p)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{ Numerical vector/matrix at which to evaluate the density. }
+ \item{p}{ The parameter \eqn{p}. This must be greater than 1. }
+}
+\details{
+ The density function of the zeta distribution is given by
+ \deqn{y^{-p} / \zeta(p)}{%
+ y^(-p) / zeta(p)}
+ where \eqn{p>1}, \eqn{y=1,2,\ldots}, and \eqn{\zeta}{zeta} is
+ Riemann's zeta function.
+
+}
+\value{
+ Returns the density evaluated at \code{x}.
+}
+\references{
+
+Johnson, N. L., Kotz, S. and Balakrishnan, N. (1993)
+\emph{Univariate Discrete Distributions},
+2nd ed.
+New York: Wiley.
+
+% Lindsey, J. K. (2002zz)
+% \emph{Applied Statistical Modelling}, 2nd ed.
+% London: Chapman & Hall.zz
+
+% Knight, K. (2002zz)
+% Theory book.
+% London: Chapman & Hall.zz
+
+}
+\author{ T. W. Yee }
+\note{
+ The \pkg{VGAM} family function \code{\link{zetaff}} estimates the
+ parameter \eqn{p}.
+
+}
+
+\section{Warning}{
+ This function has not been fully tested.
+}
+
+\seealso{
+ \code{\link{zeta}},
+ \code{\link{zetaff}}.
+}
+
+\examples{
+dzeta(1:20, p=2)
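+
+# A sketch (not in the original example): check dzeta against the density
+# formula above, using this package's zeta() function
+max(abs(dzeta(1:20, p=2) - (1:20)^(-2) / zeta(2)))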
+\dontrun{
+plot(1:6, dzeta(1:6, p=4), type="h", las=1, ylab="Probability",
+ main="zeta probability function; black: p = 4; blue: p = 2")
+points(0.10+1:6, dzeta(1:6, p=2), type="h", col="blue")
+}
+}
+\keyword{distribution}
+
diff --git a/man/enzyme.Rd b/man/enzyme.Rd
new file mode 100644
index 0000000..f782a23
--- /dev/null
+++ b/man/enzyme.Rd
@@ -0,0 +1,45 @@
+\name{enzyme}
+\alias{enzyme}
+\docType{data}
+\title{ Enzyme data}
+\description{
+ Enzyme velocity and substrate concentration.
+}
+\usage{data(enzyme)}
+\format{
+ A data frame with 12 observations on the following 2 variables.
+ \describe{
+ \item{conc}{a numeric explanatory vector; substrate concentration}
+ \item{velocity}{a numeric response vector; enzyme velocity}
+ }
+}
+\details{
+ Sorry, more details need to be included later.
+}
+\source{
+ Sorry, more details need to be included later.
+}
+\references{
+Watts, D. G. (1981)
+ An introduction to nonlinear least squares.
+ In: L. Endrenyi (Ed.),
+ \emph{Kinetic Data Analysis: Design and Analysis of Enzyme and
+ Pharmacokinetic Experiments}, pp.1--24.
+ New York: Plenum Press.
+}
+\seealso{
+\code{\link{micmen}}.
+}
+\examples{
+data(enzyme)
+fit = vglm(velocity ~ 1, micmen, enzyme, trace = TRUE, crit = "c",
+ regressor = enzyme$conc)
+\dontrun{attach(enzyme)
+plot(conc, velocity, xlab="concentration", las=1, main="enzyme data")
+lines(conc, fitted(fit), col="blue")
+detach(enzyme)
+}
+
+summary(fit)
+}
+\keyword{datasets}
diff --git a/man/erf.Rd b/man/erf.Rd
new file mode 100644
index 0000000..05a959b
--- /dev/null
+++ b/man/erf.Rd
@@ -0,0 +1,51 @@
+\name{erf}
+\alias{erf}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Error Function }
+\description{
+ Computes the error function based on the normal distribution.
+}
+\usage{
+erf(x)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{ Numeric. }
+}
+\details{
+ \eqn{Erf(x)} is defined as
+ \deqn{Erf(x) = \frac{2}{\sqrt{\pi}} \int_0^x \exp(-t^2) dt}{%
+ Erf(x) = (2/sqrt(pi)) int_0^x exp(-t^2) dt}
+ so that it is closely related to \code{\link[stats]{pnorm}}.
+}
+\value{
+ Returns the value of the function evaluated at \code{x}.
+}
+\references{
+Abramowitz, M. and Stegun, I. A. (1972)
+\emph{Handbook of Mathematical Functions with Formulas,
+ Graphs, and Mathematical Tables},
+New York: Dover Publications Inc.
+}
+\author{ T. W. Yee}
+\note{
+ Some authors omit the term \eqn{2/\sqrt{\pi}}{2/sqrt(pi)} from the
+ definition of \eqn{Erf(x)}. Although defined for complex
+ arguments, this function only works for real
+ arguments.
+
+ The \emph{complementary error function} \eqn{erfc(x)} is defined
+ as \eqn{1-erf(x)}, and is implemented by \code{erfc}.
+}
+
+\seealso{ \code{\link[stats]{pnorm}}. }
+\examples{
+\dontrun{
+x = seq(-3, 3, len=200)
+plot(x, erf(x), type="l", col="red", las=1, lwd=2,
+ main="red is erf(x), blue is pnorm(x)")
+abline(v=0, h=0, lty="dashed", col="black")
+lines(x, pnorm(x), col="blue", lty="dotted", lwd=2)
+}
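+
+# A numerical check (a sketch, not in the original example) of the
+# standard identity erf(x) = 2*pnorm(x*sqrt(2)) - 1
+xx = c(-2, -1, 0, 1, 2)
+max(abs(erf(xx) - (2*pnorm(xx*sqrt(2)) - 1)))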
+}
+\keyword{math}
diff --git a/man/erlang.Rd b/man/erlang.Rd
new file mode 100644
index 0000000..f87e050
--- /dev/null
+++ b/man/erlang.Rd
@@ -0,0 +1,95 @@
+\name{erlang}
+\alias{erlang}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Erlang Distribution }
+\description{
+ Estimates the scale parameter of the Erlang distribution
+ by maximum likelihood estimation.
+}
+\usage{
+erlang(shape.arg, link = "loge", method.init = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{shape.arg}{
+ The shape parameter.
+ The user must specify a positive integer.
+
+ }
+ \item{link}{
+ Link function applied to the (positive) \eqn{scale} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1} or \code{2} which
+ specifies the initialization method. If failure to converge occurs
+ try the other value.
+
+ }
+}
+\details{
+ The Erlang distribution is a special case of the gamma distribution
+ with \emph{shape} that is a positive integer. If \code{shape.arg=1}
+ then it simplifies to the exponential distribution. As illustrated
+ in the example below, the Erlang distribution is the distribution of
+ the sum of \code{shape.arg} independent and identically distributed
+ exponential random variates.
+
+ The probability density function of the Erlang
+ distribution is given by
+ \deqn{f(y) = \exp(-y/scale) y^{shape-1} scale^{-shape} / \Gamma(shape)}{%
+ f(y) = exp(-y/scale) y^(shape-1) scale^(-shape) / gamma(shape)}
+ for known positive integer \eqn{shape},
+ unknown \eqn{scale > 0} and \eqn{y > 0}.
+ Here,
+ \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
+ function, as in \code{\link[base:Special]{gamma}}.
+ The mean of \emph{Y}
+ is \eqn{\mu=shape \times scale}{mu=shape*scale} and
+ its variance is \eqn{shape \times scale^2}{shape*scale^2}.
+ The linear/additive predictor, by default, is
+ \eqn{\eta=\log(scale)}{eta=log(scale)}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Most standard texts on statistical distributions describe
+ this distribution, e.g.,
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+}
+\author{ T. W. Yee }
+
+\note{
+ The \code{rate} parameter found in \code{\link{gamma2.ab}}
+ is \code{1/scale} here---see also \code{\link[stats]{rgamma}}.
+
+}
+
+\seealso{
+ \code{\link{gamma2.ab}},
+ \code{\link{exponential}}.
+}
+\examples{
+n = 200; rate = 8
+y = rep(0, n)
+for(i in 1:3)
+ y = y + rexp(n, rate=rate)
+fit = vglm(y ~ 1, erlang(shape=3), trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit) # Answer = 1/rate
+1/rate
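+
+# Informal check: the mean of the Erlang is shape*scale = 3/rate here
+mean(y)
+3/rate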
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/expexp.Rd b/man/expexp.Rd
new file mode 100644
index 0000000..9d2ec76
--- /dev/null
+++ b/man/expexp.Rd
@@ -0,0 +1,158 @@
+\name{expexp}
+\alias{expexp}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Exponentiated Exponential Distribution }
+\description{
+ Estimates the two parameters of the exponentiated exponential
+ distribution by maximum likelihood estimation.
+
+}
+\usage{
+expexp(lshape = "loge", lscale = "loge",
+ ishape = 1.1, iscale = NULL,
+ tolerance = 1.0e-6, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Parameter link functions for the
+ \eqn{\alpha}{shape} and \eqn{\lambda}{scale} parameters.
+ See \code{\link{Links}} for more choices.
+ The defaults ensure both parameters are positive.
+
+ }
+ \item{ishape}{
+ Initial value for the \eqn{\alpha}{shape}
+ parameter. If convergence fails try setting a different
+ value for this argument.
+
+ }
+ \item{iscale}{
+ Initial value for the \eqn{\lambda}{scale} parameter.
+ By default, an initial value is chosen internally using
+ \code{ishape}.
+
+ }
+ \item{tolerance}{
+ Numeric. Small positive value for testing whether values
+ are close enough to 1 and 2.
+
+ }
+ \item{zero}{ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The default is none of them.
+ If used, choose one value from the set \{1,2\}.
+
+ }
+}
+\details{
+ The exponentiated exponential distribution is an alternative
+ to the Weibull and the gamma distributions.
+ The formula for the density is
+ \deqn{f(y;\alpha,\lambda) =
+ \alpha \lambda (1-\exp(-\lambda y))^{\alpha-1}
+ \exp(-\lambda y) }{%
+ f(y;shape,scale) =
+ shape scale (1-\exp(-scale y))^(shape-1)
+ \exp(-scale y) }
+ where \eqn{y>0},
+ \eqn{\alpha>0}{shape>0} and
+ \eqn{\lambda>0}{scale>0}.
+ The mean of \eqn{Y} is
+ \eqn{(\psi(\alpha+1)-\psi(1))/\lambda}{(psi(shape+1)-psi(1))/scale}
+ (returned as the fitted values)
+ where \eqn{\psi}{psi} is the digamma function.
+ The variance of \eqn{Y} is
+ \eqn{(\psi'(1)-\psi'(\alpha+1))/\lambda^2}{(psi'(1)-psi'(shape+1))/
+ scale^2}
+ where \eqn{\psi'}{psi'} is the trigamma function.
+
+ This distribution has been called the two-parameter generalized
+ exponential distribution by Gupta and Kundu (2006).
+  A special case of the exponentiated exponential distribution is
+  the exponential distribution, obtained when \eqn{\alpha=1}{shape=1}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Gupta, R. D. and Kundu, D. (2001)
+ Exponentiated exponential family: an alternative to
+ gamma and Weibull distributions,
+ \emph{Biometrical Journal},
+ \bold{43},
+ 117--130.
+
+ Gupta, R. D. and Kundu, D. (2006)
+ On the comparison of Fisher information of the
+ Weibull and GE distributions,
+ \emph{Journal of Statistical Planning and Inference},
+ \bold{136},
+ 3130--3144.
+
+}
+\author{ T. W. Yee }
+\note{
+  Fisher scoring is used; however, convergence is usually very slow,
+  which suggests there may be a bug. I have yet to check that
+  the expected information is correct.
+ Also, I have yet to implement Type-I right censored data using the
+ results of Gupta and Kundu (2006).
+
+ Another algorithm for fitting this model is implemented in
+ \code{\link{expexp1}}.
+
+}
+\section{Warning }{
+  Practical experience shows that reasonably good initial values really
+  help. In particular, try setting different values for the \code{ishape}
+  argument if numerical problems are encountered or convergence fails.
+  Even if convergence occurs, try perturbing the initial value
+  to make sure the global solution is obtained and not a local one.
+ The algorithm may fail if the estimate of the shape parameter is
+ too close to unity.
+
+}
+
+\seealso{
+ \code{\link{expexp1}},
+ \code{\link{gamma2.ab}},
+ \code{\link{weibull}}.
+}
+\examples{
+# A special case: exponential data
+y = rexp(n <- 1000)
+fit = vglm(y ~ 1, fam=expexp, trace=TRUE, maxit=99)
+coef(fit, matrix=TRUE)
+Coef(fit)
+
+
+# Ball bearings data (number of million revolutions before failure)
+bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
+48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
+68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
+128.04, 173.40)
+fit = vglm(bbearings ~ 1, fam=expexp(iscale=0.05, ish=5),
+ trace=TRUE, maxit=300)
+coef(fit, matrix=TRUE)
+Coef(fit) # Authors get c(shape=5.2589, scale=0.0314)
+logLik(fit) # Authors get -112.9763
+
+
+# Failure times of the airconditioning system of an airplane
+acplane = c(23, 261, 87, 7, 120, 14, 62, 47,
+225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
+71, 11, 14, 11, 16, 90, 1, 16, 52, 95)
+fit = vglm(acplane ~ 1, fam=expexp(ishape=0.8, isc=0.15),
+ trace=TRUE, maxit=99)
+coef(fit, matrix=TRUE)
+Coef(fit) # Authors get c(shape=0.8130, scale=0.0145)
+logLik(fit) # Authors get log-lik -152.264
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/expexp1.Rd b/man/expexp1.Rd
new file mode 100644
index 0000000..e391b08
--- /dev/null
+++ b/man/expexp1.Rd
@@ -0,0 +1,112 @@
+\name{expexp1}
+\alias{expexp1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Exponentiated Exponential Distribution }
+\description{
+ Estimates the two parameters of the exponentiated exponential
+ distribution by maximizing a profile (concentrated) likelihood.
+}
+\usage{
+expexp1(lscale = "loge", iscale = NULL, ishape = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lscale}{
+ Parameter link function for the (positive) \eqn{\lambda}{scale} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iscale}{
+ Initial value for the \eqn{\lambda}{scale} parameter.
+ By default, an initial value is chosen internally using \code{ishape}.
+
+ }
+ \item{ishape}{
+ Initial value for the \eqn{\alpha}{shape} parameter. If convergence
+ fails try setting a different value for this argument.
+
+ }
+}
+\details{
+ See \code{\link{expexp}} for details about the
+ exponentiated exponential distribution. This
+ family function uses a different algorithm for fitting
+  the model. Given \eqn{\lambda}{scale}, the MLE of
+  \eqn{\alpha}{shape} has a simple closed-form expression in terms of
+  \eqn{\lambda}{scale}. This family function therefore maximizes
+  a profile (concentrated) likelihood with respect to \eqn{\lambda}{scale}.
+  Newton-Raphson is used, in contrast to the Fisher scoring
+  used by \code{\link{expexp}}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+ Gupta, R. D. and Kundu, D. (2001)
+ Exponentiated exponential family: an alternative to
+ gamma and Weibull distributions,
+ \emph{Biometrical Journal},
+ \bold{43},
+ 117--130.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ This family function works only for intercept-only models,
+ i.e., \code{y ~ 1} where \code{y} is the response.
+
+ The estimate of \eqn{\alpha}{shape} is attached to the
+ \code{misc} slot of the object, which is a list and contains
+ the component \code{shape}.
+
+ As Newton-Raphson is used, the working weights are sometimes
+ negative, and some adjustment is made to these to make them
+ positive.
+
+ Like \code{\link{expexp}}, good initial
+ values are needed. Convergence may be slow.
+
+}
+
+\section{Warning }{The standard errors produced by a
+ \code{summary} of the model may be wrong.
+
+}
+
+\seealso{
+ \code{\link{expexp}}.
+}
+\examples{
+# Ball bearings data (number of million revolutions before failure)
+bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
+48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
+68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
+128.04, 173.40)
+fit = vglm(bbearings ~ 1, expexp1(ishape=4), trace=TRUE,
+ maxit=50, checkwz=FALSE)
+coef(fit, matrix=TRUE)
+Coef(fit) # Authors get c(0.0314, 5.2589) with log-lik -112.9763
+fit@misc$shape # Estimate of shape
+logLik(fit)
+
+
+# Failure times of the airconditioning system of an airplane
+acplane = c(23, 261, 87, 7, 120, 14, 62, 47,
+225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
+71, 11, 14, 11, 16, 90, 1, 16, 52, 95)
+fit = vglm(acplane ~ 1, expexp1(ishape=0.8), trace=TRUE,
+ maxit=50, checkwz=FALSE)
+coef(fit, matrix=TRUE)
+Coef(fit) # Authors get c(0.0145, 0.8130) with log-lik -152.264
+fit@misc$shape # Estimate of shape
+logLik(fit)
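+
+# A sketch of the profile likelihood idea (illustrative only, not VGAM code):
+# given the scale, the MLE of the shape is -n / sum(log(1 - exp(-scale*y))),
+# and substituting it back gives the profile log-likelihood
+prof.loglik = function(scale, y) {
+    shape = -length(y) / sum(log1p(-exp(-scale * y)))
+    sum(log(shape) + log(scale) + (shape-1)*log1p(-exp(-scale*y)) - scale*y)
+}
+prof.loglik(0.0145, acplane)  # Should be close to logLik(fit), about -152.3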
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/exponential.Rd b/man/exponential.Rd
new file mode 100644
index 0000000..c681adb
--- /dev/null
+++ b/man/exponential.Rd
@@ -0,0 +1,91 @@
+\name{exponential}
+\alias{exponential}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Exponential Distribution }
+\description{
+ Maximum likelihood estimation for the exponential distribution.
+
+}
+\usage{
+exponential(link = "loge", location = 0, expected = TRUE, earg = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the positive parameter \eqn{rate}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{location}{
+ Numeric of length 1, the known location parameter, \eqn{A}, say.
+
+ }
+ \item{expected}{
+ Logical. If \code{TRUE} Fisher scoring is used,
+ otherwise Newton-Raphson. The latter is usually faster.
+
+ }
+ \item{earg}{
+ Extra argument for the \pkg{VGAM} link function.
+ See \code{\link{Links}} for more details.
+
+ }
+
+}
+\details{
+ The family function assumes the response \eqn{Y} has density
+ \deqn{f(y) = \lambda \exp(-\lambda (y-A))}{%
+ f(y) = rate * exp(-rate * (y-A)) }
+ for \eqn{y > A}, where \eqn{A} is the known location parameter.
+ By default, \eqn{A=0}.
+ Then \eqn{E(Y) = A + 1/ \lambda}{E(Y) = A + 1/rate} and
+ \eqn{Var(Y) = 1/ \lambda^2}{Var(Y) = 1/rate^2}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+
+\author{ T. W. Yee }
+\note{
+ Suppose \eqn{A=0}.
+  For a fixed time interval, the number of events is
+  Poisson with mean \eqn{\lambda}{rate} if the time
+  between events has an
+  exponential distribution with mean \eqn{\lambda^{-1}}{1/rate}.
+  The parameter \eqn{rate} here matches the \code{rate} argument
+  of \code{\link[stats:Exponential]{rexp}} etc.
+  The argument \code{lambda} in \code{\link{rpois}} plays a
+  similar role to \code{rate} here.
+
+}
+\seealso{
+ \code{\link{cexpon}},
+ \code{\link{poissonff}},
+ \code{\link{freund61}}.
+}
+
+\examples{
+nn = 100
+x1 = runif(nn) - 0.5
+x2 = runif(nn) - 0.5
+eta = 0.2 - 0.7 * x1 + 1.9 * x2
+rate = exp(eta)
+y = rexp(nn, rate=rate)
+stem(y)
+fit = vglm(y ~ x1 + x2, exponential, trace=TRUE, crit="c") # slower
+fit = vglm(y ~ x1 + x2, exponential(exp=FALSE), trace=TRUE, crit="c") # faster
+coef(fit)
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
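+
+# The estimates should be near the true values c(0.2, -0.7, 1.9)
+rbind(truth = c(0.2, -0.7, 1.9), estimate = coef(fit))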
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/fff.Rd b/man/fff.Rd
new file mode 100644
index 0000000..ff615bf
--- /dev/null
+++ b/man/fff.Rd
@@ -0,0 +1,93 @@
+\name{fff}
+\alias{fff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ F Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the (2-parameter) F distribution.
+}
+\usage{
+fff(link="loge", idf1=NULL, idf2=NULL, method.init=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function for both parameters.
+ See \code{\link{Links}} for more choices.
+ The default keeps the parameters positive.
+
+ }
+ \item{idf1,idf2}{
+ Numeric and positive.
+ Initial value for the parameters.
+ The default is to choose each value internally.
+
+ }
+ \item{method.init}{
+ Initialization method. Either the value 1 or 2.
+ If both fail try setting values for \code{idf1} and \code{idf2}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The value must be from the set \{1,2\}, corresponding
+ respectively to \eqn{df1} and \eqn{df2}.
+ By default all linear/additive predictors are modelled as
+ a linear combination of the explanatory variables.
+
+ }
+}
+\details{
+  The F distribution is named after Fisher. Its density function
+  has two parameters, called \code{df1} and \code{df2} here.
+ This function treats these degrees of freedom as \emph{positive reals}
+ rather than integers.
+ The mean of the distribution is
+ \eqn{df2/(df2-2)} provided \eqn{df2>2}, and its variance is
+ \eqn{2 df2^2 (df1+df2-2)/(df1 (df2-2)^2 (df2-4))}{2*df2^2*(df1+df2-2)/
+ (df1*(df2-2)^2*(df2-4))} provided \eqn{df2>4}.
+ The estimated mean is returned as the fitted values.
+ Although the F distribution can be defined to accommodate a
+ non-centrality parameter \code{ncp}, it is assumed zero here.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+}
+\author{ T. W. Yee }
+\section{Warning}{
+ Numerical problems will occur when the estimates of the parameters
+ are too low.
+}
+
+\note{
+ This family function uses the BFGS quasi-Newton update formula for the
+ working weight matrices. Consequently the estimated variance-covariance
+  matrix may be inaccurate or simply wrong! The standard errors must
+  therefore be treated with caution; these are computed in functions such
+ as \code{vcov()} and \code{summary()}.
+
+}
+\seealso{
+ \code{\link[stats:Fdist]{FDist}}.
+}
+\examples{
+x = runif(n <- 4000)
+df1 = exp(2+0.5*x)
+df2 = exp(2-0.5*x)
+y = rf(n, df1, df2)
+fit = vglm(y ~ x, fff, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+vcov(fit) # caution needed!
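+
+# Informal check: the fitted values are the estimated means df2/(df2-2),
+# so they should roughly track the true means of the simulated data
+max(abs(fitted(fit) - df2/(df2-2)))  # Small if the fit is good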
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/fill.Rd b/man/fill.Rd
new file mode 100644
index 0000000..b55248a
--- /dev/null
+++ b/man/fill.Rd
@@ -0,0 +1,233 @@
+\name{fill}
+\alias{fill}
+\alias{fill1}
+%- \alias{fill2}
+%- \alias{fill3}
+%- \alias{fill4}
+%- \alias{fill5}
+%- \alias{fill6}
+%- \alias{fill7}
+%- \alias{fill8}
+%- \alias{fill9}
+%- \alias{fill10}
+%- \alias{fill11}
+%- \alias{fill12}
+%- \alias{fill13}
+%- \alias{fill14}
+%- \alias{fill15}
+%- \alias{fill16}
+%- \alias{fill17}
+%- \alias{fill18}
+%- \alias{fill19}
+%- \alias{fill20}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Creates a Matrix of Appropriate Dimension }
+\description{
+ A support function for the argument \code{xij}, it generates a matrix
+ of an appropriate dimension.
+
+}
+\usage{
+fill(x, values = 0, ncolx = ncol(x))
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{
+ A vector or matrix which is used to determine the dimension of the
+ answer, in particular, the number of rows. After converting \code{x}
+    to a matrix if necessary, the answer is a matrix filled with
+    \code{values}, of dimension \code{nrow(x)} by \code{ncolx}.
+
+ }
+ \item{values}{
+ Numeric. The answer contains these values which are recycled if
+ necessary.
+
+ }
+ \item{ncolx}{
+ The number of columns of the returned matrix.
+ The default is the number of columns of \code{x}.
+
+ }
+}
+\details{
+ The \code{xij} argument for \code{\link{vglm}} allows the user to input
+ variables specific to each linear predictor. For example, consider
+ the bivariate logit model where the first/second linear/additive
+ predictor is the logistic regression of the first/second binary response
+ respectively. The third linear/additive predictor is \code{log(OR) =
+ eta3}, where \code{OR} is the odds ratio. If one has ocular pressure
+ as a covariate in this model then \code{xij} is required to handle the
+ ocular pressure for each eye, since these will be different in general.
+ [This contrasts with a variable such as \code{age}, the age of the
+ person, which has a common value for both eyes.] In order to input
+ these data into \code{\link{vglm}} one often finds that functions
+ \code{fill}, \code{fill1}, etc. are useful.
+
+ All terms in the \code{xij} argument must appear in the main
+ \code{formula} argument in \code{\link{vglm}}.
+
+}
+\value{
+ \code{matrix(values, nrow=nrow(x), ncol=ncolx)}, i.e., a matrix
+ consisting of values \code{values}, with the number of rows matching
+ \code{x}, and the default number of columns is the number of columns
+ of \code{x}.
+
+}
+\references{
+ More information can be found at
+ \url{http://www.stat.auckland.ac.nz/~yee}.
+
+}
+\section{Warning }{
+ The use of the \code{xij} argument overrides other arguments such as
+ \code{exchangeable} and \code{zero}. Care is needed in such cases.
+ See the examples below.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ Additionally, there are currently 3 other identical \code{fill}
+  functions, called \code{fill1}, \code{fill2} and \code{fill3}; if you
+  need more, then assign \code{fill4 = fill5 = fill1}, etc.
+ The reason for this is that if more than one \code{fill} function is
+ needed then they must be unique.
+ For example, if \eqn{M=4} then
+ \code{xij = op ~ lop + rop + fill(mop) + fill(mop)} would reduce to
+ \code{xij = op ~ lop + rop + fill(mop)}, whereas
+ \code{xij = op ~ lop + rop + fill1(mop) + fill2(mop)} would retain
+ \eqn{M} terms, which is needed.
+
+ The constraint matrices, as returned by \code{constraints}, have a
+ different meaning when \code{xij} is used.
+
+ In Examples 1 to 3 below, the \code{xij} argument illustrates covariates
+ that are specific to a linear predictor. Here, \code{lop}/\code{rop} are
+ the ocular pressures of the left/right eye in an artificial dataset,
+ and \code{mop} is their mean. Variables \code{leye} and \code{reye}
+ might be the presence/absence of a particular disease on the LHS/RHS
+ eye respectively. Examples 1 and 2 are deliberately misspecified.
+ The output from, e.g., \code{coef(fit, matrix=TRUE)}, looks wrong but
+ is correct because the coefficients are multiplied by the zeros
+ produced from \code{fill}.
+
+ In Example 4,
+ the \code{xij} argument illustrates fitting the model where there
+ is a common smooth function of the ocular pressure. One should use
+ regression splines since \code{\link{s}} in \code{\link{vgam}} does not
+ handle the \code{xij} argument. However, regression splines such as
+ \code{\link[splines]{bs}} and \code{\link[splines]{ns}} need to have
+ the same knots here for both functions, and Example 4 illustrates
+ a trick involving a function \code{BS} to obtain this. Although
+ regression splines create more than a single column per term in the
+ model matrix, \code{fill(BS(lop,rop,mop))} creates the required (same)
+ number of columns.
+
+}
+
+\seealso{
+ \code{\link{vglm}},
+ \code{\link{vglm.control}}.
+}
+\examples{
+fill(runif(5))
+fill(runif(5), ncol=3)
+fill(runif(5), val=1, ncol=3)
+
+# Generate eyes data for the examples below. Eyes are independent (OR=1).
+set.seed(123)
+n = 2000 # Number of people
+eyes = data.frame(lop = round(runif(n), 2),
+ rop = round(runif(n), 2),
+ age = round(rnorm(n, 40, 10)))
+eyes = transform(eyes,
+ mop = (lop + rop) / 2, # mean ocular pressure
+ eta1 = 0 - 2*lop + 0.04*age, # Linear predictor for left eye
+ eta2 = 0 - 2*rop + 0.04*age) # Linear predictor for right eye
+eyes = transform(eyes,
+ leye = rbinom(n, size=1, prob=exp(eta1)/(1+exp(eta1))),
+ reye = rbinom(n, size=1, prob=exp(eta2)/(1+exp(eta2))))
+
+# Example 1
+# Non-exchangeable errors (misspecified model)
+fit1 = vglm(cbind(leye,reye) ~ lop + rop + fill(lop) + age,
+ family = binom2.or(exchangeable=FALSE, zero=NULL),
+ xij = op ~ lop + rop + fill(lop), data=eyes)
+model.matrix(fit1, type="lm")[1:7,] # LM model matrix
+model.matrix(fit1, type="vlm")[1:7,] # Big VLM model matrix
+coef(fit1)
+coef(fit1, matrix=TRUE) # Looks wrong but is correct
+coef(fit1, matrix=TRUE, compress=FALSE) # Looks wrong but is correct
+constraints(fit1)
+max(abs(predict(fit1)-predict(fit1, new=eyes))) # Predicts correctly
+summary(fit1)
+
+
+# Example 2
+# Nonexchangeable errors (misspecified model), OR is a function of mop
+fit2 = vglm(cbind(leye,reye) ~ lop + rop + mop + age,
+ family = binom2.or(exchangeable=FALSE, zero=NULL),
+ xij = op ~ lop + rop + mop, data=eyes)
+model.matrix(fit2, type="lm")[1:7,] # LM model matrix
+model.matrix(fit2, type="vlm")[1:7,] # Big VLM model matrix
+coef(fit2)
+coef(fit2, matrix=TRUE) # correct
+coef(fit2, matrix=TRUE, compress=FALSE) # correct
+max(abs(predict(fit2)-predict(fit2, new=eyes))) # Predicts correctly
+summary(fit2)
+
+
+# Example 3. This model is correctly specified.
+# Exchangeable errors
+fit3 = vglm(cbind(leye,reye) ~ lop + rop + fill(lop) + age,
+ family = binom2.or(exchangeable=TRUE, zero=3),
+ xij = op ~ lop + rop + fill(lop), data=eyes)
+model.matrix(fit3, type="lm")[1:7,] # LM model matrix
+model.matrix(fit3, type="vlm")[1:7,] # Big VLM model matrix
+coef(fit3)
+coef(fit3, matrix=TRUE) # Looks wrong but is correct
+coef(fit3, matrix=TRUE, compress=FALSE) # Looks wrong but is correct
+predict(fit3, new=eyes[1:4,]) # Note the 'scalar' OR, i.e., zero=3
+max(abs(predict(fit3)-predict(fit3, new=eyes))) # Predicts correctly
+summary(fit3)
+
+
+# Example 4. This model uses regression splines on ocular pressure.
+# It assumes exchangeable errors.
+library(splines)  # bs() is in the splines package
+BS = function(x, ...) bs(c(x,...), df=3)[1:length(x),]
+fit4 = vglm(cbind(leye,reye) ~ BS(lop,rop,mop) + BS(rop,lop,mop) +
+ fill(BS(lop,rop,mop)) + age,
+ family = binom2.or(exchangeable=TRUE, zero=3),
+ xij = BS(op) ~ BS(lop,rop,mop) + BS(rop,lop,mop) +
+ fill(BS(lop,rop,mop)), data=eyes)
+model.matrix(fit4, type="lm")[1:7,] # LM model matrix
+model.matrix(fit4, type="vlm")[1:7,] # Big VLM model matrix
+coef(fit4)
+coef(fit4, matrix=TRUE) # Looks wrong but is correct
+coef(fit4, matrix=TRUE, compress=FALSE) # Looks wrong but is correct
+predict(fit4, new=eyes[1:4,]) # Note the 'scalar' OR, i.e., zero=3
+max(abs(predict(fit4)-predict(fit4, new=eyes))) # Predicts correctly
+summary(fit4)
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
new file mode 100644
index 0000000..b754fed
--- /dev/null
+++ b/man/fisherz.Rd
@@ -0,0 +1,111 @@
+\name{fisherz}
+\alias{fisherz}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Fisher's Z Link Function }
+\description{
+ Computes the Fisher Z transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to -1 can be
+ replaced by the \code{bminvalue} component of the list \code{earg}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by the \code{bmaxvalue} component of the list \code{earg}
+ before computing the link function value.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The \code{fisherz} link function is commonly used for parameters that
+ lie between -1 and 1.
+ Numerical values of \code{theta} close to -1 or 1 or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+}
+\value{
+ For \code{deriv = 0},
+ \code{0.5 * log((1+theta)/(1-theta))} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{(exp(2*theta)-1)/(exp(2*theta)+1)}.
+
+  For \code{deriv = 1}, the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is close to -1 or 1.
+ One way of overcoming this is to use \code{earg}.
+
+  The link function \code{\link{rhobit}} is very similar to \code{fisherz};
+  it is just twice the value of \code{fisherz}.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{rhobit}},
+ \code{\link{logit}}.
+
+ }
+\examples{
+theta = seq(-0.99, 0.99, by=0.01)
+y = fisherz(theta)
+\dontrun{
+plot(theta, y, type="l", las=1, ylab="", main="fisherz(theta)")
+abline(v=0, h=0, lty=2)
+}
+
+x = c(seq(-1.02, -0.98, by=0.01), seq(0.97, 1.02, by=0.01))
+fisherz(x) # Has NAs
+fisherz(x, earg=list(bminvalue= -1 + .Machine$double.eps,
+ bmaxvalue= 1 - .Machine$double.eps)) # Has no NAs
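+
+# fisherz() coincides with atanh(); an informal numerical check
+max(abs(fisherz(theta) - atanh(theta)))  # Should be essentially zero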
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/fisk.Rd b/man/fisk.Rd
new file mode 100644
index 0000000..03e5aa4
--- /dev/null
+++ b/man/fisk.Rd
@@ -0,0 +1,98 @@
+\name{fisk}
+\alias{fisk}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Fisk Distribution family function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ Fisk distribution.
+}
+\usage{
+fisk(link.a = "loge", link.scale = "loge",
+ init.a = NULL, init.scale = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.a, link.scale}{
+ Parameter link functions applied to the
+ (positive) parameters \code{a} and \code{scale}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.a, init.scale}{
+ Optional initial values for \code{a} and \code{scale}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2\} which correspond to
+ \code{a}, \code{scale}, respectively.
+
+ }
+}
+\details{
+  The 2-parameter Fisk (also known as the log-logistic) distribution
+  is the 4-parameter generalized beta II distribution with shape
+  parameters \eqn{q=p=1}.
+ It is also the 3-parameter Singh-Maddala distribution
+ with shape parameter \eqn{q=1}, as well as the
+ Dagum distribution with \eqn{p=1}.
+ More details can be found in Kleiber and Kotz (2003).
+
+The Fisk distribution has density
+ \deqn{f(y) = a y^{a-1} / [b^a \{1 + (y/b)^a\}^2]}{%
+ f(y) = a y^(a-1) / [b^a (1 + (y/b)^a)^2]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and \code{a} is a shape parameter.
+The cumulative distribution function is
+ \deqn{F(y) = 1 - [1 + (y/b)^a]^{-1} = [1 + (y/b)^{-a}]^{-1}.}{%
+ F(y) = 1 - [1 + (y/b)^a]^(-1) = [1 + (y/b)^(-a)]^(-1).}
+The mean is
+ \deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(1 - 1/a)}{%
+ E(Y) = b gamma(1 + 1/a) gamma(1 - 1/a)}
+provided \eqn{a > 1}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ If the self-starting initial values fail, try experimenting
+ with the initial value arguments, especially those whose
+ default value is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{Fisk}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{sinmad}},
+ \code{\link{invlomax}},
+ \code{\link{lomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rfisk(n=200, 4, 6)
+fit = vglm(y ~ 1, fisk, trace=TRUE)
+fit = vglm(y ~ 1, fisk(init.a=3.3), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
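+
+# The Fisk median equals the scale parameter b, since F(b) = 1/2;
+# the sample median gives an informal check (b=6 was used above)
+median(y)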
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/fitted.vlm.Rd b/man/fitted.vlm.Rd
new file mode 100644
index 0000000..b6fae78
--- /dev/null
+++ b/man/fitted.vlm.Rd
@@ -0,0 +1,85 @@
+\name{fitted.vlm}
+\alias{fitted.vlm}
+\alias{fitted.values.vlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Fitted Values of a VLM object}
+\description{
+ Extractor function for the fitted values of a model object that
+ inherits from a \emph{vector linear model} (VLM), e.g., a model of
+ class \code{"vglm"}.
+
+}
+\usage{
+fitted.vlm(object, matrix = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ a model object that inherits from a VLM.
+ }
+ \item{matrix}{ Logical. Return the answer as a matrix?
+ If \code{FALSE} then it will be a vector. }
+ \item{\dots}{ Currently unused. }
+}
+\details{
+  The ``fitted values'' usually correspond to the mean response;
+  however, because the \pkg{VGAM} package fits so many models,
+  they sometimes refer to quantities such as quantiles. The mean may
+  not even exist, e.g., for a Cauchy distribution.
+}
+\value{
+ The fitted values as returned by the
+ \code{inverse} slot of the \pkg{VGAM} family function,
+ evaluated at the final IRLS iteration.
+}
+\references{
+Chambers, J. M. and T. J. Hastie (eds) (1992)
+ \emph{Statistical Models in S}.
+ Wadsworth & Brooks/Cole.
+}
+\author{ Thomas W. Yee }
+\note{
+ This function is one of several extractor functions for
+ the \pkg{VGAM} package. Others include \code{coef},
+ \code{deviance}, \code{weights} and \code{constraints} etc.
+ This function is equivalent to the methods function for the
+ generic function \code{fitted.values}.
+
+ If \code{fit} is a VLM or VGLM then \code{fitted(fit)} and
+ \code{predict(fit, type="response")} should be equivalent.
+ The latter has the advantage in that it handles a \code{newdata}
+ argument so that the fitted values can be computed for a
+ different data set.
+}
+
+\seealso{
+ \code{\link[stats]{fitted}},
+ \code{\link{predict.vglm}},
+ \code{\link{vglmff-class}}.
+}
+\examples{
+# Categorical regression example 1
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+fit = vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo)
+fit
+fitted(fit)
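+# As noted above, fitted() and predict(type="response") should agree
+max(abs(fitted(fit) - predict(fit, type="response")))  # Should be zero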
+
+
+# Nonparametric logistic regression example 2
+data(hunua)
+fit = vgam(agaaus ~ s(altitude), binomialff, hunua)
+fitted(fit, matrix=FALSE)[1:3]
+
+
+# LMS quantile regression example 3
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)),
+ fam=lms.bcn(zero=1), data=bminz, trace=TRUE)
+# The following are equal
+predict(fit, type="r")[1:3,]
+fitted(fit)[1:3,]
+predict(fit, type="r", newdata=bminz[1:3,])
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/frank.Rd b/man/frank.Rd
new file mode 100644
index 0000000..5014ad6
--- /dev/null
+++ b/man/frank.Rd
@@ -0,0 +1,96 @@
+\name{frank}
+\alias{frank}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Frank's Bivariate Distribution Family Function }
+\description{
+ Estimate the correlation parameter of Frank's bivariate distribution
+ using maximum likelihood estimation.
+
+}
+\usage{
+frank(lcorp="loge", icorp=2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lcorp}{
+ Link function applied to the (positive) correlation parameter
+ \eqn{\alpha}{alpha}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{icorp}{
+ Numeric. Initial value for \eqn{\alpha}{alpha}.
+ If a convergence failure occurs try assigning a different value.
+
+ }
+}
+\details{
+ The cumulative distribution function is
+ \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = H_{\alpha}(y_1,y_2) =
+ \log_{\alpha} [1 + (\alpha^{y_1}-1)(\alpha^{y_2}-1)/
+ (\alpha-1)] }{%
+ P(Y1 <= y1, Y2 <= y2) =
+ H_{alpha}(y1,y2) = log_{alpha} [1 + (alpha^(y1)-1)*(alpha^(y2)-1)/
+ (alpha-1)] }
+ for \eqn{\alpha \ne 1}{alpha != 1}.
+ Note the logarithm here is to base \eqn{\alpha}{alpha}.
+ The support of the function is the unit square.
+
+ When \eqn{0 < \alpha < 1}{0<alpha<1} the probability density function
+ \eqn{h_{\alpha}(y_1,y_2)}{h_{alpha}(y_1,y_2)}
+ is symmetric with respect to the lines \eqn{y_2=y_1}{y2=y1}
+ and \eqn{y_2=1-y_1}{y2=1-y1}.
+  When \eqn{\alpha > 1}{alpha>1} then
+  \eqn{h_{\alpha}(y_1,y_2) = h_{1/\alpha}(1-y_1,y_2)}{h_{alpha}(y1,y2) = h_{1/alpha}(1-y1,y2)}.
+
+  If \eqn{\alpha=1}{alpha=1} then \eqn{H(y_1,y_2) = y_1 y_2}{H(y1,y2)=y1*y2},
+  i.e., uniform on the unit square.
+  As \eqn{\alpha}{alpha} approaches 0,
+  \eqn{H(y_1,y_2)}{H(y1,y2)} approaches \eqn{\min(y_1,y_2)}{min(y1,y2)}.
+  As \eqn{\alpha}{alpha} approaches infinity,
+  \eqn{H(y_1,y_2)}{H(y1,y2)} approaches \eqn{\max(0, y_1+y_2-1)}{max(0,y1+y2-1)}.
+
+ A variant of Newton-Raphson is used, which only seems to work for an
+ intercept model.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+
+%% improve the references
+\references{
+
+Genest, C. (1987)
+Frank's family of bivariate distributions.
+\emph{Biometrika},
+\bold{74}, 549--555.
+
+}
+\author{ T. W. Yee }
+\note{
+ The response must be a two-column matrix. Currently, the fitted
+ value is a matrix with two columns and values equal to a half.
+ This is because the marginal distributions correspond to a standard
+ uniform distribution.
+
+}
+
+\seealso{
+ \code{\link{rfrank}}.
+}
+\examples{
+ymat = rfrank(n=2000, alpha=exp(4))
+\dontrun{plot(ymat)}
+fit = vglm(ymat ~ 1, fam=frank, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+vcov(fit)
+fitted(fit)[1:5,]
+summary(fit)
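+
+# The fitted values are all 0.5 because the marginals are standard uniform
+range(fitted(fit))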
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/frankUC.Rd b/man/frankUC.Rd
new file mode 100644
index 0000000..c65511a
--- /dev/null
+++ b/man/frankUC.Rd
@@ -0,0 +1,68 @@
+\name{Frank}
+\alias{Frank}
+\alias{dfrank}
+\alias{pfrank}
+\alias{rfrank}
+\title{Frank's Bivariate Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the one parameter Frank distribution.
+}
+\usage{
+dfrank(x1, x2, alpha)
+pfrank(q1, q2, alpha)
+rfrank(n, alpha)
+}
+\arguments{
+ \item{x1, x2, q1, q2}{vector of quantiles.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{alpha}{the positive correlation parameter \eqn{\alpha}{alpha}.}
+}
+\value{
+ \code{dfrank} gives the density,
+ \code{pfrank} gives the distribution function, and
+ \code{rfrank} generates random deviates (a two-column matrix).
+}
+\references{
+
+Genest, C. (1987)
+Frank's family of bivariate distributions.
+\emph{Biometrika},
+\bold{74}, 549--555.
+
+}
+\author{ T. W. Yee }
+\details{
+  See \code{\link{frank}}, the \pkg{VGAM}
+  family function for estimating the correlation
+ parameter by maximum likelihood estimation, for the formula of the
+ cumulative distribution function and other details.
+
+}
+%\note{
+%}
+\seealso{
+ \code{\link{frank}}.
+}
+\examples{
+\dontrun{
+N = 100
+x = seq(-0.30, 1.30, len=N)
+alpha = 8
+ox = expand.grid(x, x)
+z = dfrank(ox[,1], ox[,2], alp=alpha)
+contour(x, x, matrix(z, N, N))
+z = pfrank(ox[,1], ox[,2], alp=alpha)
+contour(x, x, matrix(z, N, N))
+
+alpha = exp(4)
+plot(r <- rfrank(n=3000, alpha=alpha))
+par(mfrow=c(1,2))
+hist(r[,1]) # Should be uniform
+hist(r[,2]) # Should be uniform
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/frechet.Rd b/man/frechet.Rd
new file mode 100644
index 0000000..3590c7b
--- /dev/null
+++ b/man/frechet.Rd
@@ -0,0 +1,161 @@
+\name{frechet}
+% \alias{frechet}
+\alias{frechet2}
+\alias{frechet3}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Frechet Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter and 3-parameter
+ Frechet distribution.
+
+}
+\usage{
+frechet2(location=0, lscale="loge", lshape="loglog",
+ escale = list(), eshape = list(),
+ iscale=NULL, ishape=3, zero=NULL)
+frechet3(anchor=NULL, ldifference="loge",
+ lscale="loge", lshape="loglog",
+ edifference=list(), escale = list(), eshape = list(),
+ ilocation=NULL, iscale=NULL, ishape=3, zero=NULL,
+ effpos = .Machine$double.eps^0.75)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{location}{
+ Numeric. Location parameter.
+ It is called \eqn{a} below.
+
+ }
+ \item{lscale, lshape}{
+ Link functions for the parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iscale, ishape}{
+ Initial value for the scale and shape parameters respectively.
+ Both parameters must be positive.
+ A \code{NULL} means it is chosen internally.
+
+ }
+ \item{edifference, escale, eshape}{
+ Extra argument for the respective links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only. The value
+ must be from the set \{1,2\}, corresponding respectively to \eqn{scale}
+ and \eqn{shape}. By default all linear/additive predictors are modelled
+ as a linear combination of the explanatory variables.
+
+ }
+ \item{anchor}{
+ An ``anchor'' point for estimating the location parameter. This must
+ be a value no greater than \code{min(y)} where \code{y} is the response.
+ The location parameter is \eqn{A - D} where
+ \eqn{A} is the anchor,
+ \eqn{D} is the ``difference'' (default is to make this positive).
+ The default value of \code{anchor} means \code{min(y)} is chosen.
+
+ }
+ \item{ldifference}{
+ Parameter link function for the difference \eqn{D} between the anchor
+ point and the location parameter estimate.
+ The default keeps this difference positive so that numerical
+ problems are less likely to occur.
+
+ }
+ \item{ilocation}{
+ Optional initial value for the location parameter.
+ A good choice can speed up the convergence rate markedly.
+ A \code{NULL} means it is chosen internally.
+
+ }
+ \item{effpos}{
+ A small number, usually positive, to pass into the quasi-Newton
+ updating function. A value greater than \code{effpos} is considered
+ effectively positive.
+
+ }
+}
+\details{
+ The Frechet distribution has a density function that can be written
+ \deqn{f(y) = \frac{sb}{(y-a)^2} [b/(y-a)]^{s-1} \, \exp[-(b/(y-a))^s] }{%
+ f(y) = ((s*b) / (y-a)^2) * exp[-(b/(y-a))^s] * [b/(y-a)]^(s-1)}
+ for \eqn{y>a} and scale parameter \eqn{b>0}.
+ The positive shape parameter is \eqn{s}.
+ The cumulative distribution function is
+ \deqn{F(y) = \exp[-(b/(y-a))^s]. }{%
+ F(y) = exp[-(b/(y-a))^s].}
+ The mean of \eqn{Y} is \eqn{a + b \Gamma(1-1/s)}{a + b*gamma(1-1/s)}
+ for \eqn{s>1} (these are returned as the fitted values).
+ Note that the \code{\link{loglog}} link ensures \eqn{s>1}.
+ The variance of \eqn{Y} is
+ \eqn{b^2 [ \Gamma(1-2/s) - \Gamma^2(1-1/s)]}{b^2 * [gamma(1-2/s) - gamma(1-1/s)^2]}
+ for \eqn{s>2}.
+
+ \code{frechet2} has \eqn{a} known whereas \code{frechet3}
+ estimates it. Estimating \eqn{a} well requires a lot of data and
+ a good choice of \code{ilocation} will help speed up convergence.
+ For \code{frechet3} the default linear/additive predictors are
+ \eqn{\log(D)}{log(D)},
+ \eqn{\log(b)}{log(b)},
+ \eqn{\log\log(s)}{log(log(s))}, respectively.
+ It would be great if the first linear/additive predictor was a direct
+ function of the location parameter, but this can run the risk that
+ the estimate is out of range (i.e., greater than \code{min(y)}).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. M. (2005)
+\emph{Extreme Value and Related Models with Applications in Engineering and Science},
+Hoboken, N.J.: Wiley-Interscience.
+
+}
+\author{ T. W. Yee }
+\section{Warning}{
+ Convergence for \code{frechet3} can be very slow, especially if the
+ initial value for the location parameter is poor. Setting something
+ like \code{maxit=200, trace=TRUE} is a good idea.
+
+}
+
+\note{
+ These family functions use the BFGS quasi-Newton update formula for the
+ working weight matrices. Consequently the estimated variance-covariance
+  matrix may be inaccurate or simply wrong! The standard errors must
+  therefore be treated with caution; these are computed in functions such
+ as \code{vcov()} and \code{summary()}.
+
+  If \code{fit} is a \code{frechet3} fit then \code{fit@extra$location}
+  is the final estimate of the location parameter, and
+  \code{fit@extra$LHSanchor} is the anchor point.
+
+}
+\seealso{
+ \code{\link{rfrechet}}.
+}
+\examples{
+y = rfrechet(n <- 1000, shape=exp(exp(0)))
+\dontrun{hist(y)}
+fit2 = vglm(y ~ 1, frechet2, trace=TRUE, maxit=155)
+fit3 = vglm(y ~ 1, frechet3(ilocation=0), trace=TRUE, maxit=155)
+coef(fit3, matrix=TRUE)
+Coef(fit3)
+fitted(fit3)[1:5,]
+mean(y)
+weights(fit3, type="w")[1:5,]
+vcov(fit3) # caution needed!
+fit3@extra$location[1:3] # Estimate of the location parameter
+fit3@extra$LHSanchor # Anchor point
+min(y)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
new file mode 100644
index 0000000..e5cff0c
--- /dev/null
+++ b/man/frechetUC.Rd
@@ -0,0 +1,66 @@
+\name{Frechet}
+\alias{Frechet}
+\alias{dfrechet}
+\alias{pfrechet}
+\alias{qfrechet}
+\alias{rfrechet}
+\title{The Frechet Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the three parameter Frechet distribution.
+}
+\usage{
+dfrechet(x, location=0, scale=1, shape)
+pfrechet(q, location=0, scale=1, shape)
+qfrechet(p, location=0, scale=1, shape)
+rfrechet(n, location=0, scale=1, shape)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{location, scale, shape}{the location parameter \eqn{a},
+ scale parameter \eqn{b}, and shape parameter \eqn{s}.}
+}
+\value{
+ \code{dfrechet} gives the density,
+ \code{pfrechet} gives the distribution function,
+ \code{qfrechet} gives the quantile function, and
+ \code{rfrechet} generates random deviates.
+}
+\references{
+Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. M. (2005)
+\emph{Extreme Value and Related Models with Applications in Engineering and Science},
+Hoboken, N.J.: Wiley-Interscience.
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{frechet2}} and \code{\link{frechet3}}, the \pkg{VGAM}
+ family functions for estimating the 2 (without location parameter) and
+ 3 parameters by maximum likelihood estimation, for the formula of the
+ probability density function and range restrictions on the parameters.
+
+}
+%\note{
+%}
+\seealso{
+ \code{\link{frechet2}},
+ \code{\link{frechet3}}.
+}
+\examples{
+\dontrun{
+shape = 5
+x = seq(-0.1, 3.5, len=100)
+plot(x, dfrechet(x, shape=shape), type="l", ylab="", las=1,
+ main="Frechet density divided into 10 equal areas; red=cdf")
+abline(h=0, col="blue", lty=2)
+qq = qfrechet(seq(0.1,0.9,by=0.1), shape=shape)
+lines(qq, dfrechet(qq, shape=shape), col="purple", lty=3, type="h")
+lines(x, pfrechet(q=x, shape=shape), col="red")
+}
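+
+# An informal check of the closed-form cdf, F(y) = exp(-(b/(y-a))^s),
+# here with location a=0 and scale b=1
+y = 1.2; shape = 5
+c(pfrechet(y, shape=shape), exp(-(1/y)^shape))  # The two values should agree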
+}
+\keyword{distribution}
+
+
diff --git a/man/freund61.Rd b/man/freund61.Rd
new file mode 100644
index 0000000..78f10a1
--- /dev/null
+++ b/man/freund61.Rd
@@ -0,0 +1,181 @@
+\name{freund61}
+\alias{freund61}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Freund's (1961) Bivariate Extension of the Exponential Distribution }
+\description{
+ Estimate the four parameters of the Freund (1961) bivariate extension
+ of the exponential distribution
+ using maximum likelihood estimation.
+
+}
+\usage{
+freund61(la="loge", lap="loge", lb="loge", lbp="loge",
+ ia=NULL, iap=NULL, ib=NULL, ibp=NULL,
+ independent=FALSE, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{la,lap,lb,lbp}{
+ Link function applied to the (positive)
+ parameters \eqn{\alpha}{alpha}, \eqn{\alpha'}{alpha'},
+ \eqn{\beta}{beta} and \eqn{\beta'}{beta'}, respectively
+ (the ``\code{p}'' stands for ``prime'').
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ia,iap,ib,ibp}{
+ Initial value for the four parameters respectively.
+ The default is to estimate them all internally.
+
+ }
+ \item{independent}{
+ Logical. If \code{TRUE} then the parameters are constrained to satisfy
+ \eqn{\alpha=\alpha'}{alpha=alpha'} and \eqn{\beta=\beta'}{beta=beta'},
+ which implies that \eqn{y_1}{y1} and \eqn{y_2}{y2} are independent
+ and each have an ordinary exponential distribution.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3,4\}.
+ The default is none of them.
+
+ }
+}
+\details{
+ This model represents one type of bivariate extension of the exponential
+ distribution that is applicable to certain problems, in particular,
+ to two-component systems which can function if one of the components
+ has failed. For example, engine failures in two-engine planes, paired
+  organs such as people's eyes, ears and kidneys.
+ Suppose \eqn{y_1}{y1} and \eqn{y_2}{y2} are random variables
+ representing the lifetimes of two components \eqn{A} and \eqn{B}
+ in a two component system.
+ The dependence between \eqn{y_1}{y1} and \eqn{y_2}{y2}
+ is essentially such that the failure of the \eqn{B} component
+ changes the parameter of the exponential life distribution
+ of the \eqn{A} component from \eqn{\alpha}{alpha} to
+ \eqn{\alpha'}{alpha'}, while the failure of the \eqn{A} component
+ changes the parameter of the exponential life distribution
+ of the \eqn{B} component from \eqn{\beta}{beta} to
+ \eqn{\beta'}{beta'}.
+
+ The joint probability density function is given by
+ \deqn{f(y_1,y_2) = \alpha \beta' \exp(-\beta' y_2 -
+ (\alpha+\beta-\beta')y_1) }{%
+ f(y1,y2) = alpha * beta' * exp(-beta' * y2 -
+ (alpha+beta-beta') * y1) }
+ for \eqn{0 < y_1 < y_2}{0 < y1 < y2}, and
+ \deqn{f(y_1,y_2) = \beta \alpha' \exp(-\alpha' y_1 -
+ (\alpha+\beta-\alpha')y_2) }{%
+ f(y1,y2) = beta * alpha' * exp(-alpha' * y1 -
+ (alpha+beta-alpha') * y2) }
+ for \eqn{0 < y_2 < y_1}{0 < y2 < y1}.
+ Here, all four parameters are positive, as well as the responses
+ \eqn{y_1}{y1} and \eqn{y_2}{y2}.
+ Under this model, the probability that component \eqn{A}
+ is the first to fail is
+ \eqn{\alpha/(\alpha+\beta)}{alpha/(alpha+beta)}.
+  The time to the first failure has an
+ exponential distribution with rate
+ \eqn{\alpha+\beta}{alpha+beta}. Furthermore, the
+ distribution of the time from first failure to failure
+ of the other component is a mixture of
+ Exponential(\eqn{\alpha'}{alpha'}) and
+ Exponential(\eqn{\beta'}{beta'}) with proportions
+ \eqn{\beta/(\alpha+\beta)}{beta/(alpha+beta)}
+ and \eqn{\alpha/(\alpha+\beta)}{alpha/(alpha+beta)}
+ respectively.
+
+ The marginal distributions are, in general, not exponential.
+ By default, the linear/additive predictors are
+ \eqn{\eta_1=\log(\alpha)}{eta1=log(alpha)},
+ \eqn{\eta_2=\log(\alpha')}{eta2=log(alpha')},
+ \eqn{\eta_3=\log(\beta)}{eta3=log(beta)},
+ \eqn{\eta_4=\log(\beta')}{eta4=log(beta')}.
+
+ A special case is when \eqn{\alpha=\alpha'}{alpha=alpha'}
+  and \eqn{\beta=\beta'}{beta=beta'}, which means that
+ \eqn{y_1}{y1} and \eqn{y_2}{y2} are independent, and
+ both have an ordinary exponential distribution with means
+ \eqn{1 / \alpha}{1/alpha} and \eqn{1 / \beta}{1/beta}
+ respectively.
+
+ Fisher scoring is used,
+ and the initial values correspond to the MLEs of an intercept model.
+ Consequently, convergence may take only one iteration.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+
+\references{
+
+Freund, J. E. (1961)
+A bivariate extension of the exponential distribution.
+\emph{Journal of the American Statistical Association},
+\bold{56}, 971--977.
+
+}
+\author{ T. W. Yee }
+\note{
+  To estimate all four parameters, it is necessary to have some
+  observations with \eqn{y_1<y_2}{y1<y2} and some with
+  \eqn{y_2<y_1}{y2<y1}.
+
+ The response must be a two-column matrix, with columns
+ \eqn{y_1}{y1} and \eqn{y_2}{y2}.
+ Currently, the fitted value is a matrix with two columns; the
+ first column has values
+ \eqn{(\alpha'+\beta)/(\alpha' (\alpha+\beta))}{(alpha'+beta)/(alpha' * (alpha+beta))}
+ for the mean of \eqn{y_1}{y1},
+ while the second column has values
+ \eqn{(\beta'+\alpha)/(\beta' (\alpha+\beta))}{(beta'+alpha)/(beta' * (alpha+beta))}
+ for the mean of \eqn{y_2}{y2}.
+ The variance of \eqn{y_1}{y1} is
+ \deqn{ \frac{(\alpha')^2 + 2 \alpha \beta + \beta^2}{
+ (\alpha')^2 (\alpha + \beta)^2}, }{%
+ [(alpha')^2 + 2 * alpha * beta + beta^2]/
+ [(alpha')^2 * (alpha + beta)^2], }
+ the variance of \eqn{y_2}{y2} is
+ \deqn{ \frac{(\beta')^2 + 2 \alpha \beta + \alpha^2 }{
+ (\beta')^2 (\alpha + \beta)^2 }, }{%
+ [(beta')^2 + 2 * alpha * beta + alpha^2]/
+ [(beta')^2 * (alpha + beta)^2], }
+ the covariance of \eqn{y_1}{y1} and \eqn{y_2}{y2} is
+ \deqn{ \frac{\alpha' \beta' - \alpha \beta }{
+ \alpha' \beta' (\alpha + \beta)^2}. }{%
+ [alpha' * beta' - alpha * beta]/
+ [alpha' * beta' * (alpha + beta)^2]. }
+
+}
+%\section{Warning}{
+%}
+
+\seealso{
+ \code{\link{exponential}}.
+}
+\examples{
+y1 = rexp(n <- 200, rate=4)
+y2 = rexp(n, rate=8)
+ymat = cbind(y1,y2)
+fit = vglm(ymat ~ 1, fam=freund61, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+vcov(fit)
+fitted(fit)[1:5,]
+summary(fit)
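+
+# y1 and y2 were generated independently with rates 4 and 8, so
+# min(y1,y2) is exponential with rate 12; an informal check
+mean(pmin(y1, y2))  # Should be near 1/12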
+
+# y1 and y2 are independent, so fit an independence model
+fit2 = vglm(ymat ~ 1, fam=freund61(indep=TRUE), trace=TRUE)
+coef(fit2, matrix=TRUE)
+constraints(fit2)
+1 - pchisq(2*(logLik(fit)-logLik(fit2)), df=2) # p-value
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
new file mode 100644
index 0000000..f6aed9e
--- /dev/null
+++ b/man/gamma1.Rd
@@ -0,0 +1,75 @@
+\name{gamma1}
+\alias{gamma1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ 1-parameter Gamma Distribution }
+\description{
+ Estimates the 1-parameter gamma distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+gamma1(link = "loge")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to the (positive) \emph{shape} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+}
+\details{
+ The density function is given by
+ \deqn{f(y) = \exp(-y) \times y^{shape-1} / \Gamma(shape)}{%
+ f(y) = exp(-y) y^(shape-1) / gamma(shape)}
+ for \eqn{shape > 0} and \eqn{y > 0}.
+ Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
+ function, as in \code{\link[base:Special]{gamma}}.
+ The mean of \eqn{Y} (returned as the fitted values)
+ is \eqn{\mu=shape}{mu=shape}, and the variance is
+ \eqn{\sigma^2 = shape}{sigma^2 = shape}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+ Most standard texts on statistical distributions describe
+ the 1-parameter gamma distribution, e.g.,
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+}
+\author{ T. W. Yee }
+\note{
+ This \pkg{VGAM} family function can handle a multivariate (matrix)
+ response.
+
+  The parameter \eqn{shape} matches \code{shape} in
+ \code{\link[stats]{rgamma}}. The argument
+ \code{rate} in \code{\link[stats]{rgamma}} is assumed
+ 1 for this family function.
+
+ If \eqn{rate} is unknown use the family function
+ \code{\link{gamma2.ab}} to estimate it too.
+
+}
+
+\seealso{
+ \code{\link{gamma2.ab}} for the 2-parameter gamma distribution.
+
+}
+\examples{
+y = rgamma(n=100, shape= exp(3))
+fit = vglm(y ~ 1, gamma1, trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit)
+summary(fit)
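+
+# For gamma1 the mean equals the shape parameter, so the sample mean
+# gives an informal check (the true shape is exp(3) here)
+mean(y)
+exp(3)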
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
new file mode 100644
index 0000000..823173a
--- /dev/null
+++ b/man/gamma2.Rd
@@ -0,0 +1,164 @@
+\name{gamma2}
+\alias{gamma2}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ 2-parameter Gamma Distribution }
+\description{
+ Estimates the 2-parameter gamma distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+gamma2(lmu = "loge", lshape = "loge",
+ method.init = 1, deviance.arg = FALSE,
+ ishape = NULL, zero = -2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, lshape}{
+ Link functions applied to the (positive) \emph{mu} and \emph{shape}
+ parameters (called \eqn{\mu}{mu} and \eqn{\lambda}{shape} respectively).
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ishape}{
+ Optional initial value for \emph{shape}.
+ A \code{NULL} means a value is computed internally.
+ If a failure to converge occurs, try using this argument.
+ This argument is ignored if used within \code{\link{cqo}}; see the
+ \code{iShape} argument of \code{\link{qrrvglm.control}} instead.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1} or \code{2} which
+ specifies the initialization method for the \eqn{\mu}{mu} parameter.
+ If failure to converge occurs
+ try another value (and/or specify a value for \code{ishape}).
+
+ }
+  \item{deviance.arg}{
+  Logical. If \code{TRUE}, the deviance function
+  is attached to the object. Under ordinary circumstances it should
+  be left alone because it assumes the shape parameter is fixed at
+  the maximum likelihood estimate; consequently, the deviance cannot
+  be used as the criterion to minimize within the IRLS algorithm.
+  It should be set \code{TRUE} only when used with \code{\link{cqo}}
+  under the fast algorithm.
+
+ }
+ \item{zero}{
+% An integer specifying which
+% linear/additive predictor is to be modelled as an intercept only.
+% If assigned, the single value should be either 1 or 2 or \code{NULL}.
+% The default is to model \eqn{shape} as an intercept only.
+% A value \code{NULL} means neither 1 or 2.
+
+  Integer-valued vector, usually assigned \eqn{-2} or \eqn{2} if
+ used at all. Specifies which of the two linear/additive predictors
+ are modelled as an intercept only. By default, the shape parameter
+ (after \code{lshape} is applied) is modelled as a single unknown
+ number that is estimated. It can be modelled as a function of
+ the explanatory variables by setting \code{zero=NULL}. A negative
+ value means that the value is recycled, so setting \eqn{-2} means
+ all shape parameters are intercept only.
+
+ }
+}
+\details{
+ This distribution can model continuous skewed responses.
+ The density function is given by
+ \deqn{f(y;\mu,\lambda) = \frac{\exp(-\lambda y / \mu) \times
+ (\lambda y / \mu)^{\lambda-1}
+ \times \lambda}{
+ \mu \times \Gamma(\lambda)}}{%
+ f(y;mu,shape) = exp(-shape * y / mu) y^(shape-1) shape^(shape) /
+ [mu^(shape) * gamma(shape)]}
+ for
+ \eqn{\mu > 0}{mu > 0},
+ \eqn{\lambda > 0}{shape > 0}
+ and \eqn{y > 0}.
+ Here,
+ \eqn{\Gamma(\cdot)}{gamma()} is the gamma
+ function, as in \code{\link[base:Special]{gamma}}.
+  The mean of \emph{Y} is \eqn{\mu}{mu} (returned as the fitted
+  values) with variance \eqn{\sigma^2 = \mu^2 / \lambda}{sigma^2 =
+  mu^2 / shape}. If \eqn{0<\lambda<1}{0<shape<1} then the density has a
+  pole at the origin and decreases monotonically as \eqn{y} increases.
+  If \eqn{\lambda=1}{shape=1} then this corresponds to the exponential
+  distribution. If \eqn{\lambda>1}{shape>1} then the density is zero at the
+  origin and is unimodal with mode at \eqn{y = \mu - \mu / \lambda}{y =
+  mu - mu / shape}; the constraint \eqn{\lambda>1}{shape>1} can be
+  enforced by choosing \code{lshape="loglog"}.
+
+ By default, the two linear/additive predictors are
+ \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and
+ \eqn{\eta_2=\log(\lambda)}{eta2=log(shape)}.
+ This family function implements Fisher scoring and the working
+ weight matrices are diagonal.
+
+ This \pkg{VGAM} family function handles \emph{multivariate} responses,
+ so that a matrix can be used as the response. The number of columns is
+ the number of species, say, and \code{zero=-2} means that \emph{all}
+ species have a shape parameter equalling a (different) intercept only.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ The parameterization of this \pkg{VGAM} family function is the
+ 2-parameter gamma distribution described in the monograph
+
+McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+}
+\author{ T. W. Yee }
+\note{
+ The response must be strictly positive.
+ A moment estimator for the shape parameter may be implemented in
+ the future.
+
+ If \code{mu} and \code{shape} are vectors, then \code{rgamma(n=n,
+ shape=shape, scale=mu/shape)} will generate random gamma variates of this
+ parameterization, etc.;
+ see \code{\link[stats]{GammaDist}}.
+
+ For \code{\link{cqo}} and \code{\link{cao}}, taking the logarithm
+ of the response means (approximately) a \code{\link{gaussianff}} family
+ may be used on the transformed data.
+
+}
+
+\seealso{
+ \code{\link{gamma1}} for the 1-parameter gamma distribution,
+ \code{\link{gamma2.ab}} for another parameterization of
+ the 2-parameter gamma distribution,
+ \code{\link{mckaygamma2}} for \emph{a} bivariate gamma distribution,
+ \code{\link{expexp}},
+ \code{\link[stats]{GammaDist}},
+ \code{\link{golf}}.
+
+}
+\examples{
+# Essentially a 1-parameter gamma
+y = rgamma(n <- 100, shape= exp(1))
+fit1 = vglm(y ~ 1, gamma1, trace=TRUE, crit="c")
+fit2 = vglm(y ~ 1, gamma2, trace=TRUE, crit="c")
+coef(fit1, matrix=TRUE)
+Coef(fit1)
+coef(fit2, matrix=TRUE)
+Coef(fit2)
+
+
+# Essentially a 2-parameter gamma
+y = rgamma(n <- 500, rate=exp(1), shape=exp(2))
+fit2 = vglm(y ~ 1, gamma2, trace=TRUE, crit="c")
+coef(fit2, matrix=TRUE)
+Coef(fit2)
+summary(fit2)
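+
+# Rough check: the true values are mu = shape/rate = exp(2-1) and
+# shape = exp(2); compare with Coef(fit2) above
+c(mu=exp(2-1), shape=exp(2))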
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gamma2.ab.Rd b/man/gamma2.ab.Rd
new file mode 100644
index 0000000..b67229b
--- /dev/null
+++ b/man/gamma2.ab.Rd
@@ -0,0 +1,118 @@
+\name{gamma2.ab}
+\alias{gamma2.ab}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ 2-parameter Gamma Distribution }
+\description{ Estimates the 2-parameter gamma distribution
+ by maximum likelihood estimation.
+}
+\usage{
+gamma2.ab(lrate = "loge", lshape = "loge",
+ irate=NULL, ishape=NULL, expected = TRUE, zero = 2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lrate, lshape}{
+ Link functions applied to the (positive) \emph{rate} and \emph{shape}
+ parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{expected}{
+ Logical. Use Fisher scoring? The default is yes, otherwise
+ Newton-Raphson is used.
+
+ }
+ \item{irate, ishape}{
+ Optional initial values for \emph{rate} and \emph{shape}.
+ A \code{NULL} means a value is computed internally.
+ If a failure to converge occurs, try using these arguments.
+
+ }
+ \item{zero}{
+ An integer specifying which
+ linear/additive predictor is to be modelled as an intercept only.
+ If assigned, the single value should be either 1 or 2 or \code{NULL}.
+ The default is to model \eqn{shape} as an intercept only.
+  A value of \code{NULL} means neither 1 nor 2.
+
+ }
+}
+\details{
+ The density function is given by
+ \deqn{f(y) = \exp(-rate \times y) \times y^{shape-1} \times rate^{shape} /
+ \Gamma(shape)}{%
+ f(y) = exp(-rate * y) y^(shape-1) rate^(shape) / gamma(shape)}
+ for \eqn{shape > 0}, \eqn{rate > 0} and \eqn{y > 0}.
+ Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
+ function, as in \code{\link[base:Special]{gamma}}.
+ The mean of \emph{Y} is \eqn{\mu=shape/rate}{mu=shape/rate}
+ (returned as the fitted values) with variance
+ \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 =
+ mu^2 /shape = shape/rate^2}.
+ By default, the two linear/additive predictors are
+ \eqn{\eta_1=\log(rate)}{eta1=log(rate)} and
+ \eqn{\eta_2=\log(shape)}{eta2=log(shape)}.
+
+ The argument \code{expected} refers to the type of information
+ matrix. The expected information matrix corresponds to Fisher scoring
+ and is numerically better here. The observed information matrix
+ corresponds to the Newton-Raphson algorithm and may be withdrawn
+  from the family function in the future. If both algorithms work,
+  the differences in the results are usually small.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Most standard texts on statistical distributions describe
+ the 2-parameter gamma distribution, e.g.,
+
+ Evans, M., Hastings, N. and Peacock, B. (2000)
+ \emph{Statistical Distributions},
+ New York: Wiley-Interscience, Third edition.
+
+}
+\author{ T. W. Yee }
+\note{
+ The parameters \eqn{rate} and \eqn{shape} match with the arguments
+ \code{rate} and \code{shape} of \code{\link[stats]{rgamma}}. Often,
+ \eqn{scale=1/rate} is used.
+
+ If \eqn{rate=1} use the family function \code{\link{gamma1}} to
+ estimate \eqn{shape}.
+
+}
+
+\seealso{
+ \code{\link{gamma1}} for the 1-parameter gamma distribution,
+ \code{\link{gamma2}} for another parameterization of
+ the 2-parameter gamma distribution,
+ \code{\link{mckaygamma2}} for \emph{a} bivariate gamma distribution,
+ \code{\link{expexp}}.
+
+}
+\examples{
+# Essentially a 1-parameter gamma
+y = rgamma(n <- 100, shape= exp(1))
+fit1 = vglm(y ~ 1, gamma1, trace=TRUE, crit="c")
+fit2 = vglm(y ~ 1, gamma2.ab, trace=TRUE, crit="c")
+coef(fit1, matrix=TRUE)
+Coef(fit1)
+coef(fit2, matrix=TRUE)
+Coef(fit2)
+
+
+# Essentially a 2-parameter gamma
+y = rgamma(n <- 500, rate=exp(1), shape=exp(2))
+fit2 = vglm(y ~ 1, gamma2.ab, trace=TRUE, crit="c")
+coef(fit2, matrix=TRUE)
+Coef(fit2)
+summary(fit2)
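+
+# Rough check: the true values are rate = exp(1) and shape = exp(2);
+# compare with Coef(fit2) above
+c(rate=exp(1), shape=exp(2))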
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gammahyp.Rd b/man/gammahyp.Rd
new file mode 100644
index 0000000..b62320c
--- /dev/null
+++ b/man/gammahyp.Rd
@@ -0,0 +1,85 @@
+\name{gammahyp}
+\alias{gammahyp}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gamma Hyperbola Bivariate Distribution }
+\description{
+ Estimate the parameter of a gamma hyperbola bivariate distribution
+ using maximum likelihood estimation.
+}
+\usage{
+gammahyp(ltheta="loge", itheta=NULL, expected=FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{ltheta}{
+ Link function applied to the (positive) parameter \eqn{\theta}{theta}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{itheta}{
+ Initial value for the parameter.
+ The default is to estimate it internally.
+
+ }
+ \item{expected}{
+  Logical. If \code{FALSE}, the Newton-Raphson algorithm (using
+  the observed information matrix) is used, otherwise the expected
+  information matrix is used (the Fisher scoring algorithm).
+
+ }
+}
+\details{
+ The joint probability density function is given by
+ \deqn{f(y_1,y_2) = \exp( -e^{-\theta} y_1 / \theta - \theta y_2 )}{%
+ f(y1,y2) = exp( -exp(-theta) * y1 / theta - theta * y2) }
+ for \eqn{\theta > 0}{theta > 0}, \eqn{y_1 > 0}{y1 > 0}, \eqn{y_2 > 1}{y2 > 1}.
+ The random variables \eqn{Y_1}{Y1} and \eqn{Y_2}{Y2} are independent.
+ The marginal distribution of \eqn{Y_1}{Y1} is an exponential distribution
+ with rate parameter \eqn{\exp(-\theta)/\theta}{exp(-theta)/theta}.
+ The marginal distribution of \eqn{Y_2}{Y2} is an exponential distribution
+ that has been shifted to the right by 1 and with
+ rate parameter \eqn{\theta}{theta}.
+ The fitted values are stored in a two-column matrix with the marginal
+ means, which are \eqn{\theta \exp(\theta)}{theta * exp(theta)} and
+ \eqn{1 + 1/\theta}{1 + 1/theta}.
+
+ The default algorithm is Newton-Raphson because Fisher scoring tends to
+ be much slower for this distribution.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+
+\references{
+
+Reid, N. (2003)
+Asymptotics and the theory of inference.
+\emph{Annals of Statistics},
+\bold{31}, 1695--1731.
+
+}
+\author{ T. W. Yee }
+\note{
+ The response must be a two column matrix.
+
+}
+\seealso{
+ \code{\link{exponential}}.
+}
+\examples{
+x = runif(n <- 1000)
+theta = exp(-2+x)
+y1 = rexp(n, rate=exp(-theta)/theta)
+y2 = 1 + rexp(n, rate=theta)
+fit = vglm(cbind(y1,y2) ~ x, fam=gammahyp(expected=TRUE), trace=TRUE)
+fit = vglm(cbind(y1,y2) ~ x, fam=gammahyp, trace=TRUE, crit="coef")
+coef(fit, matrix=TRUE)
+Coef(fit)
+fitted(fit)[1:4,]
+summary(fit)
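+
+# Since log(theta) = -2 + 1*x generated the data and the default link
+# is loge, the theta coefficients above should be close to (-2, 1)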
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/garma.Rd b/man/garma.Rd
new file mode 100644
index 0000000..7e78689
--- /dev/null
+++ b/man/garma.Rd
@@ -0,0 +1,178 @@
+\name{garma}
+\alias{garma}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{GARMA (Generalized Autoregressive Moving-Average) Models}
+\description{
+ Fits GARMA models to time series data.
+}
+\usage{
+garma(link = c("identity", "loge", "reciprocal",
+ "logit", "probit", "cloglog", "cauchit"),
+ p.ar.lag = 1, q.lag.ma = 0,
+ coefstart = NULL, step = 1, constant = 0.1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to the mean response.
+ By default, the first choice is used, which is suitable for
+ continuous responses.
+ The link \code{\link{loge}} should be chosen if the data are counts.
+ The links \code{\link{logit}}, \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}} are suitable for binary responses.
+
+ }
+\item{p.ar.lag}{
+ A positive integer,
+ the lag for the autoregressive component.
+ Called \eqn{p} below.
+
+ }
+\item{q.lag.ma}{
+ A non-negative integer,
+ the lag for the moving-average component.
+ Called \eqn{q} below.
+
+ }
+ \item{coefstart}{
+ Starting values for the coefficients.
+ For technical reasons, the
+ argument \code{coefstart} in \code{\link{vglm}} cannot be used.
+
+ }
+ \item{step}{
+ Numeric. Step length, e.g., \code{0.5} means half-stepsizing.
+
+ }
+ \item{constant}{
+ Used when the log or logit link is chosen.
+ For log, zero values are replaced by \code{constant}.
+ For logit, zero values are replaced by \code{constant} and
+ unit values replaced by \code{1-constant}.
+
+ }
+}
+\details{
+ This function draws heavily on Benjamin \emph{et al.} (1998).
+ See also Benjamin \emph{et al.} (2003).
+ GARMA models extend the ARMA time series model to generalized
+ responses in the exponential family, e.g., Poisson counts,
+ binary responses. Currently, this function can handle continuous,
+ count and binary responses only. The possible link functions
+ given in the \code{link} argument reflect this, and the user
+ must choose an appropriate link.
+
+ The GARMA(\eqn{p,q}) model is defined by firstly
+ having a response belonging to the exponential family
+ \deqn{f(y_t|D_t) = \exp
+ \left\{ \frac{y_t \theta_t - b(\theta_t)}{\phi / A_t} +
+ c(y_t, \phi / A_t)
+ \right\}}{%
+      f(y_t|D_t) = exp
+      [ (y_t theta_t - b(theta_t)) / (phi / A_t) +
+        c(y_t, phi / A_t)
+ ]
+ }
+ where \eqn{\theta_t}{theta_t} and \eqn{\phi}{phi} are the
+ canonical and scale parameters
+ respectively, and \eqn{A_t} are known prior weights.
+ The mean
+ \eqn{\mu_t=E(Y_t|D_t)=b'(\theta_t)}{mu_t=E(Y_t|D_t)=b'(theta_t)}
+ is related to
+ the linear predictor \eqn{\eta_t}{eta_t} by the link
+ function \eqn{g}.
+ Here,
+ \eqn{D_t=\{x_t,\ldots,x_1,y_{t-1},\ldots,y_1,\mu_{t-1},\ldots,\mu_1\}}{
+  D_t={x_t,...,x_1,y_(t-1),...,y_1,mu_(t-1),...,mu_1}}
+ is the previous information set.
+ Secondly, the GARMA(\eqn{p,q}) model is defined by
+ \deqn{g(\mu_t) = \eta_t = x_t^T \beta +
+ \sum_{k=1}^p \phi_k (g(y_{t-k}) - x_{t-k}^T \beta) +
+ \sum_{k=1}^q \theta_k (g(y_{t-k}) - \eta_{t-k}).}{%
+ g(mu_t) = eta_t = x_t^T beta +
+       sum_{k=1}^p phi_k (g(y_{t-k}) - x_{t-k}^T beta) +
+       sum_{k=1}^q theta_k (g(y_{t-k}) - eta_{t-k}).}
+ Parameter vectors \eqn{\beta}{beta}, \eqn{\phi}{phi} and \eqn{\theta}{theta}
+ are estimated by maximum likelihood.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}.
+
+}
+\references{
+  Benjamin, M. A., Rigby, R. A. and Stasinopoulos, M. D. (1998)
+  Fitting Non-Gaussian Time Series Models. Pages 191--196 in:
+  \emph{Proceedings in Computational Statistics COMPSTAT 1998}, eds.
+  Payne, R. and Green, P. J. Physica-Verlag.
+
+ Benjamin, M. A., Rigby, R. A. and Stasinopoulos, M. D. (2003)
+ Generalized Autoregressive Moving Average Models.
+ \emph{Journal of the American Statistical Association},
+ \bold{98}: 214--223.
+
+ Zeger, S. L. and Qaqish, B. (1988)
+ Markov regression models for time series: a quasi-likelihood approach.
+ \emph{Biometrics},
+ \bold{44}: 1019--1031.
+
+}
+
+\author{ T. W. Yee }
+\note{
+  This function is unpolished and requires lots
+  of improvement. In particular, initialization is quite poor
+  and could be improved.
+ A limited amount of experience has shown that half-stepsizing is
+ often needed for convergence, therefore choosing \code{crit="coef"}
+ is not recommended.
+
+ Overdispersion is not handled.
+
+}
+\section{Warning}{
+  This \pkg{VGAM} family function is `non-standard' in that the model needs
+  some coercing to get it into the VGLM framework.
+ Special code is required to get it running.
+ A consequence is that some methods functions may give wrong results
+ when applied to the fitted object.
+
+}
+\seealso{
+% \code{\link{identity}},
+% \code{\link{logit}}.
+
+ The site \url{http://www.stat.auckland.ac.nz/~yee} contains
+ more documentation about this family function.
+
+}
+
+\examples{
+# See Zeger and Qaqish (1988)
+interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
+59, 73, 6, 44, 72, 66, 59, 60, 39, 52,
+50, 29, 30, 56, 76, 55, 73, 104, 104, 52,
+25, 33, 20, 60, 47, 6, 47, 22, 35, 30,
+29, 58, 24, 34, 36, 34, 6, 19, 28, 16,
+36, 33, 12, 26, 36, 39, 24, 14, 28, 13,
+ 2, 30, 18, 17, 28, 9, 28, 20, 17, 12,
+19, 18, 14, 23, 18, 22, 18, 19, 26, 27,
+23, 24, 35, 22, 29, 28, 17, 30, 34, 17,
+20, 49, 29, 35, 49, 25, 55, 42, 29, 16)
+spikenum = seq(interspike)
+fit = vglm(interspike ~ 1, garma("loge",p=2,coef=c(4,.3,.4)), tra=TRUE)
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit) # A bug here
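+
+# A minimal sketch (not part of VGAM) of the GARMA(1,0) recursion with the
+# loge link, eta_t = beta0 + phi1*(log(y[t-1]) - beta0); beta0 and phi1
+# are illustrative values here, not the fitted coefficients
+beta0 = 4; phi1 = 0.3
+eta = numeric(length(interspike))
+eta[1] = beta0
+for(tt in 2:length(interspike))
+    eta[tt] = beta0 + phi1 * (log(interspike[tt-1]) - beta0)
+eta[1:5] # Implied linear predictors; the means would be exp(eta)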
+\dontrun{
+plot(interspike, ylim=c(0,120), las=1, font=1, xlab="Spike Number",
+ ylab="Inter-Spike Time (ms)", col="blue")
+lines(spikenum[-(1:fit@misc$plag)], fitted(fit), col="green")
+abline(h=mean(interspike), lty=2)
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
new file mode 100644
index 0000000..d18a74d
--- /dev/null
+++ b/man/gaussianff.Rd
@@ -0,0 +1,144 @@
+\name{gaussianff}
+%\alias{gaussian}
+\alias{gaussianff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gaussian (normal) Family Function }
+\description{
+ Fits a generalized linear model to a response with Gaussian (normal)
+ errors. The dispersion parameter may be known or unknown.
+
+}
+\usage{
+gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{parallel}{
+ A logical or formula. If a formula, the response of the formula should
+  be a logical and the terms of the formula indicate whether or not
+ those terms are parallel.
+
+ }
+ \item{dispersion}{
+ Dispersion parameter.
+ If 0 then it is estimated and the moment estimate is put in
+  \code{object@misc$dispersion}; it is assigned the value
+  \deqn{ \sum_{i=1}^n \; (y_i - \eta_i)^T W_i (y_i - \eta_i) / (nM-p) }{%
+        sum_{i=1}^n (y_i - eta_i)^T W_i (y_i - eta_i) / (nM-p) }
+ where \eqn{p} is the total number of parameters estimated
+ (for RR-VGLMs the value used is the number of columns in the large
+ \eqn{X} model matrix; this may not be correct).
+ If the argument is assigned a positive quantity then it is assumed to
+ be known with that value.
+
+% zz 28/8/06 check for RR-VGLMs
+
+% By default, maximum likelihood is used to
+% By default, maximum likelihood is used to
+% estimate the model because it is known. However, the user can specify
+% \code{dispersion = 0} to have it estimated.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The values must be from the set
+ \{1,2,\ldots,\eqn{M}\} where \eqn{M} is the number of columns of the
+ matrix response.
+
+ }
+}
+\details{
+ This function is usually used in conjunction with \code{\link{vglm}}, else
+ \code{vlm} is recommended instead.
+ The notation \eqn{M} is used to denote the number of
+ linear/additive predictors.
+ This function can handle any finite \eqn{M}, and the default is to
+ use ordinary least squares.
+ A vector linear/additive model can be fitted by minimizing
+ \deqn{ \sum_{i=1}^n \; (y_i - \eta_i)^T W_i (y_i - \eta_i) }{%
+        sum_{i=1}^n (y_i - eta_i)^T W_i (y_i - eta_i) }
+ where \eqn{y_i} is a \eqn{M}-vector,
+ \eqn{\eta_i}{eta_i} is the vector of linear/additive predictors.
+ The \eqn{W_i} is any positive-definite matrix, and the default is the
+ order-\eqn{M} identity matrix.
+ The \eqn{W_i} can be inputted using the \code{weights} argument of
+ \code{vlm}/\code{\link{vglm}}/\code{\link{vgam}} etc., and the
+  format is the \emph{matrix-band} format whereby it is an \eqn{n \times
+  A}{n * A} matrix in which the diagonal elements are passed first,
+  followed by the first upper band, and so on up to the \eqn{(1,M)}
+  element. Here, \eqn{A}
+  has a maximum value of \eqn{M(M+1)/2} and a minimum value of \eqn{M}.
+  Usually the \code{weights} argument of
+  \code{vlm}/\code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}}
+  is just a vector,
+  in which case each element is multiplied by an order-\eqn{M}
+  identity matrix.
+ If in doubt, type something like \code{weights(object, type="working")}
+ after the model has been fitted.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+ Yee, T. W. and Wild, C. J. (1996)
+ Vector generalized additive models.
+ \emph{Journal of the Royal Statistical Society, Series B, Methodological},
+ \bold{58}, 481--493.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ This \pkg{VGAM} family function is supposed to be similar to
+  \code{\link[stats]{gaussian}} but it is not compatible with
+ \code{\link[stats]{glm}}.
+ The \code{"ff"} in the name is added to avoid any masking problems.
+
+}
+
+% \section{Warning }{
+% This function probably contains some bugs, so the user is advised to
+% be cautious.
+%
+%
+
+\seealso{
+ \code{\link{normal1}},
+ \code{vlm},
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}}.
+
+}
+
+\examples{
+d = data.frame(x = sort(runif(n <- 40)))
+d = transform(d, y1 = 1 + 2*x + rnorm(n, sd=0.1),
+ y2 = 3 + 4*x + rnorm(n, sd=0.1),
+ y3 = 7 + 4*x + rnorm(n, sd=0.1))
+fit = vglm(cbind(y1,y2) ~ x, gaussianff, data=d)
+coef(fit, matrix=TRUE)
+
+# For comparison:
+coef( lmfit <- lm(y1 ~ x, data=d))
+coef(glmfit <- glm(y2 ~ x, data=d, gaussian))
+vcov(fit)
+vcov(lmfit)
+
+t(weights(fit, type="prior")) # Unweighted observations
+weights(fit, type="working")[1:4,] # Identity matrices
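+
+# A sketch of the matrix-band format described above, for M=2 so A=3:
+# the two diagonal elements come first, then the single (1,2) element.
+# The particular W_i below (the same for all i) is just an illustration.
+wmat = matrix(c(1, 1, 0.5), nrow=n, ncol=3, byrow=TRUE)
+fitw = vglm(cbind(y1,y2) ~ x, gaussianff, data=d, weights=wmat)
+coef(fitw, matrix=TRUE)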
+
+# Reduced-rank VLM (rank-1)
+fit2 = rrvglm(cbind(y1,y2,y3) ~ x, gaussianff, data=d)
+Coef(fit2)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
new file mode 100644
index 0000000..5629bd4
--- /dev/null
+++ b/man/genbetaII.Rd
@@ -0,0 +1,123 @@
+\name{genbetaII}
+\alias{genbetaII}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Generalized Beta Distribution of the Second Kind }
+\description{
+ Maximum likelihood estimation of the 4-parameter
+ generalized beta II distribution.
+}
+\usage{
+genbetaII(link.a = "loge", link.scale = "loge",
+ link.p = "loge", link.q = "loge",
+ init.a = NULL, init.scale = NULL, init.p = 1, init.q = 1,
+ zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.a, link.scale, link.p, link.q}{
+ Parameter link functions applied to the
+ shape parameter \code{a},
+ scale parameter \code{scale},
+ shape parameter \code{p}, and
+ shape parameter \code{q}.
+ All four parameters are positive.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.a, init.scale}{
+ Optional initial values for \code{a} and \code{scale}.
+ A \code{NULL} means a value is computed internally.
+
+ }
+ \item{init.p, init.q}{
+ Optional initial values for \code{p} and \code{q}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2,3,4\} which correspond to
+ \code{a}, \code{scale}, \code{p}, \code{q}, respectively.
+
+ }
+}
+\details{
+  This distribution is most useful for unifying a substantial
+  number of size distributions. For example, the Singh-Maddala,
+  Dagum, Fisk (log-logistic), Lomax (Pareto type II),
+  inverse Lomax, and beta distribution of the second kind
+  distributions are all special cases.
+  Full details can be found in Kleiber and Kotz (2003) and
+  Brazauskas (2002).
+
+The 4-parameter generalized beta II distribution has density
+ \deqn{f(y) = a y^{ap-1} / [b^{ap} B(p,q) \{1 + (y/b)^a\}^{p+q}]}{%
+ f(y) = a y^(ap-1) / [b^(ap) B(p,q) (1 + (y/b)^a)^(p+q)]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y > 0}.
+Here \eqn{B} is the beta function, and
+\eqn{b} is the scale parameter \code{scale},
+while the others are shape parameters.
+The mean is
+ \deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(q - 1/a) / (\Gamma(p) \, \Gamma(q))}{%
+ E(Y) = b gamma(p + 1/a) gamma(q - 1/a) / ( gamma(p) gamma(q))}
+provided \eqn{-ap < 1 < aq}.
+
+%The distribution is motivated by the incomplete beta function
+%\eqn{B_y(p,q)} which is the integral from 0 to \eqn{y} of the integrand
+%\eqn{u^{p-1} (1-u)^{q-1}}{u^(p-1) (1-u)^(q-1)} where \eqn{y>0}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+Brazauskas, V. (2002)
+Fisher information matrix for the Feller-Pareto distribution.
+\emph{Statistics & Probability Letters},
+\bold{59}, 159--167.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+Successful convergence depends on having very
+good initial values. This is rather difficult for this distribution!
+More improvements could be made here.
+
+}
+
+\seealso{
+ \code{\link{lino}},
+ \code{\link{betaff}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{sinmad}},
+ \code{\link{fisk}},
+ \code{\link{lomax}},
+ \code{\link{invlomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rsinmad(n=3000, 4, 6, 2) # Not very good data!
+fit = vglm(y ~ 1, genbetaII, trace=TRUE)
+fit = vglm(y ~ 1, genbetaII(init.p=1.0, init.a=4, init.sc=7, init.q=2.3),
+ trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
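+
+# Note: rsinmad() generates Singh-Maddala data, the special case p=1,
+# so the estimate of p above should be near 1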
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
new file mode 100644
index 0000000..a265019
--- /dev/null
+++ b/man/genpoisson.Rd
@@ -0,0 +1,72 @@
+\name{genpoisson}
+\alias{genpoisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Generalized Poisson distribution }
+\description{
+ Estimation of the two parameters of a generalized Poisson distribution.
+}
+\usage{
+genpoisson(llambda = "logit", ltheta = "loge",
+ ilambda = 0.5, itheta = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llambda}{
+ Parameter link function for \code{lambda}.
+ See \code{\link{Links}} for more choices.
+ The parameter lies in the unit interval, therefore the
+ logit link is a reasonable default.
+
+ }
+ \item{ltheta}{
+ Parameter link function for \code{theta}.
+ See \code{\link{Links}} for more choices.
+ The parameter is positive, therefore the default is the log link.
+
+ }
+ \item{ilambda}{ Optional initial value for \code{lambda}. }
+ \item{itheta}{ Optional initial value for \code{theta}. }
+  \item{zero}{ An integer vector, containing the value 1 or 2, specifying
+  that \code{lambda} or \code{theta} respectively is modelled as an intercept only.
+ The default value \code{NULL} means both linear/additive predictors
+ are modelled as functions of the explanatory variables.
+}
+}
+\details{
+ See Consul (1989) for details.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Jorgensen, B. (1997)
+\emph{The Theory of Dispersion Models}.
+London: Chapman & Hall.
+
+Consul, P. C. (1989)
+\emph{Generalized Poisson Distributions: Properties and Applications},
+Marcel Dekker.
+
+}
+\author{ T. W. Yee }
+\note{
+ Convergence problems may occur when \code{lambda} is very close to 0.
+ This distribution is useful for dispersion modelling.
+}
+\seealso{
+ \code{\link{poissonff}}.
+}
+
+\examples{
+y = rpois(n=100, lam=24)
+fit = vglm(y ~ 1, genpoisson, trace=TRUE)
+fitted(fit)[1:5]
+mean(y)
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/geometric.Rd b/man/geometric.Rd
new file mode 100644
index 0000000..43e8dae
--- /dev/null
+++ b/man/geometric.Rd
@@ -0,0 +1,77 @@
+\name{geometric}
+\alias{geometric}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Geometric Distribution }
+\description{
+ Maximum likelihood estimation for the geometric distribution.
+}
+\usage{
+geometric(link = "logit", expected = TRUE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the
+ parameter \eqn{p}{prob}, which lies in the unit interval.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{expected}{
+ Logical.
+ Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson.
+
+ }
+}
+\details{
+ A random variable \eqn{Y} has a 1-parameter geometric distribution
+ if \eqn{P(Y=y) = p (1-p)^y}{P(Y=y) = prob * (1-prob)^y}
+ for \eqn{y=0,1,2,\ldots}{y=0,1,2,...}.
+ Here, \eqn{p}{prob} is the probability of success,
+  and \eqn{Y} is the number of (independent) failed trials
+  before the first success occurs.
+ Thus the response \eqn{Y} should be a non-negative integer.
+ The mean of \eqn{Y} is \eqn{E(Y) = (1-p)/p}{E(Y) = (1-prob)/prob}
+ and its variance is \eqn{Var(Y) = (1-p)/p^2}{Var(Y) = (1-prob)/prob^2}.
+ The geometric distribution is a special case of the
+ negative binomial distribution (see \code{\link{negbinomial}}).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Evans, M., Hastings, N. and Peacock, B. (2000)
+ \emph{Statistical Distributions},
+ New York: Wiley-Interscience, Third edition.
+}
+
+\author{ T. W. Yee }
+%\note{
+%
+%}
+
+\seealso{
+ \code{\link{negbinomial}},
+ \code{\link[stats]{Geometric}},
+ \code{\link{betageometric}},
+ \code{\link{rbetageom}}.
+}
+\examples{
+x1 = runif(n <- 1000) - 0.5
+x2 = runif(n) - 0.5
+x3 = runif(n) - 0.5
+eta = 0.2 - 0.7 * x1 + 1.9 * x2
+prob = logit(eta, inverse=TRUE)
+y = rgeom(n, prob)
+table(y)
+fit = vglm(y ~ x1 + x2 + x3, geometric, trace=TRUE, crit="coef")
+coef(fit)
+coef(fit, mat=TRUE)
+summary(fit)
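+
+# Rough check against the true coefficients on the logit scale
+rbind(true=c(0.2, -0.7, 1.9, 0), fitted=coef(fit))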
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/get.smart.Rd b/man/get.smart.Rd
new file mode 100644
index 0000000..36a6106
--- /dev/null
+++ b/man/get.smart.Rd
@@ -0,0 +1,64 @@
+\name{get.smart}
+\alias{get.smart}
+\title{ Retrieve One Component of ``.smart.prediction'' }
+\description{
+Retrieve one component of the list \code{.smart.prediction} from
+\code{smartpredenv} (\R) or
+frame 1 (S-PLUS).
+}
+\usage{
+get.smart()
+}
+\value{
+Returns with one list component of \code{.smart.prediction} from
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS),
+in fact, \code{.smart.prediction[[.smart.prediction.counter]]}.
+The whole procedure mimics a first-in first-out data structure,
+better known as a \emph{queue}.
+}
+\section{Side Effects}{
+The variable \code{.smart.prediction.counter} in
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS)
+is incremented beforehand, and then written back to
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS).
+}
+\details{
+ \code{get.smart} is used in \code{"read"} mode within a smart function:
+ it retrieves parameters saved at the time of fitting, and
+ is used for prediction.
+ \code{get.smart} is only used in smart functions such as
+ \code{\link[stats]{poly}};
+ \code{get.smart.prediction} is only used in modelling functions
+ such as \code{\link[stats]{lm}} and \code{\link[stats]{glm}}.
+ The function
+ \code{\link{get.smart}} gets only a part of \code{.smart.prediction} whereas
+ \code{\link{get.smart.prediction}} gets the entire \code{.smart.prediction}.
+}
+\seealso{
+\code{\link{get.smart.prediction}}.
+}
+\examples{
+"my1" <- function(x, minx=min(x)) { # Here is a smart function
+ x <- x # Needed for nested calls, e.g., bs(scale(x))
+ if(smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else
+ if(smart.mode.is("write"))
+ put.smart(list(minx=minx))
+ sqrt(x-minx)
+}
+attr(my1, "smart") <- TRUE
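+
+# Hypothetical sketch (not run): with smart prediction in operation,
+# fit <- lm(y ~ my1(x)) would call my1() in "write" mode and put.smart()
+# would save minx; predict(fit, newdata) would then call my1() in "read"
+# mode, and get.smart() would return the saved list(minx=minx).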
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
diff --git a/man/get.smart.prediction.Rd b/man/get.smart.prediction.Rd
new file mode 100644
index 0000000..2735986
--- /dev/null
+++ b/man/get.smart.prediction.Rd
@@ -0,0 +1,44 @@
+\name{get.smart.prediction}
+\alias{get.smart.prediction}
+\title{ Retrieves ``.smart.prediction'' }
+\description{
+Retrieves \code{.smart.prediction} from
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS).
+}
+\usage{
+get.smart.prediction()
+}
+\value{
+Returns with the list \code{.smart.prediction} from
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS).
+}
+\details{
+A smart modelling function such as \code{\link[stats]{lm}} allows
+smart functions such as \code{\link[splines]{bs}}
+to write to
+a data structure called \code{.smart.prediction} in
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS).
+At the end of fitting,
+\code{get.smart.prediction} retrieves this data structure.
+It is then attached to the object, and used for prediction later.
+}
+\seealso{
+ \code{\link{get.smart}},
+ \code{\link[stats]{lm}}.
+}
+\examples{
+\dontrun{# Put at the end of lm
+fit$smart <- get.smart.prediction() }
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
diff --git a/man/gev.Rd b/man/gev.Rd
new file mode 100644
index 0000000..630d13b
--- /dev/null
+++ b/man/gev.Rd
@@ -0,0 +1,274 @@
+\name{gev}
+\alias{gev}
+\alias{egev}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Generalized Extreme Value Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the
+ 3-parameter generalized extreme value (GEV) distribution.
+
+}
+\usage{
+gev(llocation = "identity", lscale = "loge", lshape = "logoff",
+ elocation = list(), escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
+ percentiles = c(95, 99),
+ iscale=NULL, ishape = NULL,
+ method.init = 1, gshape=c(-0.45, 0.45), tshape0=0.001, zero = 3)
+egev(llocation = "identity", lscale = "loge", lshape = "logoff",
+ elocation = list(), escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else list(),
+ percentiles = c(95, 99),
+ iscale=NULL, ishape = NULL,
+ method.init=1, gshape=c(-0.45, 0.45), tshape0=0.001, zero = 3)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation, lscale, lshape}{
+ Parameter link function for \eqn{\mu}{mu}, \eqn{\sigma}{sigma} and
+ \eqn{\xi}{xi}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{elocation, escale, eshape}{
+ Extra argument for the respective links.
+ See \code{earg} in \code{\link{Links}} for general information.
+ For the shape parameter,
+ if the \code{\link{logoff}} link is chosen then the offset is
+ called \eqn{A} below; and then the linear/additive predictor is
+ \eqn{\log(\xi+A)}{log(xi+A)} which means that
+ \eqn{\xi > -A}{xi > -A}.
+  For technical reasons (see \bold{Details}) it is a good idea to
+  choose \eqn{A=0.5}.
+
+ }
+
+% \item{Offset}{
+% Numeric, of length 1.
+% Called \eqn{A} below.
+% Offset value if \code{lshape="logoff"}.
+% Then the linear/additive predictor is
+% \eqn{\log(\xi+A)}{log(xi+A)} which means that
+% \eqn{\xi > -A}{xi > -A}.
+% For technical reasons (see \bold{Details}) it is a good idea for
+% \code{Offset=0.5}.
+
+% }
+ \item{percentiles}{
+ Numeric vector of percentiles used
+ for the fitted values. Values should be between 0 and 100.
+ However, if \code{percentiles=NULL}, then the mean
+ \eqn{\mu + \sigma (\Gamma(1-\xi)-1) / \xi}{mu + sigma * (gamma(1-xi)-1)/xi}
+ is returned, and this is only defined if \eqn{\xi<1}{xi<1}.
+
+ }
+ \item{iscale, ishape}{
+ Numeric. Initial value for \eqn{\sigma}{sigma} and
+ \eqn{\xi}{xi}. A \code{NULL} means a value is computed internally.
+  The argument \code{ishape} is more important than \code{iscale} because
+  the other parameters are initialized from the initial \eqn{\xi}{xi}.
+ If a failure to converge occurs, or even to obtain initial values occurs,
+ try assigning \code{ishape} some value.
+ Also, in general, a larger value of \code{iscale} is better than a
+ smaller value.
+
+ }
+% \item{rshape}{
+% Numeric, of length 2.
+% Range of \eqn{\xi}{xi} if \code{lshape="elogit"} is chosen.
+% The rationale for the default values is given below.
+
+% }
+% \item{mean}{
+% Logical. If \code{TRUE}, the mean is computed and returned
+% as the fitted values. This argument overrides the
+% \code{percentiles} argument.
+% See \bold{Details} for more details.
+
+% }
+ \item{method.init}{
+ Initialization method. Either the value 1 or 2.
+  Method 1 involves choosing the best \eqn{\xi}{xi} on a coarse grid with
+ endpoints \code{gshape}.
+ Method 2 is similar to the method of moments.
+ If both methods fail try using \code{ishape}.
+
+ }
+ \item{gshape}{
+ Numeric, of length 2.
+ Range of \eqn{\xi}{xi} used for a grid search for a good initial value
+ for \eqn{\xi}{xi}.
+ Used only if \code{method.init} equals 1.
+
+ }
+ \item{tshape0}{
+ Positive numeric.
+  Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero.
+ If the absolute value of the estimate of \eqn{\xi}{xi} is less than
+ this value then it will be assumed zero and Gumbel derivatives etc. will
+ be used.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\} corresponding
+ respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi}.
+ If \code{zero=NULL} then all linear/additive predictors are modelled as
+ a linear combination of the explanatory variables.
+ For many data sets having \code{zero=3} is a good idea.
+
+ }
+}
+\details{
+ The GEV distribution function can be written
+ \deqn{G(y) = \exp( -[ (y-\mu)/ \sigma ]_{+}^{- 1/ \xi}) }{%
+ G(y) = exp( -[ (y- mu)/ sigma ]_{+}^{- 1/ xi}) }
+ where \eqn{\sigma > 0}{sigma > 0},
+ \eqn{-\infty < \mu < \infty}{-Inf < mu < Inf},
+ and \eqn{1 + \xi(y-\mu)/\sigma > 0}{1 + xi*(y-mu)/sigma > 0}.
+ Here, \eqn{x_+ = \max(x,0)}{x_+ = max(x,0)}.
+ The \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi} are known as the
+ \emph{location}, \emph{scale} and \emph{shape} parameters respectively.
+ The cases
+ \eqn{\xi>0}{xi>0},
+ \eqn{\xi<0}{xi<0},
+ \eqn{\xi=0}{xi=0}
+ correspond to the Frechet, Weibull, and Gumbel types respectively.
+  It can be noted that the Gumbel (or Type I) case arises as the limiting
+  distribution for maxima from many commonly-used distributions such as
+  the normal, lognormal, logistic, gamma, exponential and Weibull.
+
+ For the GEV distribution, the \eqn{k}th moment about the mean exists
+ if \eqn{\xi < 1/k}{xi < 1/k}.
+ Provided they exist, the mean and variance are given by
+ \eqn{\mu+\sigma\{ \Gamma(1-\xi)-1\}/ \xi}{mu + sigma \{ Gamma(1-xi)-1\} / xi}
+ and
+ \eqn{\sigma^2 \{ \Gamma(1-2\xi) - \Gamma^2(1-\xi) \} / \xi^2}{sigma^2
+ \{ Gamma(1-2 xi) - Gamma^2 (1- xi) \} / xi^2}
+ respectively,
+ where \eqn{\Gamma}{Gamma} is the gamma function.
+
+ Smith (1985) established that when \eqn{\xi > -0.5}{xi > -0.5},
+ the maximum likelihood estimators are completely regular.
+ To have some control over the estimated \eqn{\xi}{xi} try
+ using \code{lshape="logoff"} and the \code{eshape=list(offset=0.5)}, say,
+ or \code{lshape="elogit"} and \code{eshape=list(min=-0.5, max=0.5)}, say.
+
+% and when \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5} they exist but are
+% non-regular; and when \eqn{\xi < -1}{xi < -1} then the maximum
+% likelihood estimators do not exist. In most environmental data
+% sets \eqn{\xi > -1}{xi > -1} so maximum likelihood works fine.
+
+}
+\section{Warning }{
+ Currently, if an estimate of \eqn{\xi}{xi} is too close to zero then
+ an error will occur for \code{gev()} with multivariate responses.
+ In general, \code{egev()} is more reliable than \code{gev()}.
+
+ Fitting the GEV by maximum likelihood estimation can be numerically
+ fraught. If \eqn{1 + \xi (y-\mu)/ \sigma \leq 0}{1 + xi*(y-mu)/sigma <=
+ 0} then some crude evasive action is taken but the estimation process
+ can still fail. This is particularly the case if \code{\link{vgam}}
+ with \code{\link{s}} is used. Then smoothing is best done with
+ \code{\link{vglm}} with regression splines (\code{\link[splines]{bs}}
+ or \code{\link[splines]{ns}}) because \code{\link{vglm}} implements
+ half-stepsizing whereas \code{\link{vgam}} doesn't. Half-stepsizing
+ helps handle the problem of straying outside the parameter space.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+ Tawn, J. A. (1988)
+ An extreme-value theory model for dependent observations.
+ \emph{Journal of Hydrology}, \bold{101}, 227--250.
+
+ Prescott, P. and Walden, A. T. (1980)
+ Maximum likelihood estimation of the parameters of the
+ generalized extreme-value distribution.
+ \emph{Biometrika}, \bold{67}, 723--724.
+
+ Smith, R. L. (1985)
+ Maximum likelihood estimation in a class of nonregular cases.
+ \emph{Biometrika}, \bold{72}, 67--90.
+
+}
+\author{ T. W. Yee }
+
+\note{
+ The \pkg{VGAM} family function \code{gev} can handle a multivariate
+ (matrix) response. If so, each row of the matrix is sorted into
+ descending order. With a vector or one-column matrix response using
+ \code{egev} will give the same result but be faster and it handles
+ the \eqn{\xi=0}{xi=0} case.
+ The function \code{gev} implements Tawn (1988) while
+ \code{egev} implements Prescott and Walden (1980).
+
+ The shape parameter \eqn{\xi}{xi} is difficult to estimate
+ accurately unless there is a lot of data.
+ Convergence is slow when \eqn{\xi}{xi} is near \eqn{-0.5}.
+ Given many explanatory variables, it is often a good idea
+ to make sure \code{zero=3}.
+ The range restrictions of the parameter \eqn{\xi}{xi} are not
+ enforced; thus it is possible for a violation to occur.
+
+ Successful convergence often depends on having a reasonably good initial
+ value for \eqn{\xi}{xi}. If failure occurs try various values for the
+ argument \code{ishape}, and if there are covariates, setting \code{zero=3}
+ is advised.
+
+}
+
+\seealso{
+%\code{\link{egev}},
+%\code{\link{ogev}},
+\code{\link{rgev}},
+\code{\link{gumbel}},
+\code{\link{egumbel}},
+\code{\link{guplot}},
+\code{\link{rlplot.egev}},
+\code{\link{gpd}},
+\code{\link{elogit}},
+\code{\link{oxtemp}},
+\code{\link{venice}}.
+}
+
+\examples{
+# Multivariate example
+data(venice)
+y = as.matrix(venice[,paste("r", 1:10, sep="")])
+fit1 = vgam(y[,1:2] ~ s(year, df=3), gev(zero=2:3), venice, trace=TRUE)
+coef(fit1, matrix=TRUE)
+fitted(fit1)[1:4,]
+\dontrun{
+par(mfrow=c(1,2), las=1)
+plot(fit1, se=TRUE, lcol="blue", scol="forestgreen",
+ main="Fitted mu(year) function (centered)")
+attach(venice)
+matplot(year, y[,1:2], ylab="Sea level (cm)", col=1:2,
+ main="Highest 2 annual sealevels & fitted 95 percentile")
+lines(year, fitted(fit1)[,1], lty="dashed", col="blue")
+detach(venice)
+}
+
+
+# Univariate example
+data(oxtemp)
+(fit = vglm(maxtemp ~ 1, egev, data=oxtemp, trace=TRUE))
+fitted(fit)[1:3,]
+coef(fit, mat=TRUE)
+Coef(fit)
+vcov(fit)
+vcov(fit, untransform=TRUE)
+sqrt(diag(vcov(fit))) # Approximate standard errors
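+
+# Rough check of the first fitted percentile (95) via qgev(); it is
+# assumed here that Coef(fit) returns location, scale and shape in that order
+cf = Coef(fit)
+qgev(0.95, location=cf[1], scale=cf[2], shape=cf[3])
+fitted(fit)[1,]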
+\dontrun{ rlplot(fit) }
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
new file mode 100644
index 0000000..8cfaff3
--- /dev/null
+++ b/man/gevUC.Rd
@@ -0,0 +1,79 @@
+\name{gevUC}
+\alias{gevUC}
+\alias{dgev}
+\alias{pgev}
+\alias{qgev}
+\alias{rgev}
+\title{The Generalized Extreme Value Distribution }
+\description{
+ Density, distribution function, quantile function and random
+ generation for the generalized extreme value distribution (GEV) with
+ location parameter \code{location},
+ scale parameter \code{scale} and
+ shape parameter \code{shape}.
+}
+\usage{
+dgev(x, location=0, scale=1, shape=0)
+pgev(q, location=0, scale=1, shape=0)
+qgev(p, location=0, scale=1, shape=0)
+rgev(n, location=0, scale=1, shape=0)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Positive integer of length 1.}
+ \item{location}{the location parameter \eqn{\mu}{mu}.}
+ \item{scale}{the scale parameter \eqn{\sigma}{sigma}.
+ Must consist of positive values. }
+ \item{shape}{the shape parameter \eqn{\xi}{xi}.}
+}
+\value{
+ \code{dgev} gives the density,
+ \code{pgev} gives the distribution function,
+ \code{qgev} gives the quantile function, and
+ \code{rgev} generates random deviates.
+}
+\references{
+Coles, S. (2001)
+\emph{An Introduction to Statistical Modeling of Extreme Values}.
+London: Springer-Verlag.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{gev}}, the \pkg{VGAM} family function
+  for estimating the three parameters by maximum likelihood estimation,
+  for formulae and other details.
+  Apart from \code{n}, all the above arguments may be vectors and
+  are recycled to the appropriate length if necessary.
+}
+\note{
+ The default value of \eqn{\xi=0}{xi=0} means the default distribution
+ is the Gumbel.
+
+ Currently, these functions have different argument names compared with
+ those in the \pkg{evd} package.
+
+}
+\seealso{
+ \code{\link{gev}}.
+}
+\examples{
+\dontrun{
+x = seq(-3, 3, by=0.01)
+loc = 0; sigma = 1; xi = -0.4
+plot(x, dgev(x, loc, sigma, xi), type="l", col="blue", ylim=c(0,1),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", ylab="", las=1)
+abline(h=0, col="blue", lty=2)
+lines(qgev(seq(0.05,0.95,by=0.05), loc, sigma, xi),
+ dgev(qgev(seq(0.05,0.95,by=0.05), loc, sigma, xi), loc, sigma, xi),
+ col="purple", lty=3, type="h")
+lines(x, pgev(x, loc, sigma, xi), type="l", col="red")
+abline(h=0, lty=2)
+
+pgev(qgev(seq(0.05,0.95,by=0.05), loc, sigma, xi), loc, sigma, xi)
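+
+# Numerical check that the density integrates to 1; for xi = -0.4 the
+# upper endpoint of the support is loc - sigma/xi = 2.5
+integrate(dgev, lower=-10, upper=2.5, location=loc, scale=sigma, shape=xi)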
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/gew.Rd b/man/gew.Rd
new file mode 100644
index 0000000..ba593b3
--- /dev/null
+++ b/man/gew.Rd
@@ -0,0 +1,40 @@
+\name{gew}
+\alias{gew}
+\docType{data}
+\title{ General Electric and Westinghouse Data }
+\description{
+ General Electric and Westinghouse capital data.
+}
+\usage{data(gew)}
+\format{
+ A data frame with 20 observations on the following 6 variables.
+ \describe{
+ \item{y1}{a numeric vector which may be regarded as investment
+ figures for the two companies}
+ \item{x1}{market values}
+ \item{x2}{capital stocks}
+ \item{y2}{a numeric vector which may be regarded as investment
+ figures for the two companies}
+ \item{x3}{market values}
+ \item{x4}{capital stocks}
+ }
+}
+\details{
+ The period is 1934 to 1953.
+}
+\source{
+ Unknown.
+}
+\references{
+Zellner, A. (1962)
+An efficient method of estimating seemingly unrelated regressions
+and tests for aggregation bias.
+\emph{Journal of the American Statistical Association},
+\bold{57}, 348--368.
+
+}
+\examples{
+data(gew)
+str(gew)
+}
+\keyword{datasets}
diff --git a/man/ggamma.Rd b/man/ggamma.Rd
new file mode 100644
index 0000000..79c8c81
--- /dev/null
+++ b/man/ggamma.Rd
@@ -0,0 +1,126 @@
+\name{ggamma}
+\alias{ggamma}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Generalized Gamma distribution family function }
+\description{
+ Estimation of the 3-parameter generalized gamma distribution proposed by
+ Stacy (1962).
+
+}
+\usage{
+ggamma(lscale="loge", ld="loge", lk="loge",
+ iscale=NULL, id=NULL, ik=NULL, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lscale, ld, lk}{
+ Parameter link function applied to each of the positive parameters
+ \eqn{b}, \eqn{d} and \eqn{k}, respectively.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iscale, id, ik}{
+ Initial value for \eqn{b}, \eqn{d} and \eqn{k}, respectively.
+ The defaults mean an initial value is determined internally for each.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default value means none are modelled as intercept-only terms.
+
+ }
+}
+\details{
+ The probability density function can be written
+ \deqn{f(y;b,d,k) = d b^{-d k} y^{d k-1} \exp[-(y/b)^d] / \Gamma(k)}{%
+ f(y;b,d,k) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k)}
+ for scale parameter \eqn{b > 0}, and \eqn{d > 0}, \eqn{k > 0},
+ and \eqn{y > 0}.
+ The mean of \eqn{Y} is \eqn{bk}{b*k} (returned as the fitted values).
+
+There are many special cases, as given in Table 1 of Stacy and Mihram (1965).
+In the following, the parameters are in the order \eqn{b,d,k}.
+The special cases are:
+Exponential \eqn{f(y;b,1,1)},
+Gamma \eqn{f(y;b,1,k)},
+Weibull \eqn{f(y;b,d,1)},
+Chi Squared \eqn{f(y;2,1,a/2)} with \eqn{a} degrees of freedom,
+Chi \eqn{f(y;\sqrt{2},2,a/2)}{f(y;sqrt(2),2,a/2)} with \eqn{a} degrees of freedom,
+Half-normal \eqn{f(y;\sqrt{2},2,1/2)}{f(y;sqrt(2),2,1/2)},
+Circular normal \eqn{f(y;\sqrt{2},2,1)}{f(y;sqrt(2),2,1)},
+Spherical normal \eqn{f(y;\sqrt{2},2,3/2)}{f(y;sqrt(2),2,3/2)},
+Rayleigh \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
+
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Stacy, E. W. (1962)
+ A generalization of the gamma distribution.
+ \emph{Annals of Mathematical Statistics}, \bold{33}, 1187--1192.
+
+ Stacy, E. W. and Mihram, G. A. (1965)
+ Parameter estimation for a generalized gamma distribution.
+ \emph{Technometrics}, \bold{7}, 349--358.
+
+ Prentice, R. L. (1974)
+ A log gamma model and its maximum likelihood estimation.
+ \emph{Biometrika}, \bold{61}, 539--544.
+
+}
+\section{Warning }{
+ Several authors have considered maximum likelihood estimation for the
+ generalized gamma distribution and have found that the Newton-Raphson
+ algorithm does not work very well and that the existence of solutions
+ to the log-likelihood equations is sometimes in doubt.
+ Although Fisher scoring is used here, it is likely that the same
+  problems will be encountered. It appears that large samples
+  are required; for example, the estimator of \eqn{k} has been found to be
+  approximately normally distributed only with 400 or more observations.
+ It is not uncommon for maximum likelihood estimates to fail to converge
+ even with two or three hundred observations.
+ With covariates, even more observations are needed to increase the
+ chances of convergence.
+
+}
+\author{ T. W. Yee }
+\note{
+ The notation used here differs from Stacy (1962) and Prentice (1974).
+ Poor initial values may result in failure to converge so
+ if there are covariates and there are convergence problems,
+ try using the \code{zero} argument (e.g., \code{zero=2:3})
+ or the \code{ik} argument.
+
+}
+\seealso{
+ \code{\link{rggamma}},
+ \code{\link{gamma1}},
+ \code{\link{gamma2}},
+ \code{\link{prentice74}}.
+
+}
+\examples{
+k = exp(-1)
+Scale = exp(1)
+y = rgamma(n=1000, shape=k, scale=Scale)
+fit = vglm(y ~ 1, ggamma, trace=TRUE)
+coef(fit, matrix=TRUE)
+
+# Another example
+x = runif(n <- 5000)
+Scale = exp(1)
+d = exp(0 + 1.2*x)
+k = exp(-1 + 2*x)
+y = rggamma(n, scale=Scale, d=d, k=k)
+fit = vglm(y ~ x, ggamma(zero=1, iscal=6), trace=TRUE)
+coef(fit, matrix=TRUE)
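+
+# Rough check: with zero=1, log(scale) is intercept only (true value 1),
+# while log(d) and log(k) have true coefficients (0, 1.2) and (-1, 2)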
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/ggammaUC.Rd b/man/ggammaUC.Rd
new file mode 100644
index 0000000..9b44b7e
--- /dev/null
+++ b/man/ggammaUC.Rd
@@ -0,0 +1,68 @@
+\name{ggammaUC}
+\alias{ggammaUC}
+\alias{dggamma}
+\alias{pggamma}
+\alias{qggamma}
+\alias{rggamma}
+\title{The Generalized Gamma Distribution }
+\description{
+ Density, distribution function, quantile function and random
+ generation for the generalized gamma distribution with
+ scale parameter \code{scale},
+ and parameters \code{d} and \code{k}.
+}
+\usage{
+dggamma(x, scale=1, d=1, k=1)
+pggamma(q, scale=1, d=1, k=1)
+qggamma(p, scale=1, d=1, k=1)
+rggamma(n, scale=1, d=1, k=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Positive integer of length 1.}
+ \item{scale}{the (positive) scale parameter \eqn{b}.}
+ \item{d, k}{the (positive) parameters \eqn{d} and \eqn{k}.}
+}
+\value{
+ \code{dggamma} gives the density,
+ \code{pggamma} gives the distribution function,
+ \code{qggamma} gives the quantile function, and
+ \code{rggamma} generates random deviates.
+}
+\references{
+Stacy, E. W. and Mihram, G. A. (1965)
+Parameter estimation for a generalized gamma distribution.
+\emph{Technometrics}, \bold{7}, 349--358.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{ggamma}}, the \pkg{VGAM} family function
+ for estimating the generalized gamma distribution
+ by maximum likelihood estimation,
+ for formulae and other details.
+ Apart from \code{n}, all the above arguments may be vectors and
+  are recycled to the appropriate length if necessary.
+}
+% \note{
+% }
+\seealso{
+ \code{\link{ggamma}}.
+}
+\examples{
+\dontrun{
+x=seq(0, 14, by=0.01); d=1.5; Scale=2; k=6
+plot(x, dggamma(x, Scale, d, k), type="l", col="blue", ylim=c(0,1),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
+abline(h=0, col="blue", lty=2)
+lines(qggamma(seq(0.05,0.95,by=0.05), Scale, d, k),
+ dggamma(qggamma(seq(0.05,0.95,by=0.05), Scale, d, k), Scale, d, k),
+ col="purple", lty=3, type="h")
+lines(x, pggamma(x, Scale, d, k), type="l", col="red")
+abline(h=0, lty=2)
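+
+# Quick consistency check between pggamma() and qggamma()
+pggamma(qggamma(seq(0.05,0.95,by=0.05), Scale, d, k), Scale, d, k)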
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/golf.Rd b/man/golf.Rd
new file mode 100644
index 0000000..a800d42
--- /dev/null
+++ b/man/golf.Rd
@@ -0,0 +1,176 @@
+\name{golf}
+\alias{golf}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gamma-Ordinal Link Function }
+\description{
+ Computes the gamma-ordinal transformation, including its inverse
+ and the first two derivatives.
+
+}
+\usage{
+golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Extra argument for passing in additional information.
+ This must be list with component \code{lambda}.
+ Here, \code{lambda} is the shape parameter
+ in \code{\link{gamma2}}.
+  A component in the list called \code{cutpoint} is optional; if omitted,
+  the cutpoints are omitted from the GOLF definition.
+  If \code{golf()} is used as the link function in
+  \code{\link{cumulative}} and the cutpoints are known, then
+ one should choose
+ \code{reverse=TRUE, parallel=TRUE, intercept.apply=TRUE}.
+ If the cutpoints are unknown, then choose
+ \code{reverse=TRUE, parallel=TRUE, intercept.apply=FALSE}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The gamma-ordinal link function (GOLF) can be applied to a
+ parameter lying in the unit interval.
+ Its purpose is to link cumulative probabilities associated with
+ an ordinal response coming from an underlying 2-parameter gamma
+ distribution.
+
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+ See \code{\link{Links}} for general information about \pkg{VGAM}
+ link functions.
+
+}
+\value{
+ See Yee (2006) for details.
+
+}
+\references{
+ Yee, T. W. (2006)
+ \emph{Link functions for ordinal count data},
+ (submitted for publication).
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical values of \code{theta} too close to 0 or 1, or out of range,
+ result in large positive or negative values, or maybe 0, depending on
+ the arguments.
+ Although measures have been taken to handle cases where
+ \code{theta} is too close to 1 or 0,
+ numerical instabilities may still arise.
+
+ In terms of the threshold approach with cumulative probabilities for
+ an ordinal response, this link function corresponds to the
+ gamma distribution (see \code{\link{gamma2}}) that has been
+ recorded as an ordinal response using known cutpoints.
+
+}
+\section{Warning }{
+ Prediction may not work on \code{\link{vglm}} or
+ \code{\link{vgam}} etc. objects if this link function is used.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{gamma2}},
+ \code{\link{polf}},
+ \code{\link{nbolf}},
+ \code{\link{cumulative}}.
+
+}
+\examples{
+golf("prob", short=FALSE)
+golf("prob", tag=TRUE)
+
+p = seq(0.02, 0.98, len=201)
+earg = list(lambda=1)
+y = golf(p, earg=earg)
+y. = golf(p, earg=earg, deriv=1)
+max(abs(golf(y, earg=earg, inv=TRUE) - p)) # Should be 0
+
+\dontrun{
+par(mfrow=c(2,1), las=1)
+plot(p, y, type="l", col="blue", main="golf()")
+abline(h=0, v=0.5, col="red", lty="dashed")
+
+plot(p, y., type="l", col="blue",
+ main="(Reciprocal of) first GOLF derivative")
+}
+
+
+# Another example
+nn = 1000
+x2 = sort(runif(nn))
+x3 = runif(nn)
+mymu = exp( 3 + 1 * x2 - 2 * x3)
+lambda = 4
+y1 = rgamma(nn, shape=lambda, scale=mymu/lambda)
+cutpoints = c(-Inf, 10, 20, Inf)
+cuty = Cut(y1, breaks=cutpoints)
+\dontrun{
+plot(x2, x3, col=cuty, pch=as.character(cuty))
+}
+table(cuty) / sum(table(cuty))
+fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="golf",
+ reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
+ mv=TRUE, earg=list(cutpoint=cutpoints[2:3], lambda=lambda)),
+ trace=TRUE)
+fit@y[1:5,]
+fitted(fit)[1:5,]
+predict(fit)[1:5,]
+coef(fit)
+coef(fit, matrix=TRUE)
+constraints(fit)
+fit@misc$earg
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
+% # Another example
+% nn = 1000
+% x2 = sort(runif(nn))
+% x3 = runif(nn)
+% shape = exp(0.0)
+% mymu = exp( 3 + 1 * x2 - 2 * x3)
+% y1 = rnbinom(nn, mu=mymu, size=shape)
+% cuty = Cut(y1)
+% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="golf", rev=TRUE,
+% mv=TRUE, parallel=TRUE, earg=list(lambda=shape)))
+% coef(fit)
+% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="probit", rev=TRUE,
+% mv=TRUE, parallel=TRUE))
+% coef(fit, matrix=TRUE)
+% coef(fit)
+
+
+
diff --git a/man/gpd.Rd b/man/gpd.Rd
new file mode 100644
index 0000000..51a936a
--- /dev/null
+++ b/man/gpd.Rd
@@ -0,0 +1,237 @@
+\name{gpd}
+\alias{gpd}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Generalized Pareto Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ generalized Pareto distribution (GPD).
+
+}
+\usage{
+gpd(threshold = 0, lscale = "loge", lshape = "logoff",
+ escale = list(),
+ eshape = if(lshape=="logoff") list(offset=0.5) else
+ if(lshape=="elogit") list(min=-0.5, max=0.5) else NULL,
+ percentiles = c(90, 95), iscale = NULL, ishape = NULL,
+ tshape0=0.001, method.init=1, zero = 2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{threshold}{
+ Numeric of length 1. The threshold value.
+ Only values of the response greater than this value are kept;
+ the response actually worked on internally is the difference
+ between the response and the threshold.
+ Only those observations greater than the threshold value are
+ returned in the \code{y} slot of the object.
+
+ }
+ \item{lscale}{
+ Parameter link function for the scale parameter \eqn{\sigma}{sigma}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lshape}{
+ Parameter link function for the shape parameter \eqn{\xi}{xi}.
+ See \code{\link{Links}} for more choices.
+ The default constrains the parameter
+ to be greater than \eqn{-0.5} (the negative of the default offset
+ in \code{eshape}).
+ This is because if \eqn{\xi \leq -0.5}{xi <= -0.5} then Fisher
+ scoring does not work.
+ However, it can be a little more interpretable if the offset is 1.
+ See the Details section below for more information.
+
+ }
+ \item{escale, eshape}{
+ Extra argument for the \code{lscale} and \code{lshape} arguments.
+ See \code{earg} in \code{\link{Links}} for general information.
+ For the shape parameter,
+ if the \code{\link{logoff}} link is chosen then the offset is
+ called \eqn{A} below; and then the second linear/additive predictor is
+ \eqn{\log(\xi+A)}{log(xi+A)} which means that
+ \eqn{\xi > -A}{xi > -A}.
+ The working weight matrices are positive definite if \eqn{A=0.5}.
+
+ }
+
+% \item{Offset}{
+% Numeric, of length 1.
+% Called \eqn{A} below.
+% Offset value if \code{lshape="logoff"}.
+% Then the second linear/additive predictor is
+% \eqn{\log(\xi+A)}{log(xi+A)} which means that
+% \eqn{\xi > -A}{xi > -A}.
+% The working weight matrices are positive definite if \code{Offset=0.5}.
+
+% }
+ \item{percentiles}{
+ Numeric vector of percentiles used
+ for the fitted values. Values should be between 0 and 100.
+ See the example below for illustration.
+ However, if \code{percentiles=NULL} then the mean
+ \eqn{\mu + \sigma / (1-\xi)}{mu + sigma / (1-xi)} is returned;
+ this is only defined if \eqn{\xi<1}{xi<1}.
+
+ }
+ \item{iscale, ishape}{
+ Numeric. Optional initial values for \eqn{\sigma}{sigma}
+ and \eqn{\xi}{xi}.
+ The default is to use \code{method.init} and compute a value internally for
+ each parameter.
+ Values of \code{ishape} should be between \eqn{-0.5} and \eqn{1}.
+ Values of \code{iscale} should be positive.
+
+ }
+% \item{rshape}{
+% Numeric, of length 2.
+% Range of \eqn{\xi}{xi} if \code{lshape="elogit"} is chosen.
+% The default values ensures the algorithm works (\eqn{\xi > -0.5}{xi > -0.5})
+% and the variance exists (\eqn{\xi < 0.5}{xi < 0.5}).
+
+% }
+ \item{tshape0}{
+ Positive numeric.
+ Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero.
+ If the absolute value of the estimate of \eqn{\xi}{xi} is less than
+ this value then it will be assumed zero and exponential distribution
+ derivatives etc. will be used.
+
+ }
+ \item{method.init}{
+ Method of initialization, either 1 or 2. The first is the method of
+ moments, and the second is a variant of this. If neither works, try
+ assigning values to the arguments \code{ishape} and/or \code{iscale}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The value must be from the set \{1,2\} corresponding
+ respectively to \eqn{\sigma}{sigma} and \eqn{\xi}{xi}.
+ It is often a good idea for the \eqn{\sigma}{sigma} parameter only
+ to be modelled through
+ a linear combination of the explanatory variables because the
+ shape parameter is probably best left as an intercept only:
+ \code{zero=2}.
+ Setting \code{zero=NULL} means both parameters are modelled with
+ explanatory variables.
+
+ }
+}
+\details{
+ The distribution function of the GPD can be written
+ \deqn{G(y) = 1 - [1 + \xi (y-\mu) / \sigma ]_{+}^{- 1/ \xi} }{%
+ G(y) = 1 - [1 + xi (y-mu)/ sigma ]_{+}^{- 1/ xi} }
+ where
+ \eqn{\mu}{mu} is the location parameter (known with value \code{threshold}),
+ \eqn{\sigma > 0}{sigma > 0} is the scale parameter,
+ \eqn{\xi}{xi} is the shape parameter, and
+ \eqn{h_+ = \max(h,0)}{h_+ = max(h,0)}.
+ The function \eqn{1-G} is known as the \emph{survivor function}.
+ The limit \eqn{\xi \rightarrow 0}{xi --> 0}
+ gives the \emph{shifted exponential} as a special case:
+ \deqn{G(y) = 1 - \exp[-(y-\mu)/ \sigma]. }{%
+ G(y) = 1 - exp[-(y-mu)/ sigma]. }
+ The support is \eqn{y>\mu}{y>mu} for \eqn{\xi>0}{xi>0},
+ and
+ \eqn{\mu < y < \mu-\sigma/\xi}{mu < y < mu - sigma/xi} for \eqn{\xi<0}{xi<0}.
+
+ Smith (1985) showed that if \eqn{\xi \leq -0.5}{xi <= -0.5} then this
+ is known as the nonregular case and problems/difficulties can arise
+ both theoretically and numerically. For the (regular) case \eqn{\xi >
+ -0.5}{xi > -0.5} the classical asymptotic theory of maximum likelihood
+ estimators is applicable; this is the default because the default offset is 0.5.
+
+ Although for \eqn{\xi < -0.5}{xi < -0.5} the usual asymptotic properties
+ do not apply, the maximum likelihood estimator generally exists and
+ is superefficient for \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5}, so it is
+ ``better'' than normal. To allow for \eqn{-1 < \xi < -0.5}{-1 < xi <
+ -0.5} set the offset to 1, e.g., \code{eshape=list(offset=1)}.
+ When \eqn{\xi < -1}{xi < -1} the maximum
+ likelihood estimator generally does not exist as it effectively becomes
+ a two parameter problem.
+
+ The mean of \eqn{Y} does not exist unless \eqn{\xi < 1}{xi < 1}, and
+ the variance does not exist unless \eqn{\xi < 0.5}{xi < 0.5}. So if
+ you want to fit a model with finite variance use \code{lshape="elogit"}.
+
+}
+\note{
+ With functions \code{\link{rgpd}}, \code{\link{dgpd}}, etc., the
+ argument \code{location} matches with the argument \code{threshold}
+ here.
+
+}
+\section{Warning}{
+ Fitting the GPD by maximum likelihood estimation can be numerically
+ fraught. If \eqn{1 + \xi (y-\mu)/ \sigma \leq 0}{1 + xi*(y-mu)/sigma <=
+ 0} then some crude evasive action is taken but the estimation process
+ can still fail. This is particularly the case if \code{\link{vgam}}
+ with \code{\link{s}} is used. Then smoothing is best done with
+ \code{\link{vglm}} with regression splines (\code{\link[splines]{bs}}
+ or \code{\link[splines]{ns}}) because \code{\link{vglm}} implements
+ half-stepsizing whereas \code{\link{vgam}} doesn't. Half-stepsizing
+ helps handle the problem of straying outside the parameter space.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+ However, for this \pkg{VGAM} family function, \code{\link{vglm}}
+ is probably preferred over \code{\link{vgam}} when there is smoothing.
+
+}
+\references{
+ Coles, S. (2001)
+ \emph{An Introduction to Statistical Modeling of Extreme Values}.
+ London: Springer-Verlag.
+
+ Smith, R. L. (1985)
+ Maximum likelihood estimation in a class of nonregular cases.
+ \emph{Biometrika}, \bold{72}, 67--90.
+
+}
+\author{ T. W. Yee }
+
+\seealso{
+ \code{\link{rgpd}},
+ \code{\link{meplot}},
+ \code{\link{gev}},
+ \code{\link{pareto1}}.
+}
+
+\examples{
+# Simulated data from an exponential distribution (xi=0)
+y = rexp(n=3000, rate=2)
+fit = vglm(y ~ 1, gpd(threshold=0.5), trace=TRUE)
+fitted(fit)[1:5,]
+coef(fit, matrix=TRUE) # xi should be close to 0
+Coef(fit)
+summary(fit)
+
+yt = y[y > fit@extra$threshold] # Note the threshold is stored here
+all.equal(c(yt), c(fit@y)) # TRUE
+# Check the 90 percentile
+i = yt < fitted(fit)[1,"90\%"]
+100*table(i)/sum(table(i)) # Should be 90%
+
+# Check the 95 percentile
+i = yt < fitted(fit)[1,"95\%"]
+100*table(i)/sum(table(i)) # Should be 95%
+
+\dontrun{
+plot(yt, col="blue", las=1, main="Fitted 90\% and 95\% quantiles")
+matlines(1:length(yt), fitted(fit), lty=2:3, lwd=2)
+}
+
+
+# Another example
+nn = 2000; threshold = 0; x = runif(nn)
+xi = exp(-0.8)-0.5
+y = rgpd(nn, scale=exp(1+0.2*x), shape=xi)
+fit = vglm(y ~ x, gpd(threshold), trace=TRUE)
+Coef(fit)
+coef(fit, matrix=TRUE)
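+
+# Sketch: check pgpd() against the closed-form distribution function G(y)
+# given in Details (the parameter values here are arbitrary).
+sigma = 2; xi = 0.3; yy = seq(0.1, 5, by=0.5)
+G = 1 - pmax(1 + xi * yy / sigma, 0)^(-1/xi)
+max(abs(pgpd(yy, location=0, scale=sigma, shape=xi) - G)) # Should be ~0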
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
new file mode 100644
index 0000000..92c2c98
--- /dev/null
+++ b/man/gpdUC.Rd
@@ -0,0 +1,75 @@
+\name{gpdUC}
+\alias{gpdUC}
+\alias{dgpd}
+\alias{pgpd}
+\alias{qgpd}
+\alias{rgpd}
+\title{The Generalized Pareto Distribution }
+\description{
+ Density, distribution function, quantile function and random
+ generation for the generalized Pareto distribution (GPD) with
+ location parameter \code{location},
+ scale parameter \code{scale} and
+ shape parameter \code{shape}.
+}
+\usage{
+dgpd(x, location=0, scale=1, shape=0)
+pgpd(q, location=0, scale=1, shape=0)
+qgpd(p, location=0, scale=1, shape=0)
+rgpd(n, location=0, scale=1, shape=0)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Positive integer of length 1.}
+ \item{location}{the location parameter \eqn{\mu}{mu}.}
+ \item{scale}{the scale parameter \eqn{\sigma}{sigma}.}
+ \item{shape}{the shape parameter \eqn{\xi}{xi}.}
+}
+\value{
+ \code{dgpd} gives the density,
+ \code{pgpd} gives the distribution function,
+ \code{qgpd} gives the quantile function, and
+ \code{rgpd} generates random deviates.
+}
+\references{
+Coles, S. (2001)
+\emph{An Introduction to Statistical Modeling of Extreme Values}.
+London: Springer-Verlag.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{gpd}}, the \pkg{VGAM} family function
+ for estimating the two parameters by maximum likelihood estimation,
+ for formulae and other details.
+ Apart from \code{n}, all the above arguments may be vectors and
+ are recycled to the appropriate length if necessary.
+}
+\note{
+ The default values of all three parameters, especially
+ \eqn{\xi=0}{xi=0}, mean that the default distribution is the exponential.
+
+ Currently, these functions have different argument names compared
+ with those in the \pkg{evd} package.
+}
+\seealso{
+ \code{\link{gpd}}.
+}
+\examples{
+\dontrun{
+x = seq(-0.2, 3, by=0.01)
+loc = 0; sigma = 1; xi = -0.4
+plot(x, dgpd(x, loc, sigma, xi), type="l", col="blue", ylim=c(0,1),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", ylab="", las=1)
+abline(h=0, col="blue", lty=2)
+lines(qgpd(seq(0.05,0.95,by=0.05), loc, sigma, xi),
+ dgpd(qgpd(seq(0.05,0.95,by=0.05), loc, sigma, xi), loc, sigma, xi),
+ col="purple", lty=3, type="h")
+lines(x, pgpd(x, loc, sigma, xi), type="l", col="red")
+abline(h=0, lty=2)
+}
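+# Sketch of a check on the mean: for xi < 1 the mean is mu + sigma/(1-xi)
+# (see the gpd() help file); compare a sample mean from rgpd() with it.
+set.seed(1) # arbitrary seed, for reproducibility
+y = rgpd(n=10000, location=0, scale=1, shape=-0.4)
+mean(y)              # Sample mean
+0 + 1 / (1 - (-0.4)) # Population mean mu + sigma/(1-xi)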
+}
+\keyword{distribution}
+
+
diff --git a/man/grc.Rd b/man/grc.Rd
new file mode 100644
index 0000000..469e24d
--- /dev/null
+++ b/man/grc.Rd
@@ -0,0 +1,132 @@
+\name{grc}
+\alias{grc}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Fitting Goodman's RC Association Model }
+\description{
+ Fits a Goodman's RC Association Model to a matrix of counts
+
+}
+\usage{
+grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
+ Structural.zero = 1, summary.arg = FALSE, h.step = 1e-04, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ \item{y}{
+ A matrix of counts. Output from \code{table()} is acceptable;
+ it is converted into a matrix.
+ \code{y} must be at least 3 by 3.
+ }
+ \item{Rank}{
+ An integer in the range
+ \{1,\ldots,\code{min(nrow(y), ncol(y))}\}.
+ This is the dimension of the fit.
+
+ }
+ \item{Index.corner}{
+ A \code{Rank}-vector of integers.
+ These are used to store the \code{Rank} by \code{Rank}
+ identity matrix in the
+ \code{A} matrix; corner constraints are used.
+
+ }
+ \item{Structural.zero}{
+ An integer in the range \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
+ specifying the row that is used as the structural zero.
+ }
+ \item{summary.arg}{
+ Logical. If \code{TRUE}, a summary is returned.
+ If \code{TRUE}, \code{y} may be the output (fitted
+ object) of \code{grc()}.
+
+ }
+ \item{h.step}{
+ A small positive value that is passed into
+ \code{summary.rrvglm()}. Only used when \code{summary.arg=TRUE}. }
+ \item{\dots}{ Arguments that are passed into \code{rrvglm.control()}.
+
+ }
+}
+\details{
+ Goodman's RC association model can fit a reduced-rank approximation
+ to a table of counts. The log of each cell mean is decomposed as an
+ intercept plus a row effect plus a column effect plus a reduced-rank
+ part. The latter can be collectively written \code{A \%*\% t(C)},
+ the product of two `thin' matrices.
+ Indeed, \code{A} and \code{C} have \code{Rank} columns.
+By default, the first column and row of the interaction matrix
+\code{A \%*\% t(C)} are chosen
+to be structural zeros, because \code{Structural.zero=1}.
+This means the first row of \code{A} is all zeros.
+
+This function uses \code{options()$contrasts} to set up the row and
+column indicator variables.
+
+}
+\value{
+ An object of class \code{"grc"}, which currently is the same as
+ an \code{"rrvglm"} object.
+}
+\references{
+Goodman, L. A. (1981)
+Association models and canonical correlation in the analysis
+of cross-classifications having ordered categories.
+\emph{Journal of the American Statistical Association},
+\bold{76}, 320--334.
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information about the setting up of the
+indicator variables.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ This function sets up variables etc. before calling \code{rrvglm()}.
+ The \code{...} is passed into \code{rrvglm.control()}, meaning, e.g.,
+ \code{Rank=1} is the default. Setting \code{trace=TRUE} may be useful for
+ monitoring convergence.
+
+ Using \code{criterion="coefficients"} can result in slow convergence.
+
+ If \code{summary=TRUE}, then \code{y} can be a \code{"grc"} object,
+ in which case a summary can be returned. That is,
+ \code{grc(y, summary=TRUE)} is equivalent to
+ \code{summary(grc(y))}.
+
+}
+
+\section{Warning}{
+ This function temporarily creates a data frame in the workspace called
+ \code{.grc.df}, which used to be needed by \code{summary.rrvglm()}.
+ Then \code{.grc.df} is deleted before exiting the function. If an
+ error occurs, then \code{.grc.df} may be present in the workspace.
+
+}
+
+\seealso{
+\code{\link{rrvglm}},
+\code{\link{rrvglm.control}},
+\code{\link{rrvglm-class}},
+\code{summary.grc},
+\code{\link{auuc}}.
+}
+
+\examples{
+# Some undergraduate student enrolments at the University of Auckland in 1990
+data(auuc)
+g1 = grc(auuc, Rank=1)
+fitted(g1)
+summary(g1)
+
+g2 = grc(auuc, Rank=2, Index.corner=c(2,5))
+fitted(g2)
+summary(g2)
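+
+# Toy sketch (hypothetical numbers): a rank-1 interaction A \%*\% t(C)
+# whose first row and column are structural zeros because the first
+# rows of both A and C are zero.
+A = cbind(c(0, 0.5, -0.2, 0.7))
+C = cbind(c(0, 1.0, -1.0, 2.0))
+round(A \%*\% t(C), 2) # First row and column are all zeros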
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
new file mode 100644
index 0000000..ca7bfe4
--- /dev/null
+++ b/man/gumbel.Rd
@@ -0,0 +1,238 @@
+\name{gumbel}
+\alias{gumbel}
+\alias{egumbel}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gumbel Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ Gumbel distribution.
+
+}
+\usage{
+gumbel(llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(),
+ iscale = NULL, R = NA, percentiles = c(95, 99), mpv = FALSE,
+ zero = NULL)
+egumbel(llocation = "identity", lscale = "loge",
+ elocation = list(), escale = list(),
+ iscale = NULL, R = NA, percentiles = c(95, 99), mpv = FALSE,
+ zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation, lscale}{
+ Parameter link functions for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{elocation, escale}{
+ Extra argument for the \code{llocation} and \code{lscale} arguments.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{iscale}{
+ Numeric and positive.
+ Optional initial value for \eqn{\sigma}{sigma}.
+ Recycled to the appropriate length.
+ In general, a larger value is better than a smaller value.
+ A \code{NULL} means an initial value is computed internally.
+
+ }
+
+ \item{R}{
+ Numeric. Maximum number of values possible.
+ See \bold{Details} for more details.
+
+ }
+ \item{percentiles}{
+ Numeric vector of percentiles used
+ for the fitted values. Values should be between 0 and 100.
+ The fitted percentiles make use of the argument \code{R} if it is assigned.
+ If \code{percentiles=NULL} then the mean will be returned as the
+ fitted values.
+
+% This argument is ignored if \code{mean=TRUE}.
+
+ }
+ \item{mpv}{
+ Logical. If \code{mpv=TRUE} then the \emph{median predicted value} (MPV)
+ is computed and returned as the (last) column of the fitted values.
+ This argument is ignored if \code{percentiles=NULL}.
+ See \bold{Details} for more details.
+
+% This argument is ignored if \code{mean=TRUE}.
+
+ }
+% \item{mean}{
+% Logical. If \code{TRUE}, the mean is computed and returned
+% as the fitted values. This argument overrides the
+% \code{percentiles} and \code{mpv} arguments.
+% See \bold{Details} for more details.
+
+% }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The value (possibly values) must
+ be from the set \{1,2\} corresponding respectively to \eqn{\mu}{mu}
+ and \eqn{\sigma}{sigma}. By default all linear/additive predictors
+ are modelled as a linear combination of the explanatory variables.
+
+ }
+}
+\details{
+ The Gumbel distribution is a generalized extreme value (GEV)
+ distribution with \emph{shape} parameter \eqn{\xi=0}{xi=0}.
+ Consequently it is more easily estimated than the GEV.
+ See \code{\link{gev}} for more details.
+
+ The quantity \eqn{R} is the maximum number of observations possible.
+ For example, in the Venice data below, the top 10 daily values
+ are recorded for each year, therefore \eqn{R=365} because there are
+ about 365 days per year.
+ The MPV is the value of the response such that the probability
+ of obtaining a value greater than the MPV is 0.5 out of
+ \eqn{R} observations.
+ For the Venice data, the MPV is the sea level such that there
+ is an even chance that the highest level for a particular year
+ exceeds the MPV.
+ When \code{mpv=TRUE}, the column labelled \code{"MPV"} contains
+ the MPVs when \code{fitted()} is applied to the fitted object.
+
+ The formula for the mean of a response \eqn{Y} is
+ \eqn{\mu+\sigma \times \gamma}{mu + sigma * gamma} where
+ \eqn{\gamma}{gamma} is Euler's constant, whose value is
+ approximately 0.5772.
+ The formula for the percentiles is (if \code{R} is not given)
+ \eqn{\mu-\sigma \times \log[-\log(P/100)]}{mu -
+ sigma*log[-log(P/100)]}
+ where \eqn{P} is the \code{percentiles} argument value(s).
+ If \code{R} is given then the percentiles are
+ \eqn{\mu-\sigma \times \log[R(1-P/100)]}{mu -
+ sigma*log[R*(1-P/100)]}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+ Smith, R. L. (1986)
+ Extreme value theory based on the \emph{r} largest annual events.
+ \emph{Journal of Hydrology},
+ \bold{86}, 27--43.
+
+ Rosen, O. and Cohen, A. (1996)
+ Extreme percentile regression.
+ In: Haerdle, W. and Schimek, M. G. (eds.),
+ \emph{Statistical Theory and Computational Aspects of Smoothing:
+ Proceedings of the COMPSTAT '94 Satellite Meeting held in
+ Semmering, Austria, 27--28 August 1994}, pp.200--214,
+ Heidelberg: Physica-Verlag.
+
+ Coles, S. (2001)
+ \emph{An Introduction to Statistical Modeling of Extreme Values}.
+ London: Springer-Verlag.
+
+}
+\author{ T. W. Yee }
+
+\section{Warning}{
+ When \code{R} is not given (the default) the fitted percentiles are
+ those of the data, and not of the
+ overall population. For example, in the example below, the 50
+ percentile is approximately the running median through the data;
+ however, the data are the highest sea level measurements recorded each
+ year (it therefore equates to the median predicted value or MPV).
+
+}
+
+\note{
+ \code{egumbel()} only handles a univariate response, and is
+ preferred to \code{gumbel()} because it is faster.
+
+ \code{gumbel()} can handle a multivariate response, i.e., a
+ matrix with more than one column. Each row of the matrix is
+ sorted into descending order.
+ Missing values in the response are allowed but require
+ \code{na.action=na.pass}. The response matrix needs to be
+ padded with any missing values. With a multivariate response
+ one has a matrix \code{y}, say, where
+ \code{y[,2]} contains the second order statistics etc.
+
+% If a random variable \eqn{Y} has a \emph{reverse}
+% \eqn{Gumbel(\mu,\sigma)}{Gumbel(mu,sigma)} distribution then \eqn{-Y}
+% has a \eqn{Gumbel(-\mu,\sigma)}{Gumbel(-mu,sigma)} distribution.
+% It appears that some definite the reverse Gumbel the same as others
+% who define the ordinary Gumbel distribution, e.g., in \pkg{gamlss}.
+
+}
+
+\seealso{
+ \code{\link{rgumbel}},
+ \code{\link{cgumbel}},
+ \code{\link{guplot}},
+ \code{\link{gev}},
+ \code{\link{egev}},
+%\code{\link{ogev}},
+ \code{\link{venice}}.
+}
+
+\examples{
+# Example 1: Simulated data
+y = rgumbel(n=1000, loc = 100, scale=exp(1))
+fit = vglm(y ~ 1, egumbel(perc=NULL), trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+fitted(fit)[1:4,]
+mean(y)
+
+
+# Example 2: Venice data
+data(venice)
+(fit = vglm(cbind(r1,r2,r3,r4,r5) ~ year, data=venice,
+ gumbel(R=365, mpv=TRUE), trace=TRUE))
+fitted(fit)[1:5,]
+coef(fit, mat=TRUE)
+vcov(summary(fit))
+sqrt(diag(vcov(summary(fit)))) # Standard errors
+
+
+# Example 3: Try a nonparametric fit ---------------------
+# Use the entire data set, including missing values
+y = as.matrix(venice[,paste("r",1:10,sep="")])
+fit1 = vgam(y ~ s(year, df=3), gumbel(R=365, mpv=TRUE),
+ data=venice, trace=TRUE, na.action=na.pass)
+fit1@y[4:5,] # NAs used to pad the matrix
+
+\dontrun{
+# Plot the component functions
+par(mfrow=c(2,1), mar=c(5,4,.2,1)+0.1, xpd=TRUE)
+plot(fit1, se=TRUE, lcol="blue", scol="green", lty=1,
+ lwd=2, slwd=2, slty="dashed")
+
+# Quantile plot --- plots all the fitted values
+par(mfrow=c(1,1), bty="l", mar=c(4,4,.2,3)+0.1, xpd=TRUE, las=1)
+qtplot(fit1, mpv=TRUE, lcol=c(1,2,5), tcol=c(1,2,5), lwd=2,
+ pcol="blue", tadj=0.1, ylab="Sea level (cm)")
+
+# Plot the 99 percentile only
+par(mfrow=c(1,1), mar=c(3,4,.2,1)+0.1, xpd=TRUE)
+year = venice[["year"]]
+matplot(year, y, ylab="Sea level (cm)", type="n")
+matpoints(year, y, pch="*", col="blue")
+lines(year, fitted(fit1)[,"99\%"], lwd=2, col="red")
+
+# Check the 99 percentiles with a smoothing spline.
+# Nb. (1-0.99) * 365 = 3.65 is approx. 4, meaning the 4th order
+# statistic is approximately the 99 percentile.
+par(mfrow=c(1,1), mar=c(3,4,2,1)+0.1, xpd=TRUE, lwd=2)
+plot(year, y[,4], ylab="Sea level (cm)", type="n",
+ main="Red is 99 percentile, Green is a smoothing spline")
+points(year, y[,4], pch="4", col="blue")
+lines(year, fitted(fit1)[,"99\%"], lty=1, col="red")
+lines(smooth.spline(year, y[,4], df=4), col="darkgreen", lty=2)
+}
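+
+# Sketch: the percentile formula in Details (R not given) should agree
+# with qgumbel(); the values of mu, sigma and P here are arbitrary.
+mu = 100; sigma = exp(1); P = 95
+mu - sigma * log(-log(P/100))            # Formula from Details
+qgumbel(P/100, location=mu, scale=sigma) # Should match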
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
new file mode 100644
index 0000000..713c102
--- /dev/null
+++ b/man/gumbelUC.Rd
@@ -0,0 +1,109 @@
+\name{gumbelUC}
+\alias{dgumbel}
+\alias{pgumbel}
+\alias{qgumbel}
+\alias{rgumbel}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The Gumbel Distribution }
+\description{
+ Density, distribution function, quantile function and random
+ generation for the Gumbel distribution with
+ location parameter \code{location} and
+ scale parameter \code{scale}.
+}
+\usage{
+dgumbel(x, location=0, scale=1)
+pgumbel(q, location=0, scale=1)
+qgumbel(p, location=0, scale=1)
+rgumbel(n, location=0, scale=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Positive integer of length 1.}
+ \item{location}{the location parameter \eqn{\mu}{mu}.
+ This is not the mean
+ of the Gumbel distribution (see \bold{Details} below). }
+ \item{scale}{the scale parameter \eqn{\sigma}{sigma}.
+ This is not the standard deviation
+ of the Gumbel distribution (see \bold{Details} below). }
+
+}
+\details{
+ The Gumbel distribution is a special case of the
+ \emph{generalized extreme value} (GEV) distribution where
+ the shape parameter \eqn{\xi}{xi} = 0.
+ The latter has 3 parameters, so the Gumbel distribution has two.
+ The Gumbel distribution function is
+ \deqn{G(y) = \exp \left( - \exp \left[ - \frac{y-\mu}{\sigma} \right]
+ \right) }{%
+ G(y) = exp( -exp[ - (y-mu)/sigma ] ) }
+ where \eqn{-\infty<y<\infty}{-Inf<y<Inf},
+ \eqn{-\infty<\mu<\infty}{-Inf<mu<Inf} and
+ \eqn{\sigma>0}{sigma>0}.
+ Its mean is
+  \deqn{\mu + \sigma \gamma}{%
+        mu + sigma * gamma}
+ and its variance is
+ \deqn{\sigma^2 * \pi^2 / 6}{%
+ sigma^2 * pi^2 / 6}
+ where \eqn{\gamma}{gamma} is Euler's constant (which can be
+ obtained as \code{-digamma(1)}).
+
+ See \code{\link{gumbel}}, the \pkg{VGAM} family function
+ for estimating the two parameters by maximum likelihood estimation,
+ for formulae and other details.
+ Apart from \code{n}, all the above arguments may be vectors and
+ are recycled to the appropriate length if necessary.
+}
+\value{
+ \code{dgumbel} gives the density,
+ \code{pgumbel} gives the distribution function,
+ \code{qgumbel} gives the quantile function, and
+ \code{rgumbel} generates random deviates.
+}
+\references{
+ Coles, S. (2001)
+ \emph{An Introduction to Statistical Modeling of Extreme Values}.
+ London: Springer-Verlag.
+}
+\author{ T. W. Yee }
+\note{
+ The \pkg{VGAM} family function \code{\link{gumbel}}
+ can estimate the parameters of a Gumbel distribution using
+ maximum likelihood estimation.
+
+}
+
+\seealso{
+ \code{\link{gumbel}},
+ \code{\link{egumbel}},
+ \code{\link{gev}}.
+}
+\examples{
+mu = 1
+sigma = 2
+y = rgumbel(n=100, loc=mu, scale=sigma)
+mean(y)
+mu - sigma * digamma(1) # population mean
+var(y)
+sigma^2 * pi^2 / 6 # population variance
+
+
+\dontrun{
+x = seq(-2.5, 3.5, by=0.01)
+loc = 0; sigma = 1
+plot(x, dgumbel(x, loc, sigma), type="l", col="blue", ylim=c(0,1),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", ylab="", las=1)
+abline(h=0, col="blue", lty=2)
+lines(qgumbel(seq(0.05,0.95,by=0.05), loc, sigma),
+ dgumbel(qgumbel(seq(0.05,0.95,by=0.05), loc, sigma), loc, sigma),
+ col="purple", lty=3, type="h")
+lines(x, pgumbel(x, loc, sigma), type="l", col="red")
+abline(h=0, lty=2)
+}
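+
+# Sketch: check dgumbel() against the closed-form density implied by G(y)
+# in Details, i.e., exp(-z) * exp(-exp(-z)) / sigma, where z = (y-mu)/sigma.
+yy = seq(-2, 4, by=0.5); mu = 1; sigma = 2
+zz = (yy - mu) / sigma
+max(abs(dgumbel(yy, location=mu, scale=sigma) -
+        exp(-zz) * exp(-exp(-zz)) / sigma)) # Should be ~0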
+}
+\keyword{distribution}
+
diff --git a/man/guplot.Rd b/man/guplot.Rd
new file mode 100644
index 0000000..00a2361
--- /dev/null
+++ b/man/guplot.Rd
@@ -0,0 +1,78 @@
+\name{guplot}
+\alias{guplot}
+\alias{guplot.default}
+\alias{guplot.vlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gumbel Plot }
+\description{
+ Produces a Gumbel plot,
+ a diagnostic plot for checking whether the data appears to be
+ from a Gumbel distribution.
+}
+\usage{
+guplot(object, ...)
+guplot.default(y, main="Gumbel Plot",
+ xlab="Reduced data", ylab="Observed data", type="p", ...)
+guplot.vlm(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{y}{ A numerical vector. \code{NA}s etc. are not allowed.}
+ \item{main}{Character. Overall title for the plot. }
+ \item{xlab}{Character. Title for the x axis. }
+ \item{ylab}{Character. Title for the y axis. }
+ \item{type}{Type of plot. The default means points are
+ plotted. }
+ \item{object}{ An object that inherits class \code{"vlm"},
+ usually of class \code{\link{vglm-class}} or
+ \code{\link{vgam-class}}. }
+  \item{\dots}{ Graphical arguments passed into
+ \code{\link[graphics]{plot}}. See \code{\link[graphics]{par}}
+ for an exhaustive list. The arguments \code{xlim} and
+ \code{ylim} are particularly useful. }
+}
+\details{
+ If \eqn{Y} has a Gumbel distribution then plotting the sorted
+ values \eqn{y_i} versus the \emph{reduced values} \eqn{r_i} should
+ appear linear. The reduced values are given by
+  \deqn{r_i = -\log(-\log(p_i)) }{%
+        r_i = -log(-log(p_i)) }
+ where \eqn{p_i} is the \eqn{i}th plotting position, taken
+ here to be \eqn{(i-0.5)/n}. Here, \eqn{n} is the number of
+ observations.
+ Curvature upwards/downwards may indicate a Frechet/Weibull
+ distribution, respectively. Outliers may also be detected
+ using this plot.
+
+  The function \code{guplot} is generic;
+  \code{guplot.default} and \code{guplot.vlm} are
+  method functions for Gumbel plots.
+
+}
+\value{
+ A list is returned invisibly with the following components.
+ \item{x }{The reduced data. }
+ \item{y }{The sorted y data. }
+}
+
+%% zz not sure about the reference
+\references{
+Coles, S. (2001)
+\emph{An Introduction to Statistical Modeling of Extreme Values}.
+London: Springer-Verlag.
+}
+\author{ T. W. Yee }
+\note{ The Gumbel distribution is a special case of the
+ GEV distribution with shape parameter equal to zero.
+}
+\seealso{
+ \code{\link{gumbel}},
+ \code{\link{egumbel}},
+ \code{\link{gev}}.
+}
+\examples{\dontrun{guplot(rnorm(500), las=1) -> i
+names(i)
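+# Sketch: compute the reduced values directly from the formula in Details
+# and compare with the x component returned by guplot().
+y = sort(rnorm(500)); n = length(y)
+ri = -log(-log(((1:n) - 0.5) / n)) # reduced values
+j = guplot(y)
+max(abs(sort(j$x) - ri)) # Should be ~0 if these plotting positions are used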
+}}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/hspider.Rd b/man/hspider.Rd
new file mode 100644
index 0000000..b55ddbe
--- /dev/null
+++ b/man/hspider.Rd
@@ -0,0 +1,74 @@
+\name{hspider}
+\alias{hspider}
+\docType{data}
+\title{ Hunting Spider Data }
+\description{
+ Abundance of hunting spiders in a Dutch dune area.
+}
+\usage{data(hspider)}
+\format{
+ A data frame with 28 observations (sites) on the following 18 variables.
+ \describe{
+ \item{WaterCon}{Log percentage of soil dry mass.}
+ \item{BareSand}{Log percentage cover of bare sand.}
+ \item{FallTwig}{Log percentage cover of fallen leaves and twigs.}
+ \item{CoveMoss}{Log percentage cover of the moss layer.}
+ \item{CoveHerb}{Log percentage cover of the herb layer.}
+ \item{ReflLux}{Reflection of the soil surface with cloudless sky.}
+ \item{Alopacce}{Abundance of \emph{Alopecosa accentuata}.}
+ \item{Alopcune}{Abundance of \emph{Alopecosa cuneata}.}
+ \item{Alopfabr}{Abundance of \emph{Alopecosa fabrilis}.}
+ \item{Arctlute}{Abundance of \emph{Arctosa lutetiana}.}
+ \item{Arctperi}{Abundance of \emph{Arctosa perita}.}
+ \item{Auloalbi}{Abundance of \emph{Aulonia albimana}.}
+ \item{Pardlugu}{Abundance of \emph{Pardosa lugubris}.}
+ \item{Pardmont}{Abundance of \emph{Pardosa monticola}.}
+ \item{Pardnigr}{Abundance of \emph{Pardosa nigriceps}.}
+ \item{Pardpull}{Abundance of \emph{Pardosa pullata}.}
+ \item{Trocterr}{Abundance of \emph{Trochosa terricola}.}
+ \item{Zoraspin}{Abundance of \emph{Zora spinimana}.}
+ }
+}
+\details{
+The data, which originally came from Van der Aart and Smeek-Enserink
+(1975), consists of abundances (numbers trapped over a 60 week period)
+and 6 environmental variables. There were 28 sites.
+
+This data set has often been used to illustrate ordination, e.g., using
+canonical correspondence analysis (CCA). In the example below, the
+data is used for constrained quadratic ordination (CQO; formerly called
+canonical Gaussian ordination or CGO), a numerically intensive method
+that has many superior qualities. See \code{\link{cqo}} for details.
+
+}
+%\source{
+%}
+\references{
+Van der Aart, P. J. M. and Smeek-Enserink, N. (1975)
+Correlations between distributions of hunting spiders
+(Lycosidae, Ctenidae) and environmental characteristics
+in a dune area.
+\emph{Netherlands Journal of Zoology},
+\bold{25}, 1--45.
+}
+\examples{
+data(hspider)
+str(hspider)
+
+\dontrun{
+set.seed(111) # This leads to the global solution
+hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam = poissonff, data = hspider, Crow1posit=FALSE)
+nos = ncol(p1@y)
+lvplot(p1, y=TRUE, lcol=1:nos, pch=1:nos, pcol=1:nos)
+Coef(p1)
+summary(p1)
+}
+}
+\keyword{datasets}
+
+
+
diff --git a/man/hunua.Rd b/man/hunua.Rd
new file mode 100644
index 0000000..649098a
--- /dev/null
+++ b/man/hunua.Rd
@@ -0,0 +1,66 @@
+\name{hunua}
+\alias{hunua}
+\non_function{}
+\title{Hunua Ranges data}
+\usage{data(hunua)}
+\description{
+ The \code{hunua} data frame has 392 rows and 18 columns.
+ Altitude is explanatory, and there are binary responses
+ (presence/absence = 1/0 respectively) for 17 plant species.
+
+}
+\format{
+ This data frame contains the following columns:
+ \describe{
+ \item{agaaus}{Agathis australis, or Kauri}
+ \item{beitaw}{Beilschmiedia tawa, or Tawa}
+ \item{corlae}{Corynocarpus laevigatus}
+ \item{cyadea}{Cyathea dealbata}
+ \item{cyamed}{Cyathea medullaris}
+ \item{daccup}{Dacrydium cupressinum}
+ \item{dacdac}{Dacrycarpus dacrydioides}
+ \item{eladen}{Elaeocarpus dentatus}
+ \item{hedarb}{Hedycarya arborea}
+ \item{hohpop}{Species name unknown}
+ \item{kniexc}{Knightia excelsa, or Rewarewa}
+ \item{kuneri}{Kunzea ericoides}
+ \item{lepsco}{Leptospermum scoparium}
+ \item{metrob}{Metrosideros robusta}
+ \item{neslan}{Nestegis lanceolata}
+ \item{rhosap}{Rhopalostylis sapida}
+ \item{vitluc}{Vitex lucens, or Puriri}
+ \item{altitude}{meters above sea level}
+ }
+}
+\details{
+ These were collected from the Hunua Ranges, a small forest in southern
+ Auckland, New Zealand. At 392 sites in the forest, the presence/absence
+ of 17 plant species was recorded, as well as the altitude.
+ Each site had an area of 200\eqn{m^2}{m^2}.
+
+}
+\source{
+ Dr Neil Mitchell, University of Auckland.
+}
+%\references{
+% None.
+%}
+\seealso{
+ \code{\link{waitakere}}.
+}
+\examples{
+# Fit a GAM using vgam() and compare it with the Waitakere Ranges one
+data(hunua)
+fit.h = vgam(agaaus ~ s(altitude), binomialff, hunua)
+\dontrun{
+plot(fit.h, se=TRUE, lcol="red", scol="red",
+ main="Red is Hunua, Blue is Waitakere") }
+predict(fit.h, hunua, type="response")[1:3]
+
+data(waitakere)
+fit.w = vgam(agaaus ~ s(altitude), binomialff, waitakere)
+\dontrun{
+plot(fit.w, se=TRUE, lcol="blue", scol="blue", add=TRUE) }
+predict(fit.w, hunua, type="response")[1:3] # Same as above?
+}
+\keyword{datasets}
diff --git a/man/hyper.Rd b/man/hyper.Rd
new file mode 100644
index 0000000..635db2c
--- /dev/null
+++ b/man/hyper.Rd
@@ -0,0 +1,129 @@
+\name{hyper}
+%\alias{hyper}
+\alias{hyper}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Hypergeometric Family Function }
+\description{
+ Family function for a hypergeometric distribution where either the
+ number of white balls or the total number of white and black balls
+ is unknown.
+
+}
+\usage{
+hyper(N=NULL, D=NULL, lprob="logit", iprob=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{N}{
+ Total number of white and black balls in the urn.
+ Must be a vector with positive values, and is recycled, if necessary,
+ to the same length as the response.
+ One of \code{N} and \code{D} must be specified.
+ }
+ \item{D}{
+ Number of white balls in the urn.
+ Must be a vector with positive values, and is recycled, if necessary,
+ to the same length as the response.
+ One of \code{N} and \code{D} must be specified.
+ }
+
+ \item{lprob}{
+ Link function for the probabilities.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iprob}{
+ Optional initial value for the probabilities.
+ The default is to choose initial values internally.
+
+ }
+}
+
+\details{
+ Consider the scenario from
+ \code{\link[stats:Hypergeometric]{Hypergeometric}} where there
+ are \eqn{N=m+n} balls in an urn, where \eqn{m} are white and \eqn{n}
+ are black. A simple random sample (i.e., \emph{without} replacement) of
+ \eqn{k} balls is taken.
+ The response here is the sample \emph{proportion} of white balls.
+ In this document,
+ \code{N} is \eqn{N=m+n},
+ \code{D} is \eqn{m} (for the number of ``defectives'', in quality
+ control terminology, or equivalently, the number of marked individuals).
+ The parameter to be estimated is the population proportion of
+ white balls, viz. \eqn{prob = m/(m+n)}.
+
+ Depending on which one of \code{N} and \code{D} is inputted, the
+ estimate of the other parameter can be obtained from the equation
+ \eqn{prob = m/(m+n)}, or equivalently, \code{prob = D/N}. However,
+ the log-factorials are computed using \code{\link[base]{lgamma}}
+ and neither \eqn{m} nor \eqn{n} is restricted to being an integer.
+ Thus if an integer \eqn{N} is to be estimated, it will be necessary to
+ evaluate the likelihood function at integer values about the estimate,
+ i.e., at \code{trunc(Nhat)} and \code{ceiling(Nhat)} where \code{Nhat}
+ is the (real) estimate of \eqn{N}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ and \code{\link{cao}}.
+
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ The response can be of one of three formats: a factor (first level taken
+ as success), a vector of proportions of success, or a 2-column matrix
+ (first column = successes) of counts. The argument \code{weights}
+ in the modelling function can also be specified. In particular, for a
+ general vector of proportions, you will need to specify \code{weights}
+ because the number of trials is needed.
+
+}
+\seealso{
+ \code{\link[stats:Hypergeometric]{Hypergeometric}},
+ \code{\link{binomialff}}.
+}
+\section{Warning }{
+ No checking is done to ensure that certain values are within range,
+ e.g., \eqn{k \leq N}{k <= N}.
+
+}
+
+\examples{
+nn = 100
+m = 5 # number of white balls in the population
+k = rep(4, len=nn) # sample sizes
+n = 4 # number of black balls in the population
+y = rhyper(nn=nn, m=m, n=n, k=k)
+yprop = y / k # sample proportions
+
+# N is unknown, D is known. Both models are equivalent:
+fit = vglm(cbind(y,k-y) ~ 1, hyper(D=m), trace=TRUE, crit="c")
+fit = vglm(yprop ~ 1, hyper(D=m), weight=k, trace=TRUE, crit="c")
+
+# N is known, D is unknown. Both models are equivalent:
+fit = vglm(cbind(y,k-y) ~ 1, hyper(N=m+n), trace=TRUE, crit="l")
+fit = vglm(yprop ~ 1, hyper(N=m+n), weight=k, trace=TRUE, crit="l")
+
+coef(fit, matrix=TRUE)
+Coef(fit) # Should be equal to the true population proportion
+unique(m / (m+n)) # The true population proportion
+fit@extra
+fitted(fit)[1:4]
+summary(fit)
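+
+# Sketch of the advice in Details: when N is estimated, compare the
+# log-likelihood at the integers around the real-valued estimate Nhat.
+Nhat = 9.4 # hypothetical real-valued estimate of N (truth is N = m + n = 9)
+for (NN in c(trunc(Nhat), ceiling(Nhat)))
+    print(sum(dhyper(y, m=m, n=NN-m, k=k, log=TRUE)))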
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/hzeta.Rd b/man/hzeta.Rd
new file mode 100644
index 0000000..5b37a82
--- /dev/null
+++ b/man/hzeta.Rd
@@ -0,0 +1,95 @@
+\name{hzeta}
+\alias{hzeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Haight's Zeta Family Function }
+\description{
+ Estimating the parameter of Haight's Zeta function.
+}
+\usage{
+hzeta(link = "loglog", init.alpha = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function for the parameter.
+ See \code{\link{Links}} for more choices.
+ Here, a log-log link keeps the parameter greater than one, meaning
+ the mean is finite.
+
+ }
+ \item{init.alpha}{
+ Optional initial value for the (positive) parameter.
+ The default is to obtain an initial value internally. Use this argument
+ if the default fails.
+
+ }
+}
+\details{
+ The probability function is
+ \deqn{f(y) = (2y-1)^{(-\alpha)} - (2y+1)^{(-\alpha)},}{%
+ f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),}
+ where the parameter \eqn{\alpha>0}{alpha>0}
+ and \eqn{y=1,2,\ldots}{y=1,2,...}.
+ The function \code{\link{dhzeta}} computes this probability function.
+ The mean of \eqn{Y}, which is returned as fitted values, is
+ \eqn{(1-2^{-\alpha}) \zeta(\alpha)}{(1-2^(-alpha))*zeta(alpha)}
+ provided \eqn{\alpha > 1}{alpha > 1}, where \eqn{\zeta}{zeta} is
+ Riemann's zeta function.
+ The mean is a decreasing function of \eqn{\alpha}{alpha}.
+ The mean is infinite if \eqn{\alpha \leq 1}{alpha <= 1}, and
+ the variance is infinite if \eqn{\alpha \leq 2}{alpha <= 2}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+
+ Page 470 of
+ Johnson, N. L., Kotz, S. and Kemp, A. W. (1993)
+ \emph{Univariate Discrete Distributions},
+ 2nd edition,
+ Volume 2,
+ New York: Wiley.
+
+}
+\author{ T. W. Yee }
+%\note{
+%}
+
+\seealso{
+ \code{\link{Hzeta}},
+ \code{\link{zeta}},
+ \code{\link{zetaff}},
+ \code{\link{loglog}}.
+}
+\examples{
+alpha = exp(0.1) # The parameter
+y = rhzeta(n=400, alpha) # Generate some hzeta random variates
+fit = vglm(y ~ 1, hzeta, trace = TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit) # Useful for intercept-only models; should be same as alpha
+fitted(fit)[1:4,]
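+
+# Sketch: the fitted values should match the mean formula in Details,
+# (1 - 2^(-alpha)) * zeta(alpha), evaluated with VGAM's zeta() function.
+(1 - 2^(-Coef(fit))) * zeta(Coef(fit)) # Compare with fitted(fit)[1,]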
+}
+\keyword{models}
+\keyword{regression}
+
+
+%# Generate some hzeta random variates
+%set.seed(123)
+%nn = 400
+%x = 1:20
+%alpha = 1.1 # The parameter
+%probs = dhzeta(x, alpha)
+%\dontrun{
+%plot(x, probs, type="h", log="y")}
+%cs = cumsum(probs)
+%tab = table(cut(runif(nn), brea = c(0,cs,1)))
+%index = (1:length(tab))[tab>0]
+%y = rep(index, times=tab[index])
+
+
+
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
new file mode 100644
index 0000000..1773d33
--- /dev/null
+++ b/man/hzetaUC.Rd
@@ -0,0 +1,84 @@
+\name{Hzeta}
+\alias{Hzeta}
+\alias{dhzeta}
+\alias{phzeta}
+\alias{qhzeta}
+\alias{rhzeta}
+\title{ Haight's Zeta Function }
+\description{
+ Density, distribution function, quantile function and random generation
+ for Haight's Zeta function distribution with parameter \code{alpha}.
+
+}
+\usage{
+dhzeta(x, alpha)
+phzeta(q, alpha)
+qhzeta(p, alpha)
+rhzeta(n, alpha)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{
+ Vector of quantiles. For the density, it should be a vector with
+ positive integer values in order for the probabilities to be positive.
+
+ }
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. A single positive integer.}
+ \item{alpha}{
+ The parameter value. Must contain positive values and is recycled to
+ the length of \code{x} or \code{p} or \code{q} if necessary.
+
+ }
+}
+\details{
+ The probability function is
+ \deqn{f(x) = (2x-1)^{(-\alpha)} - (2x+1)^{(-\alpha)},}{%
+ f(x) = (2x-1)^(-alpha) - (2x+1)^(-alpha),}
+ where \eqn{\alpha>0}{alpha>0} and \eqn{x=1,2,\ldots}{x=1,2,...}.
+
+}
+\value{
+ \code{dhzeta} gives the density,
+ \code{phzeta} gives the distribution function,
+ \code{qhzeta} gives the quantile function, and
+ \code{rhzeta} generates random deviates.
+
+}
+\references{
+
+Page 470 of
+Johnson, N. L., Kotz, S. and Kemp, A. W. (1993)
+\emph{Univariate Discrete Distributions},
+2nd edition,
+Volume 2,
+New York: Wiley.
+
+}
+\author{ T. W. Yee }
+\note{
+ Given some response data, the \pkg{VGAM} family function
+ \code{\link{hzeta}} estimates the parameter \code{alpha}.
+}
+
+\seealso{
+ \code{\link{hzeta}},
+ \code{\link{zeta}},
+ \code{\link{zetaff}}.
+}
+\examples{
+dhzeta(1:20, 0.5)
+rhzeta(20, 0.5)
+
+round(1000 * dhzeta(1:8, 2))
+table(rhzeta(1000, 2))
+
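+# Sketch: check dhzeta() against the probability function given in Details.
+x = 1:8; alpha = 2
+max(abs(dhzeta(x, alpha) - ((2*x-1)^(-alpha) - (2*x+1)^(-alpha)))) # Should be ~0
+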
+\dontrun{
+alpha = 0.5; x = 1:10
+plot(x, dhzeta(x, alpha=alpha), type="h", ylim=0:1,
+ sub="alpha=0.5", las=1, col="blue", ylab="Probability",
+ main="Haight's zeta: blue=density; red=distribution function")
+lines(x+0.1, phzeta(x, alpha=alpha), col="red", lty=3, type="h")
+}
+}
+\keyword{distribution}
diff --git a/man/iam.Rd b/man/iam.Rd
new file mode 100644
index 0000000..624f2c5
--- /dev/null
+++ b/man/iam.Rd
@@ -0,0 +1,99 @@
+\name{iam}
+\alias{iam}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Index from Array to Matrix }
+\description{
+ Maps the elements of an array containing symmetric positive-definite
+ matrices to a matrix with sufficient columns to hold them
+ (called matrix-band format.)
+}
+\usage{
+iam(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{j}{ An integer from the set \{\code{1:M}\} giving the row number
+ of an element.}
+ \item{k}{ An integer from the set \{\code{1:M}\} giving the column number
+ of an element.}
+ \item{M}{ The number of linear/additive predictors. This is the
+ dimension of each positive-definite symmetric matrix. }
+ \item{hbw}{ Defunct. }
+ \item{both}{ Logical. Return both the row and column indices?
+ See below for more details. }
+ \item{diagonal}{ Logical. Return the indices for the diagonal elements?
+ If \code{FALSE} then only the strictly upper triangular part of the matrix
+ elements are used. }
+}
+\details{
+ Suppose we have \eqn{n} symmetric positive-definite square matrices,
+ each \eqn{M} by \eqn{M}, and
+ these are stored in an \code{array} of dimension \code{c(n,M,M)}.
+ Then these can be more compactly represented by a
+ \code{matrix} of dimension \code{c(n,K)} where \code{K} is an integer
+ between \code{M} and \code{M*(M+1)/2} inclusive.
+ The mapping between these two representations is given by this function.
+ It firstly enumerates by the diagonal elements, followed by the band
+ immediately above the diagonal, then the band above that one, etc.
+ The last element is \code{(1,M)}.
+ This function performs the mapping from the elements \code{(j,k)}
+ of symmetric positive-definite square matrices to the columns of
+ the matrix-band representation.
+ This is called the \emph{matrix-band} format and is used by
+ the \pkg{VGAM} package.
+
+}
+\value{
+This function has a dual purpose depending on the value of \code{both}.
+ If \code{both=FALSE} then the column number corresponding to the
+ \code{j}-\code{k} element of the matrix is returned.
+ If \code{both=TRUE} then \code{j} and \code{k} are ignored and a list
+ with the following components is returned.
+ \item{row.index}{The row indices of the upper triangular part of the
+ matrix (This may or may not include the diagonal elements, depending
+ on the argument \code{diagonal}).
+ }
+ \item{col.index}{The column indices of the upper triangular part of the
+ matrix (This may or may not include the diagonal elements, depending
+ on the argument \code{diagonal}).
+ }
+}
+\references{
+The website \url{http://www.stat.auckland.ac.nz/~yee} contains
+some additional information.
+}
+\author{ T. W. Yee }
+\note{
+This function is used in the \code{weight} slot of many
+\pkg{VGAM} family functions (see \code{\link{vglmff-class}}),
+especially those whose \eqn{M} is determined by the data,
+e.g., \code{\link{dirichlet}}, \code{\link{multinomial}}.
+}
+
+\seealso{
+\code{\link{vglmff-class}}.
+%\code{ima}.
+}
+\examples{
+iam(1, 2, M=3) # The 4th column represents element (1,2) of a 3x3 matrix
+iam(NULL, NULL, M=3, both=TRUE) # Return the row & column indices
+
+
+dirichlet()@weight
+
+
+M = 4
+temp1 = iam(NA, NA, M=M, both=TRUE)
+mat1 = matrix(NA, M, M)
+mat1[cbind(temp1$row, temp1$col)] = 1:length(temp1$row)
+mat1 # More commonly used
+
+
+M = 4
+temp2 = iam(NA, NA, M=M, both=TRUE, diagonal=FALSE)
+mat2 = matrix(NA, M, M)
+mat2[cbind(temp2$row, temp2$col)] = 1:length(temp2$row)
+mat2 # Rarely used
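+
+# Sketch: with both=TRUE (and diagonal=TRUE, the default), the number of
+# indices returned equals M*(M+1)/2, the maximum value of K in Details.
+M = 4
+length(iam(NA, NA, M=M, both=TRUE)$row.index) == M*(M+1)/2 # Should be TRUE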
+}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/identity.Rd b/man/identity.Rd
new file mode 100644
index 0000000..e99d90d
--- /dev/null
+++ b/man/identity.Rd
@@ -0,0 +1,103 @@
+\name{identity}
+\alias{identity}
+\alias{nidentity}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Identity Link Function }
+\description{
+ Computes the identity transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+identity(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+nidentity(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+ }
+ \item{earg}{
+ Extra argument for passing in additional information.
+ Here, the argument is unused.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The identity link function \eqn{g(\theta)=\theta}{g(theta)=theta}
+ should be available to every parameter
+ estimated by the \pkg{VGAM} library. However, it usually results in
+ numerical problems when the estimates lie outside the permitted
+ range. Consequently, the result may contain
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+ The function \code{nidentity} is the negative-identity link function and
+ corresponds to \eqn{g(\theta)=-\theta}{g(theta)=-theta}.
+ This is useful for some models, e.g., in the literature supporting the
+ \code{\link{egev}} function it seems that half of the authors use
+ \eqn{\xi=-k}{xi=-k} for the shape parameter and the other half use \eqn{k}
+ instead of \eqn{\xi}{xi}.
+
+}
+\value{
+ For \code{identity()}:
+ for \code{deriv = 0}, \code{theta} itself is returned, whether
+ \code{inverse = FALSE} or \code{inverse = TRUE}.
+ For \code{deriv = 1}, the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ For \code{nidentity()}: the results are similar to \code{identity()}
+ except for a sign change in most cases.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{loge}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{powl}.
+ }
+\examples{
+identity((-5):5)
+identity((-5):5, deriv=1)
+identity((-5):5, deriv=2)
+nidentity((-5):5)
+nidentity((-5):5, deriv=1)
+nidentity((-5):5, deriv=2)
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
new file mode 100644
index 0000000..3d0d527
--- /dev/null
+++ b/man/inv.gaussianff.Rd
@@ -0,0 +1,97 @@
+\name{inv.gaussianff}
+\alias{inv.gaussianff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Inverse Gaussian Distribution Family Function }
+\description{
+ Estimates the two parameters of the inverse Gaussian distribution by
+ maximum likelihood estimation.
+
+}
+\usage{
+inv.gaussianff(lmu="loge", llambda="loge", ilambda=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, llambda}{
+ Parameter link functions for the \eqn{\mu}{mu} and
+ \eqn{\lambda}{lambda} parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ilambda}{
+ Initial value for the \eqn{\lambda}{lambda} parameter.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors \eqn{\eta_j}{eta_j}
+ are modelled as intercepts only.
+ The values must be from the set \{1,2\}.
+
+ }
+}
+\details{
+ The inverse Gaussian distribution has a density
+ that can be written as
+ \deqn{f(y;\mu,\lambda) = \sqrt{\lambda/(2\pi y^3)}
+ \exp\left(-\lambda (y-\mu)^2/(2 \mu^2 y)\right)}{%
+ f(y;mu,lambda) = sqrt(lambda/(2*pi*y^3)) *
+ exp(-lambda*(y-mu)^2/(2*mu^2*y))
+ }
+ where \eqn{y>0},
+ \eqn{\mu>0}{mu>0}, and
+ \eqn{\lambda>0}{lambda>0}.
+ The mean of \eqn{Y} is \eqn{\mu}{mu} and its variance is
+ \eqn{\mu^3/\lambda}{mu^3/lambda}.
+ By default, \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and
+ \eqn{\eta_2=\log(\lambda)}{eta2=log(lambda)}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Johnson, N. L., Kotz, S. and Balakrishnan, N. (1994)
+\emph{Continuous Univariate Distributions},
+2nd edition, Volume 1, New York: Wiley.
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ T. W. Yee }
+\note{
+ The inverse Gaussian distribution can be fitted (to a
+ certain extent) using the usual GLM framework involving
+ a scale parameter. This family function is different from
+ that approach in that it estimates both parameters by
+ full maximum likelihood estimation.
+
+}
+
+\seealso{
+ \code{\link{Inv.gaussian}},
+ \code{\link{wald}},
+ \code{\link{bisa}}.
+
+ The \R{} package \pkg{SuppDists} has several functions for evaluating
+ the density, distribution function, quantile function and generating
+ random numbers from the inverse Gaussian distribution.
+
+}
+\examples{
+n = 1000
+shape = 5
+y = rgamma(n=n, shape=shape) # Not inverse Gaussian!!
+fit = vglm(y ~ 1, inv.gaussianff, trace=TRUE, crit="coef")
+coef(fit, matrix=TRUE)
+Coef(fit)
+summary(fit)
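+
+# A hedged sketch, not from the original documentation: numerically verify
+# that the stated density integrates to 1 and that Var(Y) = mu^3/lambda.
+# dig() below is a hypothetical helper written here, not a VGAM function.
+mu = 2; lambda = 3
+dig = function(y) sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-mu)^2/(2*mu^2*y))
+integrate(dig, 0, Inf)$value # Should be 1
+integrate(function(y) (y-mu)^2 * dig(y), 0, Inf)$value # Should be mu^3/lambda = 8/3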
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/invlomax.Rd b/man/invlomax.Rd
new file mode 100644
index 0000000..17e7c54
--- /dev/null
+++ b/man/invlomax.Rd
@@ -0,0 +1,93 @@
+\name{invlomax}
+\alias{invlomax}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Inverse Lomax Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ inverse Lomax distribution.
+}
+\usage{
+invlomax(link.scale = "loge", link.p = "loge",
+ init.scale = NULL, init.p = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.scale, link.p}{
+ Parameter link functions applied to the
+ (positive) scale parameter \code{scale} and
+ (positive) shape parameter \code{p}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.scale, init.p}{
+ Optional initial values for \code{scale} and \code{p}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2\} which correspond to
+ \code{scale}, \code{p}, respectively.
+
+ }
+}
+\details{
+ The 2-parameter inverse Lomax distribution is the 4-parameter
+ generalized beta II distribution with shape parameters \eqn{a=q=1}.
+ It is also the 3-parameter Dagum distribution
+ with shape parameter \eqn{a=1}, as well as the
+ beta distribution of the second kind with \eqn{q=1}.
+ More details can be found in Kleiber and Kotz (2003).
+
+The inverse Lomax distribution has density
+ \deqn{f(y) = p y^{p-1} / [b^p \{1 + y/b\}^{p+1}]}{%
+ f(y) = p y^(p-1) / [b^p (1 + y/b)^(p+1)]}
+ for \eqn{b > 0}, \eqn{p > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and \code{p} is a shape parameter.
+The mean does not exist; the upper tail decays too slowly
+(like \eqn{1/y^2}{1/y^2}) for the mean to be finite.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{Invlomax}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{sinmad}},
+ \code{\link{fisk}},
+ \code{\link{lomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rinvlomax(n=2000, 6, 2)
+fit = vglm(y ~ 1, invlomax, trace=TRUE)
+fit = vglm(y ~ 1, invlomax, trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
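+
+# A hedged sketch, not from the original documentation: the stated density
+# integrates to 1, and its upper tail behaves like p*b/y^2, which is why
+# the mean does not exist. dinvlom() is a hypothetical helper; the
+# parameter values are arbitrary.
+b = 2; p = 6
+dinvlom = function(y) p * y^(p-1) / (b^p * (1 + y/b)^(p+1))
+integrate(dinvlom, 0, Inf)$value # Should be 1
+integrate(function(y) y * dinvlom(y), 0, 1e4)$value # Keeps growing with the upper limit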
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/invparalogistic.Rd b/man/invparalogistic.Rd
new file mode 100644
index 0000000..85799cc
--- /dev/null
+++ b/man/invparalogistic.Rd
@@ -0,0 +1,96 @@
+\name{invparalogistic}
+\alias{invparalogistic}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Inverse Paralogistic Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ inverse paralogistic distribution.
+}
+\usage{
+invparalogistic(link.a = "loge", link.scale = "loge",
+ init.a = 1, init.scale = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.a, link.scale}{
+ Parameter link functions applied to the
+ (positive) shape parameter \code{a} and
+ (positive) scale parameter \code{scale}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.a, init.scale}{
+ Optional initial values for \code{a} and \code{scale}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2\} which correspond to
+ \code{a}, \code{scale}, respectively.
+
+ }
+}
+\details{
+ The 2-parameter inverse paralogistic distribution is the 4-parameter
+ generalized beta II distribution with shape parameter \eqn{q=1} and
+ \eqn{a=p}.
+It is the 3-parameter Dagum distribution with \eqn{a=p}.
+ More details can be found in Kleiber and Kotz (2003).
+
+The inverse paralogistic distribution has density
+ \deqn{f(y) = a^2 y^{a^2-1} / [b^{a^2} \{1 + (y/b)^a\}^{a+1}]}{%
+ f(y) = a^2 y^(a^2-1) / [b^(a^2) (1 + (y/b)^a)^(a+1)]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and \eqn{a} is the shape parameter.
+The mean is
+ \deqn{E(Y) = b \, \Gamma(a + 1/a) \, \Gamma(1 - 1/a) / \Gamma(a)}{%
+ E(Y) = b gamma(a + 1/a) gamma(1 - 1/a) / gamma(a)}
+provided \eqn{a > 1}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{Invparalogistic}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{sinmad}},
+ \code{\link{fisk}},
+ \code{\link{invlomax}},
+ \code{\link{lomax}},
+ \code{\link{paralogistic}}.
+}
+
+\examples{
+y = rinvparalogistic(n=3000, 4, 6)
+fit = vglm(y ~ 1, invparalogistic, trace=TRUE)
+fit = vglm(y ~ 1, invparalogistic(init.a=2.7, init.sc=3.3),
+ trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
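+
+# A hedged sketch, not from the original documentation: compare the sample
+# mean with the stated formula, assuming the simulated data above used
+# shape a=4 and scale b=6 (positional arguments).
+a = 4; b = 6
+b * gamma(a + 1/a) * gamma(1 - 1/a) / gamma(a) # Theoretical E(Y)
+mean(y) # Should be reasonably close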
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/is.smart.Rd b/man/is.smart.Rd
new file mode 100644
index 0000000..c97235e
--- /dev/null
+++ b/man/is.smart.Rd
@@ -0,0 +1,64 @@
+\name{is.smart}
+\alias{is.smart}
+\title{
+ Test For a Smart Object
+}
+\description{
+ Tests an object to see if it is smart.
+}
+\usage{
+ is.smart(object)
+}
+\arguments{
+\item{object}{
+ a function or a fitted model.
+}
+}
+\value{
+ Returns \code{TRUE} or \code{FALSE}, according to whether the \code{object}
+ is smart or not.
+
+}
+\details{
+ If \code{object} is a function then this function looks to see whether
+ \code{object} has the logical attribute \code{"smart"}. If so, the value
+ of that attribute is returned; otherwise \code{FALSE} is returned.
+
+ If \code{object} is a fitted model then this function looks to see whether
+ \code{object@smart.prediction} or
+ \code{object\$smart.prediction} exists.
+ If it does and it is not equal to \code{list(smart.arg=FALSE)} then
+ \code{TRUE} is returned, else \code{FALSE}.
+ This is because, e.g., \code{lm(...,smart=FALSE)}
+ and \code{vglm(...,smart=FALSE)} return exactly such a list.
+
+ Writers of smart functions must manually assign this attribute to
+ their smart function after writing it.
+
+}
+\examples{
+my1 = function(x) x # A toy user-written function (assumed; not in the package)
+attr(my1, "smart") = TRUE # The attribute must be assigned manually
+is.smart(my1) # TRUE
+is.smart(poly) # TRUE
+if(is.R()) library(splines)
+is.smart(bs) # TRUE
+is.smart(ns) # TRUE
+is.smart(tan) # FALSE
+if(!is.R()) is.smart(lm) # TRUE
+\dontrun{
+library(VGAM)
+x = rnorm(9)
+fit1 = vglm(rnorm(9) ~ x, normal1)
+is.smart(fit1) # TRUE
+fit2 = vglm(rnorm(9) ~ x, normal1, smart=FALSE)
+is.smart(fit2) # FALSE
+fit2@smart.prediction
+}
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.6.1.
+
+
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
new file mode 100644
index 0000000..87a7e2b
--- /dev/null
+++ b/man/laplaceUC.Rd
@@ -0,0 +1,95 @@
+\name{laplaceUC}
+\alias{dlaplace}
+\alias{plaplace}
+\alias{qlaplace}
+\alias{rlaplace}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The Laplace Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the Laplace distribution with location parameter \code{location}
+ and scale parameter \code{scale}.
+
+}
+\usage{
+dlaplace(x, location=0, scale=1)
+plaplace(q, location=0, scale=1)
+qlaplace(p, location=0, scale=1)
+rlaplace(n, location=0, scale=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Positive integer of length 1.}
+ \item{location}{the location parameter \eqn{\mu}{mu}, which
+ is the mean. }
+ \item{scale}{the scale parameter \eqn{b}. Must consist of
+ positive values. }
+}
+\details{
+ The Laplace distribution is often known as the double-exponential
+ distribution and, for modelling purposes, has heavier tails
+ than the normal distribution.
+ The Laplace density function is
+ \deqn{f(y) = \frac{1}{2b} \exp \left( - \frac{|y-\mu|}{b}
+ \right) }{%
+ f(y) = (1/(2b)) exp( -|y-mu|/b ) }
+ where \eqn{-\infty<y<\infty}{-Inf<y<Inf},
+ \eqn{-\infty<\mu<\infty}{-Inf<mu<Inf} and
+ \eqn{b>0}.
+ The mean is \eqn{\mu}{mu} and the variance is \eqn{2b^2}.
+
+% See \code{\link{laplace}}, the \pkg{VGAM} family function
+% for estimating the two parameters by maximum likelihood estimation,
+% for formulae and details.
+% Apart from \code{n}, all the above arguments may be vectors and
+% are recycled to the appropriate length if necessary.
+}
+\value{
+ \code{dlaplace} gives the density,
+ \code{plaplace} gives the distribution function,
+ \code{qlaplace} gives the quantile function, and
+ \code{rlaplace} generates random deviates.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ T. W. Yee }
+%\note{
+% The \pkg{VGAM} family function \code{\link{laplace}}
+% estimates the two parameters by maximum likelihood estimation.
+%}
+
+%\seealso{
+% \code{\link{laplace}}.
+%}
+\examples{
+loc = 1; b = 2
+y = rlaplace(n=100, loc=loc, scale=b)
+mean(y)
+loc # population mean
+var(y)
+2 * b^2 # population variance
+
+
+\dontrun{
+x = seq(-5, 5, by=0.01)
+loc = 0; b = 1.5
+plot(x, dlaplace(x, loc, b), type="l", col="blue", ylim=c(0,1),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
+abline(h=0, col="blue", lty=2)
+lines(qlaplace(seq(0.05,0.95,by=0.05), loc, b),
+ dlaplace(qlaplace(seq(0.05,0.95,by=0.05), loc, b), loc, b),
+ col="purple", lty=3, type="h")
+lines(x, plaplace(x, loc, b), type="l", col="red")
+abline(h=0, lty=2)
+}
+
+plaplace(qlaplace(seq(0.05,0.95,by=0.05), loc, b), loc, b)
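+
+# A hedged sketch, not from the original documentation: the Laplace cdf has
+# the closed form 0.5*exp((q-mu)/b) for q <= mu, else 1 - 0.5*exp(-(q-mu)/b).
+qq = c(-1, 1, 4)
+plaplace(qq, loc, b)
+ifelse(qq <= loc, 0.5*exp((qq-loc)/b), 1 - 0.5*exp(-(qq-loc)/b)) # Should match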
+}
+\keyword{distribution}
+
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
new file mode 100644
index 0000000..ed6787c
--- /dev/null
+++ b/man/leipnik.Rd
@@ -0,0 +1,105 @@
+\name{leipnik}
+\alias{leipnik}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Leipnik Distribution Family Function}
+\description{
+ Estimates the two parameters of a (transformed) Leipnik distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, llambda}{
+ Link function for the \eqn{\mu}{mu} and \eqn{\lambda}{lambda} parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{imu, ilambda}{
+ Numeric. Optional initial values for \eqn{\mu}{mu} and
+ \eqn{\lambda}{lambda}.
+
+ }
+}
+\details{
+ The (transformed) Leipnik distribution has density function
+ \deqn{f(y;\mu,\lambda) = \frac{ \{ y(1-y) \}^{-\frac12}}{
+ \mbox{Beta}( \frac{\lambda+1}{2}, \frac12 )}
+ \left[ 1 + \frac{(y-\mu)^2 }{y(1-y)}
+ \right]^{ -\frac{\lambda}{2}}}{%
+ f(y;mu,lambda) =
+ (y(1-y))^(-1/2) * (1 + (y-mu)^2 / (y*(1-y)))^(-lambda/2) /
+ Beta((lambda+1)/2, 1/2)}
+ where \eqn{0 < y < 1} and \eqn{\lambda > -1}{lambda > -1}.
+ The mean is \eqn{\mu}{mu} (returned as the fitted values) and the variance is
+ \eqn{1/\lambda}{1/lambda}.
+
+ Jorgensen (1997) calls the above the \bold{transformed} Leipnik
+ distribution, and if \eqn{y = (x+1)/2} and \eqn{\mu = (\theta+1)/2}{mu
+ = (theta+1)/2}, then the distribution of \eqn{X} as a function of
+ \eqn{x} and \eqn{\theta}{theta} is known as the (untransformed)
+ Leipnik distribution. Here, both \eqn{x} and \eqn{\theta}{theta}
+ are in \eqn{(-1,1)}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Jorgensen, B. (1997)
+ \emph{The Theory of Dispersion Models}.
+ London: Chapman & Hall.
+
+ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+ \emph{Continuous Univariate Distributions},
+ 2nd edition,
+ Volume 2,
+ New York: Wiley.
+ (pages 612--617).
+
+}
+\author{ T. W. Yee }
+\note{
+ Convergence may be slow, or may fail altogether.
+ Until better initial value estimates are forthcoming, try assigning
+ the argument \code{ilambda} some numerical value if convergence fails.
+ Currently, Newton-Raphson is implemented, not Fisher scoring.
+ Currently, this family function probably only really works for
+ intercept-only models, i.e., \code{y ~ 1} in the formula.
+
+}
+
+\section{Warning }{
+ If \code{llambda="identity"} then it is possible that the
+ \code{lambda} estimate becomes less than \eqn{-1}, i.e., out of
+ bounds. One way to stop this is to choose \code{llambda="loge"},
+ however, \code{lambda} is then constrained to be positive.
+
+}
+
+\seealso{
+ \code{\link{mccullagh89}}.
+}
+\examples{
+y = rnorm(n=2000, mean=0.5, sd=0.1) # Not good data
+fit = vglm(y ~ 1, leipnik(ilambda=1), tr=TRUE, checkwz=FALSE)
+fit = vglm(y ~ 1, leipnik(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
+
+fitted(fit)[1:5]
+mean(y)
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
+
+sum(weights(fit)) # sum of the prior weights
+sum(weights(fit, type="w")) # sum of the working weights
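+
+# A hedged sketch, not from the original documentation: for mu=0.5 the
+# stated density simplifies nicely; check that it integrates to 1 on (0,1).
+# dleip() is a hypothetical helper written here for the check.
+mu = 0.5; lambda = 2
+dleip = function(y) (y*(1-y))^(-0.5) *
+ (1 + (y-mu)^2/(y*(1-y)))^(-lambda/2) / beta((lambda+1)/2, 0.5)
+integrate(dleip, 0, 1)$value # Should be 1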
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/lerch.Rd b/man/lerch.Rd
new file mode 100644
index 0000000..36e4e59
--- /dev/null
+++ b/man/lerch.Rd
@@ -0,0 +1,108 @@
+\name{lerch}
+\alias{lerch}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Lerch Phi Function }
+\description{
+ Computes the Lerch transcendental Phi function.
+
+}
+\usage{
+lerch(x, s, v, tolerance=1.0e-10, iter=100)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, s, v}{
+ Numeric.
+ This function recycles values of \code{x}, \code{s}, and
+ \code{v} if necessary.
+
+ }
+ \item{tolerance}{
+ Numeric. Accuracy required, must be positive and less than 0.01.
+
+ }
+ \item{iter}{
+ Maximum number of iterations allowed to obtain convergence.
+ If \code{iter} is too small then a result of \code{NA} may occur;
+ if so, try increasing its value.
+
+ }
+}
+\details{
+ The Lerch transcendental function is defined by
+ \deqn{\Phi(x,s,v) = \sum_{n=0}^{\infty} \frac{x^n}{(n+v)^s}}{%
+ Phi(x,s,v) = sum_{n=0}^{infty} x^n / (n+v)^s}
+ where \eqn{|x|<1} and
+ \eqn{v \neq 0, -1, -2, \ldots}{v != 0, -1, -2, ...}.
+ Actually, \eqn{x} may be complex but this function only works
+ for real \eqn{x}.
+ The algorithm used is based on the relation
+ \deqn{\Phi(x,s,v) = x^m \Phi(x,s,v+m) + \sum_{n=0}^{m-1}
+ \frac{x^n}{(n+v)^s} .}{%
+ Phi(x,s,v) = x^m Phi(x,s,v+m) + sum_{n=0}^{m-1} x^n / (n+v)^s . }
+ See the URL below for more information.
+ This function is a wrapper function for the C code described below.
+
+}
+\value{
+ Returns the value of the function evaluated at the values of
+ \code{x}, \code{s}, \code{v}.
+ If the above ranges of \eqn{x} and \eqn{v} are not satisfied,
+ or some numeric problems occur, then
+ this function will return a \code{NA} for those values.
+
+}
+\references{
+ \url{http://aksenov.freeshell.org/lerchphi/source/lerchphi.c}.
+
+ Bateman, H. (1953)
+ \emph{Higher Transcendental Functions}.
+ Volume 1. McGraw-Hill, NY, USA.
+
+}
+\author{
+ S. V. Aksenov and U. D. Jentschura wrote the C code.
+ The R wrapper function was written by T. W. Yee.
+
+}
+\note{
+ There are a number of special cases, e.g.,
+ the Riemann zeta-function is given by
+ \eqn{\zeta(s) = \Phi(x=1,s,v=1)}{zeta(s) = Phi(x=1,s,v=1)}.
+ The special case of \eqn{s=1} corresponds to the hypergeometric 2F1,
+ and this is implemented in the \pkg{gsl} package.
+ The Lerch transcendental Phi function should not be confused with the
+ Lerch zeta function though they are quite similar.
+
+}
+\section{Warning }{
+ This function has not been thoroughly tested and contains bugs,
+ for example,
+ the zeta function cannot be computed with this function even though
+ \eqn{\zeta(s) = \Phi(x=1,s,v=1)}{zeta(s) = Phi(x=1,s,v=1)}.
+ There are many sources of problems such as lack of convergence, overflow
+ and underflow, especially near singularities. If any problems occur
+ then a \code{NA} will be returned.
+
+}
+
+\seealso{
+ \code{\link{zeta}}.
+
+}
+\examples{
+\dontrun{
+x = seq(-1.1, 1.1, len=201)
+s=2; v=1
+plot(x, lerch(x, s=s, v=v), type="l", col="red", las=1,
+ main=paste("lerch(x, s=",s,", v=",v,")",sep=""))
+abline(v=0, h=1, lty="dashed")
+
+s = rnorm(n=100)
+max(abs(zeta(s)-lerch(x=1,s=s,v=1))) # This fails (a bug); should be 0
+}
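+
+# A hedged sketch, not from the original documentation: check a known special
+# case, Phi(x, s=1, v=1) = -log(1-x)/x for |x| < 1 (assuming the C code
+# converges here).
+xx = seq(-0.9, 0.9, by=0.2)
+max(abs(lerch(xx, s=1, v=1) - (-log(1-xx)/xx))) # Should be near 0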
+}
+\keyword{math}
+
+
+
diff --git a/man/levy.Rd b/man/levy.Rd
new file mode 100644
index 0000000..f617813
--- /dev/null
+++ b/man/levy.Rd
@@ -0,0 +1,118 @@
+\name{levy}
+\alias{levy}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Levy Distribution Family Function }
+\description{
+Estimates the two parameters of the Levy distribution
+by maximum likelihood estimation.
+}
+\usage{
+levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{delta}{
+ Location parameter. May be assigned a known value,
+ otherwise it is estimated (the default).
+
+ }
+ \item{link.gamma}{
+ Parameter link function for the (positive) \eqn{\gamma}{gamma} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{idelta}{
+ Initial value for the \eqn{\delta}{delta} parameter
+ (if it is to be estimated).
+ By default, an initial value is chosen internally.
+
+ }
+ \item{igamma}{
+ Initial value for the \eqn{\gamma}{gamma} parameter.
+ By default, an initial value is chosen internally.
+
+ }
+}
+\details{
+ The Levy distribution is one of the three stable distributions whose
+ density function has a tractable form; the other two are the
+ normal and Cauchy distributions.
+ The formula for the density is
+ \deqn{f(y;\gamma,\delta) = \sqrt{\frac{\gamma}{2\pi}}
+ \exp \left( \frac{-\gamma}{2(y - \delta)} \right) /
+ (y - \delta)^{3/2} }{%
+ f(y;gamma,delta) = sqrt(gamma / (2 pi))
+ exp( -gamma / (2(y - delta))) /
+ (y - delta)^(3/2) }
+ where \eqn{\delta<y<\infty}{delta<y<Inf} and \eqn{\gamma>0}{gamma>0}.
+ The mean does not exist.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Nolan, J. P. (2005)
+ \emph{Stable Distributions: Models for Heavy Tailed Data}.
+
+}
+\author{ T. W. Yee }
+\note{
+ If \eqn{\delta}{delta} is given, then only one parameter is estimated
+ and the default is \eqn{\eta_1=\log(\gamma)}{eta1=log(gamma)}. If
+ \eqn{\delta}{delta} is not given, then \eqn{\eta_2=\delta}{eta2=delta}.
+
+}
+
+
+\seealso{
+
+ The Nolan article is at
+ \url{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}.
+
+ Documentation accompanying the \pkg{VGAM} package at
+ \url{http://www.stat.auckland.ac.nz/~yee} contains further information
+ and examples.
+
+}
+\examples{
+n = 1000
+mygamma = 1 # log link ==> 0 is the answer
+delta = 0
+y = delta + mygamma / rnorm(n)^2 # This is Levy(mygamma, delta)
+
+# Cf. Table 1.1 of Nolan for Levy(1,0)
+sum(y > 1) / length(y) # Should be 0.6827
+sum(y > 2) / length(y) # Should be 0.5205
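+
+# A hedged sketch, not from the original documentation: by the Z^2
+# representation above, P(Y > y) = 2*pnorm(sqrt(mygamma/(y-delta))) - 1 exactly.
+2 * pnorm(sqrt(mygamma / (1:2 - delta))) - 1 # Exact values: 0.6827, 0.5205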
+
+fit = vglm(y ~ 1, levy(delta=delta), trace=TRUE) # 1 parameter
+fit = vglm(y ~ 1, levy(idelta=delta, igamma=mygamma),
+ trace=TRUE) # 2 parameters
+coef(fit, matrix=TRUE)
+Coef(fit)
+summary(fit)
+weights(fit, type="w")[1:4,]
+}
+\keyword{models}
+\keyword{regression}
+
+
+%%\eqn{\delta + \gamma \Gamma(-0.5) / (2\sqrt{\pi})}{delta +
+%% gamma * gamma(-0.5) / (2*sqrt(pi))}
+%%where \code{gamma} is a parameter but \code{gamma()} is the gamma function.
+
+%%mygamma = exp(1) # log link ==> 1 is the answer
+%% alternative:
+%%w = rgamma(n, shape=0.5) # W ~ Gamma(0.5) distribution
+%%mean(w) # 0.5
+%%mean(1/w)
+%%y = delta + mygamma / (2 * w) # This is Levy(mygamma, delta)
+%%mean(y)
+%%set.seed(123)
+
+%%sum(y > 3) / length(y) # Should be 0.4363
+%%sum(y > 4) / length(y) # Should be 0.3829
+%%sum(y > 5) / length(y) # Should be 0.3453
+
diff --git a/man/lgammaUC.Rd b/man/lgammaUC.Rd
new file mode 100644
index 0000000..68e91fb
--- /dev/null
+++ b/man/lgammaUC.Rd
@@ -0,0 +1,79 @@
+\name{lgammaUC}
+\alias{lgammaUC}
+\alias{dlgamma}
+\alias{plgamma}
+\alias{qlgamma}
+\alias{rlgamma}
+\title{The Log-Gamma Distribution }
+\description{
+ Density, distribution function, quantile function and random
+ generation for the log-gamma distribution with
+ location parameter \code{location},
+ scale parameter \code{scale} and
+ shape parameter \code{k}.
+
+}
+\usage{
+dlgamma(x, location=0, scale=1, k=1)
+plgamma(q, location=0, scale=1, k=1)
+qlgamma(p, location=0, scale=1, k=1)
+rlgamma(n, location=0, scale=1, k=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Positive integer of length 1.}
+ \item{location}{the location parameter \eqn{a}.}
+ \item{scale}{the (positive) scale parameter \eqn{b}.}
+ \item{k}{the (positive) shape parameter \eqn{k}.}
+}
+\value{
+ \code{dlgamma} gives the density,
+ \code{plgamma} gives the distribution function,
+ \code{qlgamma} gives the quantile function, and
+ \code{rlgamma} generates random deviates.
+}
+\references{
+Kotz, S. and Nadarajah, S. (2000)
+\emph{Extreme Value Distributions: Theory and Applications},
+pages 48--49,
+London: Imperial College Press.
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{lgammaff}}, the \pkg{VGAM} family function for
+ estimating the one parameter standard log-gamma distribution by maximum
+ likelihood estimation, for formulae and other details. Apart from
+ \code{n}, all the above arguments may be vectors and are recycled to
+ the appropriate length if necessary.
+
+}
+\note{
+ The \pkg{VGAM} family function \code{\link{lgamma3ff}} is
+ for the three parameter (nonstandard) log-gamma distribution.
+}
+\seealso{
+ \code{\link{lgammaff}},
+ \code{\link{prentice74}}.
+}
+\examples{
+\dontrun{
+x = seq(-3.2, 5, by=0.01)
+loc = 1
+Scale = 1.5
+k = 1.4
+plot(x, dlgamma(x, loc, Scale, k), type="l", col="blue", ylim=c(0,1),
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
+abline(h=0, col="blue", lty=2)
+lines(qlgamma(seq(0.05,0.95,by=0.05), loc, Scale, k),
+ dlgamma(qlgamma(seq(0.05,0.95,by=0.05), loc, Scale, k), loc, Scale, k),
+ col="purple", lty=3, type="h")
+lines(x, plgamma(x, loc, Scale, k), type="l", col="red")
+abline(h=0, lty=2)
+}
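+
+# A hedged sketch, not from the original documentation: since
+# (Y - location)/scale = log(G) with G ~ Gamma(k), the cdf should satisfy
+# plgamma(q, loc, Scale, k) == pgamma(exp((q-loc)/Scale), shape=k).
+loc = 1; Scale = 1.5; k = 1.4
+qq = c(-1, 0, 2)
+plgamma(qq, loc, Scale, k) - pgamma(exp((qq-loc)/Scale), shape=k) # Should be ~0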
+}
+\keyword{distribution}
+
+
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
new file mode 100644
index 0000000..1a07fbc
--- /dev/null
+++ b/man/lgammaff.Rd
@@ -0,0 +1,126 @@
+\name{lgammaff}
+\alias{lgammaff}
+\alias{lgamma3ff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Log-gamma Distribution Family Function }
+\description{
+ Estimation of the parameter(s) of the standard and nonstandard
+ log-gamma distributions.
+
+}
+\usage{
+lgammaff(link = "loge", init.k = NULL)
+lgamma3ff(llocation="identity", lscale="loge", lshape="loge",
+ ilocation=NULL, iscale=NULL, ishape=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation}{
+ Parameter link function applied to the
+ location parameter \eqn{a}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lscale}{
+ Parameter link function applied to the
+ positive scale parameter \eqn{b}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{link, lshape}{
+ Parameter link function applied to
+ the positive shape parameter \eqn{k}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.k, ishape}{
+ Initial value for \eqn{k}.
+ If given, it must be positive.
+ If failure to converge occurs, try some other value.
+ The default means an initial value is determined internally. }
+ \item{ilocation, iscale}{ Initial value for \eqn{a} and \eqn{b}.
+ The defaults mean an initial value is determined internally for each.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default value means none are modelled as intercept-only terms.
+
+ }
+}
+\details{
+ The probability density function of the standard log-gamma distribution
+ is given by
+ \deqn{f(y)=\exp[ky - \exp(y)] / \Gamma(k),}{%
+ f(y) = exp[ky - exp(y)]/gamma(k),}
+ for parameter \eqn{k>0}{k>0} and all real \eqn{y}.
+ The mean of \eqn{Y} is \code{digamma(k)} (returned as
+ the fitted values) and its variance is \code{trigamma(k)}.
+
+ For the non-standard log-gamma distribution, one replaces \eqn{y}
+ by \eqn{(y-a)/b}, where \eqn{a} is the location parameter
+ and \eqn{b} is the positive scale parameter.
+ Then the density function is
+ \deqn{f(y)=\exp[k(y-a)/b - \exp((y-a)/b)] / (b \Gamma(k)).}{%
+ f(y) = exp[k(y-a)/b - exp((y-a)/b)]/(b*gamma(k)).}
+ The mean and variance of \eqn{Y} are \code{a + b*digamma(k)} (returned as
+ the fitted values) and \code{b^2 * trigamma(k)}, respectively.
+
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Kotz, S. and Nadarajah, S. (2000)
+\emph{Extreme Value Distributions: Theory and Applications},
+pages 48--49,
+London: Imperial College Press.
+
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+\emph{Continuous Univariate Distributions},
+2nd edition, Volume 2, p.89,
+New York: Wiley.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ The standard log-gamma distribution can be viewed as a generalization
+ of the standard type 1 extreme value density: when \eqn{k=1}
+ the distribution of \eqn{-Y} is the standard type 1 extreme value
+ distribution.
+
+ The standard log-gamma distribution is fitted with \code{lgammaff}
+ and the non-standard (3-parameter) log-gamma distribution is fitted
+ with \code{lgamma3ff}.
+
+}
+\seealso{
+\code{\link{rlgamma}},
+\code{\link{ggamma}},
+\code{\link{prentice74}},
+\code{\link[base:Special]{lgamma}}.
+}
+\examples{
+y = rlgamma(n <- 100, k=exp(1))
+fit = vglm(y ~ 1, lgammaff, trace=TRUE, crit="c")
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
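+
+# A hedged sketch, not from the original documentation: the standard
+# log-gamma is the distribution of log(G) with G ~ Gamma(k), so the sample
+# mean and variance should be near digamma(k) and trigamma(k).
+k = exp(1)
+logG = log(rgamma(5000, shape=k))
+c(mean(logG), digamma(k)) # Should be close
+c(var(logG), trigamma(k)) # Should be close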
+
+# Another example
+x = runif(n <- 5000)
+loc = -1 + 2*x
+Scale = exp(1+x)
+y = rlgamma(n, loc=loc, scale=Scale, k=exp(0))
+fit = vglm(y ~ x, lgamma3ff(zero=3), trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/lino.Rd b/man/lino.Rd
new file mode 100644
index 0000000..7f3d106
--- /dev/null
+++ b/man/lino.Rd
@@ -0,0 +1,123 @@
+\name{lino}
+\alias{lino}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Generalized Beta Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 3-parameter
+ generalized beta distribution as proposed by Libby and Novick (1982).
+
+}
+\usage{
+lino(lshape1="loge", lshape2="loge", llambda="loge",
+ ishape1=NULL, ishape2=NULL, ilambda=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape1, lshape2}{
+ Parameter link functions applied to the two
+ (positive) shape parameters \eqn{a} and \eqn{b}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{llambda}{
+ Parameter link function applied to the
+ parameter \eqn{\lambda}{lambda}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ishape1, ishape2, ilambda}{
+ Initial values for the parameters. A \code{NULL} value means
+ one is computed internally. The argument \code{ilambda} must
+ be numeric, and the default corresponds to a standard beta distribution.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2,3\} which correspond to
+ \eqn{a}, \eqn{b}, \eqn{\lambda}{lambda}, respectively.
+
+ }
+}
+\details{
+ Proposed by Libby and Novick (1982),
+ this distribution has density
+ \deqn{f(y;a,b,\lambda) = \frac{\lambda^{a} y^{a-1} (1-y)^{b-1}}{
+ B(a,b) \{1 - (1-\lambda) y\}^{a+b}}}{%
+ f(y;a,b,lambda) = lambda^a y^(a-1) (1-y)^(b-1) /
+ [B(a,b) (1 - (1-lambda)*y)^(a+b)]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{\lambda > 0}{lambda > 0}, \eqn{0 < y < 1}.
+ Here \eqn{B} is the beta function (see \code{\link[base:Special]{beta}}).
+ The mean is a complicated function involving the Gauss hypergeometric
+ function.
+ If \eqn{X} has a \code{lino} distribution with parameters
+ \code{shape1}, \code{shape2}, \code{lambda}, then
+ \eqn{Y=\lambda X/(1-(1-\lambda)X)}{Y = lambda*X / (1 - (1-lambda)*X)}
+ has a standard beta distribution with parameters \code{shape1},
+ \code{shape2}.
+
+ Since \eqn{\log(\lambda)=0}{log(lambda)=0} corresponds to the standard
+ beta distribution, a \code{summary} of the fitted model performs a
+ t-test for whether the data come from a standard beta distribution
+ (provided the \code{\link{loge}} link for \eqn{\lambda}{lambda} is used;
+ this is the default).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Libby, D. L. and Novick, M. R. (1982)
+ Multivariate generalized beta distributions with applications to
+ utility assessment.
+ \emph{Journal of Educational Statistics},
+ \bold{7}, 271--294.
+
+ Gupta, A. K. and Nadarajah, S. (2004)
+ \emph{Handbook of Beta Distribution and Its Applications},
+ NY: Marcel Dekker, Inc.
+}
+
+\author{ T. W. Yee }
+\note{
+ The fitted values, which are usually the mean, have not been
+ implemented yet and consequently are \code{NA}s.
+
+ Although Fisher scoring is used, the working weight matrices
+ are positive-definite only in a certain region of the parameter
+ space. Problems with this indicate poor initial values, an
+ ill-conditioned model, or insufficient data, etc.
+
+ This model can be difficult to fit. A reasonably good value of
+ \code{ilambda} seems to be needed, so if the self-starting initial
+ values fail, try experimenting with the initial value arguments.
+ Experience suggests it is better for \code{ilambda} to be a little
+ larger, rather than smaller, than the true value.
+
+}
+
+\seealso{
+ \code{\link{Lino}},
+ \code{\link{genbetaII}}.
+}
+
+\examples{
+y = rbeta(n=1000, exp(0.5), exp(1)) # Standard beta distribution
+fit = vglm(y ~ 1, lino, trace=TRUE)
+coef(fit, mat=TRUE)
+Coef(fit)
+fitted(fit)[1:4]
+summary(fit)
+
+
+# Nonstandard beta distribution
+y = rlino(n=1000, shape1=2, shape2=3, lambda=exp(1))
+fit = vglm(y ~ 1, lino(lshape1=identity, lshape2=identity, ilambda=10),
+ trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
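+
+# A hedged sketch, not from the original documentation: applying the stated
+# transformation to the rlino() sample above should give a standard
+# Beta(shape1=2, shape2=3) sample, whose mean is 2/(2+3) = 0.4.
+lambda = exp(1)
+z = lambda * y / (1 - (1-lambda) * y)
+c(mean(z), 2/(2+3)) # Should be close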
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/linoUC.Rd b/man/linoUC.Rd
new file mode 100644
index 0000000..9fbdbf6
--- /dev/null
+++ b/man/linoUC.Rd
@@ -0,0 +1,77 @@
+\name{Lino}
+\alias{Lino}
+\alias{dlino}
+\alias{plino}
+\alias{qlino}
+\alias{rlino}
+\title{The Generalized Beta Distribution (Libby and Novick, 1982)}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the generalized beta distribution, as proposed
+ by Libby and Novick (1982).
+}
+\usage{
+dlino(x, shape1, shape2, lambda=1)
+plino(q, shape1, shape2, lambda=1)
+qlino(p, shape1, shape2, lambda=1)
+rlino(n, shape1, shape2, lambda=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{shape1, shape2, lambda}{ see \code{\link{lino}}. }
+}
+\value{
+ \code{dlino} gives the density,
+ \code{plino} gives the distribution function,
+ \code{qlino} gives the quantile function, and
+ \code{rlino} generates random deviates.
+}
+%\references{
+% Libby, D. L. and Novick, M. R. (1982)
+% Multivariate generalized beta distributions with applications to
+% utility assessment.
+% \emph{Journal of Educational Statistics},
+% \bold{7}, 271--294.
+%
+% Gupta, A. K. and Nadarajah, S. (2004)
+% \emph{Handbook of Beta Distribution and Its Applications},
+% NY: Marcel Dekker, Inc.
+%
+%}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{lino}}, the \pkg{VGAM} family function
+ for estimating the parameters,
+ for the formula of the probability density function and other details.
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{lino}}.
+}
+\examples{
+\dontrun{
+lambda = 0.4
+shape1 = exp(1.3)
+shape2 = exp(1.3)
+x = seq(0.0, 1.0, len=101)
+plot(x, dlino(x, shape1=shape1, shape2=shape2, lambda=lambda),
+ type="l", col="blue", las=1, ylab="",
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles")
+abline(h=0, col="blue", lty=2)
+lines(x, plino(x, shape1=shape1, shape2=shape2, l=lambda), col="red")
+probs = seq(0.1, 0.9, by=0.1)
+Q = qlino(probs, shape1=shape1, shape2=shape2, lambda=lambda)
+lines(Q, dlino(Q, shape1=shape1, shape2=shape2, lambda=lambda),
+ col="purple", lty=3, type="h")
+plino(Q, shape1=shape1, shape2=shape2, l=lambda) - probs # Should be all 0
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/lirat.Rd b/man/lirat.Rd
new file mode 100644
index 0000000..1f5c95e
--- /dev/null
+++ b/man/lirat.Rd
@@ -0,0 +1,67 @@
+\name{lirat}
+\alias{lirat}
+\docType{data}
+\title{ Low-iron Rat Teratology Data }
+\description{
+ Low-iron rat teratology data.
+}
+\usage{data(lirat)}
+\format{
+ A data frame with 58 observations on the following 4 variables.
+ \describe{
+ \item{\code{N}}{Litter size.}
+ \item{\code{R}}{Number of dead fetuses.}
+ \item{\code{hb}}{Hemoglobin level.}
+ \item{\code{grp}}{Group number.
+ Group 1 is the untreated (low-iron) group,
+ group 2 received injections on day 7 or day 10 only,
+ group 3 received injections on days 0 and 7, and
+ group 4 received injections weekly.}
+ }
+}
+\details{
+The following description comes from Moore and Tsiatis (1991). The data
+come from the experimental setup of Shepard et al. (1980), which is
+typical of studies of the effects of chemical agents or dietary regimens
+on fetal development in laboratory rats.
+
+Female rats were put on iron-deficient diets and divided into 4
+groups. One group of controls was given weekly injections of iron
+supplement to bring their iron intake to normal levels, while another
+group was given only placebo injections. Two other groups were given
+fewer iron-supplement injections than the controls. The rats were made
+pregnant, sacrificed 3 weeks later, and the total number of fetuses and
+the number of dead fetuses in each litter were counted.
+
+For each litter the number of dead fetuses may be considered to be
+Binomial(\eqn{N,p}) where \eqn{N} is the litter size and \eqn{p}
+is the probability of a fetus dying. The parameter \eqn{p} is expected
+to vary from litter to litter, therefore the total variance of the
+proportions will be greater than that predicted by a binomial model,
+even when the covariates for hemoglobin level and experimental group
+are accounted for.
+
+}
+\source{
+ Moore, D. F. and Tsiatis, A. (1991)
+ Robust Estimation of the Variance in Moment Methods for
+ Extra-binomial and Extra-Poisson Variation.
+ \emph{Biometrics},
+ \bold{47}, 383--401.
+}
+\references{
+ Shepard, T. H., Mackler, B. and Finch, C. A. (1980)
+ Reproductive studies in the iron-deficient rat.
+ \emph{Teratology}, \bold{22}, 329--334.
+}
+\examples{
+\dontrun{
+data(lirat)
+attach(lirat)
+# cf. Figure 3 of Moore and Tsiatis (1991)
+plot(hb, R/N, pch=as.character(grp), col=grp, las=1,
+ xlab="Hemoglobin level", ylab="Proportion Dead")
+detach(lirat)
+}
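+
+# A hedged sketch, not from the original documentation: the litter-to-litter
+# variation in p described above is often handled with a beta-binomial model;
+# betabinomial() is assumed to be the relevant VGAM family function here.
+\dontrun{
+data(lirat)
+fit = vglm(cbind(R, N-R) ~ hb, betabinomial, data=lirat, trace=TRUE)
+coef(fit, matrix=TRUE)
+}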
+}
+\keyword{datasets}
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
new file mode 100644
index 0000000..d9ffac2
--- /dev/null
+++ b/man/lms.bcg.Rd
@@ -0,0 +1,187 @@
+\name{lms.bcg}
+\alias{lms.bcg}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ LMS Quantile Regression with a Box-Cox transformation to a Gamma Distribution }
+\description{
+ LMS quantile regression with the Box-Cox transformation
+ to the gamma distribution.
+}
+\usage{
+lms.bcg(percentiles = c(25, 50, 75), zero = NULL,
+ link.sigma = "loge", link.mu="identity",
+ dfmu.init=4, dfsigma.init=2,
+ init.lambda = 1, init.sigma = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, \eqn{n} is the number of (independent) observations.
+
+ \item{percentiles}{
+ A numerical vector containing values between 0 and 100,
+ which are the percentiles. The corresponding quantiles
+ will be returned as the `fitted values'.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default value, \code{NULL}, means they all are
+ functions of the covariates.
+
+ }
+ \item{link.sigma}{
+ Parameter link function applied to the third linear/additive predictor.
+ See \code{\link{Links}} for more choices.
+
+% It is the natural log by default because sigma is positive.
+
+ }
+ \item{link.mu}{
+ Parameter link function applied to the second linear/additive predictor.
+
+% The natural log is offered because mu
+% is positive, but it is not the default because mu is more directly
+% interpretable than log(mu)
+% (it is something similar to the running median).
+
+ }
+ \item{dfmu.init}{
+ Degrees of freedom for the cubic smoothing spline fit applied to
+ get an initial estimate of mu.
+ See \code{\link{vsmooth.spline}}.
+
+ }
+ \item{dfsigma.init}{
+ Degrees of freedom for the cubic smoothing spline fit applied to
+ get an initial estimate of sigma.
+ See \code{\link{vsmooth.spline}}.
+ This argument may be assigned \code{NULL} to get an initial value
+ using some other algorithm.
+
+ }
+ \item{init.lambda}{
+ Initial value for lambda.
+ If necessary, it is recycled to be a vector of length \eqn{n}.
+
+ }
+ \item{init.sigma}{
+ Optional initial value for sigma.
+ If necessary, it is recycled to be a vector of length \eqn{n}.
+ The default value, \code{NULL}, means an initial value is computed
+ in the \code{@initialize} slot of the family function.
+
+ }
+}
+\details{
+ Given a value of the covariate, this function applies a Box-Cox
+ transformation to the response to best obtain a gamma distribution.
+ The parameters
+ chosen to do this are estimated by maximum likelihood or penalized
+ maximum likelihood.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+
+Lopatatzidis A. and Green, P. J. (unpublished manuscript)
+Semiparametric quantile regression using the gamma distribution.
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+The response must be positive because the
+Box-Cox transformation cannot handle negative values.
+The LMS-Yeo-Johnson-normal method can handle
+both positive and negative values.
+
+In general, the lambda and sigma functions should be smoother
+than the mean function. Often setting \code{zero=1} or
+\code{zero=3} or \code{zero=c(1,3)} is a good idea.
+See the example below.
+
+While it is usual to regress the response against a single
+covariate, it is possible to add other explanatory variables,
+e.g., sex.
+See
+\url{http://www.stat.auckland.ac.nz/~yee}
+for further information and examples about this feature.
+
+}
+\section{Warning }{
+The computations are not simple, therefore convergence may fail.
+In that case, try different starting values.
+Also, the estimate may diverge quickly near the solution,
+in which case try prematurely
+stopping the iterations by assigning \code{maxit} to be the iteration
+number corresponding to the highest likelihood value.
+See the example below.
+
+The expected value of the second derivative with respect to lambda may
+be incorrect (my calculations do not agree with the Lopatatzidis and
+Green manuscript.)
+
+}
+\seealso{
+\code{\link{lms.bcn}},
+\code{\link{lms.yjn}},
+\code{\link{qtplot.lmscreg}},
+\code{\link{deplot.lmscreg}},
+\code{\link{cdf.lmscreg}},
+\code{\link{bminz}}.
+}
+
+\examples{
+data(bminz)
+
+# This converges, but can't be deplot()'ed or qtplot()'ed
+fit = vglm(BMI ~ bs(age, df=4), fam=lms.bcg(zero=c(1,3)), data=bminz, tr=TRUE)
+coef(fit, matrix=TRUE)
+\dontrun{
+par(mfrow=c(1,1))
+plotvgam(fit, se=TRUE) # Plot mu function (only)
+}
+
+# Difficult to get a model that converges
+# Here, we prematurely stop iterations because it fails near the solution
+fit = vgam(BMI ~ s(age, df=c(4,2)), maxit=4,
+ fam=lms.bcg(zero=1, init.lam=3), data=bminz, tr=TRUE)
+
+predict(fit)[1:3,]
+fitted(fit)[1:3,]
+bminz[1:3,]
+# Person 1 is near the lower quartile of BMI amongst people his age
+cdf(fit)[1:3]
+
+\dontrun{
+# Quantile plot
+par(bty="l", mar=c(5,4,4,3)+0.1, xpd=TRUE)
+qtplot(fit, percentiles=c(5,50,90,99), main="Quantiles",
+ xlim=c(15,90), las=1, ylab="BMI", lwd=2, lcol=4)
+
+# Density plot
+ygrid = seq(15, 43, len=100) # BMI ranges
+par(mfrow=c(1,1), lwd=2)
+a = deplot(fit, x0=20, y=ygrid, xlab="BMI", col="black",
+ main="Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
+a
+a = deplot(fit, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
+a = deplot(fit, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
+a@post$deplot # Contains density function values
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
new file mode 100644
index 0000000..77565ca
--- /dev/null
+++ b/man/lms.bcn.Rd
@@ -0,0 +1,170 @@
+\name{lms.bcn}
+\alias{lms.bcn}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ LMS Quantile Regression with a Box-Cox Transformation to Normality }
+\description{
+ LMS quantile regression with the Box-Cox transformation
+ to normality.
+}
+\usage{
+lms.bcn(percentiles = c(25, 50, 75), zero = NULL,
+ link.sigma = "loge", link.mu="identity",
+ dfmu.init=4, dfsigma.init=2,
+ init.lambda = 1, init.sigma = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, \eqn{n} is the number of (independent) observations.
+
+ \item{percentiles}{
+ A numerical vector containing values between 0 and 100,
+ which are the percentiles. The corresponding quantiles
+ will be returned as the `fitted values'.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default value, \code{NULL}, means they all are
+ functions of the covariates.
+
+ }
+ \item{link.sigma}{
+ Parameter link function applied to the third linear/additive predictor.
+ See \code{\link{Links}} for more choices.
+
+% It is the natural log by default because sigma is positive.
+
+ }
+ \item{link.mu}{
+ Parameter link function applied to the second linear/additive predictor.
+ See \code{\link{Links}} for more choices.
+
+% The natural log is offered because mu is positive, but it is not
+% the default because mu is more directly interpretable than log(mu)
+% (it is something similar to the running median).
+
+ }
+ \item{dfmu.init}{
+ Degrees of freedom for the cubic smoothing spline fit applied to
+ get an initial estimate of mu.
+ See \code{\link{vsmooth.spline}}.
+
+ }
+ \item{dfsigma.init}{
+ Degrees of freedom for the cubic smoothing spline fit applied to
+ get an initial estimate of sigma.
+ See \code{\link{vsmooth.spline}}.
+ This argument may be assigned \code{NULL} to get an initial value
+ using some other algorithm.
+
+ }
+ \item{init.lambda}{
+ Initial value for lambda.
+ If necessary, it is recycled to be a vector of length \eqn{n}.
+
+ }
+ \item{init.sigma}{
+ Optional initial value for sigma.
+ If necessary, it is recycled to be a vector of length \eqn{n}.
+ The default value, \code{NULL}, means an initial value is computed
+ in the \code{@initialize} slot of the family function.
+
+ }
+}
+\details{
+ Given a value of the covariate, this function applies a Box-Cox
+ transformation to the response to best obtain normality. The parameters
+ chosen to do this are estimated by maximum likelihood or penalized
+ maximum likelihood.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Cole, T. J. and Green, P. J. (1992)
+Smoothing Reference Centile Curves: The LMS Method and
+Penalized Likelihood.
+\emph{Statistics in Medicine},
+\bold{11}, 1305--1319.
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+The response must be positive because the
+Box-Cox transformation cannot handle negative values.
+The LMS-Yeo-Johnson-normal method can handle
+both positive and negative values.
+
+In general, the lambda and sigma functions should be smoother
+than the mean function. Often setting \code{zero=1} or
+\code{zero=3} or \code{zero=c(1,3)} is a good idea.
+See the example below.
+
+While it is usual to regress the response against a single
+covariate, it is possible to add other explanatory variables,
+e.g., sex.
+See
+\url{http://www.stat.auckland.ac.nz/~yee}
+for further information and examples about this feature.
+
+}
+
+\section{Warning }{
+The computations are not simple, therefore convergence may fail.
+In that case, try different starting values.
+Also, the estimate may diverge quickly near the solution,
+in which case try prematurely
+stopping the iterations by assigning \code{maxit} to be the iteration
+number corresponding to the highest likelihood value.
+}
+\seealso{
+\code{\link{lms.bcg}},
+\code{\link{lms.yjn}},
+\code{\link{qtplot.lmscreg}},
+\code{\link{deplot.lmscreg}},
+\code{\link{cdf.lmscreg}},
+\code{\link{bminz}}.
+}
+
+\examples{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz, tr=TRUE)
+predict(fit)[1:3,]
+fitted(fit)[1:3,]
+bminz[1:3,]
+# Person 1 is near the lower quartile of BMI amongst people his age
+cdf(fit)[1:3]
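+
+# A hedged sketch, not from the original documentation: under this model the
+# alpha-quantile at a covariate value is mu*(1 + lambda*sigma*qnorm(alpha))^(1/lambda)
+# (valid for lambda != 0 and 1 + lambda*sigma*qnorm(alpha) > 0). The predictor
+# order (lambda, mu, log sigma) is assumed from the argument descriptions.
+eta = predict(fit)[1:3,]
+lambda = eta[,1]; mu = eta[,2]; sigma = exp(eta[,3])
+mu * (1 + lambda * sigma * qnorm(0.25))^(1/lambda) # cf. fitted(fit)[1:3,1]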
+
+\dontrun{
+# Quantile plot
+par(bty="l", mar=c(5,4,4,3)+0.1, xpd=TRUE)
+qtplot(fit, percentiles=c(5,50,90,99), main="Quantiles",
+ xlim=c(15,90), las=1, ylab="BMI", lwd=2, lcol=4)
+
+# Density plot
+ygrid = seq(15, 43, len=100) # BMI ranges
+par(mfrow=c(1,1), lwd=2)
+a = deplot(fit, x0=20, y=ygrid, xlab="BMI", col="black",
+ main="Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
+a
+a = deplot(fit, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
+a = deplot(fit, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
+a@post$deplot # Contains density function values
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
new file mode 100644
index 0000000..7f9f4f0
--- /dev/null
+++ b/man/lms.yjn.Rd
@@ -0,0 +1,206 @@
+\name{lms.yjn}
+\alias{lms.yjn}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ LMS Quantile Regression with a Yeo-Johnson Transformation to Normality }
+\description{
+ LMS quantile regression with the Yeo-Johnson transformation
+ to normality.
+}
+\usage{
+lms.yjn(percentiles = c(25, 50, 75), zero = NULL,
+ link.lambda = "identity", link.sigma = "loge",
+ dfmu.init=4, dfsigma.init=2,
+ init.lambda = 1, init.sigma = NULL,
+ rule = c(10, 5), yoffset = NULL,
+ diagW=FALSE, iters.diagW=6)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, \eqn{n} is the number of (independent) observations.
+
+ \item{percentiles}{
+ A numerical vector containing values between 0 and 100,
+ which are the percentiles. The corresponding quantiles
+ will be returned as the `fitted values'.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default value, \code{NULL}, means they all are
+ functions of the covariates.
+
+ }
+ \item{link.lambda}{
+ Parameter link function applied to the first linear/additive predictor.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{link.sigma}{
+ Parameter link function applied to the third linear/additive predictor.
+ See \code{\link{Links}} for more choices.
+
+% It is the natural log by default because sigma is positive.
+
+ }
+ \item{dfmu.init}{
+ Degrees of freedom for the cubic smoothing spline fit applied to
+ get an initial estimate of mu.
+ See \code{\link{vsmooth.spline}}.
+ }
+ \item{dfsigma.init}{
+ Degrees of freedom for the cubic smoothing spline fit applied to
+ get an initial estimate of sigma.
+ See \code{\link{vsmooth.spline}}.
+ This argument may be assigned \code{NULL} to get an initial value
+ using some other algorithm.
+
+ }
+ \item{init.lambda}{
+ Initial value for lambda.
+ If necessary, it is recycled to be a vector of length \eqn{n}.
+
+ }
+ \item{init.sigma}{
+ Optional initial value for sigma.
+ If necessary, it is recycled to be a vector of length \eqn{n}.
+ The default value, \code{NULL}, means an initial value is computed
+ in the \code{@initialize} slot of the family function.
+
+ }
+ \item{rule}{
+ Number of abscissae used in the Gaussian integration
+ scheme to work out elements of the weight matrices.
+ The values given are the possible choices, with the first value
+ being the default.
+ The larger the value, the more accurate the approximation is likely
+ to be, but at greater computational expense.
+
+ }
+ \item{yoffset}{
+ A value to be added to the response y, for the purpose
+ of centering the response before fitting the model to the data.
+ The default value, \code{NULL}, means \code{-median(y)} is used, so that
+ the response actually used has median zero. The \code{yoffset} is
+ saved on the object and used during prediction.
+
+ }
+ \item{diagW}{
+ Logical.
+ This argument is offered because the expected information matrix may not
+ be positive-definite. Using the diagonal elements of this matrix results
+ in a higher chance of it being positive-definite; however, convergence
+ will be very slow.
+ If \code{TRUE}, then the first \code{iters.diagW} iterations will
+ use the diagonal of the expected information matrix.
+ The default is \code{FALSE}, meaning faster convergence.
+
+ }
+ \item{iters.diagW}{
+ Integer. Number of iterations in which the
+ diagonal elements of the expected information matrix are used.
+ Only used if \code{diagW = TRUE}.
+
+ }
+}
+\details{
+ Given a value of the covariate, this function applies a Yeo-Johnson
+ transformation to the response to best obtain normality. The parameters
+ chosen to do this are estimated by maximum likelihood or penalized
+ maximum likelihood.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Yeo, I.-K. and Johnson, R. A. (2000)
+A new family of power transformations to improve normality or symmetry.
+\emph{Biometrika},
+\bold{87}, 954--959.
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Yee, T. W. (2002)
+An Implementation for Regression Quantile Estimation.
+Pages 3--14.
+In: Haerdle, W. and Ronz, B.,
+\emph{Proceedings in Computational Statistics COMPSTAT 2002}.
+Heidelberg: Physica-Verlag.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+The response may contain both positive and negative values.
+In contrast, the LMS-Box-Cox-normal and LMS-Box-Cox-gamma
+methods only handle a positive response because the
+Box-Cox transformation cannot handle negative values.
+
+In general, the lambda and sigma functions should be smoother
+than the mean function. Often setting \code{zero=1} or
+\code{zero=3} or \code{zero=c(1,3)} is a good idea.
+See the example below.
+
+While it is usual to regress the response against a single
+covariate, it is possible to add other explanatory variables,
+e.g., sex.
+See
+\url{http://www.stat.auckland.ac.nz/~yee}
+for further information and examples about this feature.
+
+}
+
+\section{Warning }{
+The computations are not simple, therefore convergence may fail.
+In that case, try different starting values.
+
+The generic function \code{predict}, when applied to a
+\code{lms.yjn} fit, does not add back the \code{yoffset} value.
+
+}
+\seealso{
+\code{\link{lms.bcn}},
+\code{\link{lms.bcg}},
+\code{\link{qtplot.lmscreg}},
+\code{\link{deplot.lmscreg}},
+\code{\link{cdf.lmscreg}},
+\code{\link{bminz}}.
+}
+\examples{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(2,4,2)), fam=lms.yjn, data=bminz, tr=TRUE)
+predict(fit)[1:3,]
+fitted(fit)[1:3,]
+bminz[1:3,]
+# Person 1 is near the lower quartile of BMI amongst people his age
+cdf(fit)[1:3]
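+
+# A hedged sketch, not from the original documentation: the Yeo-Johnson (2000)
+# transformation itself, written out as a standalone function for illustration
+# (the package may provide its own version).
+yeo.johnson.demo = function(y, lambda) {
+ psi = numeric(length(y))
+ pos = y >= 0
+ psi[pos] = if (lambda == 0) log1p(y[pos]) else ((y[pos]+1)^lambda - 1)/lambda
+ psi[!pos] = if (lambda == 2) -log1p(-y[!pos]) else
+ -((1-y[!pos])^(2-lambda) - 1)/(2-lambda)
+ psi
+}
+yeo.johnson.demo(c(-2, 0, 2), lambda=0.5) # Handles negative and positive y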
+
+\dontrun{
+# Quantile plot
+par(bty="l", mar=c(5,4,4,3)+0.1, xpd=TRUE)
+qtplot(fit, percentiles=c(5,50,90,99), main="Quantiles",
+ xlim=c(15,90), las=1, ylab="BMI", lwd=2, lcol=4)
+
+# Density plot
+ygrid = seq(15, 43, len=100) # BMI ranges
+par(mfrow=c(1,1), lwd=2)
+a = deplot(fit, x0=20, y=ygrid, xlab="BMI", col="black",
+ main="Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
+a
+a = deplot(fit, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
+a = deplot(fit, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
+a@post$deplot # Contains density function values
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/logUC.Rd b/man/logUC.Rd
new file mode 100644
index 0000000..69ba67b
--- /dev/null
+++ b/man/logUC.Rd
@@ -0,0 +1,87 @@
+\name{Log}
+\alias{Log}
+\alias{dlog}
+\alias{plog}
+\alias{qlog}
+\alias{rlog}
+\title{ Logarithmic Distribution }
+\description{
+ Density, distribution function,
+% quantile function
+ and random generation
+ for the logarithmic distribution.
+
+}
+\usage{
+dlog(x, prob)
+plog(q, prob, log.p=FALSE)
+% qlog(p, prob)
+rlog(n, prob, Smallno=1.0e-6)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{
+ Vector of quantiles. For the density, it should be a vector with
+ positive integer values in order for the probabilities to be positive.
+
+ }
+% \item{p}{vector of probabilities.}
+ \item{n}{number of observations. A single positive integer.}
+ \item{prob}{
+ The parameter value \eqn{c} described in \code{\link{logff}}.
+ Here it is called \code{prob} because its range is \eqn{0<c<1}.
+ For \code{rlog()} this parameter must be of length 1.
+
+ }
+ \item{log.p}{
+ Logical.
+ If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
+
+ }
+ \item{Smallno}{
+ Numeric, a small value used by the rejection method for determining
+ the upper limit of the distribution.
+ That is, \code{plog(U, prob) > 1-Smallno} where \code{U} is the upper limit.
+
+ }
+}
+\details{
+ The details are given in \code{\link{logff}}.
+
+}
+\value{
+ \code{dlog} gives the density,
+ \code{plog} gives the distribution function, and
+% \code{qlog} gives the quantile function, and
+ \code{rlog} generates random deviates.
+
+}
+\references{
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+}
+\author{ T. W. Yee }
+\note{
+ Given some response data, the \pkg{VGAM} family function
+ \code{\link{logff}} estimates the parameter \code{prob}.
+}
+
+\seealso{
+ \code{\link{logff}}.
+}
+\examples{
+dlog(1:20, 0.5)
+rlog(20, 0.5)
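+
+# A small consistency check (sketch): plog() should equal the cumulative
+# sum of dlog() over the support 1, 2, 3, ...
+max(abs(plog(1:20, 0.5) - cumsum(dlog(1:20, 0.5))))  # Should be near 0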
+
+\dontrun{
+prob = 0.8; x = 1:10
+plot(x, dlog(x, prob=prob), type="h", ylim=0:1,
+ sub="prob=0.8", las=1, col="blue", ylab="Probability",
+ main="Logarithmic distribution: blue=density; red=distribution function")
+lines(x+0.1, plog(x, prob=prob), col="red", lty=3, type="h")
+}
+}
+\keyword{distribution}
diff --git a/man/logc.Rd b/man/logc.Rd
new file mode 100644
index 0000000..675472d
--- /dev/null
+++ b/man/logc.Rd
@@ -0,0 +1,90 @@
+\name{logc}
+\alias{logc}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Complementary-log Link Function }
+\description{
+  Computes the complementary-log transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+logc(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+  Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
+ \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
+ \item{short}{ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object. }
+ \item{tag}{ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}. }
+}
+\details{
+ The complementary-log link function is suitable for parameters that
+ are less than unity.
+ Numerical values of \code{theta} close to 1 or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+}
+\value{
+  For \code{deriv = 0}, the complementary log of \code{theta}, i.e.,
+ \code{log(1-theta)} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{1-exp(theta)}.
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is close to 1.
+ One way of overcoming this is to use \code{earg}.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{loge}},
+ \code{\link{cloglog}},
+ \code{\link{loglog}},
+ \code{\link{logoff}}.
+
+}
+\examples{
+\dontrun{
+logc(seq(-0.2, 1.1, by=0.1)) # Has NAs
+}
+logc(seq(-0.2, 1.1, by=0.1), earg=list(bvalue=1-.Machine$double.eps)) # Has no NAs
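+
+# Sketch: logc() and its inverse, 1 - exp(theta), should round-trip
+pvec = seq(0.01, 0.99, by=0.01)
+max(abs(logc(logc(pvec), inverse=TRUE) - pvec))  # Should be near 0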
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/loge.Rd b/man/loge.Rd
new file mode 100644
index 0000000..45d7fe4
--- /dev/null
+++ b/man/loge.Rd
@@ -0,0 +1,107 @@
+\name{loge}
+\alias{loge}
+\alias{nloge}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Log link function }
+\description{
+ Computes the log transformation, including its inverse and the first
+ two derivatives.
+
+}
+\usage{
+loge(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
+ \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The log link function is very commonly used for parameters that
+ are positive.
+ Numerical values of \code{theta} close to 0 or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+ The function \code{loge} computes
+ \eqn{\log(\theta)}{log(theta)} whereas \code{nloge} computes
+ \eqn{-\log(\theta)=\log(1/\theta)}{-log(theta)=log(1/theta)}.
+
+}
+\value{
+ The following concerns \code{loge}.
+ For \code{deriv = 0}, the log of \code{theta}, i.e., \code{log(theta)}
+ when \code{inverse = FALSE}, and if \code{inverse = TRUE} then
+ \code{exp(theta)}.
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ This function is called \code{loge} to avoid conflict with the
+ \code{\link[base]{log}} function.
+
+ Numerical instability may occur when \code{theta} is close to 0 unless
+ \code{earg} is used.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{logit}},
+ \code{\link{logc}},
+ \code{\link{loglog}},
+ \code{\link[base]{log}},
+ \code{\link{logoff}}.
+}
+\examples{
+\dontrun{
+loge(seq(-0.2, 0.5, by=0.1))
+loge(seq(-0.2, 0.5, by=0.1), earg=list(bvalue= .Machine$double.xmin))
+nloge(seq(-0.2, 0.5, by=0.1))
+nloge(seq(-0.2, 0.5, by=0.1), earg=list(bvalue= .Machine$double.xmin))
+}
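+
+# Sketch: nloge() is simply the negative of loge()
+xx = seq(0.1, 2, by=0.1)
+max(abs(nloge(xx) + loge(xx)))  # Should be 0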
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/logff.Rd b/man/logff.Rd
new file mode 100644
index 0000000..9b28202
--- /dev/null
+++ b/man/logff.Rd
@@ -0,0 +1,82 @@
+\name{logff}
+\alias{logff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Logarithmic Distribution }
+\description{
+ Estimating the parameter of the logarithmic distribution.
+}
+\usage{
+logff(link = "logit", init.c = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the parameter \eqn{c},
+ which lies between 0 and 1.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.c}{
+ Optional initial value for the \eqn{c} parameter.
+ If given, it often pays to start with a larger value, e.g., 0.95.
+ The default is to choose an initial value internally.
+
+ }
+}
+\details{
+ The logarithmic distribution is based on the logarithmic series,
+ and is scaled to a probability function.
+ Its probability function is
+ \eqn{f(y) = a c^y / y}{f(y) = a * c^y / y}, for
+ \eqn{y=1,2,3,\ldots}{y=1,2,3,...},
+ where \eqn{0 < c < 1}, and \eqn{a = -1 / \log(1-c)}{a = -1 / log(1-c)}.
+ The mean is \eqn{a c/(1-c)}{a*c/(1-c)} (returned as the fitted values)
+ and variance is \eqn{a c (1-ac) /(1-c)^2}{a*c*(1-a*c)/(1-c)^2}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Chapter 7 of
+Johnson N. L., Kotz S. and Kemp, A. W. (1993)
+\emph{Univariate Discrete Distributions},
+2nd edition, Volume 2, New York: Wiley.
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+}
+\author{ T. W. Yee }
+\note{
+  The function \code{\link[base]{log}} computes the natural logarithm.
+  In the \pkg{VGAM} package, the corresponding link function is
+  \code{\link{loge}}.
+
+}
+
+\seealso{
+ \code{\link{rlog}},
+ \code{\link[base]{log}},
+ \code{\link{loge}},
+ \code{\link{logoff}}.
+}
+\examples{
+y = rlog(n=1000, prob=logit(0.2, inverse=TRUE))
+fit = vglm(y ~ 1, logff, trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit)
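+
+# Sketch: check dlog() against the pmf formula given in the details,
+# f(y) = a * c^y / y with a = -1 / log(1-c)
+cc = 0.2; aa = -1 / log(1 - cc)
+max(abs(dlog(1:15, prob=cc) - aa * cc^(1:15) / (1:15)))  # Should be near 0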
+\dontrun{hist(y, prob=TRUE, breaks=seq(0.5, max(y)+0.5, by=1))
+x = seq(1, max(y), by=1)
+lines(x, dlog(x, Coef(fit)[1]), col="red", type="h")
+}
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/logistic.Rd b/man/logistic.Rd
new file mode 100644
index 0000000..55a9851
--- /dev/null
+++ b/man/logistic.Rd
@@ -0,0 +1,132 @@
+\name{logistic}
+\alias{logistic}
+\alias{logistic1}
+\alias{logistic2}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Logistic Distribution Family Function }
+\description{
+ Estimates the location and scale parameters of the logistic distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+logistic1(llocation="identity", scale.arg=1, method.init=1)
+logistic2(llocation="identity", lscale="loge",
+ ilocation=NULL, iscale=NULL, method.init=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation}{
+ Link function applied to the location parameter \eqn{l}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{scale.arg}{
+ Known positive scale parameter (called \eqn{s} below).
+
+ }
+ \item{lscale}{
+ Parameter link function applied to the
+ scale parameter \eqn{s}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ilocation}{
+ Initial value for the location \eqn{l} parameter.
+ By default, an initial value is chosen internally using
+ \code{method.init}. Assigning a value will override
+ the argument \code{method.init}.
+
+ }
+ \item{iscale}{
+ Initial value for the scale \eqn{s} parameter.
+ By default, an initial value is chosen internally using
+ \code{method.init}. Assigning a value will override
+ the argument \code{method.init}.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1} or \code{2} which
+ specifies the initialization method. If failure to converge occurs
+ try the other value.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The default is none of them. If used,
+ choose one value from the set \{1,2\}.
+
+ }
+}
+\details{
+ The two-parameter logistic distribution
+ has a density that can be written as
+ \deqn{f(y;l,s) = \frac{\exp[-(y-l)/s]}{
+ s\left( 1 + \exp[-(y-l)/s] \right)^2}}{%
+ f(y;l,s) = exp[-(y-l)/s] /
+ [s * ( 1 + exp[-(y-l)/s] )^2]
+ }
+ where \eqn{s>0} is the scale parameter, and \eqn{l} is the location
+  parameter. The response satisfies \eqn{-\infty<y<\infty}{-Inf<y<Inf}. The mean
+ of \eqn{Y} (which is the fitted value) is \eqn{l} and its variance is
+ \eqn{\pi^2 s^2 / 3}{pi^2 s^2 / 3}.
+
+ \code{logistic1} estimates the location parameter only while
+ \code{logistic2} estimates both parameters. By default,
+ \eqn{\eta_1=l}{eta1=l} and \eqn{\eta_2=\log(s)}{eta2=log(s)} for
+ \code{logistic2}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+}
+\references{
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
+\emph{Continuous Univariate Distributions},
+2nd edition, Volume 1, New York: Wiley. Chapter 15.
+
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+
+Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. M. (2005)
+\emph{Extreme Value and Related Models with Applications in
+ Engineering and Science},
+Hoboken, N.J.: Wiley-Interscience, p.130.
+
+}
+\author{ T. W. Yee }
+\note{
+ Fisher scoring is used, and the Fisher information matrix is diagonal.
+
+}
+%\section{Warning }{
+%}
+
+\seealso{
+ \code{\link[stats:Logistic]{rlogis}},
+ \code{\link{bilogistic4}}.
+}
+\examples{
+# location unknown, scale known
+n = 500
+x = runif(n)
+y = rlogis(n, loc=1+5*x, scale=4)
+fit = vglm(y ~ x, logistic1(scale=4), trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+
+# Both location and scale unknown
+n = 2000
+x = runif(n)
+y = rlogis(n, loc=1+5*x, scale=exp(0+1*x))
+fit = vglm(y ~ x, logistic2)
+coef(fit, matrix=TRUE)
+vcov(fit)
+summary(fit)
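+
+# Sketch: the density given in the details is that of dlogis();
+# a quick numerical check
+loc = 1; s = 4; yy = seq(-20, 20, by=0.5)
+max(abs(dlogis(yy, location=loc, scale=s) -
+        exp(-(yy-loc)/s) / (s * (1 + exp(-(yy-loc)/s))^2)))  # Near 0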
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/logit.Rd b/man/logit.Rd
new file mode 100644
index 0000000..ac6e06e
--- /dev/null
+++ b/man/logit.Rd
@@ -0,0 +1,207 @@
+\name{logit}
+\alias{logit}
+\alias{elogit}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Logit Link Function }
+\description{
+ Computes the logit transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+logit(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+elogit(theta, earg = list(min=0, max=1), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by 1 minus the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+ Similarly, for \code{elogit}, values of \code{theta} less than or equal
+ to \eqn{A} or greater than or equal to \eqn{B} can be replaced
+ by the \code{bminvalue} and \code{bmaxvalue} components of the list \code{earg}.
+
+% Extra argument for passing in additional information.
+% For \code{logit}, values of \code{theta} which are equal to 0 or 1 are
+% replaced by \code{earg} or \code{1-earg}
+% (respectively, and if given) before computing the logit.
+
+ For \code{elogit}, \code{earg} should be a list with components
+ \code{min} giving \eqn{A},
+ \code{max} giving \eqn{B}, and for out of range values,
+ \code{bminvalue} and \code{bmaxvalue}.
+ If \code{earg} is used, these
+ component names should not be abbreviated.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The logit link function is very commonly used for parameters that
+ lie in the unit interval.
+ Numerical values of \code{theta} close to 0 or 1 or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+
+ The \emph{extended} logit link function \code{elogit} should be used
+ more generally for parameters that lie in the interval \eqn{(A,B)}, say.
+ The formula is
+ \deqn{\log((\theta-A)/(B-\theta))}{%
+ log((theta-A)/(B-theta))}
+ and the default values for \eqn{A} and \eqn{B} correspond to the ordinary
+ logit function.
+ Numerical values of \code{theta} close to \eqn{A} or \eqn{B} or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+  However, these values can be replaced by the \code{bminvalue} and
+  \code{bmaxvalue} components of \code{earg} before the link function
+  is computed.
+
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+}
+\value{
+ For \code{logit} with \code{deriv = 0}, the logit of \code{theta}, i.e.,
+ \code{log(theta/(1-theta))} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{exp(theta)/(1+exp(theta))}.
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is
+ close to 1 or 0 (for \code{logit}), or close to \eqn{A} or \eqn{B} for
+ \code{elogit}.
+ One way of overcoming this is to use \code{earg}.
+
+ In terms of the threshold approach with cumulative probabilities for
+ an ordinal response this link function corresponds to the univariate
+ logistic distribution (see \code{\link{logistic}}).
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}},
+ \code{\link{loge}}.
+ }
+\examples{
+p = seq(0.01, 0.99, by=0.01)
+logit(p)
+max(abs(logit(logit(p), inverse=TRUE) - p)) # Should be 0
+
+p = c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by=0.01))
+logit(p) # Has NAs
+logit(p, earg=list(bvalue= .Machine$double.eps)) # Has no NAs
+
+p = seq(0.9, 2.2, by=0.1)
+elogit(p, earg=list(min=1, max=2,
+ bminvalue = 1 + .Machine$double.eps,
+ bmaxvalue = 2 - .Machine$double.eps)) # Has no NAs
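+
+# Sketch: elogit() should match the formula log((p-A)/(B-p))
+A = 0.2; B = 0.6; pp = seq(0.25, 0.55, by=0.05)
+max(abs(elogit(pp, earg=list(min=A, max=B)) - log((pp-A)/(B-pp))))  # Near 0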
+
+\dontrun{
+par(mfrow=c(2,2))
+p = seq(0.01, 0.99, by=0.01) # Restore p to the unit interval for the plots
+y = seq(-4, 4, length=100)
+for(d in 0:1) {
+ matplot(p, cbind(logit(p, deriv=d), probit(p, deriv=d)),
+ type="n", col="purple", ylab="transformation",
+ lwd=2, las=1,
+ main=if(d==0) "Some probability link functions"
+ else "First derivative")
+ lines(p, logit(p, deriv=d), col="limegreen", lwd=2)
+ lines(p, probit(p, deriv=d), col="purple", lwd=2)
+ lines(p, cloglog(p, deriv=d), col="chocolate", lwd=2)
+ lines(p, cauchit(p, deriv=d), col="tan", lwd=2)
+ if(d==0) {
+ abline(v=0.5, h=0, lty="dashed")
+ legend(0, 4.5, c("logit", "probit", "cloglog", "cauchit"),
+ col=c("limegreen","purple","chocolate", "tan"), lwd=2)
+ } else
+ abline(v=0.5, lty="dashed")
+}
+
+for(d in 0) {
+ matplot(y, cbind(logit(y, deriv=d, inverse=TRUE),
+ probit(y, deriv=d, inverse=TRUE)),
+ type="n", col="purple", xlab="transformation", ylab="p",
+ lwd=2, las=1,
+ main=if(d==0) "Some inverse probability link functions"
+ else "First derivative")
+ lines(y, logit(y, deriv=d, inverse=TRUE), col="limegreen", lwd=2)
+ lines(y, probit(y, deriv=d, inverse=TRUE), col="purple", lwd=2)
+ lines(y, cloglog(y, deriv=d, inverse=TRUE), col="chocolate", lwd=2)
+ lines(y, cauchit(y, deriv=d, inverse=TRUE), col="tan", lwd=2)
+ if(d==0) {
+ abline(h=0.5, v=0, lty="dashed")
+ legend(-4, 1, c("logit", "probit", "cloglog", "cauchit"),
+ col=c("limegreen","purple","chocolate", "tan"), lwd=2)
+ }
+}
+
+p = seq(0.21, 0.59, by=0.01)
+plot(p, elogit(p, earg=list(min=0.2, max=0.6)), lwd=2,
+ type="l", col="black", ylab="transformation", xlim=c(0,1),
+ las=1, main="elogit(p, earg=list(min=0.2, max=0.6)")
+}
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
+%plot(y, logit(y, inverse=TRUE), type="l", col="limegreen",
+% xlab="transformation", ylab="p",
+% lwd=2, las=1, main="Some inverse probability link functions")
+%lines(y, probit(y, inverse=TRUE), col="purple", lwd=2)
+%lines(y, cloglog(y, inverse=TRUE), col="chocolate", lwd=2)
+%abline(h=0.5, v=0, lty="dashed")
+
+
+
+
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
new file mode 100644
index 0000000..9590718
--- /dev/null
+++ b/man/loglinb2.Rd
@@ -0,0 +1,100 @@
+\name{loglinb2}
+\alias{loglinb2}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Loglinear Model for Two Binary Responses }
+\description{
+ Fits a loglinear model to two binary responses.
+}
+\usage{
+loglinb2(exchangeable = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{exchangeable}{ Logical.
+ If \code{TRUE}, the two marginal probabilities are constrained to
+    be equal. Should be set to \code{TRUE} for data on paired organs such
+    as ears and eyes. }
+ \item{zero}{ Which linear/additive predictor is modelled as an
+ intercept only? A \code{NULL} means none of them. }
+}
+\details{
+ The model is
+ \deqn{P(Y_1=y_1,Y_2=y_2) = \exp(u_0+u_1 y_1+u_2 y_2+u_{12} y_1 y_2)}{%
+ P(Y1=y1,Y2=y2) = exp(u0 + u1*y1 + u2*y2 + u12*y1*y2)}
+ where \eqn{y_1}{y1} and \eqn{y_2}{y2} are 0 or 1, and
+ the parameters are \eqn{u_1}{u1}, \eqn{u_2}{u2},
+ \eqn{u_{12}}{u12}.
+ The normalizing parameter \eqn{u_0}{u0} can be expressed as a function
+ of the other parameters, viz.,
+ \deqn{u_0 = -\log[1 + \exp(u_1) + \exp(u_2) + \exp(u_1 + u_2 + u_{12})].}{%
+ u0 = -log[1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)].}
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+ When fitted, the \code{fitted.values} slot of the object contains the
+ four joint probabilities, labelled as
+ \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively.
+}
+
+\references{
+
+Yee, T. W. and Wild, C. J. (2001).
+Discussion to: ``Smoothing spline ANOVA for multivariate Bernoulli
+observations, with application to ophthalmology data (with discussion)''
+by Gao, F., Wahba, G., Klein, R., Klein, B.
+\emph{Journal of the American Statistical Association},
+\bold{96}, 127--160.
+
+McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response must be a two-column matrix of ones and zeros only.
+ This is more restrictive than \code{\link{binom2.or}}, which can handle
+ more types of input formats.
+
+}
+
+\seealso{
+ \code{\link{binom2.or}},
+ \code{\link{binom2.rho}},
+ \code{\link{loglinb3}}.
+}
+\examples{
+data(coalminers)
+coalminers = transform(coalminers, age=(age-42)/5)
+
+# Get the n x 4 matrix of counts
+temp = vglm(cbind(nBnW,nBW,BnW,BW) ~ age, binom2.or, coalminers)
+counts = round(c(weights(temp, type="prior")) * temp@y)
+
+# Create a n x 2 matrix response for loglinb2()
+fred = matrix(c(0,0, 0,1, 1,0, 1,1), 4, 2, byrow=TRUE)
+yy = kronecker(matrix(1, nrow(counts), 1), fred)
+wt = c(t(counts))
+age = rep(coalminers$age, rep(4, length(coalminers$age)))
+yy = yy[wt>0,]
+age = age[wt>0]
+wt = wt[wt>0]
+
+fit = vglm(yy ~ age, loglinb2, trace=TRUE, wei=wt)
+coef(fit, mat=TRUE) # Same! (at least for the log odds-ratio)
+summary(fit)
+
+# Try to reconcile this with McCullagh and Nelder (1989), p.234
+(0.166-0.131) / 0.027458 # 1.275 is approximately 1.25
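+
+# Sketch: for any u1, u2, u12, the normalizing constant u0 given in the
+# details makes the four joint probabilities sum to 1
+u1 = 0.1; u2 = -0.3; u12 = 0.5
+u0 = -log(1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12))
+y1 = c(0,0,1,1); y2 = c(0,1,0,1)
+sum(exp(u0 + u1*y1 + u2*y2 + u12*y1*y2))  # Should be 1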
+}
+\keyword{models}
+\keyword{regression}
+
+%coalminers$age = (coalminers$age - 42) / 5
+%counts = round(temp@prior.weights * temp@y)
diff --git a/man/loglinb3.Rd b/man/loglinb3.Rd
new file mode 100644
index 0000000..7c44ece
--- /dev/null
+++ b/man/loglinb3.Rd
@@ -0,0 +1,84 @@
+\name{loglinb3}
+\alias{loglinb3}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Loglinear Model for Three Binary Responses }
+\description{
+ Fits a loglinear model to three binary responses.
+}
+\usage{
+loglinb3(exchangeable = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{exchangeable}{ Logical.
+ If \code{TRUE}, the three marginal probabilities are constrained to
+ be equal. }
+ \item{zero}{ Which linear/additive predictor is modelled as an
+ intercept only? A \code{NULL} means none. }
+}
+\details{
+ The model is \eqn{P(Y_1=y_1,Y_2=y_2,Y_3=y_3) =}{P(Y1=y1,Y2=y2,Y3=y3) =}
+  \deqn{\exp(u_0+u_1 y_1+u_2 y_2+u_3 y_3+u_{12} y_1 y_2+
+             u_{13} y_1 y_3+u_{23} y_2 y_3)}{%
+        exp(u0 + u1*y1 + u2*y2 + u3*y3 + u12*y1*y2 + u13*y1*y3 + u23*y2*y3)}
+ where \eqn{y_1}{y1}, \eqn{y_2}{y2} and \eqn{y_3}{y3} are 0 or 1,
+ and the parameters are \eqn{u_1}{u1}, \eqn{u_2}{u2}, \eqn{u_3}{u3},
+ \eqn{u_{12}}{u12}, \eqn{u_{13}}{u13}, \eqn{u_{23}}{u23}. The
+ normalizing parameter \eqn{u_0}{u0} can be expressed as a function of
+ the other parameters. Note that a third-order association parameter,
+  \eqn{u_{123}}{u123} for the product \eqn{y_1 y_2 y_3}{y1*y2*y3},
+ is assumed to be zero for this family function.
+
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+ When fitted, the \code{fitted.values} slot of the object contains the
+ eight joint probabilities, labelled as \eqn{(Y_1,Y_2,Y_3)}{(Y1,Y2,Y3)}
+ = (0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0),
+ (1,1,1), respectively.
+
+}
+
+\references{
+
+Yee, T. W. and Wild, C. J. (2001).
+Discussion to: ``Smoothing spline ANOVA for multivariate Bernoulli
+observations, with application to ophthalmology data (with discussion)''
+by Gao, F., Wahba, G., Klein, R., Klein, B.
+\emph{Journal of the American Statistical Association},
+\bold{96}, 127--160.
+
+McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response must be a three-column matrix of ones and zeros only.
+  Note that each of the 8 combinations of the multivariate response
+  needs to appear in the data set; therefore data sets will need to
+  be large in order for this family function to work.
+
+}
+
+\seealso{
+ \code{\link{loglinb2}},
+ \code{\link{hunua}}.
+}
+\examples{
+data(hunua)
+fit = vglm(cbind(cyadea,beitaw,kniexc) ~ altitude, loglinb3, data=hunua)
+coef(fit, mat=TRUE)
+fitted(fit)[1:4,]
+summary(fit)
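+
+# Sketch: the fitted values are the eight joint probabilities, so each
+# row should sum to 1
+max(abs(rowSums(fitted(fit)) - 1))  # Should be near 0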
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/loglog.Rd b/man/loglog.Rd
new file mode 100644
index 0000000..ad381e7
--- /dev/null
+++ b/man/loglog.Rd
@@ -0,0 +1,102 @@
+\name{loglog}
+\alias{loglog}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Log-log Link Function }
+\description{
+ Computes the log-log transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to 1 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The log-log link function is commonly used for parameters that
+ are greater than unity.
+ Numerical values of \code{theta} close to 1 or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+}
+\value{
+  For \code{deriv = 0}, the log-log of \code{theta}, i.e.,
+ \code{log(log(theta))} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{exp(exp(theta))}.
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is
+ close to 1 unless \code{earg} is used.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{loge}},
+ \code{\link{logoff}}.
+}
+\examples{
+x = seq(0.8, 1.5, by=0.1)
+loglog(x) # Has NAs
+loglog(x, earg=list(bvalue=1.0 + .Machine$double.eps)) # Has no NAs
+
+x = seq(1.01, 10, len=100)
+loglog(x)
+max(abs(loglog(loglog(x), inverse=TRUE) - x)) # Should be 0
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
new file mode 100644
index 0000000..9a3fb84
--- /dev/null
+++ b/man/lognormal.Rd
@@ -0,0 +1,122 @@
+\name{lognormal}
+\alias{lognormal}
+\alias{lognormal3}
+%%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Lognormal Distribution }
+\description{
+ Maximum likelihood estimation of the (univariate)
+ lognormal distribution.
+
+}
+\usage{
+lognormal(lmeanlog = "identity", lsdlog = "loge", zero = NULL)
+lognormal3(lmeanlog = "identity", lsdlog = "loge",
+ powers.try = (-3):3, delta = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmeanlog, lsdlog}{
+ Parameter link functions applied to the mean and (positive)
+ \eqn{\sigma}{sigma} (standard deviation) parameter.
+ Both of these are on the log scale.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ For \code{lognormal()},
+ the values must be from the set \{1,2\} which correspond to
+ \code{mu}, \code{sigma}, respectively.
+ For \code{lognormal3()},
+ the values must be from the set \{1,2,3\} where 3 is for
+  \eqn{\lambda}{lambda}.
+
+ }
+ \item{powers.try}{
+  Numerical vector. The initial \eqn{\lambda}{lambda} is chosen
+ as the best value from \code{min(y) - 10^powers.try} where
+ \code{y} is the response.
+
+ }
+ \item{delta}{
+ Numerical vector. An alternative method for
+  obtaining an initial \eqn{\lambda}{lambda}. Here, \code{delta = min(y)-lambda}.
+ If given, this supersedes the \code{powers.try} argument.
+ The value must be positive.
+
+ }
+}
+\details{
+ A random variable \eqn{Y} has a 2-parameter lognormal distribution
+ if \eqn{\log(Y)}{log(Y)}
+ is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}.
+ The expected value of \eqn{Y}, which is
+ \deqn{E(Y) = \exp(\mu + 0.5 \sigma^2)}{%
+ E(Y) = exp(mu + 0.5 sigma^2)}
+  and not \eqn{\mu}{mu}, makes up the fitted values.
+
+ A random variable \eqn{Y} has a 3-parameter lognormal distribution
+ if \eqn{\log(Y-\lambda)}{log(Y-lambda)}
+ is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}. Here,
+ \eqn{\lambda < Y}{lambda < Y}.
+ The expected value of \eqn{Y}, which is
+ \deqn{E(Y) = \lambda + \exp(\mu + 0.5 \sigma^2)}{%
+ E(Y) = lambda + exp(mu + 0.5 sigma^2)}
+  and not \eqn{\mu}{mu}, makes up the fitted values.
+
+ \code{lognormal()} and \code{lognormal3()} fit the 2- and 3-parameter
+ lognormal distribution respectively. Clearly, if the location
+ parameter \eqn{\lambda=0}{lambda=0} then both distributions coincide.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+\author{ T. W. Yee }
+%\note{
+% The more commonly used 2-parameter lognormal distribution is the
+% 3-parameter lognormal distribution with \eqn{\lambda}{lambda} equal
+% to zero---see \code{\link{lognormal3}}.
+%
+%}
+
+\seealso{
+% \code{\link{lognormal3}},
+ \code{\link[stats]{rlnorm}},
+ \code{\link{normal1}}.
+}
+
+\examples{
+y = rlnorm(n <- 1000, meanlog=1.5, sdlog=exp(-0.8))
+fit = vglm(y ~ 1, lognormal, trace=TRUE)
+coef(fit, mat=TRUE)
+Coef(fit)
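+
+# Sketch: the fitted value is E(Y) = exp(mu + 0.5*sigma^2), not exp(mu)
+exp(1.5 + 0.5 * exp(-0.8)^2)  # True mean of Y
+c(mean(y), fitted(fit)[1])    # Both should be close to the value above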
+
+x = runif(n <- 1000)
+y = rlnorm(n, meanlog=0.5, sdlog=exp(x))
+fit = vglm(y ~ x, lognormal(zero=1), trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+
+n = 1000
+lambda = 4
+y = lambda + rlnorm(n, meanlog=1.5, sdlog=exp(-0.8))
+fit = vglm(y ~ 1, lognormal3, trace=TRUE)
+fit = vglm(y ~ 1, lognormal3, trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/logoff.Rd b/man/logoff.Rd
new file mode 100644
index 0000000..55bc609
--- /dev/null
+++ b/man/logoff.Rd
@@ -0,0 +1,91 @@
+\name{logoff}
+\alias{logoff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Log link function with an offset }
+\description{
+ Computes the log transformation with an offset,
+ including its inverse and the first two derivatives.
+}
+\usage{
+logoff(theta, earg = list(offset=0), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ List. Extra argument for passing in additional information.
+ The \code{offset} component of the list \code{earg}
+ is the offset value.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
+ \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
+ \item{short}{ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object. }
+ \item{tag}{ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}. }
+}
+\details{
+ The log-offset link function is very commonly used for parameters that
+ are greater than a certain value.
+ In particular, it is defined by \code{log(theta+offset)} where
+ \code{offset} is the offset value. For example,
+ if \code{offset=0.5} then the value of \code{theta} is restricted
+ to be greater than \eqn{-0.5}.
+
+ Numerical values of \code{theta} close to \code{-offset} or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+}
+\value{
+ For \code{deriv = 0}, the log of \code{theta+offset}, i.e.,
+ \code{log(theta+offset)} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{exp(theta)-offset}.
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ The default means this function is identical to \code{\link{loge}}.
+
+ Numerical instability may occur when \code{theta} is
+ close to \code{-offset}.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{loge}}.
+}
+\examples{
+\dontrun{
+logoff(seq(-0.2, 0.5, by=0.1))
+logoff(seq(-0.2, 0.5, by=0.1), earg=list(offset=0.5))
+log(seq(-0.2, 0.5, by=0.1) + 0.5)
+}
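+
+# Sketch: round-trip through the inverse, exp(theta) - offset
+theta = seq(0, 1, by=0.1)
+eta = logoff(theta, earg=list(offset=0.5))
+max(abs(logoff(eta, earg=list(offset=0.5), inverse=TRUE) - theta))  # Near 0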
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/lomax.Rd b/man/lomax.Rd
new file mode 100644
index 0000000..fa86229
--- /dev/null
+++ b/man/lomax.Rd
@@ -0,0 +1,99 @@
+\name{lomax}
+\alias{lomax}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Lomax Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ Lomax distribution.
+}
+\usage{
+lomax(link.scale = "loge", link.q = "loge",
+ init.scale = NULL, init.q = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.scale, link.q}{
+ Parameter link function applied to the
+ (positive) parameters \code{scale} and \code{q}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.scale, init.q}{
+ Optional initial values for \code{scale} and \code{q}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2\} which correspond to
+ \code{scale}, \code{q}, respectively.
+
+ }
+}
+\details{
+ The 2-parameter Lomax distribution is the 4-parameter
+ generalized beta II distribution with shape parameters \eqn{a=p=1}.
+It is probably more widely known as the Pareto (II) distribution.
+ It is also the 3-parameter Singh-Maddala distribution
+ with shape parameter \eqn{a=1}, as well as the
+ beta distribution of the second kind with \eqn{p=1}.
+ More details can be found in Kleiber and Kotz (2003).
+
+The Lomax distribution has density
+ \deqn{f(y) = q / [b \{1 + y/b\}^{1+q}]}{%
+ f(y) = q / [b (1 + y/b)^(1+q)]}
+ for \eqn{b > 0}, \eqn{q > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and \code{q} is a shape parameter.
+The cumulative distribution function is
+ \deqn{F(y) = 1 - [1 + (y/b)]^{-q}.}{%
+ F(y) = 1 - [1 + (y/b)]^(-q).}
+The mean is
+ \deqn{E(Y) = b/(q-1)}{%
+ E(Y) = b/(q-1)}
+provided \eqn{q > 1}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{Lomax}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{sinmad}},
+ \code{\link{fisk}},
+ \code{\link{invlomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rlomax(n=2000, 6, 2)
+fit = vglm(y ~ 1, lomax, trace=TRUE)
+fit = vglm(y ~ 1, lomax, trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
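+
+# Sketch: the mean is E(Y) = b/(q-1); here b=6 and q=2, so E(Y) = 6
+# (the distribution is heavy-tailed, so the sample mean converges slowly)
+mean(y)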
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/lv.Rd b/man/lv.Rd
new file mode 100644
index 0000000..01b279c
--- /dev/null
+++ b/man/lv.Rd
@@ -0,0 +1,84 @@
+\name{lv}
+\alias{lv}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Latent Variables }
+\description{
+ Generic function for the \emph{latent variables} of a model.
+}
+\usage{
+lv(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the extraction of latent
+ variables is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model. Sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+ }
+}
+\details{
+ Latent variables occur in reduced-rank regression models,
+ as well as in quadratic and additive ordination models.
+ For the latter two,
+ latent variable values are often called \emph{site scores}
+ by ecologists.
+ Latent variables are linear combinations of the explanatory
+ variables.
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Latent variables are not really applicable to
+ \code{\link{vglm}}/\code{\link{vgam}} models.
+}
+
+
+\seealso{
+ \code{lv.qrrvglm},
+ \code{lv.rrvglm},
+ \code{lv.cao},
+ \code{\link{lvplot}}.
+}
+
+\examples{
+\dontrun{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(123)
+p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider,
+ Rank = 1, df1.nl = c(Zoraspin=2.5, 3),
+ Bestof = 3, Crow1positive = TRUE)
+
+var(lv(p1))  # Scaled to unit variance
+c(lv(p1)) # Estimated site scores
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/lvplot.Rd b/man/lvplot.Rd
new file mode 100644
index 0000000..a0e6193
--- /dev/null
+++ b/man/lvplot.Rd
@@ -0,0 +1,79 @@
+\name{lvplot}
+\alias{lvplot}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Latent Variable Plot }
+\description{
+ Generic function for a \emph{latent variable plot}
+ (also known as an \emph{ordination diagram} by ecologists).
+}
+\usage{
+lvplot(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{ An object for which a latent
+    variable plot is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model. They usually are graphical
+ parameters, and sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+ }
+}
+\details{
+ Latent variables occur in reduced-rank regression models,
+ as well as in
+ quadratic and additive ordination. For the latter,
+ latent variables are often called the \emph{site scores}.
+  The term \emph{latent variable plot} was coined by Yee (2004);
+  such plots have a latent variable as at least one of their axes.
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Latent variables are not really applicable to
+ \code{\link{vglm}}/\code{\link{vgam}} models.
+}
+
+
+\seealso{
+ \code{\link{lvplot.qrrvglm}},
+ \code{lvplot.cao},
+ \code{\link{lv}},
+ \code{\link{trplot}}.
+}
+
+\examples{
+\dontrun{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(123)
+p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Bestof = 3,
+ df1.nl = c(Zoraspin=2.5, 3), Crow1positive = TRUE)
+index = 1:ncol(p1@y)
+lvplot(p1, lcol=index, pcol=index, y=TRUE, las=1)
+
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
new file mode 100644
index 0000000..9b0d348
--- /dev/null
+++ b/man/lvplot.qrrvglm.Rd
@@ -0,0 +1,324 @@
+\name{lvplot.qrrvglm}
+\alias{lvplot.qrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Latent Variable Plot for QO models }
+\description{
+Produces an ordination diagram (latent variable plot) for quadratic
+ordination (QO) models. For rank-1 models, the x-axis is the first
+ordination/constrained/canonical axis. For rank-2 models, the x- and
+y-axis are the first and second ordination axes respectively.
+
+}
+\usage{
+lvplot.qrrvglm(object, varlvI = FALSE, reference = NULL,
+ add = FALSE, plot.it = TRUE,
+ rug = TRUE, y = FALSE, type = c("fitted.values", "predictors"),
+ xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
+ ylab = if (Rank == 1) switch(type, predictors = "Predictors",
+ fitted.values = "Fitted values") else "Latent Variable 2",
+ pcex = par()$cex, pcol = par()$col, pch = par()$pch,
+ llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
+ label.arg = FALSE, adj.arg = -0.1,
+ ellipse = 0.95, Absolute = FALSE,
+ elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200,
+ chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd,
+ cpch = " ",
+ C = FALSE, OriginC = c("origin", "mean"),
+ Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd,
+ Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1,
+ sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
+ sfont = par()$font, check.ok = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A CQO or UQO object. }
+ \item{varlvI}{
+ Logical that is fed into \code{\link{Coef.qrrvglm}}.
+ }
+ \item{reference}{
+ Integer or character that is fed into \code{\link{Coef.qrrvglm}}.
+ }
+ \item{add}{ Logical. Add to an existing plot? If \code{FALSE}, a new
+ plot is made. }
+ \item{plot.it}{ Logical. Plot it? }
+ \item{rug}{ Logical. If \code{TRUE}, a rug plot is plotted at the
+ foot of the plot (applies to rank-1 models only).
+ These values are jittered to expose ties.
+ }
+ \item{y}{ Logical. If \code{TRUE}, the responses will be plotted
+    (applies only to rank-1 models and if \code{type="fitted.values"}).
+ }
+ \item{type}{ Either \code{"fitted.values"} or \code{"predictors"},
+ specifies whether the y-axis is on the response or eta-scales
+ respectively.
+ }
+ \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. }
+ \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. }
+ \item{pcex}{ Character expansion of the points.
+Here, for rank-1 models, points are the response \emph{y} data.
+For rank-2 models, points are the optima.
+ See the \code{cex} argument in \code{\link[graphics]{par}}. }
+ \item{pcol}{ Color of the points.
+ See the \code{col} argument in \code{\link[graphics]{par}}. }
+ \item{pch}{ Either an integer specifying a symbol or a single character
+ to be used as the default in plotting points.
+ See \code{\link[graphics]{par}}.
+ The \code{pch} argument can be of length \eqn{M}, the number of species.
+ }
+ \item{llty}{ Line type.
+ Rank-1 models only.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{lcol}{ Line color.
+ Rank-1 models only.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{llwd}{ Line width.
+ Rank-1 models only.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ \item{label.arg}{ Logical. Label the optima and \bold{C}?
+    (applies to rank-2 models only). }
+ \item{adj.arg}{ Justification of text strings for labelling the optima
+    (applies to rank-2 models only).
+ See the \code{adj} argument of \code{\link[graphics]{par}}. }
+
+ \item{ellipse}{
+    Numerical, of length 0 or 1 (applies to rank-2 models only).
+ If \code{Absolute} is \code{TRUE} then \code{ellipse} should be
+ assigned a value that is used for the elliptical contouring.
+ If \code{Absolute} is \code{FALSE} then \code{ellipse}
+ should be assigned a value between 0 and 1, for example,
+ setting \code{ellipse=0.9} means an ellipse with contour
+ = 90\% of the maximum will be plotted about each optimum.
+ If \code{ellipse} is a negative value, then the function checks
+ that the model is an equal-tolerances model and
+ \code{varlvI=FALSE}, and if so, plots circles with
+ radius \code{-ellipse}. For example, setting \code{ellipse=-1}
+ will result in circular contours that have unit radius (in latent
+ variable units). If \code{ellipse} is \code{NULL} or \code{FALSE}
+ then no ellipse is drawn around the optima.
+
+ }
+ \item{Absolute}{ Logical.
+ If \code{TRUE}, the contours corresponding to \code{ellipse}
+ are on an absolute scale.
+ If \code{FALSE}, the contours corresponding to \code{ellipse}
+ are on a relative scale.
+ }
+ \item{elty}{ Line type of the ellipses.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{ecol}{ Line color of the ellipses.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{elwd}{ Line width of the ellipses.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ \item{egrid}{ Numerical. Line resolution of the ellipses.
+ Choosing a larger value will result in smoother ellipses.
+ Useful when ellipses are large.
+ }
+
+ \item{chull.arg}{ Logical. Add a convex hull around the
+ site scores? }
+ \item{clty}{ Line type of the convex hull.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{ccol}{ Line color of the convex hull.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{clwd}{ Line width of the convex hull.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ \item{cpch}{ Character to be plotted at the intersection points of
+ the convex hull. Having white spaces means that site
+ labels are not obscured there.
+ See the \code{pch} argument of \code{\link[graphics]{par}}. }
+
+ \item{C}{ Logical. Add \bold{C} (represented by arrows emanating
+ from \code{OriginC}) to the plot? }
+ \item{OriginC}{ Character or numeric.
+ Where the arrows representing \bold{C} emanate from.
+ If character, it must be one of the choices given. By default the
+ first is chosen.
+ The value \code{"origin"} means \code{c(0,0)}.
+ The value \code{"mean"} means
+ the sample mean of the latent variables (centroid).
+ Alternatively, the user may specify a numerical vector of length 2.
+ }
+ \item{Clty}{ Line type of the arrows representing \bold{C}.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{Ccol}{ Line color of the arrows representing \bold{C}.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{Clwd}{ Line width of the arrows representing \bold{C}.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ \item{Ccex}{ Numeric. Character expansion of the labelling of \bold{C}.
+ See the \code{cex} argument of \code{\link[graphics]{par}}. }
+ \item{Cadj.arg}{ Justification of text strings when labelling \bold{C}.
+ See the \code{adj} argument of \code{\link[graphics]{par}}. }
+ \item{stretchC}{ Numerical. Stretching factor for \bold{C}.
+ Instead of using \bold{C}, \code{stretchC * } \bold{C} is used. }
+
+ \item{sites}{ Logical.
+ Add the site scores (aka latent variable
+ values, nu's) to the plot?
+    (applies to rank-2 models only).
+ }
+ \item{spch}{ Plotting character of the site scores.
+ The default value of \code{NULL} means the row labels of the
+ data frame are used. They often are the site numbers.
+ See the \code{pch} argument of \code{\link[graphics]{par}}.
+ }
+ \item{scol}{ Color of the site scores.
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+ }
+ \item{scex}{ Character expansion of the site scores.
+ See the \code{cex} argument of \code{\link[graphics]{par}}.
+ }
+ \item{sfont}{ Font used for the site scores.
+ See the \code{font} argument of \code{\link[graphics]{par}}.
+ }
+
+% \item{Rotate}{
+% Numeric or logical.
+% A value from the set \{1,2,\ldots,\eqn{M}\} indicating
+% which species (quadratic predictor) is to be chosen so that
+% its major and semi-minor axes are parallel to the latent variable
+% axes, i.e., that species' Tolerance matrix will be diagonal.
+% If \code{Rotate} is \code{TRUE}, the first species is selected for rotation.
+
+% By default a rotation is performed only if the tolerance matrices are equal,
+% and \code{Rotation} only applies when the rank is greater than one.
+
+% See \code{\link{Coef.qrrvglm}} for details.
+% }
+
+% \item{ITolerances}{
+% Logical.
+% If \code{TRUE}, the tolerances matrices are transformed so that they are
+% the order-\code{Rank} identity matrix. This means that a rank-2
+% latent variable plot
+% can be interpreted naturally in terms of distances and directions.
+% See \code{\link{Coef.qrrvglm}} for details.
+% }
+
+ \item{check.ok}{ Logical. Whether a check is performed to see
+ that \code{Norrr = ~ 1} was used.
+ It doesn't make sense to have a latent variable plot unless this is so.
+ }
+ \item{\dots}{ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{xlim} and \code{ylim}.
+ }
+}
+\details{
+ This function only works for rank-1 and rank-2 QRR-VGLMs with argument
+ \code{Norrr = ~ 1}.
+
+ For unequal-tolerances models, the latent variable axes can be
+ rotated so that at least one of the tolerance matrices is diagonal;
+ see \code{\link{Coef.qrrvglm}} for details.
+
+  Arguments beginning with ``\code{p}'' correspond to the points, e.g.,
+ \code{pcex} and \code{pcol} correspond to the size and color of the
+ points. Such ``\code{p}'' arguments should be vectors of length 1,
+ or \eqn{n}, the number of sites. For the rank-2 model, arguments
+ beginning with ``\code{p}'' correspond to the optima.
+
+}
+\value{
+ Returns a matrix of latent variables (site scores)
+ regardless of whether a plot was produced or not.
+
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+}
+
+\author{ Thomas W. Yee }
+
+\note{
+ A species which does not have an optimum will not have an ellipse
+ drawn even if requested, i.e., if its tolerance matrix is not
+ positive-definite.
+
+% Stationary points which are not bell-shaped will not be plotted
+% at all.
+
+ Plotting \bold{C} gives a visual display of the weights (loadings)
+ of each of the variables used in the linear combination defining each
+ latent variable.
+
+ The arguments \code{elty}, \code{ecol} and \code{elwd}, may be replaced
+ in the future by \code{llty}, \code{lcol} and \code{llwd}, respectively.
+
+ For rank-1 models, a similar function to this one is
+ \code{\link{persp.qrrvglm}}. It plots the fitted values on a more
+ fine grid rather than at the actual site scores here. The result is a
+ collection of smooth bell-shaped curves. However, it has the weakness
+ that the plot is more divorced from the data; the user thinks it is
+ the truth without an appreciation of the statistical variability in
+ the estimates.
+
+% Yet to do: allow for the contour line to correspond to the tolerance
+% matrix itself. zz ??
+
+ In the example below, the data comes from an equal-tolerances model.
+ The species' tolerance matrices are all the identity matrix,
+  and the optima are at (0,0), (1,1) and (-2,0) for species 1, 2,
+ 3 respectively.
+
+}
+
+\section{Warning}{
+ Interpretation of a latent variable plot (CQO diagram) is potentially
+ very misleading in terms of distances if (i) the tolerance matrices
+ of the species are unequal and (ii) the contours of these tolerance
+ matrices are not included in the ordination diagram.
+
+}
+
+\seealso{
+\code{\link{lvplot}},
+\code{\link{persp.qrrvglm}},
+\code{\link{Coef.qrrvglm}},
+\code{\link[graphics]{par}},
+\code{\link{cqo}}.
+}
+
+\examples{
+set.seed(123)
+x2 = rnorm(n <- 200) # Has mean 0 (needed when ITol=TRUE)
+x3 = rnorm(n) # Has mean 0 (needed when ITol=TRUE)
+x4 = rnorm(n) # Has mean 0 (needed when ITol=TRUE)
+lv1 = x2 + x3 - 2*x4
+lv2 = -x2 + x3 + 0*x4 # lv2 is weakly correlated with lv1
+lambda1 = exp(6 - 0.5 * (lv1-0)^2 - 0.5 * (lv2-0)^2)
+lambda2 = exp(5 - 0.5 * (lv1-1)^2 - 0.5 * (lv2-1)^2)
+lambda3 = exp(5 - 0.5 * (lv1+2)^2 - 0.5 * (lv2-0)^2)
+spp1 = rpois(n, lambda1)
+spp2 = rpois(n, lambda2)
+spp3 = rpois(n, lambda3)
+set.seed(111)
+p2 = cqo(cbind(spp1,spp2,spp3) ~ x2 + x3 + x4, poissonff,
+ Rank=2, ITolerances=TRUE,
+ Crow1positive=c(TRUE,FALSE)) # deviance = 505.81
+if(deviance(p2) > 506) stop("suboptimal fit obtained")
+sort(p2@misc$deviance.Bestof) # A history of the fits
+Coef(p2)
+
+\dontrun{
+lvplot(p2, sites=TRUE, spch="*", scol="darkgreen", scex=1.5,
+ chull=TRUE, label=TRUE, Absolute=TRUE, ellipse=140,
+ adj=-0.5, pcol="blue", pcex=1.3, las=1,
+ C=TRUE, Cadj=c(-.3,-.3,1), Clwd=2, Ccex=1.4, Ccol="red",
+ main=paste("Contours at Abundance=140 with",
+ "convex hull of the site scores"))
+}
+var(lv(p2)) # A diagonal matrix, i.e., uncorrelated latent variables
+var(lv(p2, varlvI=TRUE)) # Identity matrix
+Tol(p2)[,,1:2] # Identity matrix
+Tol(p2, varlvI=TRUE)[,,1:2] # A diagonal matrix
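+
+# A sketch (assuming the plot.it argument, as for lvplot.rrvglm):
+# the matrix of site scores can be obtained without drawing a plot
+lvmat = lvplot(p2, plot.it=FALSE)
+all.equal(c(lvmat), c(lv(p2))) # Should be TRUE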
+}
+\keyword{models}
+\keyword{regression}
+\keyword{hplot}
+
diff --git a/man/lvplot.rrvglm.Rd b/man/lvplot.rrvglm.Rd
new file mode 100644
index 0000000..7407647
--- /dev/null
+++ b/man/lvplot.rrvglm.Rd
@@ -0,0 +1,164 @@
+\name{lvplot.rrvglm}
+\alias{lvplot.rrvglm}
+\alias{biplot.rrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Latent Variable Plot for RR-VGLMs }
+\description{
+ Produces an \emph{ordination diagram} (also known as a \emph{biplot} or
+ \emph{latent variable plot}) for \emph{reduced-rank vector generalized
+linear models} (RR-VGLMs). For rank-2 models only, the x- and y-axes
+ are the first and second canonical axes respectively.
+
+}
+\usage{
+lvplot.rrvglm(object,
+ A = TRUE, C = TRUE, scores = FALSE, plot.it = TRUE,
+ groups = rep(1, n), gapC = sqrt(sum(par()$cxy^2)),
+ scaleA = 1,
+ xlab = "Latent Variable 1", ylab = "Latent Variable 2",
+ Alabels = if (length(object@misc$predictors.names))
+ object@misc$predictors.names else paste("LP", 1:M, sep = ""),
+ Aadj = par()$adj, Acex = par()$cex, Acol = par()$col,
+ Apch = NULL,
+ Clabels = dimnames(Cmat)[[1]], Cadj = par()$adj,
+ Ccex = par()$cex, Ccol = par()$col, Clty = par()$lty,
+ Clwd = par()$lwd,
+ chull.arg = FALSE, ccex = par()$cex, ccol = par()$col,
+ clty = par()$lty, clwd = par()$lwd,
+ spch = NULL, scex = par()$cex, scol = par()$col,
+ slabels = dimnames(x2mat)[[1]], ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ Object of class \code{"rrvglm"}. }
+ \item{A}{ Logical. Allow the plotting of \bold{A}? }
+ \item{C}{ Logical. Allow the plotting of \bold{C}? If \code{TRUE} then
+ \bold{C} is represented by arrows emanating from the origin. }
+ \item{scores}{ Logical. Allow the plotting of the \eqn{n} scores?
+ The scores are the values of the latent variables for each
+ observation. }
+ \item{plot.it}{ Logical. Plot it? If \code{FALSE}, no plot is produced
+ and the matrix of scores (\eqn{n} latent variable values) is returned.
+ If \code{TRUE}, the rank of \code{object} need not be 2.
+ }
+ \item{groups}{ A vector whose distinct values indicate which group the
+ observation belongs to. By default, all the observations belong to a
+ single group. Useful for the multinomial logit model (see
+ \code{\link{multinomial}}).}
+ \item{gapC}{ The gap between the end of the arrow and the text labelling
+ of \bold{C}, in latent variable units.}
+ \item{scaleA}{ Numerical value by which \bold{A} is multiplied;
+ correspondingly, \bold{C} is divided by this value. }
+ \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. }
+ \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. }
+ \item{Alabels}{ Character vector to label \bold{A}. Must be of length
+ \eqn{M}. }
+ \item{Aadj}{ Justification of text strings for labelling \bold{A}.
+ See the \code{adj} argument of \code{\link[graphics]{par}}. }
+ \item{Acex}{ Numeric. Character expansion of the labelling of \bold{A}.
+ See the \code{cex} argument of \code{\link[graphics]{par}}. }
+ \item{Acol}{ Color of the labelling of \bold{A}.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{Apch}{ Either an integer specifying a symbol or a single character
+ to be used as the default in plotting points.
+ See \code{\link[graphics]{par}}.
+ The \code{pch} argument can be of length \eqn{M}, the number of species. }
+ \item{Clabels}{ Character vector to label \bold{C}. Must be of length
+ \eqn{p2}. }
+ \item{Cadj}{ Justification of text strings for labelling \bold{C}.
+ See the \code{adj} argument of \code{\link[graphics]{par}}. }
+ \item{Ccex}{ Numeric. Character expansion of the labelling of \bold{C}.
+ See the \code{cex} argument of \code{\link[graphics]{par}}. }
+ \item{Ccol}{ Line color of the arrows representing \bold{C}.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{Clty}{ Line type of the arrows representing \bold{C}.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{Clwd}{ Line width of the arrows representing \bold{C}.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ \item{chull.arg}{ Logical. Plot the convex hull of the scores? This is
+ done for each group (see the \code{group} argument). }
+ \item{ccex}{ Numeric. Character expansion of the labelling of the convex hull.
+ See the \code{cex} argument of \code{\link[graphics]{par}}. }
+ \item{ccol}{ Line color of the convex hull.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{clty}{ Line type of the convex hull.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{clwd}{ Line width of the convex hull.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ \item{spch}{ Either an integer specifying a symbol or a single character
+ to be used as the default in plotting points.
+ See \code{\link[graphics]{par}}.
+ The \code{spch} argument can be of length \eqn{M}, the number of species. }
+ \item{scex}{ Numeric. Character expansion of the labelling of the scores.
+ See the \code{cex} argument of \code{\link[graphics]{par}}. }
+ \item{scol}{ Color of the scores.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{slabels}{ Character vector to label the scores.
+ Must be of length \eqn{n}. }
+ \item{\dots}{ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{xlim} and \code{ylim}.
+ }
+}
+\details{
+ For RR-VGLMs, a \emph{biplot} and a \emph{latent variable} plot coincide.
+ In general, many of the arguments starting with
+ ``A'' refer to \bold{A} (of length \eqn{M}),
+ ``C'' to \bold{C} (of length \eqn{p2}),
+ ``c'' to the convex hull (of length \code{length(unique(groups))}),
+ and ``s'' to scores (of length \eqn{n}).
+
+ As the result is a biplot, its interpretation is based on the inner
+ product.
+
+}
+\value{
+ The matrix of scores (\eqn{n} latent variable values) is returned
+ regardless of whether a plot was produced or not.
+
+}
+
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+}
+\author{ Thomas W. Yee }
+\note{
+% Further work to be done: This function could be hooked up
+% to the normalization code of \code{\link{rrvglm}} to allow uncorrelated
+% latent variables etc.
+
+ The functions \code{\link{lvplot.rrvglm}} and
+ \code{\link{biplot.rrvglm}} are equivalent.
+
+ In the example below the predictor variables are centered, which
+ is a good idea.
+}
+
+\seealso{
+ \code{\link{lvplot}},
+ \code{\link[graphics]{par}},
+ \code{\link{rrvglm}},
+ \code{\link{Coef.rrvglm}},
+ \code{\link{rrvglm.control}}.
+}
+\examples{
+data(pneumo)
+n = nrow(pneumo) # x1, x2 and x3 are some unrelated covariates
+pneumo = transform(pneumo, slet=scale(log(exposure.time)),
+ x1 = rnorm(n), x2 = rnorm(n), x3 = rnorm(n))
+fit = rrvglm(cbind(normal, mild, severe) ~ slet + x1 + x2 + x3,
+ multinomial, pneumo, Rank=2, Corner=FALSE, Uncor=TRUE)
+\dontrun{
+lvplot(fit, chull=TRUE, scores=TRUE, clty=2, ccol="blue", scol="red",
+ Ccol="darkgreen", Clwd=2, Ccex=2,
+ main="Biplot of some fictitional data")
+}
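+
+# A sketch: with plot.it=FALSE, the matrix of scores is returned
+# without a plot being drawn
+scores = lvplot(fit, plot.it=FALSE)
+dim(scores) # Should be c(nrow(pneumo), 2)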
+}
+\keyword{models}
+\keyword{regression}
+\keyword{hplot}
+
+% pneumo$slet = scale(log(pneumo$exposure.time))
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
new file mode 100644
index 0000000..4b883c0
--- /dev/null
+++ b/man/maxwell.Rd
@@ -0,0 +1,61 @@
+\name{maxwell}
+\alias{maxwell}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Maxwell Distribution Family Function }
+\description{
+ Estimating the parameter of the Maxwell distribution by
+ maximum likelihood estimation.
+}
+\usage{
+maxwell(link = "loge")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the parameter \eqn{a}.
+ See \code{\link{Links}} for more choices.
+ A log link is the default because the parameter is positive.
+
+ }
+}
+\details{
+ The Maxwell distribution, which is used in the area of
+ thermodynamics,
+ has a probability density function that can be written
+ \deqn{f(y) = \sqrt{2/\pi} a^{3/2} y^2 \exp(-0.5 a y^2)}{%
+ f(y) = sqrt(2/pi) * a^(3/2) * y^2 * exp(-0.5*a*y^2)}
+ for \eqn{y>0} and \eqn{a>0}.
+ The mean of \eqn{Y} is
+ \eqn{\sqrt{8 / (a \pi)}}{sqrt(8 / (a * pi))}
+ (returned as the fitted values), and its variance is
+ \eqn{(3\pi - 8)/(\pi a)}{(3*pi - 8)/(pi*a)}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+ von Seggern, D. H. (1993)
+ \emph{CRC Standard Curves and Surfaces},
+ Boca Raton, FL.: CRC Press.
+}
+\author{ T. W. Yee }
+\note{
+A related distribution is the Rayleigh distribution.
+
+}
+
+\seealso{
+ \code{\link{Maxwell}},
+ \code{\link{rayleigh}}.
+}
+\examples{
+y = rmaxwell(n <- 1000, a=exp(2))
+fit = vglm(y ~ 1, maxwell, trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
+Coef(fit)
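+
+# A sketch: check the fitted mean sqrt(8/(a*pi)) against the sample mean
+a.hat = Coef(fit)[1] # The estimate of a
+c(sqrt(8 / (a.hat * pi)), mean(y), fitted(fit)[1]) # Should be close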
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
new file mode 100644
index 0000000..c5ff8a6
--- /dev/null
+++ b/man/mccullagh89.Rd
@@ -0,0 +1,125 @@
+\name{mccullagh89}
+\alias{mccullagh89}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{McCullagh (1989) Distribution Family Function}
+\description{
+ Estimates the two parameters of the McCullagh (1989) distribution by
+ maximum likelihood estimation.
+
+}
+\usage{
+mccullagh89(ltheta="rhobit", lnu="logoff",
+ itheta=NULL, inu=NULL,
+ etheta=list(),
+ enu=if(lnu == "logoff") list(offset=0.5) else list(),
+ zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{ltheta, lnu}{
+ Link functions for the \eqn{\theta}{theta} and \eqn{\nu}{nu} parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{itheta, inu}{
+ Numeric.
+ Optional initial values for \eqn{\theta}{theta} and \eqn{\nu}{nu}.
+ The default is to internally compute them.
+
+ }
+ \item{etheta, enu}{
+ List. Extra argument associated with \code{ltheta} and \code{lnu}
+ containing any extra information.
+ See \code{\link{Links}} for general information about \pkg{VGAM} link
+ functions.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The default is none of them.
+ If used, choose one value from the set \{1,2\}.
+
+ }
+}
+\details{
+ The McCullagh (1989) distribution has density function
+ \deqn{f(y;\theta,\nu) =
+\frac{ \{ 1-y^2 \}^{\nu-\frac12}}
+{ (1-2\theta y + \theta^2)^{\nu} \mbox{Beta}(\nu+\frac12, \frac12)}}{%
+f(y;theta,nu) =
+(1-y^2)^(nu-0.5) / [ (1 - 2*theta*y+theta^2)^nu * Beta(nu+0.5, 0.5)]}
+where \eqn{-1 < y < 1} and \eqn{-1 < \theta < 1}{-1 < theta < 1}.
+This distribution is equation (1) in that paper.
+The parameter \eqn{\nu}{nu} satisfies \eqn{\nu > -1/2}{nu > -1/2},
+therefore the default is to use a log-offset link
+with offset equal to 0.5, i.e.,
+\eqn{\eta_2=\log(\nu+0.5)}{eta_2=log(nu+0.5)}.
+The mean of \eqn{Y} is \eqn{\nu \theta / (1+\nu)}{nu*theta/(1+nu)},
+and this is returned as the fitted values.
+
+This distribution is related to the Leipnik distribution (see Johnson
+et al. (1995)), is related to ultraspherical functions,
+and under certain conditions, arises as exit
+distributions for Brownian motion.
+Fisher scoring is implemented here and it uses a diagonal matrix
+so the parameters are globally orthogonal in the Fisher information
+sense.
+McCullagh (1989) also states that, to some extent, \eqn{\theta}{theta}
+and \eqn{\nu}{nu} have the properties of a location parameter and a
+precision parameter, respectively.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+
+McCullagh, P. (1989)
+Some statistical properties of a family of continuous univariate distributions.
+\emph{Journal of the American Statistical Association},
+\bold{84}, 125--129.
+
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+\emph{Continuous Univariate Distributions},
+2nd edition,
+Volume 2,
+New York: Wiley.
+(pages 612--617).
+
+}
+\author{ T. W. Yee }
+\note{
+Convergence may be slow or fail unless the initial values are reasonably
+close. If a failure occurs, try assigning the argument \code{inu} and/or
+\code{itheta}. Figure 1 of McCullagh (1989) gives a broad range of
+densities for different values of \eqn{\theta}{theta} and \eqn{\nu}{nu},
+and this could be consulted for obtaining reasonable initial values if
+all else fails.
+
+}
+\seealso{
+ \code{\link{leipnik}},
+ \code{\link{rhobit}},
+ \code{\link{logoff}}.
+}
+
+%\section{Warning }{
+%}
+
+\examples{
+n = 1000
+y = rnorm(n, mean=0.0, sd=0.2) # Limiting distribution as theta -> 0, nu -> Inf
+fit = vglm(y ~ 1, mccullagh89, trace=TRUE)
+fitted(fit)[1:5]
+mean(y)
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
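+
+# A sketch: compare the fitted mean nu*theta/(1+nu) with the sample mean
+theta.hat = Coef(fit)[1]; nu.hat = Coef(fit)[2] # Assumed (theta, nu) order
+c(nu.hat * theta.hat / (1 + nu.hat), mean(y)) # Should be close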
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/mckaygamma2.Rd b/man/mckaygamma2.Rd
new file mode 100644
index 0000000..4d40bb4
--- /dev/null
+++ b/man/mckaygamma2.Rd
@@ -0,0 +1,106 @@
+\name{mckaygamma2}
+\alias{mckaygamma2}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ McKay's Bivariate Gamma Distribution }
+\description{
+ Estimate the two parameters of McKay's bivariate gamma distribution
+ using maximum likelihood estimation.
+}
+\usage{
+mckaygamma2(la = "loge", lp = "loge", lq = "loge",
+ ia = NULL, ip = 1, iq = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{la, lp, lq}{
+ Link functions applied to the (positive)
+ parameters \eqn{a}, \eqn{p} and \eqn{q}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ia, ip, iq}{
+ Initial values for \eqn{a}, \eqn{p} and \eqn{q}.
+ The default for \eqn{a} is to estimate it using \code{ip} and \code{iq}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default is none of them.
+
+ }
+}
+\details{
+ The joint probability density function is given by
+ \deqn{f(y_1,y_2) = a^{p+q} y_1^{p-1} (y_2-y_1)^{q-1}
+ \exp(-a y_2) / [\Gamma(p) \Gamma(q)]}{%
+ f(y1,y2) = a^(p+q) y1^(p-1) (y2-y1)^(q-1)
+ exp(-a y2) / [gamma(p) gamma(q)] }
+ for \eqn{a > 0}, \eqn{p > 0}, \eqn{q > 0} and
+ \eqn{0 < y_1 < y_2}{0<y1<y2}.
+ Here, \eqn{\Gamma}{gamma} is the gamma
+ function, as in \code{\link[base:Special]{gamma}}.
+ By default, the linear/additive predictors are
+ \eqn{\eta_1=\log(a)}{eta1=log(a)},
+ \eqn{\eta_2=\log(p)}{eta2=log(p)},
+ \eqn{\eta_3=\log(q)}{eta3=log(q)}.
+
+ Although Fisher scoring and Newton-Raphson coincide for this
+ distribution, faster convergence may be obtained by choosing
+ better values for the arguments \code{ip} and \code{iq}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+
+%% improve the references
+\references{
+
+McKay, A. T. (1934)
+Sampling from batches.
+\emph{Journal of the Royal Statistical Society---Supplement},
+\bold{1}, 207--216.
+
+Kotz, S. and Balakrishnan, N. and Johnson, N. L. (2000)
+\emph{Continuous Multivariate Distributions Volume 1: Models
+and Applications},
+2nd edition,
+New York: Wiley.
+
+
+}
+\author{ T. W. Yee }
+\note{
+ The response must be a two column matrix.
+ Currently, the fitted value is a matrix with two columns; the
+ first column has values \eqn{(p+q)/a} for the mean of
+ \code{pmin(y1,y2)}, while the second column is filled with \code{NA}
+ for the unknown mean of \code{pmax(y1,y2)}.
+ The data are sorted internally and the user need not input the
+ data presorted.
+
+}
+
+\seealso{
+ \code{\link{gamma2}}.
+
+}
+\examples{
+y1 = rgamma(n <- 200, shape=4)
+y2 = rgamma(n, shape=8)
+ymat = cbind(y1,y2)
+fit = vglm(ymat ~ 1, fam=mckaygamma2, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+vcov(fit)
+fitted(fit)[1:5,]
+summary(fit)
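+
+# A sketch: reconstruct the first fitted column, (p+q)/a, from the
+# estimates; the order (a, p, q) of Coef(fit) is assumed here
+a.hat = Coef(fit)[1]; p.hat = Coef(fit)[2]; q.hat = Coef(fit)[3]
+c((p.hat + q.hat) / a.hat, fitted(fit)[1,1]) # Should agree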
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/meplot.Rd b/man/meplot.Rd
new file mode 100644
index 0000000..ea86861
--- /dev/null
+++ b/man/meplot.Rd
@@ -0,0 +1,92 @@
+\name{meplot}
+\alias{meplot}
+\alias{meplot.default}
+\alias{meplot.vlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Mean Excess Plot }
+\description{
+ Mean excess plot (also known as a mean residual life plot),
+ a diagnostic plot for the generalized Pareto distribution (GPD).
+}
+\usage{
+meplot(object, ...)
+meplot.default(y, main="Mean Excess Plot",
+ xlab="Threshold", ylab="Mean Excess", lty=c(2,1:2),
+ conf=0.95, col=c("blue","black","blue"), type="l", ...)
+meplot.vlm(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{y}{ A numerical vector. \code{NA}s etc. are not allowed.}
+ \item{main}{Character. Overall title for the plot. }
+ \item{xlab}{Character. Title for the x axis. }
+ \item{ylab}{Character. Title for the y axis. }
+ \item{lty}{Line type. The second value is for the mean
+ excess value, the first and third values are for the envelope
+ surrounding the confidence interval.}
+ \item{conf}{Confidence level. The default results in approximate 95
+ percent confidence intervals for each mean excess value. }
+ \item{col}{Colour of the three lines. }
+ \item{type}{Type of plot. The default means lines are
+ joined between the mean excesses and also the upper and lower
+ limits of the confidence intervals. }
+ \item{object}{ An object that inherits class \code{"vlm"},
+ usually of class \code{\link{vglm-class}} or
+ \code{\link{vgam-class}}. }
+ \item{\dots}{ Graphical argument passed into
+ \code{\link[graphics]{plot}}. See \code{\link[graphics]{par}}
+ for an exhaustive list. The arguments \code{xlim} and
+ \code{ylim} are particularly useful. }
+}
+\details{
+ If \eqn{Y} has a GPD with scale parameter
+ \eqn{\sigma}{sigma} and shape parameter \eqn{\xi<1}{xi<1},
+ and if \eqn{u>0}, then
+ \deqn{E(Y-u|Y>u) = \frac{\sigma+\xi u}{1-\xi}.}{%
+ E(Y-u|Y>u) = (sigma + xi*u) / (1-xi).}
+ This is a linear function of \eqn{u}, the threshold.
+ Note that \eqn{Y-u} is called the \emph{excess} and
+ values of \eqn{Y} greater than \eqn{u} are called \emph{exceedances}.
+ The empirical version used by these functions estimates the
+ left-hand side of the equation by sample means.
+ Values of \eqn{u} in the plot are the values of \eqn{y} itself.
+ If the plot is roughly a straight line then the GPD is a good
+ fit; this plot can be used to select an appropriate threshold
+ value. See \code{\link{gpd}} for more details.
+ If the plot is flat then the data may be exponential,
+ and if it is curved then it may be Weibull or gamma.
+
+ The function \code{meplot} is generic;
+ \code{meplot.default} and \code{meplot.vlm} are
+ methods for producing mean excess plots.
+
+}
+\value{
+ A list is returned invisibly with the following components.
+ \item{threshold }{The x axis values. }
+ \item{meanExcess }{The y axis values. Each value is a sample
+ mean minus a value \eqn{u}. }
+}
+\references{
+Davison, A. C. and Smith, R. L. (1990)
+Models for exceedances over high thresholds (with discussion).
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{52}, 393--442.
+
+Coles, S. (2001)
+\emph{An Introduction to Statistical Modeling of Extreme Values}.
+London: Springer-Verlag.
+}
+\author{ T. W. Yee }
+\note{ The function is designed for speed rather than
+ accuracy; therefore huge data sets with extremely large values
+ may cause failure (the function \code{\link[base]{cumsum}}
+ is used). Ties may not be well handled.
+}
+\seealso{ \code{\link{gpd}}. }
+\examples{\dontrun{meplot(runif(500), las=1) -> i
+names(i)
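+
+# A sketch: exponential data should give a roughly flat mean excess plot
+meplot(rexp(500), las=1)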
+}}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/micmen.Rd b/man/micmen.Rd
new file mode 100644
index 0000000..6ab9f25
--- /dev/null
+++ b/man/micmen.Rd
@@ -0,0 +1,123 @@
+\name{micmen}
+\alias{micmen}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Michaelis-Menten Model }
+\description{
+ Fits a Michaelis-Menten nonlinear regression model.
+}
+\usage{
+micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
+ link1 = "identity", link2 = "identity",
+ dispersion = 0, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{rpar}{
+ Numeric. Initial positive ridge parameter. This is used to create
+ positive-definite weight matrices.
+
+ }
+ \item{divisor}{
+ Numerical. The divisor used to divide the ridge parameter at each
+ iteration until it is very small but still positive. The value of
+ \code{divisor} should be greater than one.
+
+ }
+ \item{init1, init2}{
+ Numerical. Initial value for the first and second parameters,
+ respectively. The default is to use a self-starting value.
+
+ }
+ \item{link1, link2}{
+ Parameter link function applied to the first and second
+ parameters, respectively.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{dispersion}{
+ Numerical. Dispersion parameter.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2\}.
+ A \code{NULL} means none.
+
+ }
+}
+\details{
+ The Michaelis-Menten model is given by
+ \deqn{E(Y_i) = (\theta_1 x_i) / (\theta_2 + x_i)}{%
+ E(Y_i) = theta1 * x_i / (theta2 + x_i)}
+ where \eqn{\theta_1}{theta1} and \eqn{\theta_2}{theta2}
+ are the two parameters.
+
+ The relationship between iteratively reweighted least squares
+ and the Gauss-Newton algorithm is given in Wedderburn (1974).
+ However, the algorithm used by this family function is different.
+ Details are given at the Author's web site.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+ Seber, G. A. F. and Wild, C. J. (1989)
+ \emph{Nonlinear Regression},
+ New York: Wiley.
+
+ Wedderburn, R. W. M. (1974)
+ Quasi-likelihood functions, generalized linear models,
+ and the Gauss-Newton method.
+ \emph{Biometrika},
+ \bold{61}, 439--447.
+
+ Bates, D. M. and Watts, D. G. (1988)
+ \emph{Nonlinear Regression Analysis and Its Applications},
+ New York: Wiley.
+
+ Documentation accompanying the \pkg{VGAM} package at
+ \url{http://www.stat.auckland.ac.nz/~yee}
+ contains further information and examples.
+
+}
+\author{ T. W. Yee }
+\note{
+ The regressor values \eqn{x_i}{x_i} are inputted as a vector in the
+ \code{regressor} argument.
+ To predict the response at new values of \eqn{x_i}{x_i} one must
+ assign those values to the \code{@extra$uvec} slot of the fitted
+ object; see the example below.
+
+ Numerical problems may occur. If so, try setting some initial values
+ for the parameters. In the future, several self-starting initial values
+ will be implemented.
+
+}
+
+\seealso{
+ \code{\link{enzyme}}.
+% \code{skira}.
+}
+\examples{
+data(enzyme)
+fit = vglm(velocity ~ 1, micmen, enzyme, trace=TRUE, crit="c",
+ regressor=enzyme$conc)
+\dontrun{
+attach(enzyme)
+plot(conc, velocity, xlab="concentration", las=1, main="Enzyme data")
+lines(conc, fitted(fit), col="blue") # Join up the fitted values
+detach(enzyme)
+
+# Predict the response at other concentrations and add it to the plot
+newdata = data.frame(concentration=seq(0, 2, len=200))
+fit@extra$uvec = newdata$concentration
+lines(newdata$concentration, predict(fit, newdata=newdata, type="response"),
+ col="red")
+}
+summary(fit)
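+
+# A sketch: reconstruct the fitted curve from the two estimated
+# parameters; the order (theta1, theta2) of Coef(fit) is assumed
+theta = Coef(fit)
+max(abs(theta[1] * enzyme$conc / (theta[2] + enzyme$conc) -
+ fitted(fit))) # Should be small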
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/mix2normal1.Rd b/man/mix2normal1.Rd
new file mode 100644
index 0000000..506876b
--- /dev/null
+++ b/man/mix2normal1.Rd
@@ -0,0 +1,174 @@
+\name{mix2normal1}
+\alias{mix2normal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Mixture of Two Univariate Normal Distributions }
+\description{
+ Estimates the five parameters of a mixture of two univariate
+ normal distributions by maximum likelihood estimation.
+
+}
+\usage{
+mix2normal1(lphi="logit", lmu="identity", lsd="loge",
+ iphi=0.5, imu1=NULL, imu2=NULL, isd1=NULL, isd2=NULL,
+ qmu=c(0.2, 0.8), esd=FALSE, zero=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lphi}{
+ Link function for the parameter \eqn{\phi}{phi}.
+ See below for more details.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lmu}{
+ Link function applied to each \eqn{\mu}{mu} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lsd}{
+ Link function applied to each \eqn{\sigma}{sd} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iphi}{
+ Initial value for \eqn{\phi}{phi}, whose value must lie
+ between 0 and 1.
+
+ }
+ \item{imu1, imu2}{
+ Optional initial value for \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2}.
+ The default is to compute initial values internally using
+ the argument \code{qmu}.
+
+ }
+ \item{isd1, isd2}{
+ Optional initial value for \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2}.
+ The default is to compute initial values internally based on
+ the argument \code{qmu}.
+
+ }
+ \item{qmu}{
+ Vector with two values giving the probabilities relating to the sample
+ quantiles for obtaining initial values for \eqn{\mu_1}{mu1}
+ and \eqn{\mu_2}{mu2}.
+ The two values are fed in as the \code{probs} argument into
+ \code{\link[stats]{quantile}}.
+
+ }
+ \item{esd}{
+ Logical indicating whether the two standard deviations should be
+ constrained to be equal. If set \code{TRUE}, the appropriate
+ constraint matrices will be used.
+
+ }
+\item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. If given, the values must be from the
+ set \eqn{\{1,2,\ldots,5\}}{1,2,...,5}.
+ The default is the first one only, meaning \eqn{\phi}{phi}
+ is a single parameter even when there are explanatory variables.
+ Set \code{zero=NULL} to model all linear/additive predictors as
+ functions of the explanatory variables.
+
+}
+}
+\details{
+ The probability function can be loosely written as
+ \deqn{f(y) = \phi \, N(\mu_1,\sigma_1^2) + (1-\phi) \, N(\mu_2, \sigma_2^2)}{%
+ f(y) = phi * N(mu1, sd1^2) + (1-phi) * N(mu2, sd2^2)}
+ where \eqn{\phi}{phi} is the probability an observation belongs
+ to the first group.
+ The parameters \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} are the means, and
+ \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2} are the standard deviations.
+ The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
+ The mean of \eqn{Y} is
+ \eqn{\phi \mu_1 + (1-\phi) \mu_2}{phi*mu1 + (1-phi)*mu2}
+ and this is returned as the fitted values.
+ By default, the five linear/additive predictors are
+ \eqn{(logit(\phi), \mu_1, \log(\sigma_1), \mu_2, \log(\sigma_2))^T}{(logit(phi),
+ mu1, log(sd1), mu2, log(sd2))^T}.
+ If \code{esd=TRUE} then \eqn{\sigma_1 = \sigma_2}{sd1=sd2} is enforced.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+
+Everitt, B. S. and Hand, D. J. (1981)
+\emph{Finite Mixture Distributions}.
+London: Chapman & Hall.
+
+}
+\section{Warning }{
+ Numerical problems can occur.
+ Half-stepping is not uncommon.
+ If failure to converge occurs, try obtaining better initial values,
+ e.g., by using \code{iphi} and \code{qmu} etc.
+
+ This function uses a quasi-Newton update for the working weight matrices
+ (BFGS variant). It builds up approximations to the weight matrices,
+ and currently the code is not fully tested.
+ In particular, results based on the weight matrices (e.g., from
+ \code{vcov} and \code{summary}) may be quite incorrect, especially when
+ the argument \code{weights} is used to input prior weights.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ Fitting this model successfully to data can be difficult due to
+ numerical problems and ill-conditioned data. It pays to fit the
+ model several times with different initial values, and check that
+ the best fit looks reasonable. Plotting the results is recommended.
+ This function works better as \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2}
+ become more different.
+
+ Convergence is often slow, especially when the two component
+ distributions are not well separated. The control argument \code{maxit}
+ should be set to a higher value, e.g., 200; use \code{trace=TRUE}
+ to monitor convergence. If appropriate, setting
+ \code{esd=TRUE} often makes the optimization problem
+ much easier.
+
+}
+
+\seealso{
+ \code{\link{normal1}},
+ \code{\link[stats:Normal]{Normal}},
+ \code{\link{mix2poisson}}.
+}
+
+\examples{
+n = 1000
+mu1 = 99 # Mean IQ of geography professors
+mu2 = 150 # Mean IQ of mathematics professors
+sd1 = sd2 = 16
+phi = 0.3
+y = ifelse(runif(n) < phi, rnorm(n, mu1, sd1), rnorm(n, mu2, sd2))
+
+# Good idea to have trace=TRUE:
+fit = vglm(y ~ 1, mix2normal1(esd=TRUE), maxit=200)
+coef(fit, matrix=TRUE) # the estimates
+c(phi, mu1, sd1, mu2, sd2) # the truth
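+
+# A sketch: the fitted mean phi*mu1 + (1-phi)*mu2 should be close to mean(y)
+c(fitted(fit)[1], mean(y)) # Should be close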
+
+\dontrun{# Plot the results
+xx = seq(min(y), max(y), len=200)
+plot(xx, (1-phi)*dnorm(xx, mu2, sd2), type="l", xlab="IQ",
+ main="Red=estimate, blue=truth", col="blue")
+phi.est = logit(coef(fit)[1], inverse=TRUE)
+sd.est = exp(coef(fit)[3])
+lines(xx, phi*dnorm(xx, mu1, sd1), col="blue")
+lines(xx, phi.est * dnorm(xx, Coef(fit)[2], sd.est), col="red")
+lines(xx, (1-phi.est) * dnorm(xx, Coef(fit)[4], sd.est), col="red")
+abline(v=Coef(fit)[c(2,4)], lty=2, col="red")
+abline(v=c(mu1, mu2), lty=2, col="blue")
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
new file mode 100644
index 0000000..5c74ebf
--- /dev/null
+++ b/man/mix2poisson.Rd
@@ -0,0 +1,138 @@
+\name{mix2poisson}
+\alias{mix2poisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Mixture of Two Poisson Distributions }
+\description{
+ Estimates the three parameters of a mixture of two Poisson distributions
+ by maximum likelihood estimation.
+
+}
+\usage{
+mix2poisson(lphi = "logit", llambda = "loge",
+ iphi = 0.5, il1 = NULL, il2 = NULL,
+ qmu = c(0.2, 0.8), zero = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lphi}{
+ Link function for the parameter \eqn{\phi}{phi}.
+ See below for more details.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{llambda}{
+ Link function applied to each \eqn{\lambda}{lambda} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{iphi}{
+ Initial value for \eqn{\phi}{phi}, whose value must lie
+ between 0 and 1.
+
+ }
+ \item{il1, il2}{
+ Optional initial value for \eqn{\lambda_1}{lambda1} and
+ \eqn{\lambda_2}{lambda2}. These values must be positive.
+ The default is to compute initial values internally using
+ the argument \code{qmu}.
+
+ }
+ \item{qmu}{
+ Vector with two values giving the probabilities relating to the sample
+ quantiles for obtaining initial values for \eqn{\lambda_1}{lambda1}
+ and \eqn{\lambda_2}{lambda2}.
+ The two values are fed in as the \code{probs} argument into
+ \code{\link[stats]{quantile}}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. If given, the values must be from the
+ set \{1,2,3\}. The default is the first one only, meaning \eqn{\phi}{phi}
+ is a single parameter even when there are explanatory variables.
+ Set \code{zero=NULL} to model all linear/additive predictors as
+ functions of the explanatory variables.
+
+ }
+}
+\details{
+ The probability function can be loosely written as
+ \deqn{P(Y=y) = \phi \, Poisson(\lambda_1) + (1-\phi) \, Poisson(\lambda_2)}{%
+ P(Y=y) = phi * Poisson(lambda1) + (1-phi) * Poisson(lambda2)}
+ where \eqn{\phi}{phi} is the probability an observation belongs
+ to the first group, and \eqn{y=0,1,2,\ldots}{y=0,1,2,...}.
+ The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
+ The mean of \eqn{Y} is
+ \eqn{\phi \lambda_1 + (1-\phi) \lambda_2}{phi*lambda1 + (1-phi)*lambda2}
+ and this is returned as the fitted values.
+ By default, the three linear/additive predictors are
+ \eqn{(logit(\phi), \log(\lambda_1), \log(\lambda_2))^T}{(logit(phi),
+ log(lambda1), log(lambda2))^T}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+% \references{ ~put references to the literature/web site here ~ }
+\section{Warning }{
+ Numerical problems can occur.
+ Half-stepping is not uncommon.
+ If failure to converge occurs, try obtaining better initial values,
+ e.g., by using \code{iphi} and \code{qmu} etc.
+
+ This function uses a quasi-Newton update for the working weight matrices
+ (BFGS variant). It builds up approximations to the weight matrices,
+ and currently the code is not fully tested.
+ In particular, results based on the weight matrices (e.g., from
+ \code{vcov} and \code{summary}) may be quite incorrect, especially when
+ the argument \code{weights} is used to input prior weights.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ Fitting this model successfully to data can be difficult due to
+ numerical problems and ill-conditioned data. It pays to fit the model
+ several times with different initial values, and check that the best fit
+ looks reasonable. Plotting the results is recommended. This function
+ works better as \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2}
+ become more different.
+
+ Convergence is often slow, especially when the two component
+ distributions are not well separated. The control argument \code{maxit}
+ should be set to a higher value, e.g., 200; use \code{trace=TRUE}
+ to monitor convergence.
+
+}
+
+\seealso{
+ \code{\link[stats:Poisson]{rpois}},
+ \code{\link{mix2normal1}}.
+}
+
+\examples{
+n = 3000
+mu1 = exp(2.4) # also known as lambda1
+mu2 = exp(3.1)
+phi = 0.3
+y = ifelse(runif(n) < phi, rpois(n, mu1), rpois(n, mu2))
+
+fit = vglm(y ~ 1, mix2poisson, maxit=200) # good idea to have trace=TRUE
+coef(fit, matrix=TRUE)
+Coef(fit) # the estimates
+c(phi, mu1, mu2) # the truth
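+
+# A sketch: the fitted mean phi*lambda1 + (1-phi)*lambda2 should be
+# near the sample mean
+c(fitted(fit)[1], mean(y)) # Should be close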
+
+\dontrun{# Plot the results
+ty = table(y)
+plot(names(ty), ty, type="h", main="Red=estimate, blue=truth")
+abline(v=Coef(fit)[-1], lty=2, col="red")
+abline(v=c(mu1, mu2), lty=2, col="blue")
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/model.framevlm.Rd b/man/model.framevlm.Rd
new file mode 100644
index 0000000..f4a366b
--- /dev/null
+++ b/man/model.framevlm.Rd
@@ -0,0 +1,70 @@
+\name{model.framevlm}
+\alias{model.framevlm}
+\title{Construct the Model Frame of a VLM Object}
+\usage{
+model.framevlm(object, \dots)
+}
+\arguments{
+ \item{object}{a model object from the \pkg{VGAM} \R package
+ that inherits from a \emph{vector linear model} (VLM),
+ e.g., a model of class \code{"vglm"}.}
+
+ \item{\dots}{further arguments such as \code{data}, \code{na.action},
+ \code{subset}.
+ See \code{\link[stats]{model.frame}} for more information on these.
+ }
+}
+\description{
+ This function returns a \code{\link{data.frame}} with the variables.
+ It is applied to an object which inherits from class \code{"vlm"} (e.g.,
+ a fitted model of class \code{"vglm"}).
+}
+\details{Since \code{object} is
+ an object which inherits from class \code{"vlm"} (e.g.,
+ a fitted model of class \code{"vglm"}),
+ the method will either return the saved model frame
+ used when fitting the model (if any, selected by argument
+ \code{model = TRUE}) or pass the call used when fitting on to the
+ default method.
+
+ This code implements \emph{smart prediction}
+ (see \code{\link{smartpred}}).
+}
+\value{
+ A \code{\link{data.frame}} containing the variables used in
+ the \code{object} plus those specified in \code{\dots}.
+}
+\seealso{
+ \code{\link[stats]{model.frame}},
+ \code{\link{model.matrixvlm}},
+ \code{\link{predict.vglm}},
+ \code{\link{smartpred}}.
+}
+\references{
+ Chambers, J. M. (1992)
+ \emph{Data for models.}
+ Chapter 3 of \emph{Statistical Models in S}
+ eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.
+}
+\examples{
+# Illustrates smart prediction
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
+ fam=multinomial,
+ data=pneumo, trace=TRUE, x=FALSE)
+class(fit)
+
+check1 = model.frame(fit)[1:3,]
+check1
+check2 = model.frame(fit, data=pneumo[1:3,])
+check2
+all.equal(unlist(check1), unlist(check2)) # Should be TRUE
+
+q0 = predict(fit)[1:3,]
+q1 = predict(fit, newdata=pneumo)[1:3,]
+q2 = predict(fit, newdata=pneumo[1:3,])
+all.equal(q0, q1) # Should be TRUE
+all.equal(q1, q2) # Should be TRUE
+}
+\keyword{models}
diff --git a/man/model.matrixvlm.Rd b/man/model.matrixvlm.Rd
new file mode 100644
index 0000000..ed4c748
--- /dev/null
+++ b/man/model.matrixvlm.Rd
@@ -0,0 +1,81 @@
+\name{model.matrixvlm}
+\alias{model.matrixvlm}
+\title{Construct the Design Matrix of a VLM Object}
+\usage{
+model.matrixvlm(object, type=c("vlm","lm"), \dots)
+}
+\arguments{
+ \item{object}{an object of a class that inherits from the
+ \emph{vector linear model} (VLM).
+ }
+ \item{type}{Type of design matrix returned. The first is the default.
+ }
+ \item{\dots}{further arguments passed to or from other methods.
+ These include \code{data} (which
+ is a data frame created with \code{\link{model.framevlm}}),
+ \code{contrasts.arg}, and \code{xlev}.
+ See \code{\link[stats]{model.matrix}} for more information.
+ }
+}
+
+\description{
+ Creates a design matrix. Two types can be
+ returned: a large one (class \code{"vlm"} or one that inherits
+ from this such as \code{"vglm"}) or a small one
+ (as would be returned if the object were of class \code{"lm"}).
+}
+\details{
+ This function creates a design matrix from \code{object}.
+ This can be a small LM object or a big VLM object (default).
+ The latter is constructed from the former and the constraint
+ matrices.
+
+ This code implements \emph{smart prediction}
+ (see \code{\link{smartpred}}).
+}
+\value{
+ The design matrix for a regression model with the specified formula
+ and data.
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+ Chambers, J. M. (1992)
+ \emph{Data for models.}
+ Chapter 3 of \emph{Statistical Models in S}
+ eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.
+
+}
+\seealso{
+ \code{\link[stats]{model.matrix}},
+ \code{\link{model.framevlm}},
+ \code{\link{predict.vglm}},
+ \code{\link{smartpred}}.
+}
+\examples{
+# Illustrates smart prediction
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
+ fam=multinomial,
+ data=pneumo, trace=TRUE, x=FALSE)
+class(fit)
+fit@x
+model.matrix(fit)
+
+Check1 = model.matrix(fit, type="lm")[1:3,]
+Check1
+Check2 = model.matrix(fit, data=pneumo[1:3,], type="lm")
+Check2
+all.equal(c(Check1), c(Check2)) # Should be TRUE
+
+q0 = predict(fit)[1:3,]
+q1 = predict(fit, newdata=pneumo)[1:3,]
+q2 = predict(fit, newdata=pneumo[1:3,])
+all.equal(q0, q1) # Should be TRUE
+all.equal(q1, q2) # Should be TRUE
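+
+# A sketch: the big VLM matrix stacks M=2 rows per observation here
+# (three response levels, so M = 2 linear predictors)
+dim(model.matrix(fit, type="lm")) # n rows
+dim(model.matrix(fit, type="vlm")) # M*n rows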
+}
+\keyword{models}
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
new file mode 100644
index 0000000..fdca22e
--- /dev/null
+++ b/man/multinomial.Rd
@@ -0,0 +1,230 @@
+\name{multinomial}
+\alias{multinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Multinomial Logit Model }
+\description{
+ Fits a multinomial logit model to an unordered factor response.
+}
+\usage{
+multinomial(zero = NULL, parallel = FALSE, nointercept = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, the response \eqn{Y} is assumed to be a factor with
+ unordered values \eqn{1,2,\dots,M+1}, so that \eqn{M} is the number
+ of linear/additive predictors \eqn{\eta_j}{eta_j}.
+
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+ The default value means none are modelled as intercept-only terms.
+
+ }
+ \item{parallel}{
+ A logical, or formula specifying which terms have
+ equal/unequal coefficients.
+
+ }
+ \item{nointercept}{
+ An integer-valued vector specifying which
+ linear/additive predictors have no intercepts.
+ The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+
+ }
+}
+\details{
+ The model can be written
+ \deqn{\eta_j = \log(P[Y=j]/ P[Y=M+1])}{%
+ eta_j = log(P[Y=j]/ P[Y=M+1])}
+ where \eqn{\eta_j}{eta_j} is the \eqn{j}th linear/additive predictor.
+ Here, \eqn{j=1,\ldots,M} and \eqn{\eta_{M+1}}{eta_{M+1}} is 0 by
+ definition. That is, the last level of the factor, or last column of
+ the response matrix, is taken as the reference level or baseline---this
+ is for identifiability of the parameters.
+
+ In almost all the literature, the constraint matrices associated
+ with this family of models are known. For example, setting
+ \code{parallel=TRUE} will make all constraint matrices (except for
+ the intercept) equal to a vector of \eqn{M} 1's. If the constraint
+ matrices are unknown and to be estimated, then this can be achieved
+ by fitting the model as a reduced-rank vector generalized linear model
+ (RR-VGLM; see \code{\link{rrvglm}}). In particular, a multinomial logit
+ model with unknown constraint matrices is known as a stereotype model
+ (Anderson, 1984), and can be fitted with \code{\link{rrvglm}}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+McCullagh, P. and Nelder, J. A. (1989)
+\emph{Generalized Linear Models}, 2nd ed.
+London: Chapman & Hall.
+
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+Simonoff, J. S. (2003)
+\emph{Analyzing Categorical Data},
+New York: Springer-Verlag.
+
+Anderson, J. A. (1984)
+Regression and ordered categorical variables.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{46}, 1--30.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ The response should be either a matrix of counts (with row sums that are
+ all positive), or a factor. In both cases, the \code{y} slot returned
+ by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}} is the
+ matrix of counts.
+
+ The multinomial logit model is more appropriate for a nominal
+ (unordered) factor response. For an ordinal (ordered) factor
+ response, models such as those based on cumulative probabilities
+ (see \code{\link{cumulative}}) are more suited.
+
+ \code{multinomial} is prone to numerical difficulties if the groups
+ are separable and/or the fitted probabilities are close to 0 or 1.
+ The fitted values returned are estimates of the probabilities
+ \eqn{P[Y=j]} for \eqn{j=1,\ldots,M+1}.
+
+ Here is an example of the usage of the \code{parallel} argument.
+ If there are covariates \code{x1}, \code{x2} and \code{x3}, then
+ \code{parallel = TRUE ~ x1 + x2 -1} and
+ \code{parallel = FALSE ~ x3} are equivalent. This would constrain
+ the regression coefficients for \code{x1} and \code{x2} to be
+ equal; those of the intercepts and \code{x3} would be different.
+
+ In Example 4 below, a conditional logit model is fitted to an artificial
+ data set that explores how cost and travel time affect people's
+ decision about how to travel to work. Walking is the baseline group.
+ The variable \code{Cost.car} is the difference between the cost of
+ travel to work by car and walking, etc. The variable \code{Durn.car}
+ is the difference between the travel duration/time to work by car and
+ walking, etc. For other details about the \code{xij} argument see
+ \code{\link{vglm.control}} and \code{\link{fill}}.
+
+}
+
+% In the future, this family function may be renamed to
+% ``\code{mum}'' (for \bold{mu}ltinomial logit \bold{m}odel).
+% Please let me know if you strongly agree or disagree about this.
+
+\section{Warning }{
+ The arguments \code{zero} and \code{nointercept} can be inputted
+ with values that fail. For example, \code{multinomial(zero=2,
+ nointercept=1:3)} means the second linear/additive predictor is
+ identically zero, which will cause a failure.
+
+ Be careful about the use of other potentially contradictory constraints,
+ e.g., \code{multinomial(zero=2, parallel = TRUE ~ x3)}. If in doubt,
+ apply \code{constraints()} to the fitted object to check.
+
+ No check is made to verify that the response is nominal.
+
+}
+\seealso{
+ \code{\link{acat}},
+ \code{\link{cumulative}},
+ \code{\link{cratio}},
+ \code{\link{sratio}},
+ \code{\link{dirichlet}},
+ \code{\link{dirmultinomial}},
+ \code{\link{rrvglm}},
+ \code{\link[stats:Multinom]{Multinomial}},
+ \code{\link[base]{iris}}.
+}
+% \code{\link[base:Multinom]{rmultinom}}
+
+\examples{
+# Example 1: fit a multinomial logit model to Edgar Anderson's iris data
+data(iris)
+\dontrun{
+fit = vglm(Species ~ ., multinomial, iris)
+coef(fit, matrix=TRUE)
+}
+
+
+# Example 2a: a simple example
+y = t(rmultinom(10, size = 20, prob=c(0.1,0.2,0.8))) # Counts
+fit = vglm(y ~ 1, multinomial)
+fitted(fit)[1:4,] # Proportions
+fit@prior.weights # Not recommended for extraction of prior weights
+weights(fit, type="prior", matrix=FALSE) # The better method
+fit@y # Sample proportions
+constraints(fit) # Constraint matrices
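+
+# A sketch: recover the fitted probabilities from the linear predictors,
+# using eta_j = log(P[Y=j]/P[Y=M+1]) with the last level as baseline
+eta = predict(fit)[1,] # eta_1 and eta_2; eta_3 = 0 by definition
+exp(c(eta, 0)) / sum(exp(c(eta, 0))) # Should match fitted(fit)[1,]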
+
+# Example 2b: Different input to Example 2a but same result
+w = apply(y, 1, sum) # Prior weights
+yprop = y / w # Sample proportions
+fitprop = vglm(yprop ~ 1, multinomial, weights=w)
+fitted(fitprop)[1:4,] # Proportions
+weights(fitprop, type="prior", matrix=FALSE)
+fitprop@y # Same as the input
+
+
+# Example 3: Fit a rank-1 stereotype model
+data(car.all)
+fit = rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank=1)
+coef(fit) # Contains the C matrix
+constraints(fit)$HP # The A matrix
+coef(fit, matrix=TRUE) # The B matrix
+Coef(fit)@C # The C matrix
+ccoef(fit) # Better to get the C matrix this way
+Coef(fit)@A # The A matrix
+svd(coef(fit, matrix=TRUE)[-1,])$d # This has rank 1; = C %*% t(A)
+
+
+# Example 4: The use of the xij argument (conditional logit model)
+set.seed(111)
+n = 100 # Number of people who travel to work
+M = 3 # There are M+1 models of transport
+ymat = matrix(0, n, M+1)
+ymat[cbind(1:n, sample(x=M+1, size=n, replace=TRUE))] = 1
+dimnames(ymat) = list(NULL, c("bus","train","car","walk"))
+transport = data.frame(cost.bus=runif(n), cost.train=runif(n),
+ cost.car=runif(n), cost.walk=runif(n),
+ durn.bus=runif(n), durn.train=runif(n),
+ durn.car=runif(n), durn.walk=runif(n))
+transport = round(transport, dig=2) # For convenience
+transport = transform(transport,
+ Cost.bus = cost.bus - cost.walk,
+ Cost.car = cost.car - cost.walk,
+ Cost.train = cost.train - cost.walk,
+ Durn.bus = durn.bus - durn.walk,
+ Durn.car = durn.car - durn.walk,
+ Durn.train = durn.train - durn.walk)
+fit = vglm(ymat ~ Cost.bus + Cost.train + Cost.car +
+ Durn.bus + Durn.train + Durn.car,
+ fam = multinomial,
+ xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
+ Durn ~ Durn.bus + Durn.train + Durn.car),
+ data=transport)
+model.matrix(fit, type="lm")[1:7,] # LM model matrix
+model.matrix(fit, type="vlm")[1:7,] # Big VLM model matrix
+coef(fit)
+coef(fit, matrix=TRUE)
+coef(fit, matrix=TRUE, compress=FALSE)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
new file mode 100644
index 0000000..53c342b
--- /dev/null
+++ b/man/nakagami.Rd
@@ -0,0 +1,102 @@
+\name{nakagami}
+\alias{nakagami}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Nakagami Distribution Family Function }
+\description{
+ Estimation of the two parameters of the
+ Nakagami distribution by maximum likelihood estimation.
+
+}
+\usage{
+nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Parameter link function applied to the
+ \emph{shape} and \emph{scale} parameters.
+ Log links ensure they are positive.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ishape, iscale}{
+ Optional initial values for the shape and scale parameters.
+ For \code{ishape}, a \code{NULL} value means it is obtained in the
+ \code{initialize} slot based on the value of \code{iscale}.
+ For \code{iscale}, assigning a \code{NULL} means a value is obtained in the
+ \code{initialize} slot; however, setting another numerical
+ value is recommended if convergence fails or is too slow.
+
+ }
+}
+\details{
+ The Nakagami distribution, which is useful for modelling
+ wireless systems such as radio links, can be written
+ \deqn{f(y) = 2 (shape/scale)^{shape} y^{2 \times shape-1}
+ \exp(-shape \times y^2/scale) / \Gamma(shape)}{%
+ 2 * (shape/scale)^shape *
+ y^(2*shape-1) * exp(-shape*y^2/scale) /
+ gamma(shape)}
+ for \eqn{y > 0}, \eqn{shape > 0}, \eqn{scale > 0}.
+ The mean of \eqn{Y} is
+ \eqn{\sqrt{scale/shape} \times \Gamma(shape+0.5) /
+ \Gamma(shape)}{sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)} and
+ these are returned as the fitted values.
+ By default, the linear/additive predictors are
+ \eqn{\eta_1=\log(shape)}{eta1=log(shape)} and
+ \eqn{\eta_2=\log(scale)}{eta2=log(scale)}.
+ Fisher scoring is implemented.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Nakagami, M. (1960).
+ The \emph{m}-distribution: a general formula of
+ intensity distribution of rapid fading,
+ pp.3--36 in:
+ \emph{Statistical Methods in Radio Wave Propagation}.
+ W. C. Hoffman, Ed., New York: Pergamon.
+
+}
+\author{ T. W. Yee }
+\note{
+ The Nakagami distribution is also known as the
+ Nakagami-\emph{m} distribution, where \eqn{m=shape} here.
+ Special cases: \eqn{m=0.5} is a one-sided Gaussian
+ distribution and \eqn{m=1} is a Rayleigh distribution.
+ The second moment is \eqn{E(Y^2)=scale}{E(Y^2) = scale}.
+
+ If \eqn{Y} has a Nakagami distribution with parameters \emph{shape}
+ and \emph{scale} then \eqn{Y^2} has a gamma distribution with shape
+ parameter \emph{shape} and scale parameter \emph{scale/shape}.
+
+}
+
+\seealso{
+ \code{\link{rnaka}},
+ \code{\link{gamma2}},
+ \code{\link{rayleigh}}.
+}
+\examples{
+n = 1000; shape = exp(0); Scale = exp(1)
+y = sqrt(rgamma(n, shape=shape, scale=Scale/shape))
+fit = vglm(y ~ 1, nakagami, trace=TRUE, crit="c")
+y = rnaka(n, shape=shape, scale=Scale)
+fit = vglm(y ~ 1, nakagami(iscale=3), trace=TRUE)
+fitted(fit)[1:5]
+mean(y)
+coef(fit, matrix=TRUE)
+(Cfit = Coef(fit))
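+
+# A sketch: E(Y^2) should be close to the scale parameter, since Y^2
+# is gamma distributed with mean scale; Cfit[2] is the scale estimate
+c(mean(y^2), Cfit[2]) # Should be close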
+\dontrun{
+hist(sy <- sort(y), prob=TRUE, main="", xlab="y", ylim=c(0,0.6))
+lines(sy, dnaka(sy, shape=Cfit[1], scale=Cfit[2]), col="red")
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
new file mode 100644
index 0000000..6e60942
--- /dev/null
+++ b/man/nakagamiUC.Rd
@@ -0,0 +1,86 @@
+\name{Nakagami}
+\alias{Nakagami}
+\alias{dnaka}
+\alias{pnaka}
+\alias{qnaka}
+\alias{rnaka}
+\title{Nakagami Distribution }
+\description{
+ Density, cumulative distribution function, quantile function and
+ random generation for
+ the Nakagami distribution.
+
+}
+\usage{
+dnaka(x, shape, scale=1)
+pnaka(q, shape, scale=1)
+qnaka(p, shape, scale=1, ...)
+rnaka(n, shape, scale=1, Smallno=1.0e-6)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{shape, scale }{
+ arguments for the parameters of the distribution.
+ See \code{\link{nakagami}} for more details.
+ For \code{rnaka}, arguments \code{shape} and \code{scale} must be of
+ length 1.
+
+ }
+ \item{Smallno}{
+ Numeric, a small value used by the rejection method for determining
+ the upper limit of the distribution.
+ That is, \code{pnaka(U) > 1-Smallno} where \code{U} is the upper limit.
+
+ }
+ \item{\ldots}{
+ Arguments that can be passed into \code{\link[stats]{uniroot}}.
+
+ }
+}
+\value{
+ \code{dnaka} gives the density,
+ \code{pnaka} gives the cumulative distribution function,
+ \code{qnaka} gives the quantile function, and
+ \code{rnaka} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{nakagami}} for more details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{nakagami}}.
+
+}
+\examples{
+\dontrun{
+x = seq(0, 3.2, len=200)
+plot(x, dgamma(x, shape=1), type="n", col="black", ylab="",
+ ylim=c(0,1.5), main="dnaka(x, shape)")
+lines(x, dnaka(x, shape=1), col="red")
+lines(x, dnaka(x, shape=2), col="blue")
+lines(x, dnaka(x, shape=3), col="green")
+legend(2, 1.0, col=c("red","blue","green"), lty=rep(1, len=3),
+ legend=paste("shape =", c(1, 2, 3)))
+
+plot(x, pnorm(x), type="n", col="black", ylab="",
+ ylim=0:1, main="pnaka(x, shape)")
+lines(x, pnaka(x, shape=1), col="red")
+lines(x, pnaka(x, shape=2), col="blue")
+lines(x, pnaka(x, shape=3), col="green")
+legend(2, 0.6, col=c("red","blue","green"), lty=rep(1, len=3),
+ legend=paste("shape =", c(1, 2, 3)))
+}
+
+probs = seq(0.1, 0.9, by=0.1)
+pnaka(qnaka(p=probs, shape=2), shape=2) - probs # Should be all 0
+}
+\keyword{distribution}
+
+
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
new file mode 100644
index 0000000..a48c27e
--- /dev/null
+++ b/man/nbolf.Rd
@@ -0,0 +1,154 @@
+\name{nbolf}
+\alias{nbolf}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Negative Binomial-Ordinal Link Function }
+\description{
+ Computes the negative binomial-ordinal transformation, including its
+ inverse and the first two derivatives.
+
+}
+\usage{
+nbolf(theta, earg = stop("'earg' must be given"), inverse = FALSE,
+ deriv = 0, short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Extra argument for passing in additional information.
+ This must be list with components \code{cutpoint}
+ and \code{k}. Here, \code{k} is the \eqn{k} parameter associated
+ with the negative binomial distribution; see
+ \code{\link{negbinomial}}.
+ If \code{nbolf()} is used as the link function in
+ \code{\link{cumulative}} then one should choose
+ \code{reverse=TRUE, parallel=TRUE, intercept.apply=TRUE}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The negative binomial-ordinal link function (NBOLF) can be applied to
+ a parameter lying in the unit interval.
+ Its purpose is to link cumulative probabilities associated with
+ an ordinal response coming from an underlying negative binomial
+ distribution.
+
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+ See \code{\link{Links}} for general information about \pkg{VGAM}
+ link functions.
+
+}
+\value{
+ See Yee (2006) for details.
+
+}
+\references{
+ Yee, T. W. (2006)
+ \emph{Link functions for ordinal count data},
+ (submitted for publication).
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical values of \code{theta} too close to 0 or 1 or out of range
+ result in large positive or negative values, or maybe 0 depending on
+ the arguments.
+ Although measures have been taken to handle cases where
+ \code{theta} is too close to 1 or 0,
+ numerical instabilities may still arise.
+
+ In terms of the threshold approach with cumulative probabilities for
+ an ordinal response this link function corresponds to the negative
+ binomial distribution (see \code{\link{negbinomial}}) that has been
+ recorded as an ordinal response using known cutpoints.
+
+}
+\section{Warning }{
+ Prediction may not work on \code{\link{vglm}} or
+ \code{\link{vgam}} etc. objects if this link function is used.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{negbinomial}},
+ \code{\link{polf}},
+ \code{\link{golf}},
+ \code{\link{cumulative}}.
+
+}
+\examples{
+nbolf("prob", short=FALSE)
+nbolf("prob", tag=TRUE)
+
+p = seq(0.02, 0.98, by=0.01)
+earg = list(cutpoint=2, k=1)
+y = nbolf(p, earg=earg)
+y. = nbolf(p, earg=earg, deriv=1)
+max(abs(nbolf(y, earg=earg, inv=TRUE) - p)) # Should be 0
+
+\dontrun{
+par(mfrow=c(2,1), las=1)
+plot(p, y, type="l", col="blue", main="nbolf()")
+abline(h=0, v=0.5, col="red", lty="dashed")
+
+plot(p, y., type="l", col="blue",
+ main="(Reciprocal of) first NBOLF derivative")
+}
+
+# Another example
+nn = 1000
+x2 = sort(runif(nn))
+x3 = runif(nn)
+mymu = exp( 3 + 1 * x2 - 2 * x3)
+k = 4
+y1 = rnbinom(nn, mu=mymu, size=k)
+cutpoints = c(-Inf, 10, 20, Inf)
+cuty = Cut(y1, breaks=cutpoints)
+\dontrun{
+plot(x2, x3, col=cuty, pch=as.character(cuty))
+}
+table(cuty) / sum(table(cuty))
+fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="nbolf",
+ reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
+ mv=TRUE, earg=list(cutpoint=cutpoints[2:3], k=k)),
+ trace=TRUE)
+fit@y[1:5,]
+fitted(fit)[1:5,]
+predict(fit)[1:5,]
+coef(fit)
+coef(fit, matrix=TRUE)
+constraints(fit)
+fit@misc$earg
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
new file mode 100644
index 0000000..98a5533
--- /dev/null
+++ b/man/negbinomial.Rd
@@ -0,0 +1,214 @@
+\name{negbinomial}
+\alias{negbinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Negative Binomial Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the two parameters of a negative binomial
+ distribution.
+}
+\usage{
+negbinomial(lmu = "loge", lk = "loge",
+ ik = NULL, cutoff = 0.995, Maxiter=5000,
+ deviance.arg = FALSE, method.init=1, zero = -2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, lk}{
+ Link functions applied to the \eqn{\mu}{mu} and \eqn{k} parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ik}{
+ Optional initial values for \eqn{k}.
+ If failure to converge occurs try different values (and/or use
+ \code{method.init}).
+ For an \eqn{S}-column response, \code{ik} can be of length \eqn{S}.
+ A value \code{NULL} means an initial value for each response is
+ computed internally using a range of values.
+ This argument is ignored if used within \code{\link{cqo}}; see
+ the \code{iKvector} argument of \code{\link{qrrvglm.control}} instead.
+
+ }
+ \item{cutoff}{
+ A numeric which is close to 1 but never exactly 1.
+ Used to specify how many terms of the infinite series
+ for computing the second diagonal element of the expected information
+ matrix are actually used.
+ The probabilities are summed until their total reaches this value or more
+ (but no more than \code{Maxiter} terms are allowed).
+ It is like specifying \code{p} in an imaginary function
+ \code{qnegbin(p)}.
+
+ }
+ \item{Maxiter}{
+ Integer. The maximum number of terms allowed when computing
+ the second diagonal element of the expected information matrix.
+ In theory, the value involves an infinite series.
+ If this argument is too small then the value may be inaccurate.
+
+ }
+ \item{deviance.arg}{
+ Logical. If \code{TRUE}, the deviance function
+ is attached to the object. Under ordinary circumstances, it should
+ be left alone because it really assumes the index parameter is at
+ the maximum likelihood estimate. Consequently, one cannot use that
+ criterion to minimize within the IRLS algorithm.
+ It should be set \code{TRUE} only when used with \code{\link{cqo}}
+ under the fast algorithm.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1}, \code{2} or \code{3} which
+ specifies the initialization method for the \eqn{\mu}{mu} parameter.
+ If failure to converge occurs
+ try another value (and/or specify a value for \code{ik}).
+
+ }
+ \item{zero}{
+ Integer-valued vector, usually assigned \eqn{-2} or \eqn{2} if used
+ at all. Specifies which of the two linear/additive predictors are
+ modelled as an intercept only. By default, the \eqn{k} parameter
+ (after \code{lk} is applied) is modelled as a single unknown
+ number that is estimated. It can be modelled as a function of the
+ explanatory variables by setting \code{zero=NULL}. A negative value
+ means that the value is recycled, so setting \eqn{-2} means all \eqn{k}
+ are intercept-only.
+
+ }
+}
+\details{
+ The negative binomial distribution can be motivated in several ways,
+ e.g., as a Poisson distribution with a mean that is gamma
+ distributed.
+ There are several common parametrizations of the negative binomial
+ distribution.
+ The one used here uses the mean \eqn{\mu}{mu} and an \emph{index} parameter
+ \eqn{k}, both of which are positive.
+ Specifically, the density of a random variable \eqn{Y} is
+ \deqn{f(y;\mu,k) ~=~ {y + k - 1 \choose y} \,
+ \left( \frac{\mu}{\mu+k} \right)^y\,
+ \left( \frac{k}{k+\mu} \right)^k }{%
+ f(y;mu,k) = C_{y}^{y + k - 1}
+ [mu/(mu+k)]^y [k/(k+mu)]^k}
+ where \eqn{y=0,1,2,\ldots},
+ and \eqn{\mu > 0}{mu > 0} and \eqn{k > 0}.
+ Note that the dispersion parameter is
+ \eqn{1/k}, so that as \eqn{k} approaches infinity the negative
+ binomial distribution approaches a Poisson distribution.
+ The response has variance \eqn{Var(Y)=\mu+\mu^2/k}{Var(Y) = mu + mu^2/k}.
+ When fitted, the \code{fitted.values} slot of the object contains
+ the estimated value of the \eqn{\mu}{mu} parameter, i.e., of the mean
+ \eqn{E(Y)}.
+
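+ As an informal numerical check of the variance formula above, one may
+ simulate from \code{\link[stats]{rnbinom}}, whose \code{size} argument
+ plays the role of \eqn{k} here (a minimal sketch; the values of
+ \code{mu} and \code{k} below are arbitrary):
+ \preformatted{
+ set.seed(123)
+ mu <- 4; k <- 2
+ y <- rnbinom(n=100000, mu=mu, size=k)
+ c(empirical=var(y), theoretical=mu + mu^2/k)  # Should be close
+ }
+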
+ The negative binomial distribution can be coerced into the classical
+ GLM framework, with one of the parameters being of interest and the
+ other treated as a nuisance/scale parameter (this is implemented in
+ the \pkg{MASS} package). This \pkg{VGAM} family function
+ \code{negbinomial} treats
+ both parameters on the same footing, and estimates them both by full
+ maximum likelihood estimation.
+
+ The parameters \eqn{\mu}{mu} and \eqn{k} are independent (diagonal
+ expected information matrix), and the
+ confidence region for \eqn{k} is extremely skewed so that its standard
+ error is often of no practical use. The parameter \eqn{1/k} has been
+ used as a measure of aggregation.
+
+ This \pkg{VGAM} function handles \emph{multivariate} responses, so
+ that a matrix can be used as the response. The number of columns is the
+ number of species, say, and setting \code{zero=-2} means that \emph{all}
+ species have a \eqn{k} that is modelled as a (different) intercept only.
+
+}
+\section{Warning}{
+ The Poisson model corresponds to \eqn{k} equalling infinity.
+ If the data are Poisson or close to Poisson, numerical problems will
+ occur. Choosing a log-log link may possibly help in such cases;
+ otherwise use \code{\link{poissonff}}.
+
+ This function is fragile; maximum likelihood estimation of the
+ index parameter is fraught with difficulties (see Lawless, 1987).
+ In general, the \code{\link{quasipoissonff}} family function is more
+ robust than this one.
+ Assigning values to the \code{ik} argument may lead to a local solution,
+ and smaller values are preferred over larger ones when using this
+ argument.
+
+ Yet to do: write a family function which uses the method of moments
+ estimator for \eqn{k}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Lawless, J. F. (1987)
+Negative binomial and mixed Poisson regression.
+\emph{The Canadian Journal of Statistics}
+\bold{15}, 209--225.
+
+Bliss, C. and Fisher, R. A. (1953)
+Fitting the negative binomial distribution to biological data.
+\emph{Biometrics}
+\bold{9}, 174--200.
+
+}
+\author{ Thomas W. Yee }
+\note{
+% The \pkg{VGAM} package has a few other family functions for the
+% negative binomial distribution. Currently, none of these others work
+% very well.
+
+ This function can be used by the fast algorithm in
+ \code{\link{cqo}}; however, setting \code{EqualTolerances=TRUE} and
+ \code{ITolerances=FALSE} is recommended.
+
+% For \code{\link{cqo}} and \code{\link{cao}}, taking the square-root
+% of the response means (approximately) a \code{\link{poissonff}} family
+% may be used on the transformed data.
+
+% If the negative binomial family function \code{\link{negbinomial}}
+% is used for \code{cqo} then set \code{negbinomial(deviance=TRUE)}
+% is necessary. This means to minimize the deviance, which the fast
+% algorithm can handle.
+
+ In the first example below (Bliss and Fisher, 1953), from each of 6
+ McIntosh apple trees in an orchard that had been sprayed, 25 leaves
+ were randomly selected. On each of the leaves, the number of adult
+ female European red mites was counted.
+
+}
+
+\seealso{
+ \code{\link{quasipoissonff}},
+ \code{\link{poissonff}},
+ \code{\link{cao}},
+ \code{\link{cqo}},
+ \code{\link{posnegbinomial}},
+% \code{\link[MASS]{rnegbin}}.
+ \code{\link[stats:NegBinomial]{rnbinom}},
+ \code{\link{nbolf}}.
+}
+\examples{
+y = 0:7 # Example 1: apple tree data
+w = c(70, 38, 17, 10, 9, 3, 2, 1)
+fit = vglm(y ~ 1, negbinomial, weights=w)
+summary(fit)
+coef(fit, matrix=TRUE)
+Coef(fit)
+
+\dontrun{
+n = 500 # Example 2: simulated data
+x = runif(n)
+y1 = rnbinom(n, mu=exp(3+x), size=exp(1)) # k is size
+y2 = rnbinom(n, mu=exp(2-x), size=exp(0))
+fit = vglm(cbind(y1,y2) ~ x, negbinomial, tra=TRUE) # multivariate response
+coef(fit, matrix=TRUE)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
+%y1 = MASS:::rnegbin(n, mu=exp(3+x), theta=exp(1)) # k is theta
+%y2 = MASS:::rnegbin(n, mu=exp(2-x), theta=exp(0))
diff --git a/man/normal1.Rd b/man/normal1.Rd
new file mode 100644
index 0000000..964e172
--- /dev/null
+++ b/man/normal1.Rd
@@ -0,0 +1,84 @@
+\name{normal1}
+\alias{normal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Univariate normal distribution }
+\description{
+ Maximum likelihood estimation of the two parameters of a
+ univariate normal distribution.
+
+}
+\usage{
+normal1(lmean="identity", lsd="loge", zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmean}{
+ Link function applied to the mean.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lsd}{
+ Parameter link function applied to the standard deviation.
+ See \code{\link{Links}} for more choices.
+ Being a positive quantity, a log link is the default.
+
+ }
+ \item{zero}{
+ An integer vector, containing the value 1 or 2. If present, the mean
+ or standard deviation respectively is modelled as an intercept only.
+ Usually, setting \code{zero=2} will be used, if used at all.
+ The default value \code{NULL} means both linear/additive predictors
+ are modelled as functions of the explanatory variables.
+
+ }
+}
+\details{
+ By default, the mean is the first linear/additive predictor and
+ the log of the standard deviation is the second linear/additive
+ predictor.
+ The Fisher information matrix is diagonal.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+
+\author{ T. W. Yee }
+\note{
+ The response should be univariate. Multivariate responses are more
+ generally handled using \code{gaussianff}; however, that family
+ function only models the mean, and the variance-covariance matrices
+ are assumed known.
+
+}
+\seealso{
+ \code{gaussianff},
+ \code{\link{posnormal1}},
+ \code{\link{tobit}},
+ \code{\link{cnormal1}},
+ \code{\link{dcnormal1}},
+ \code{\link{studentt}}.
+}
+\examples{
+n = 200
+x = rnorm(n)
+y = rnorm(n, mean=1-3*x, sd=exp(1+0.2*x))
+fit = vglm(y ~ x, normal1)
+coef(fit, matrix=TRUE)
+
+# Generate a random sample from a N(mu=theta, sigma=theta)
+# distribution with theta=10. Then estimate theta.
+theta = 10
+y = rnorm(100, mean=theta, sd=theta)
+fit = vglm(y ~ 1, normal1(lsd="identity"),
+ constraints=list("(Intercept)"=rbind(1,1)))
+coef(fit, matrix=TRUE)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
new file mode 100644
index 0000000..5d9041e
--- /dev/null
+++ b/man/notdocumentedyet.Rd
@@ -0,0 +1,425 @@
+\name{notdocumentedyet}
+\alias{notdocumentedyet}
+\alias{A1A2A3}
+\alias{AAaa.nohw}
+%\alias{AIC.qrrgvlm}
+%\alias{AIC.rrvglm}
+%\alias{AIC.vgam}
+%\alias{AIC.vlm}
+% \alias{Build.terms}
+% \alias{Build.terms.vlm}
+\alias{Coef.cao}
+\alias{Coefficients}
+\alias{Cut}
+\alias{Deviance.categorical.data.vgam}
+\alias{InverseBrat}
+\alias{Max.Coef.qrrvglm}
+\alias{Max.qrrvglm}
+\alias{Opt.Coef.qrrvglm}
+\alias{Opt.qrrvglm}
+% \alias{R170.or.later}
+\alias{Tol.Coef.qrrvglm}
+\alias{Tol.Coef.uqo}
+\alias{Tol.qrrvglm}
+\alias{Tol.uqo}
+\alias{a2m}
+% \alias{acat.deriv}
+% \alias{add.arg}
+% \alias{add.constraints}
+% \alias{add.hookey}
+\alias{add1}
+\alias{add1.vgam}
+\alias{add1.vglm}
+% \alias{adjust.Dmat.expression}
+% \alias{alias.vgam}
+% \alias{alias.vglm}
+\alias{anova.vgam}
+\alias{anova.vglm}
+% \alias{as.vanova}
+% \alias{attrassign}
+% \alias{attrassigndefault}
+% \alias{attrassignlm}
+\alias{beta4}
+\alias{betaffqn}
+\alias{biplot}
+\alias{biplot.qrrvglm}
+% \alias{block.diag}
+\alias{borel.tanner}
+\alias{bs}
+% \alias{callcaof}
+% \alias{callcqof}
+% \alias{calldcaof}
+% \alias{calldcqof}
+% \alias{callduqof}
+% \alias{calluqof}
+% \alias{canonical.Blist}
+% \alias{cao.fit}
+\alias{car.all}
+% \alias{care.exp}
+\alias{ccoef.Coef.cao}
+\alias{ccoef.Coef.qrrvglm}
+\alias{ccoef.cao}
+\alias{ccoef.qrrvglm}
+\alias{cdf}
+\alias{cdf.lms.bcg}
+\alias{cdf.lms.bcn}
+\alias{cdf.lms.yjn}
+\alias{cdf.vglm}
+% \alias{cm.nointercept.vgam}
+% \alias{cm.vgam}
+% \alias{cm.zero.vgam}
+\alias{coefficients}
+\alias{coefqrrvglm}
+\alias{coefvlm}
+\alias{coefvsmooth.spline}
+\alias{coefvsmooth.spline.fit}
+\alias{constraints.vlm}
+% \alias{cqo.fit}
+\alias{d2theta.deta2}
+% \alias{dcda.fast}
+% \alias{dctda.fast.only}
+\alias{deplot}
+\alias{deplot.default}
+\alias{deplot.lms.bcg}
+\alias{deplot.lms.bcn}
+\alias{deplot.lms.yjn}
+\alias{deplot.vglm}
+\alias{deviance}
+\alias{deviance.uqo}
+\alias{deviance.vglm}
+\alias{deviance.vlm}
+\alias{df.residual}
+\alias{df.residual.vlm}
+\alias{dimm}
+% \alias{dneg.binomial}
+\alias{dnorm2}
+% \alias{dotC}
+% \alias{dotFortran}
+% \alias{dpsi.dlambda.yjn}
+% \alias{drop1.vgam}
+% \alias{drop1.vglm}
+\alias{dtheta.deta}
+% \alias{dy.dyj}
+% \alias{dyj.dy}
+\alias{effects}
+% \alias{effects.vgam}
+% \alias{effects.vlm}
+% \alias{ei}
+% \alias{eij}
+\alias{erfc}
+\alias{eta2theta}
+% \alias{extract.arg}
+\alias{family.vglm}
+\alias{fff.control}
+\alias{fill2}
+\alias{fill3}
+\alias{fitted}
+\alias{fitted.values}
+\alias{fitted.values.uqo}
+\alias{fittedvsmooth.spline}
+\alias{fsqrt}
+\alias{gammaff}
+% \alias{get.arg}
+% \alias{get.rrvglm.se1}
+% \alias{get.rrvglm.se2}
+% \alias{getind}
+% \alias{gh.weight.yjn.11}
+% \alias{gh.weight.yjn.12}
+% \alias{gh.weight.yjn.13}
+% \alias{glag.weight.yjn.11}
+% \alias{glag.weight.yjn.12}
+% \alias{glag.weight.yjn.13}
+% \alias{gleg.weight.yjn.11}
+% \alias{gleg.weight.yjn.12}
+% \alias{gleg.weight.yjn.13}
+\alias{glm}
+\alias{hyper.secant}
+% \alias{hyper.secant.1}
+% \alias{ima}
+% \alias{interleave.VGAM}
+\alias{invbinomial}
+\alias{inverse.gaussianff}
+\alias{is.Numeric}
+\alias{is.bell}
+\alias{is.bell.cao}
+\alias{is.bell.qrrvglm}
+\alias{is.bell.rrvglm}
+\alias{is.bell.vlm}
+% \alias{is.linear.term}
+% \alias{jitteruqo}
+\alias{lm}
+\alias{lm2qrrvlm.model.matrix}
+\alias{lm2vlm.model.matrix}
+\alias{lms.bcg.control}
+\alias{lms.bcn.control}
+\alias{lms.yjn.control}
+\alias{lmscreg.control}
+\alias{logLik.vlm}
+\alias{lv.Coef.cao}
+\alias{lv.Coef.qrrvglm}
+\alias{lv.cao}
+\alias{lv.qrrvglm}
+\alias{lvplot.cao}
+\alias{m2adefault}
+\alias{m2avglm}
+% \alias{matrix.power}
+% \alias{mbesselI0}
+\alias{model.matrix.qrrvglm}
+% \alias{mux11}
+% \alias{mux111}
+% \alias{mux15}
+% \alias{mux2}
+% \alias{mux22}
+% \alias{mux5}
+% \alias{mux55}
+% \alias{mux7}
+% \alias{mux9}
+% \alias{my.dbinom}
+\alias{my1}
+\alias{my2}
+\alias{namesof}
+% \alias{natural.ig}
+% \alias{neg.binomial}
+% \alias{neg.binomial.k}
+% \alias{negbin.ab}
+% \alias{new.assign}
+\alias{nlminbcontrol}
+\alias{ns}
+% \alias{num.deriv.rrr}
+\alias{persp}
+\alias{persp.cao}
+\alias{plot.cao}
+\alias{plotpreplotvgam}
+\alias{plotvglm}
+\alias{plotvlm}
+\alias{plotvsmooth.spline}
+\alias{pnorm2}
+% \alias{poissonqn}
+\alias{poly}
+\alias{powl}
+\alias{predict}
+\alias{predict.cao}
+\alias{predict.glm}
+\alias{predict.lm}
+\alias{predict.mlm}
+\alias{predict.qrrvglm}
+\alias{predict.rrvglm}
+\alias{predict.uqo}
+\alias{predict.vgam}
+\alias{predict.vlm}
+\alias{predictcao}
+\alias{predictors}
+\alias{predictors.vglm}
+\alias{predictvsmooth.spline}
+\alias{predictvsmooth.spline.fit}
+% \alias{preplotvgam}
+% \alias{print.vanova}
+% \alias{print.vfamily}
+% \alias{print.vgam}
+% \alias{print.vglm}
+% \alias{print.vglmff}
+% \alias{print.vlm}
+% \alias{print.vlm.wfit}
+% \alias{printCoef.cao}
+% \alias{printCoef.qrrvglm}
+% \alias{printCoef.rrvglm}
+% \alias{printrrvglm}
+% \alias{printsummary.cao}
+% \alias{printsummary.lms}
+% \alias{printsummary.qrrvglm}
+% \alias{printsummary.rc.exponential}
+% \alias{printsummary.rrvglm}
+% \alias{printsummary.uqo}
+% \alias{printsummary.vgam}
+% \alias{printsummary.vglm}
+% \alias{printsummary.vlm}
+% \alias{printuqo}
+% \alias{printvsmooth.spline}
+\alias{procVec}
+\alias{process.binomial2.data.vgam}
+\alias{process.categorical.data.vgam}
+% \alias{process.constraints}
+% \alias{proj.vgam}
+% \alias{proj.vglm}
+% \alias{pweights}
+% \alias{qrrvglm.xprod}
+\alias{qtplot}
+\alias{qtplot.default}
+\alias{qtplot.lms.bcg}
+\alias{qtplot.lms.bcn}
+\alias{qtplot.lms.yjn}
+\alias{qtplot.vextremes}
+\alias{qtplot.vglm}
+\alias{quasiff}
+\alias{rainfall}
+% \alias{remove.arg}
+% \alias{replace.constraints}
+\alias{resid}
+\alias{residuals}
+% \alias{residualsqrrvglm}
+% \alias{residualsuqo}
+% \alias{residualsvglm}
+% \alias{residualsvlm}
+% \alias{residvsmooth.spline}
+\alias{rlplot}
+\alias{rlplot.vextremes}
+\alias{rlplot.vglm}
+% \alias{rrar.Ak1}
+% \alias{rrar.Ci}
+% \alias{rrar.Di}
+% \alias{rrar.Ht}
+% \alias{rrar.Mi}
+% \alias{rrar.Mmat}
+% \alias{rrar.UU}
+% \alias{rrar.Ut}
+% \alias{rrar.Wmat}
+\alias{rrar.control}
+% \alias{rrr.alternating.expression}
+% \alias{rrr.deriv.gradient.fast}
+% \alias{rrr.deriv.rss}
+% \alias{rrr.derivC.rss}
+% \alias{rrr.derivative.expression}
+% \alias{rrr.end.expression}
+% \alias{rrr.init.expression}
+% \alias{rrr.normalize}
+\alias{rrvglm.control.Gaussian}
+% \alias{rrvglm.fit}
+\alias{rss.vgam}
+\alias{s.vam}
+\alias{scale.default}
+\alias{simple.exponential}
+\alias{simple.poisson}
+\alias{size.binomial}
+\alias{stdze1}
+\alias{stdze2}
+% \alias{step.vgam}
+% \alias{step.vglm}
+% \alias{subconstraints}
+\alias{summary.cao}
+\alias{summary.grc}
+\alias{summary.lms}
+\alias{summary.qrrvglm}
+\alias{summary.rc.exponential}
+\alias{summary.rrvglm}
+\alias{summary.uqo}
+\alias{summaryvgam}
+\alias{summaryvglm}
+\alias{summaryvlm}
+% \alias{tapplymat1}
+\alias{terms.vlm}
+\alias{theta2eta}
+% \alias{trivial.constraints}
+% \alias{update.vgam}
+% \alias{update.vglm}
+% \alias{uqo.fit}
+% \alias{valid.vglmff}
+% \alias{valid.vknotl2}
+% \alias{valt}
+% \alias{valt.1iter}
+% \alias{valt.2iter}
+% \alias{valt.control}
+% \alias{varassign}
+% \alias{vbacksub}
+% \alias{vchol}
+% \alias{vchol.greenstadt}
+\alias{vcontrol.expression}
+% \alias{vcovdefault}
+% \alias{vcovqrrvglm}
+% \alias{vcovrrvglm}
+% \alias{vcovvlm}
+% \alias{veigen}
+% \alias{vellipse}
+% \alias{vforsub}
+% \alias{vgam.fit}
+% \alias{vgam.match}
+% \alias{vgam.nlchisq}
+% \alias{vgety}
+% \alias{vglm.fit}
+\alias{vglm.garma.control}
+\alias{vglm.multinomial.control}
+\alias{vglm.multinomial.deviance.control}
+\alias{vglm.vcategorical.control}
+% \alias{vindex}
+% \alias{vlabel}
+\alias{vlm}
+\alias{vlm.control}
+% \alias{vlm.wfit}
+\alias{vnonlinear.control}
+\alias{vplot}
+\alias{vplot.default}
+\alias{vplot.factor}
+\alias{vplot.list}
+\alias{vplot.matrix}
+\alias{vplot.numeric}
+\alias{vvplot.factor}
+\alias{weights}
+% \alias{wweighted.mean}
+\alias{wweights}
+% \alias{yformat}
+% \alias{ylim.scale}
+%
+%
+%
+\alias{Coef.uqo-class}
+\alias{cao-class}
+\alias{grc-class}
+\alias{qrrvglm-class}
+\alias{summary.qrrvglm-class}
+\alias{summary.rrvglm-class}
+\alias{summary.vgam-class}
+\alias{summary.vglm-class}
+\alias{summary.vlm-class}
+\alias{uqo-class}
+\alias{vcov.qrrvglm-class}
+\alias{vlm-class}
+\alias{vlmsmall-class}
+\alias{vsmooth.spline-class}
+\alias{vsmooth.spline.fit-class}
+%
+%
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Undocumented and Internally Used Functions and Classes }
+\description{
+ Functions that are currently undocumented and/or used internally are
+ aliased to this help file.
+ Ditto for some classes.
+
+}
+%\usage{
+%normal1(lmean="identity", lsd="loge", zero=NULL)
+%}
+%- maybe also 'usage' for other objects documented here.
+%\arguments{
+% \item{lmean}{
+% Link function applied to the mean.
+% See \code{\link{Links}} for more choices.
+%
+% }
+%}
+\details{
+ In the \pkg{VGAM} package there are currently many
+ objects/methods/classes which are internal and/or
+ undocumented. This help file suppresses the warnings when the
+ package is checked with \code{R CMD check}.
+
+}
+\value{
+ Each object/method/class may or may not have its own individual value.
+ These will be documented over time.
+
+}
+%\references{
+%}
+
+\author{ T. W. Yee }
+%\note{
+%
+%}
+%\seealso{
+% \code{gaussianff},
+% \code{\link{posnormal1}}.
+%}
+%\examples{
+%}
+\keyword{models}
+\keyword{regression}
diff --git a/man/nzc.Rd b/man/nzc.Rd
new file mode 100644
index 0000000..a062155
--- /dev/null
+++ b/man/nzc.Rd
@@ -0,0 +1,47 @@
+\name{nzc}
+\alias{nzc}
+\docType{data}
+\title{ Chinese Population in New Zealand 1867--2001}
+\description{
+ The Chinese population in New Zealand from 1867 to 2001,
+ along with the whole of the New Zealand population.
+}
+\usage{data(nzc)}
+\format{
+ A data frame with 26 observations on the following 4 variables.
+ \describe{
+ \item{\code{year}}{Year. }
+ \item{\code{male}}{Number of Chinese males. }
+ \item{\code{female}}{Number of Chinese females. }
+ \item{\code{nz}}{Total number in the New Zealand population. }
+ }
+}
+\details{
+ The NZ totals for the years 1867 and 1871 exclude the Maori population.
+ The second value of 4583 looks erroneous, as seen in the plots in the
+ examples below.
+}
+%\source{
+%}
+\references{
+ Page 6 of \emph{Aliens At My Table: Asians as New Zealanders see them}
+ by M. Ip and N. Murphy,
+ (2005), Penguin.
+}
+\examples{
+\dontrun{
+data(nzc)
+attach(nzc)
+plot(year, female/(male+female), type="b", ylab="Proportion",
+ main="Proportion of Chinese that are female",
+ col="blue", las=1)
+abline(h=0.5, lty="dashed")
+
+plot(year, 100*(male+female)/nz, type="b", ylab="Percent",
+ ylim=c(0, max(100*(male+female)/nz)), col="blue",
+ main="Percent of New Zealanders that are Chinese", las=1)
+abline(h=0, lty="dashed")
+
+detach(nzc)
+}
+}
+\keyword{datasets}
diff --git a/man/oxtemp.Rd b/man/oxtemp.Rd
new file mode 100644
index 0000000..4029ff1
--- /dev/null
+++ b/man/oxtemp.Rd
@@ -0,0 +1,30 @@
+\name{oxtemp}
+\alias{oxtemp}
+\docType{data}
+\title{ Oxford Temperature Data }
+\description{
+ Annual maximum temperatures collected at Oxford, UK.
+}
+\usage{data(oxtemp)}
+\format{
+ A data frame with 80 observations on the following 2 variables.
+ \describe{
+ \item{maxtemp}{Annual maximum temperatures (in degrees Fahrenheit). }
+ \item{year}{The values 1901 to 1980. }
+ }
+}
+\details{
+ The data were collected from 1901 to 1980.
+}
+% zz:
+\source{
+ Unknown.
+}
+% \references{
+% }
+\examples{
+data(oxtemp)
+fit = vglm(maxtemp ~ 1, egev, trace=TRUE, data=oxtemp)
+}
+\keyword{datasets}
+
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
new file mode 100644
index 0000000..3a11db1
--- /dev/null
+++ b/man/paralogistic.Rd
@@ -0,0 +1,95 @@
+\name{paralogistic}
+\alias{paralogistic}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Paralogistic Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ paralogistic distribution.
+}
+\usage{
+paralogistic(link.a = "loge", link.scale = "loge",
+ init.a = 1, init.scale = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.a, link.scale}{
+ Parameter link functions applied to the
+ (positive) shape parameter \code{a} and
+ (positive) scale parameter \code{scale}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.a, init.scale}{
+ Optional initial values for \code{a} and \code{scale}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2\} which correspond to
+ \code{a}, \code{scale}, respectively.
+
+ }
+}
+\details{
+ The 2-parameter paralogistic distribution is the 4-parameter
+ generalized beta II distribution with shape parameter \eqn{p=1} and
+ \eqn{a=q}.
+It is the 3-parameter Singh-Maddala distribution with \eqn{a=q}.
+ More details can be found in Kleiber and Kotz (2003).
+
+The 2-parameter paralogistic has density
+ \deqn{f(y) = a^2 y^{a-1} / [b^a \{1 + (y/b)^a\}^{1+a}]}{%
+ f(y) = a^2 y^(a-1) / [b^a (1 + (y/b)^a)^(1+a)]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and \eqn{a} is the shape parameter.
+The mean is
+ \deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(a - 1/a) / \Gamma(a)}{%
+ E(Y) = b gamma(1 + 1/a) gamma(a - 1/a) / gamma(a)}
+provided \eqn{a > 1}.
+
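+As an informal check of the mean formula, the following minimal
+sketch simulates from \code{rparalogistic()}; the argument order
+\code{(n, a, scale)} is assumed from the example below, and the
+parameter values are arbitrary:
+\preformatted{
+set.seed(1)
+a <- 4; b <- 6
+y <- rparalogistic(n=100000, a, b)
+c(empirical = mean(y),
+  theoretical = b * gamma(1 + 1/a) * gamma(a - 1/a) / gamma(a))
+}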
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{Paralogistic}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{fisk}},
+ \code{\link{invlomax}},
+ \code{\link{lomax}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rparalogistic(n=3000, 4, 6)
+fit = vglm(y ~ 1, paralogistic, trace=TRUE)
+fit = vglm(y ~ 1, paralogistic(init.a=2.3, init.sc=5),
+ trace=TRUE, crit="c")
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/pareto1.Rd b/man/pareto1.Rd
new file mode 100644
index 0000000..cc1a149
--- /dev/null
+++ b/man/pareto1.Rd
@@ -0,0 +1,170 @@
+\name{pareto1}
+\alias{pareto1}
+\alias{tpareto1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Pareto and Truncated Pareto Distribution Family Functions }
+\description{
+ Estimates one of the parameters of the Pareto(I) distribution
+ by maximum likelihood estimation.
+ Also includes the upper truncated Pareto(I) distribution.
+
+}
+\usage{
+pareto1(lshape = "loge", location=NULL)
+tpareto1(lower, upper, lshape = "loge", ishape=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape}{
+ Parameter link function applied to the parameter \eqn{k}.
+ See \code{\link{Links}} for more choices.
+ A log link is the default because \eqn{k} is positive.
+
+ }
+ \item{lower, upper}{
+ Numeric.
+ Lower and upper limits for the truncated Pareto distribution.
+ Each must be positive and of length 1.
+ They are called \eqn{\alpha}{alpha} and \eqn{U} below.
+
+ }
+ \item{ishape}{
+ Numeric.
+ Optional initial value for the shape parameter.
+ A \code{NULL} means a value is obtained internally.
+
+ }
+ \item{location}{
+ Numeric. The parameter \eqn{\alpha}{alpha} below.
+ If the user inputs a number then it is assumed known with this value.
+ The default means it is estimated by maximum likelihood
+ estimation, whereby the estimate is \code{min(y)} where \code{y} is
+ the response vector.
+
+ }
+}
+\details{
+ A random variable \eqn{Y} has a Pareto distribution if
+ \deqn{P[Y>y] = C / y^{k}}{%
+ P[Y>y] = C / y^k}
+ for some positive \eqn{k} and \eqn{C}.
+ This model is important in many applications due to the power
+ law probability tail, especially for large values of \eqn{y}.
+
+ The Pareto distribution, which is used a lot in economics,
+ has a probability density function that can be written
+ \deqn{f(y) = k \alpha^k / y^{k+1}}{%
+ f(y) = k * alpha^k / y^(k+1)}
+ for \eqn{0 < \alpha < y}{0< alpha < y} and \eqn{k>0}.
+ The \eqn{\alpha}{alpha} is known as the location parameter, and
+ \eqn{k} is known as the shape parameter.
+ The mean of \eqn{Y} is
+ \eqn{\alpha k/(k-1)}{alpha*k/(k-1)} provided \eqn{k>1}.
+ Its variance is
+ \eqn{\alpha^2 k /((k-1)^2 (k-2))}{alpha^2 k /((k-1)^2 (k-2))}
+ provided \eqn{k>2}.
+
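+ As an informal check of the mean formula above, one may simulate
+ from \code{rpareto()} (a minimal sketch; see the examples below,
+ and note that the parameter values here are arbitrary):
+ \preformatted{
+ set.seed(1)
+ alpha <- 2; k <- 3
+ y <- rpareto(n=100000, location=alpha, shape=k)
+ c(empirical=mean(y), theoretical=alpha*k/(k-1))  # Should be close
+ }
+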
+ The upper truncated Pareto distribution
+ has a probability density function that can be written
+ \deqn{f(y) = k \alpha^k / [y^{k+1} (1-(\alpha/U)^k)]}{%
+ f(y) = k * alpha^k / [y^(k+1) (1-(alpha/U)^k)]}
+ for \eqn{0 < \alpha < y < U < \infty}{0< alpha < y < U < Inf}
+ and \eqn{k>0}.
+ Possibly, better names for \eqn{k} are
+ the \emph{index} and \emph{tail} parameters.
+ Here, \eqn{\alpha}{alpha} and \eqn{U} are known.
+ The mean of \eqn{Y} is
+ \eqn{k \alpha^k (U^{1-k}-\alpha^{1-k}) /
+ [(1-k)(1-(\alpha/U)^k)]}{
+ k * alpha^k * (U^(1-k)-alpha^(1-k)) / ((1-k) * (1-(alpha/U)^k))}.
+
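+ A minimal simulation sketch of this mean formula follows (it uses
+ \code{rtpareto()} as in the examples below; the parameter values
+ are arbitrary):
+ \preformatted{
+ set.seed(1)
+ alpha <- 2; U <- 8; k <- exp(2)
+ y <- rtpareto(n=100000, lower=alpha, upper=U, shape=k)
+ c(empirical = mean(y),
+   theoretical = k * alpha^k * (U^(1-k) - alpha^(1-k)) /
+                 ((1-k) * (1 - (alpha/U)^k)))  # Should be close
+ }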
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Evans, M., Hastings, N. and Peacock, B. (2000)
+ \emph{Statistical Distributions},
+ New York: Wiley-Interscience, Third edition.
+
+ Aban, I. B., Meerschaert, M. M. and Panorska, A. K. (2006).
+ Parameter estimation for the truncated Pareto distribution,
+ \emph{Journal of the American Statistical Association},
+ \bold{101}(473),
+ 270--277.
+
+}
+\author{ T. W. Yee }
+\note{
+ Outside of economics, the Pareto distribution is known as the Bradford
+ distribution.
+
+ For \code{pareto1},
+ if the estimate of \eqn{k} is less than or equal to unity
+ then the fitted values will be \code{NA}s.
+ Also, \code{pareto1} fits the Pareto(I) distribution.
+ See \code{\link{paretoIV}} for the more general Pareto(IV/III/II)
+ distributions, but there is a slight change in notation: \eqn{s=k}
+ and \eqn{b=\alpha}{b=alpha}.
+
+ In some applications the Pareto law is truncated by a
+ natural upper bound on the probability tail.
+ The upper truncated Pareto distribution has three parameters (called
+ \eqn{\alpha}{alpha}, \eqn{U} and \eqn{k} here) but the family function
+ \code{tpareto1} estimates only \eqn{k}.
+ With known lower and upper limits, the ML estimator of \eqn{k} has
+ the usual properties of MLEs.
+ Aban et al. (2006) discuss other inferential details.
+
+}
+\section{Warning }{
+ The usual or unbounded Pareto distribution has two parameters
+ (called \eqn{\alpha}{alpha} and \eqn{k} here) but the family
+ function \code{pareto1} estimates only \eqn{k} using iteratively
+ reweighted least squares. The MLE of the \eqn{\alpha}{alpha}
+ parameter lies on the boundary and is \code{min(y)} where \code{y}
+ is the response. Consequently, using the default argument values,
+ the standard errors are incorrect when one does a \code{summary}
+ on the fitted object. If the user inputs a value for \code{location}
+ then it is assumed known with this value and then \code{summary} on
+ the fitted object should be correct. Numerical problems may occur
+ for small \eqn{k}, e.g., \eqn{k < 1}.
+
+}
+\seealso{
+ \code{\link{Pareto}},
+ \code{\link{Tpareto}},
+ \code{\link{paretoIV}},
+ \code{\link{gpd}}.
+}
+\examples{
+alpha = 2; k = exp(3)
+y = rpareto(n=1000, location=alpha, shape=k)
+fit = vglm(y ~ 1, pareto1, trace=TRUE)
+fit@extra # The estimate of alpha is here
+fitted(fit)[1:5]
+mean(y)
+coef(fit, matrix=TRUE)
+summary(fit) # Standard errors are incorrect!!
+
+# Here, alpha is assumed known
+fit2 = vglm(y ~ 1, pareto1(location=alpha), trace=TRUE, crit="c")
+fit2@extra # alpha stored here
+fitted(fit2)[1:5]
+mean(y)
+coef(fit2, matrix=TRUE)
+summary(fit2) # Standard errors are ok
+
+# Upper truncated Pareto distribution
+lower = 2; upper = 8; k = exp(2)
+y = rtpareto(n=100, lower=lower, upper=upper, shape=k)
+fit3 = vglm(y ~ 1, tpareto1(lower, upper), trace=TRUE, cri="c")
+coef(fit3, matrix=TRUE)
+c(fit3@misc$lower, fit3@misc$upper)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
new file mode 100644
index 0000000..9771259
--- /dev/null
+++ b/man/paretoIV.Rd
@@ -0,0 +1,152 @@
+\name{paretoIV}
+\alias{paretoIV}
+\alias{paretoIII}
+\alias{paretoII}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Pareto(IV/III/II) Distribution Family Functions }
+\description{
+ Estimates three of the parameters of the Pareto(IV) distribution
+ by maximum likelihood estimation. Some special cases of this
+ distribution are also handled.
+
+}
+\usage{
+paretoIV(location=0, lscale="loge", linequality="loge", lshape="loge",
+ iscale=1, iinequality=1, ishape=NULL, method.init=1)
+paretoIII(location=0, lscale="loge", linequality="loge",
+ iscale=NULL, iinequality=NULL)
+paretoII(location=0, lscale="loge", lshape="loge",
+ iscale=NULL, ishape=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{location}{
+ Location parameter, called \eqn{a} below.
+ It is assumed known.
+
+ }
+ \item{lscale, linequality, lshape}{
+ Parameter link functions for the
+ scale parameter (called \eqn{b} below),
+ inequality parameter (called \eqn{g} below), and
+ shape parameter (called \eqn{s} below).
+ See \code{\link{Links}} for more choices.
+ A log link is the default for all because all these parameters are
+ positive.
+
+ }
+ \item{iscale, iinequality, ishape}{
+ Initial values for the parameters.
+ A \code{NULL} value means that it is obtained internally.
+ If convergence failure occurs, use these arguments to input
+ some alternative initial values.
+
+ }
+ \item{method.init}{
+ Method of initialization for the shape parameter.
+ Currently only values 1 and 2 are available.
+ Try the other value if convergence failure occurs.
+
+ }
+}
+\details{
+ The Pareto(IV) distribution, which is used in actuarial science,
+ economics, finance and telecommunications,
+ has a cumulative distribution function that can be written
+ \deqn{F(y) = 1 - [1 + ((y-a)/b)^{1/g}]^{-s}}{%
+ F(y) = 1 - [1 + ((y-a)/b)^(1/g)]^(-s)}
+ for \eqn{y > a}, \eqn{b>0}, \eqn{g>0} and \eqn{s>0}.
+ The \eqn{a}{a} is called the \emph{location} parameter,
+ \eqn{b} the \emph{scale} parameter,
+ \eqn{g} the \emph{inequality} parameter, and
+ \eqn{s} the \emph{shape} parameter.
+
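+ As an informal check of this cdf, the following minimal sketch
+ compares the formula against \code{pparetoIV()}; the argument names
+ \code{location}, \code{scale}, \code{inequality} and \code{shape}
+ are assumed from \code{\link{ParetoIV}}, and the parameter values
+ are arbitrary:
+ \preformatted{
+ a <- 0; b <- exp(1); g <- exp(-0.3); s <- exp(1); y <- 3
+ c(formula = 1 - (1 + ((y-a)/b)^(1/g))^(-s),
+   pparetoIV = pparetoIV(y, location=a, scale=b, inequality=g, shape=s))
+ }
+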
+ The location parameter is assumed known otherwise the Pareto(IV)
+ distribution will not be a regular family. This assumption is not too
+ restrictive in modelling because in typical applications this parameter
+ is known, e.g., in insurance and reinsurance it is pre-defined by a
+ contract and can be represented as a deductible or a retention level.
+
+ The inequality parameter is so-called because of its interpretation
+ in the economics context. If we choose a unit shape parameter value
+ and a zero location parameter value then the inequality parameter
+ is the Gini index of inequality, provided \eqn{g \leq 1}{g<=1}.
+
+ The fitted values are currently \code{NA} because I haven't worked
+ out what the mean of \eqn{Y} is yet.
+
+% The mean of \eqn{Y} is
+% \eqn{\alpha k/(k-1)}{alpha*k/(k-1)} provided \eqn{k>1}.
+% Its variance is
+% \eqn{\alpha^2 k /((k-1)^2 (k-2))}{alpha^2 k /((k-1)^2 (k-2))}
+% provided \eqn{k>2}.
+
+% The maximum likelihood estimator for the location parameter is
+% \code{min(y)}, i.e., the smallest response value.
+
+ There are a number of special cases of the Pareto(IV) distribution.
+ These include the Pareto(I), Pareto(II), Pareto(III), and Burr family
+ of distributions.
+ Denoting \eqn{PIV(a,b,g,s)} as the Pareto(IV) distribution,
+ the Burr distribution \eqn{Burr(b,g,s)} is \eqn{PIV(a=0,b,1/g,s)},
+ the Pareto(III) distribution \eqn{PIII(a,b,g)} is \eqn{PIV(a,b,g,s=1)},
+ the Pareto(II) distribution \eqn{PII(a,b,s)} is \eqn{PIV(a,b,g=1,s)},
+ and
+ the Pareto(I) distribution \eqn{PI(b,s)} is \eqn{PIV(b,b,g=1,s)}.
+ Thus the Burr distribution can be fitted using the
+ \code{\link{nloge}} link
+ function and using the default \code{location=0} argument.
+ The Pareto(I) distribution can be fitted using \code{\link{pareto1}}
+ but there is a slight change in notation: \eqn{s=k} and
+ \eqn{b=\alpha}{b=alpha}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Brazauskas, V. (2003)
+Information matrix for Pareto(IV), Burr, and related distributions.
+\emph{Communications in Statistics--Theory and Methods}
+\bold{32}, 315--325.
+
+Arnold, B. C. (1983)
+\emph{Pareto Distributions}.
+Fairland, Maryland: International Cooperative Publishing House.
+
+}
+\author{ T. W. Yee }
+\note{
+ The \code{extra} slot of the fitted object has a component called
+ \code{"location"} which stores the location parameter value(s).
+
+}
+%\section{Warning }{
+% The Pareto(IV) distribution is very general,
+% for example, special cases include the Pareto(I), Pareto(II),
+% Pareto(III), and Burr family of distributions. Consequently, reasonably
+% good initial values are recommended, and convergence to a local solution
+% may occur. For this reason setting \code{trace=TRUE} is a good idea
+% for monitoring the convergence.
+% Large samples are ideally required to get reasonable results.
+%
+%}
+\seealso{
+ \code{\link{ParetoIV}},
+ \code{\link{pareto1}},
+ \code{\link{gpd}}.
+}
+\examples{
+y = rparetoIV(n <- 2000, scale=exp(1), ineq=exp(-0.3), shape=exp(1))
+\dontrun{par(mfrow=c(2,1)); hist(y); hist(log(y)); }
+fit = vglm(y ~ 1, paretoIV, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
new file mode 100644
index 0000000..166dae5
--- /dev/null
+++ b/man/persp.qrrvglm.Rd
@@ -0,0 +1,214 @@
+\name{persp.qrrvglm}
+\alias{persp.qrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Perspective plot for QRR-VGLMs }
+\description{
+Produces a perspective plot for a CQO model (QRR-VGLM). It is only
+applicable for rank-1 or rank-2 models with argument \code{Norrr = ~ 1}.
+
+}
+\usage{
+persp.qrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
+ xlim=NULL, ylim=NULL, zlim=NULL,
+ gridlength=if(Rank==1) 301 else c(51,51),
+ whichSpecies = NULL,
+ xlab = if(Rank==1) "Latent Variable" else "Latent Variable 1",
+ ylab = if(Rank==1) "Expected Value" else "Latent Variable 2",
+ zlab = "Expected value", labelSpecies = FALSE,
+ stretch = 1.05, main="", ticktype = "detailed",
+ col = if(Rank==1) par()$col else "white", add1 = FALSE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{
+ Object of class \code{"qrrvglm"}, i.e., a
+ constrained quadratic ordination (CQO) object.
+
+ }
+ \item{varlvI}{
+ Logical that is fed into \code{\link{Coef.qrrvglm}}.
+
+ }
+ \item{reference}{
+ Integer or character that is fed into \code{\link{Coef.qrrvglm}}.
+
+ }
+ \item{plot.it}{ Logical. Plot it? }
+ \item{xlim, ylim}{
+ Limits of the x- and y-axis. Both are numeric of length 2.
+ See \code{\link[graphics]{par}}.
+
+ }
+ \item{zlim}{
+ Limits of the z-axis. Numeric of length 2.
+ Ignored if rank is 1.
+ See \code{\link[graphics]{par}}.
+
+ }
+ \item{gridlength}{
+ Numeric. The fitted values are evaluated on a grid, and this
+ argument regulates the fineness of the grid. If \code{Rank=2}
+ then the argument is recycled to length 2, and the two numbers
+ are the number of grid points on the x- and y-axes respectively.
+
+ }
+ \item{whichSpecies}{
+ Numeric or character vector. Indicates which species are to be
+ plotted. The default is to plot all of them. If numeric, it should
+ contain values in the set \{1,2,\ldots,\eqn{S}\} where \eqn{S}
+ is the number of species.
+
+ }
+ \item{xlab, ylab}{
+ Character caption for the x-axis and y-axis. By default, a suitable caption is
+ found. See the \code{xlab} argument in \code{\link[graphics]{plot}}
+ or \code{\link[graphics]{title}}.
+
+ }
+ \item{zlab}{Character caption for the z-axis.
+ Used only if \code{Rank=2}.
+ By default, a suitable caption is found.
+ See the \code{xlab} argument in \code{\link[graphics]{plot}}
+ or \code{\link[graphics]{title}}.
+
+ }
+ \item{labelSpecies}{Logical.
+ Whether the species should be labelled with their names.
+ Used for \code{Rank=1} only.
+ The position of the label is just above the species' maximum.
+
+ }
+ \item{stretch}{
+ Numeric. A value slightly more than 1; this argument
+ adjusts the height of the y-axis. Used for \code{Rank=1} only.
+
+ }
+ \item{main}{
+ Character, giving the title of the plot.
+ See the \code{main} argument in \code{\link[graphics]{plot}}
+ or \code{\link[graphics]{title}}.
+
+ }
+ \item{ticktype}{ Tick type. Used only if \code{Rank=2}.
+ See \code{\link[graphics]{persp}} for more information.
+
+ }
+ \item{col}{ Color.
+ See \code{\link[graphics]{persp}} for more information.
+
+ }
+ \item{add1}{ Logical. Add to an existing plot?
+ Used only for rank-1 models.
+
+ }
+ \item{\dots}{
+ Arguments passed into \code{\link[graphics]{persp}}. Useful
+ arguments here include \code{theta} and \code{phi}, which control
+ the position of the eye.
+
+ }
+}
+\details{
+ For a rank-1 model, a perspective plot is similar to
+ \code{\link{lvplot.qrrvglm}} but plots the curves along a fine grid
+ and there is no rugplot to show the site scores.
+
+ For a rank-2 model, a perspective plot has the first latent variable as
+ the x-axis, the second latent variable as the y-axis, and the expected
+ value (fitted value) as the z-axis. The result of a CQO is that each
+ species has a response surface with elliptical contours. This function
+ will, at each grid point, work out the maximum fitted value over all
+ the species. The resulting response surface is plotted. Thus rare
+ species will be obscured and abundant species will dominate the plot.
+ To view rare species, use the \code{whichSpecies} argument to select
+ a subset of the species.
+
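+ The following is a small self-contained sketch (with illustrative
+ sizes only) of how the plotted surface relates to the fitted values;
+ the names match the components described in the \bold{Value} section
+ below:
+ \preformatted{
+ G1 <- 3; G2 <- 2; M <- 4  # Tiny grid dimensions and number of species
+ fitted <- matrix(runif(G1*G2*M), nrow=G1*G2, ncol=M)
+ # The z-axis values: the maximum over species at each grid point
+ maxfitted <- matrix(apply(fitted, 1, max), nrow=G1, ncol=G2)
+ }
+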
+ A perspective plot will be performed if \code{Norrr = ~ 1}, and
+ \code{Rank = 1} or \code{2}. Also, all the tolerance matrices of
+ those species to be plotted must be positive-definite.
+
+}
+\value{
+ For a rank-2 model, a list with the following components.
+ \item{fitted}{
+ A \eqn{(G_1 \times G_2)}{(G1*G2)} by \eqn{M} matrix
+ of fitted values on the grid. Here, \eqn{G_1}{G1} and \eqn{G_2}{G2}
+ are the two values of \code{gridlength}.
+
+ }
+ \item{lv1grid, lv2grid}{The grid points for the x-axis and y-axis. }
+ \item{maxfitted}{
+ A \eqn{G_1}{G1} by \eqn{G_2}{G2} matrix of maximum
+ of the fitted values over all species. These are the
+ values that are plotted on the z-axis.
+
+ }
+ For a rank-1 model, the components \code{lv2grid} and \code{maxfitted}
+ are \code{NULL}.
+
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+}
+
+\author{ Thomas W. Yee }
+
+\note{
+ Yee (2004) does not refer to perspective plots. Instead, contour plots
+ via \code{\link{lvplot.qrrvglm}} are used.
+
+ For rank-1 models, a similar function to this one is
+ \code{\link{lvplot.qrrvglm}}. It plots the fitted values at the actual
+ site score values rather than on a fine grid. This has the
+ advantage that the user sees the curves as a direct result of a
+ model fitted to the data. Here, by contrast, it is easy to mistake
+ the smooth bell-shaped curves for the truth, because the plot is
+ further removed from the data.
+
+}
+\seealso{
+\code{\link[graphics]{persp}},
+\code{\link{cqo}},
+\code{\link{Coef.qrrvglm}},
+\code{\link{lvplot.qrrvglm}},
+\code{\link[graphics]{par}},
+\code{\link[graphics]{title}}.
+}
+\examples{\dontrun{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+set.seed(111)
+r1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ poissonff, hspider, trace = FALSE, ITolerances = TRUE)
+set.seed(111) # r2 below is an ill-conditioned model
+r2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ isdlv = c(2.4,1.0), Muxfactor = 3.0, trace = FALSE,
+ poissonff, hspider, Rank = 2, EqualTolerances = TRUE)
+
+sort(r1@misc$deviance.Bestof) # A history of the fits
+sort(r2@misc$deviance.Bestof) # A history of the fits
+if(deviance(r2) > 857) stop("suboptimal fit obtained")
+
+persp(r1, xlim=c(-6,5), col=1:4, label=TRUE)
+
+# Involves all species
+persp(r2, xlim=c(-6,5), ylim=c(-4,5), theta=10, phi=20, zlim=c(0,220))
+# Omit the two dominant species to see what's behind them
+persp(r2, xlim=c(-6,5), ylim=c(-4,5), theta=10, phi=20, zlim=c(0,220),
+ which=(1:10)[-c(8,10)]) # Use zlim to retain the original z-scale
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{hplot}
+
diff --git a/man/plotdeplot.lmscreg.Rd b/man/plotdeplot.lmscreg.Rd
new file mode 100644
index 0000000..65e6b66
--- /dev/null
+++ b/man/plotdeplot.lmscreg.Rd
@@ -0,0 +1,101 @@
+\name{plotdeplot.lmscreg}
+\alias{plotdeplot.lmscreg}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Density Plot for LMS Quantile Regression }
+\description{
+ Plots a probability density function
+ associated with a LMS quantile regression.
+}
+\usage{
+plotdeplot.lmscreg(answer, y.arg, add.arg = FALSE,
+ xlab = "", ylab = "density",
+ xlim = NULL, ylim = NULL,
+ llty.arg = par()$lty, col.arg = par()$col,
+ llwd.arg = par()$lwd, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{answer}{ Output from functions of the form
+ \code{deplot.???} where \code{???} is the name of the
+ \pkg{VGAM} LMS family function, e.g., \code{lms.yjn}.
+ See below for details.
+ }
+
+ \item{y.arg}{ Numerical vector. The values of the response variable
+ at which to evaluate the density. This should be a grid that is fine
+ enough to ensure the plotted curves are smooth. }
+
+ \item{add.arg}{ Logical. Add the density to an existing plot? }
+ \item{xlab, ylab}{
+ Caption for the x- and y-axes. See \code{\link[graphics]{par}}.
+ }
+ \item{xlim, ylim}{
+ Limits of the x- and y-axes. See \code{\link[graphics]{par}}.
+ }
+ \item{llty.arg}{ Line type.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{col.arg}{ Line color.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+
+ \item{llwd.arg}{ Line width.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+
+ \item{\dots}{ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{main} and \code{las}.
+ }
+}
+\details{
+ The above graphical parameters offer some flexibility when
+ plotting the density functions.
+}
+\value{
+ The list \code{answer}, which has components
+ \item{newdata}{ The argument \code{newdata} above from
+ the argument list of \code{\link{deplot.lmscreg}},
+ or a one-row
+ data frame constructed out of the \code{x0} argument. }
+ \item{y}{ The argument \code{y.arg} above. }
+ \item{density}{ Vector of the density function values evaluated at \code{y.arg}. }
+}
+
+\references{
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+While the graphical arguments of this function are useful to the user,
+this function should not be called directly.
+
+}
+\seealso{
+\code{\link{deplot.lmscreg}}.
+}
+
+\examples{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+\dontrun{
+y = seq(15, 43, by=0.25)
+deplot(fit, x0=20, y=y, xlab="BMI", col="green", llwd=2,
+ main="BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
+deplot(fit, x0=40, y=y, add=TRUE, col="blue", llwd=2)
+deplot(fit, x0=60, y=y, add=TRUE, col="red", llwd=2) -> a
+
+names(a@post$deplot)
+a@post$deplot$newdata
+a@post$deplot$y[1:5]
+a@post$deplot$density[1:5]
+}
+}
+\keyword{dplot}
+\keyword{models}
+\keyword{regression}
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
new file mode 100644
index 0000000..a307767
--- /dev/null
+++ b/man/plotqrrvglm.Rd
@@ -0,0 +1,75 @@
+\name{plotqrrvglm}
+\alias{plotqrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Model Diagnostic Plots for QRR-VGLMs }
+\description{
+ The residuals of a QRR-VGLM are plotted for model diagnostic purposes.
+}
+\usage{
+plotqrrvglm(object,
+ rtype = c("pearson", "response", "deviance", "working"),
+ ask = FALSE,
+ main = paste(Rtype, "residuals vs latent variable(s)"),
+ xlab = "Latent Variable",
+ ITolerances = object@control$EqualTolerances, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object of class \code{"qrrvglm"}. }
+ \item{rtype}{ Character string giving residual type. By default, the first
+ one is chosen. }
+ \item{ask}{ Logical. If \code{TRUE}, the user is asked to hit the return
+ key for the next plot. }
+ \item{main}{ Character string giving the title of the plot. }
+ \item{xlab}{ Character string giving the x-axis caption. }
+ \item{ITolerances}{ Logical. This argument is fed into
+ \code{Coef(object, ITolerances=ITolerances)}.
+ }
+ \item{\dots}{ Other plotting arguments (see \code{\link[graphics]{par}}). }
+}
+\details{
+ Plotting the residuals can be very useful for checking
+ that the model fit is adequate.
+}
+\value{
+ The original object.
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+}
+\author{Thomas W. Yee}
+
+\note{
+An ordination plot of a QRR-VGLM can be obtained
+by \code{\link{lvplot.qrrvglm}}.
+}
+
+\seealso{
+ \code{\link{lvplot.qrrvglm}},
+ \code{\link{cqo}}.
+}
+
+\examples{\dontrun{
+# QRR-VGLM on the hunting spiders data
+# This is computationally expensive
+data(hspider)
+set.seed(111) # This leads to the global solution
+# hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+par(mfrow=c(3,4))
+plot(p1, rtype="d", col="blue", pch=4, las=1)
+}
+}
+\keyword{dplot}
+\keyword{models}
+\keyword{regression}
diff --git a/man/plotqtplot.lmscreg.Rd b/man/plotqtplot.lmscreg.Rd
new file mode 100644
index 0000000..03c1d44
--- /dev/null
+++ b/man/plotqtplot.lmscreg.Rd
@@ -0,0 +1,110 @@
+\name{plotqtplot.lmscreg}
+\alias{plotqtplot.lmscreg}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Quantile Plot for LMS Quantile Regression }
+\description{
+ Plots the quantiles
+ associated with a LMS quantile regression.
+}
+\usage{
+plotqtplot.lmscreg(fitted.values, object, newdata = NULL,
+ percentiles = object at misc$percentiles, lp = NULL,
+ add.arg = FALSE, y = if (length(newdata)) FALSE else TRUE,
+ spline.fit = FALSE, label = TRUE, size.label = 0.06,
+ xlab = NULL, ylab = "",
+ pch = par()$pch, pcex = par()$cex, pcol.arg = par()$col,
+ xlim = NULL, ylim = NULL,
+ llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
+ tcol.arg = par()$col, tadj = 1, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{fitted.values}{ Matrix of fitted values. }
+ \item{object}{ A \pkg{VGAM} quantile regression model, i.e.,
+ an object produced by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}} with a family function beginning with
+ \code{"lms."}, e.g., \code{\link{lms.yjn}}.
+ }
+ \item{newdata}{ Data frame at which predictions are made.
+ By default, the original data are used. }
+ \item{percentiles}{ Numerical vector with values between 0 and 100
+ that specify the percentiles (quantiles).
+ The default is to use the percentiles when fitting the model.
+ For example, the value 50 corresponds to the median.
+ }
+ \item{lp}{ Length of \code{percentiles}. }
+ \item{add.arg}{ Logical. Add the quantiles to an existing plot? }
+ \item{y}{ Logical. Add the response as points to the plot? }
+ \item{spline.fit}{ Logical. Add a spline curve to the plot? }
+ \item{label}{ Logical. Add the percentiles (as text) to the plot? }
+ \item{size.label}{ Numeric. How much room to leave at the RHS for the label.
+ It is in percent (of the range of the primary variable).
+ }
+ \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. }
+ \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. }
+ \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. }
+ \item{pcex}{ Character expansion of the points.
+ See \code{\link[graphics]{par}}. }
+ \item{pcol.arg}{ Color of the points.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{xlim}{ Limits of the x-axis. See \code{\link[graphics]{par}}. }
+ \item{ylim}{ Limits of the y-axis. See \code{\link[graphics]{par}}. }
+ \item{llty.arg}{ Line type.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{lcol.arg}{ Color of the lines.
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+ }
+ \item{llwd.arg}{ Line width.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}.
+ }
+ \item{tcol.arg}{ Color of the text
+ (if \code{label} is \code{TRUE}).
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+ }
+ \item{tadj}{ Text justification.
+ See the \code{adj} argument of \code{\link[graphics]{par}}.
+ }
+ \item{\dots}{ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{main} and \code{las}.
+}
+}
+\details{
+ The above graphical parameters offer some flexibility when
+ plotting the quantiles.
+}
+\value{
+ The matrix of fitted values.
+}
+\references{
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ While the graphical arguments of this function are useful to the user,
+ this function should not be called directly.
+}
+
+\seealso{
+\code{\link{qtplot.lmscreg}}.
+}
+
+\examples{\dontrun{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+qtplot(fit)
+qtplot(fit, perc=c(25,50,75,95), lcol="blue", tcol="blue", llwd=2)
+}
+}
+
+\keyword{hplot}
+\keyword{models}
+\keyword{regression}
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
new file mode 100644
index 0000000..8b44a44
--- /dev/null
+++ b/man/plotvgam.Rd
@@ -0,0 +1,143 @@
+\name{plotvgam}
+\alias{plotvgam}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Default VGAM Plotting }
+\description{
+ Component functions of a \code{\link{vgam-class}} object can be plotted
+ with \code{plotvgam()}. These are on the scale of the linear/additive
+ predictor.
+}
+\usage{
+plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
+ rugplot = TRUE, se = FALSE, scale = 0, raw = TRUE,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ type.residuals = c("deviance", "working", "pearson", "response"),
+ plot.arg = TRUE, which.term = NULL, which.cf = NULL,
+ control = plotvgam.control(...), ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{ A fitted \pkg{VGAM} object, e.g., produced by
+ \code{vgam()}, \code{vglm()}, or \code{rrvglm()}. }
+ \item{newdata}{ Data frame.
+ May be used to reconstruct the original data set. }
+ \item{y}{ Unused. }
+ \item{residuals}{ Logical. If \code{TRUE}, residuals are plotted. See
+ \code{type.residuals}. }
+\item{rugplot}{ Logical. If \code{TRUE}, a rug plot is plotted at the
+ foot of each plot. These values are jittered to expose ties.
+ }
+ \item{se}{
+ Logical. If \code{TRUE}, approximate \eqn{\pm 2}{+-2} pointwise
+ standard error bands are included in the plot.
+
+ }
+ \item{scale}{ Numerical. By default, each plot will have its own
+ y-axis scale. However, by specifying a value, each plot's y-axis
+ scale will be at least \code{scale} wide.
+ }
+ \item{raw}{ Logical. If \code{TRUE}, the smooth functions are those
+ obtained directly by the algorithm, and are plotted without
+ being premultiplied by the constraint matrices.
+ If \code{FALSE}, the smooth functions have been premultiplied by
+ the constraint matrices.
+ The \code{raw} argument is directly fed into \code{predict.vgam()}.
+ }
+ \item{offset.arg}{ Numerical vector of length \eqn{r}.
+ These are added to the component functions. Useful for
+ separating out the functions when \code{overlay} is \code{TRUE}.
+ If \code{overlay} is \code{TRUE} and there is one covariate, using the
+ intercept values as the offsets can be a good idea.
+ }
+ \item{deriv.arg}{ Numerical. The order of the derivative.
+ Should be assigned a small
+ integer such as 0, 1, 2. It applies only to \code{s()} terms;
+ the derivative of the function is plotted.
+ }
+ \item{overlay}{ Logical. If \code{TRUE}, component functions of the same
+ covariate are overlaid on each other.
+ The functions are centered, so \code{offset.arg} can be useful
+ when \code{overlay} is \code{TRUE}.
+ }
+ \item{type.residuals}{ If \code{residuals} is \code{TRUE}, the first
+ possible value
+ of this vector is used to specify the type of
+ residual. }
+ \item{plot.arg}{ Logical. If \code{FALSE}, no plot is produced. }
+ \item{which.term}{ Character or integer vector containing all
+ terms to be
+ plotted, e.g., \code{which.term=c("s(age)", "s(height)")} or
+ \code{which.term=c(2,5,9)}.
+ By default, all are plotted. }
+ \item{which.cf}{ An integer-valued vector specifying which
+ linear/additive predictors are to be plotted.
+ The values must be from the set \{1,2,\ldots,\eqn{r}\}.
+ By default, all are plotted.
+ }
+ \item{control}{ Other control parameters. See
+ \code{\link{plotvgam.control}}. }
+ \item{\dots}{ Other arguments that can be fed into
+ \code{\link{plotvgam.control}}. This includes line colors,
+ line widths, line types, etc.
+ }
+ In the above, \eqn{M} is the number of linear/additive
+ predictors, and \eqn{r} is the number of columns of the
+ constraint matrix of interest.
+}
+\details{
+ Many of \code{plotvgam()}'s options can be found in
+ \code{\link{plotvgam.control}}, e.g., line types, line widths,
+ colors.
+}
+\value{
+ The original object, but with the \code{preplot} slot of the object
+ assigned information regarding the plot.
+}
+\references{
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ While \code{plot(fit)} will work if \code{class(fit)}
+ is \code{"vgam"}, it is necessary to use \code{plotvgam(fit)}
+ explicitly otherwise.
+
+ \code{plotvgam()} is quite buggy at the moment.
+ \code{plotvgam()} works in a similar
+ manner to S-PLUS's \code{plot.gam()}; however, there are no
+ options for interactive construction of the plots yet.
+}
+
+\seealso{
+ \code{\link{vgam}},
+ \code{\link{plotvgam.control}},
+ \code{predict.vgam},
+ \code{\link{vglm}}.
+}
+\examples{
+data(coalminers)
+coalminers = transform(coalminers, Age = (age - 42) / 5)
+fit = vgam(cbind(nBnW,nBW,BnW,BW) ~ s(Age), binom2.or(zero=NULL), coalminers)
+\dontrun{
+par(mfrow=c(1,3))
+plot(fit, se=TRUE, ylim=c(-3,2), las=1)
+
+plot(fit, se=TRUE, which.cf=1:2, lcol="blue", scol="red", ylim=c(-3,2))
+plot(fit, se=TRUE, which.cf=1:2, lcol="blue", scol="red", overlay=TRUE)
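+
+# A sketch (illustrative only): the derivative of each s() term can be
+# plotted via deriv.arg, as described above
+plot(fit, deriv=1, se=TRUE)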
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
+\keyword{hplot}
diff --git a/man/plotvgam.control.Rd b/man/plotvgam.control.Rd
new file mode 100644
index 0000000..ffbb1d7
--- /dev/null
+++ b/man/plotvgam.control.Rd
@@ -0,0 +1,90 @@
+\name{plotvgam.control}
+\alias{plotvgam.control}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Control Function for plotvgam() }
+\description{
+ Provides default values for many arguments available for
+ \code{plotvgam()}.
+}
+\usage{
+plotvgam.control(which.cf = NULL,
+ xlim = NULL, ylim = NULL, llty = par()$lty,
+ slty = if (is.R()) "dashed" else 3, pcex = par()$cex,
+ pch = par()$pch, pcol = par()$col,
+ lcol = par()$col, rcol = par()$col,
+ scol = par()$col, llwd = par()$lwd, slwd = par()$lwd,
+ add.arg = FALSE, one.at.a.time = FALSE,
+ .include.dots = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{which.cf}{ Integer vector specifying which component
+ functions are to be plotted (for each covariate). Must
+ have values from the
+ set \{1,2,\ldots,\eqn{M}\}. }
+ \item{xlim}{ Range for the x-axis. }
+ \item{ylim}{ Range for the y-axis. }
+ \item{llty}{ Line type for the fitted functions (lines).
+ Fed into \code{par(lty)}. }
+ \item{slty}{ Line type for the standard error bands.
+ Fed into \code{par(lty)}. }
+ \item{pcex}{ Character expansion for the points (residuals).
+ Fed into \code{par(cex)}. }
+ \item{pch}{ Character used for the points (residuals).
+ Same as \code{par(pch)}. }
+ \item{pcol}{ Color of the points.
+ Fed into \code{par(col)}. }
+ \item{lcol}{ Color of the fitted functions (lines).
+ Fed into \code{par(col)}. }
+ \item{rcol}{ Color of the rug plot.
+ Fed into \code{par(col)}. }
+ \item{scol}{ Color of the standard error bands.
+ Fed into \code{par(col)}. }
+ \item{llwd}{ Line width of the fitted functions (lines).
+ Fed into \code{par(lwd)}. }
+ \item{slwd}{ Line width of the standard error bands.
+ Fed into \code{par(lwd)}. }
+ \item{add.arg}{ Logical. If \code{TRUE}, the plot will be added
+ to an existing plot, otherwise a new plot will be made. }
+ \item{one.at.a.time}{ Logical. If \code{TRUE}, the plots are done
+ one at a time, with the user having to hit the return key
+ between the plots. }
+ \item{.include.dots}{ Not to be used by the user. }
+ \item{\dots}{ Other arguments that may be fed into \code{par()}. }
+ In the above, \eqn{M} is the number of linear/additive predictors.
+}
+\details{
+ The most obvious features of \code{\link{plotvgam}} can be
+ controlled by the above arguments.
+}
+\value{
+ A list with values matching the arguments.
+}
+\references{
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{ This function enables \code{plotvgam()} to work in a similar
+ manner to S-PLUS's \code{plot.gam()}. However, there are no
+ interactive options yet.
+}
+
+\seealso{
+ \code{\link{plotvgam}}.
+}
+\examples{
+plotvgam.control(lcol=c("red", "blue"), scol="darkgreen", se=TRUE)
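+
+# Another sketch: the returned list of defaults is what plotvgam()
+# picks up through its control argument
+plotvgam.control(llwd=2, slty="dotted", one.at.a.time=TRUE)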
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
+\keyword{dplot}
diff --git a/man/pneumo.Rd b/man/pneumo.Rd
new file mode 100644
index 0000000..4d25e10
--- /dev/null
+++ b/man/pneumo.Rd
@@ -0,0 +1,40 @@
+\name{pneumo}
+\alias{pneumo}
+\non_function{}
+\title{Pneumoconiosis amongst a group of coalminers}
+\usage{data(pneumo)}
+\description{
+The \code{pneumo} data frame has 8 rows and 4 columns.
+Exposure time is explanatory, and there are 3 ordinal response variables.
+}
+\format{
+ This data frame contains the following columns:
+ \describe{
+ \item{exposure.time}{a numeric vector, in years}
+ \item{normal}{a numeric vector, counts}
+ \item{mild}{a numeric vector, counts}
+ \item{severe}{a numeric vector, counts}
+ }
+}
+\details{
+These were collected from coalface workers. In the original
+data set, the two most severe categories were combined.
+}
+\source{
+ Ashford, J. R. (1959)
+ An approach to the analysis of data for
+ semi-quantal responses in biological assay.
+ \emph{Biometrics}, \bold{15}, 573--581.
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\examples{
+data(pneumo)
+
+# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
+pneumo = transform(pneumo, let=log(exposure.time))
+vglm(cbind(normal,mild,severe) ~ let,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo)
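+
+# A minimal follow-up sketch: keep the fit so the coefficients can be viewed
+fit = vglm(cbind(normal,mild,severe) ~ let,
+           cumulative(parallel=TRUE, reverse=TRUE), pneumo)
+coef(fit, matrix=TRUE)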
+}
+\keyword{datasets}
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
new file mode 100644
index 0000000..f2c4231
--- /dev/null
+++ b/man/poissonff.Rd
@@ -0,0 +1,144 @@
+\name{poissonff}
+%\alias{poisson}
+\alias{poissonff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Poisson Family Function }
+\description{
+ Family function for a generalized linear model fitted to
+ Poisson responses.
+ The dispersion parameters may be known or unknown.
+
+}
+\usage{
+poissonff(link = "loge", dispersion = 1,
+ onedpar = FALSE, parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function. See \code{\link{Links}} for more choices.
+
+ }
+ \item{dispersion}{
+ Dispersion parameter. By default, maximum
+ likelihood is used to estimate the model because the dispersion
+ parameter is known to be unity. However, the user can specify
+ \code{dispersion = 0} to have it estimated, or
+ else specify a known positive value (or values if the response
+ is a matrix---one value per column).
+
+ }
+ \item{onedpar}{
+ One dispersion parameter? If the response is a matrix,
+ then a separate
+ dispersion parameter will be computed for each response (column), by default.
+ Setting \code{onedpar=TRUE} will pool them so that there is only
+ one dispersion parameter to be estimated.
+
+ }
+ \item{parallel}{
+ A logical or formula. Used only if the response is a matrix.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The values must be from the set
+ \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
+ matrix response.
+
+ }
+}
+\details{
+ \eqn{M} defined above is the number of linear/additive predictors.
+
+ If the dispersion parameter is unknown, then the resulting estimate
+ is not fully a maximum likelihood estimate.
+
+ A dispersion parameter that is less/greater than unity corresponds to
+ under-/over-dispersion relative to the Poisson model. Over-dispersion
+ is more common in practice.
+
+ When fitting a Quadratic RR-VGLM (see \code{\link{cqo}}), the
+ response is a matrix of \eqn{M}, say, columns (e.g., one column
+ per species). Then there will be \eqn{M} dispersion parameters
+ (one per column of the response matrix) if \code{dispersion=0} and
+ \code{onedpar=FALSE}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ and \code{\link{cao}}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+
+\author{ Thomas W. Yee }
+
+\note{
+ This function will handle a matrix response automatically.
+
+ The call \code{poissonff(dispersion=0, ...)} is equivalent to
+ \code{quasipoissonff(...)}. The latter was written so that R users
+ of \code{quasipoisson()} would only need to add a ``\code{ff}''
+ to the end of the family function name.
+
+ Regardless of whether the dispersion parameter is to be estimated or
+ not, its value can be seen in the output of \code{summary()} applied
+ to the object.
+
+% With the introduction of name spaces for the \pkg{VGAM} package,
+% \code{"ff"} can be dropped for this family function.
+
+}
+\section{Warning }{
+ With a multivariate response, assigning a known dispersion parameter
+ for \emph{each} response is not handled well yet. Currently, only
+ a single known dispersion parameter is handled well.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{quasipoissonff}},
+ \code{\link{zipoisson}},
+ \code{\link{loge}},
+ \code{\link{polf}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ \code{\link{cao}},
+ \code{\link{binomialff}},
+ \code{\link{quasibinomialff}},
+ \code{\link[stats]{poisson}}.
+}
+\examples{
+poissonff()
+
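+# A sketch of estimating the dispersion parameter (dispersion=0);
+# the simulated data here are illustrative only
+x = runif(nn <- 100)
+y = rpois(nn, lambda=exp(1 + 2*x))
+fit0 = vglm(y ~ x, poissonff(dispersion=0), trace=TRUE)
+summary(fit0) # The estimated dispersion parameter is reported here
+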
+n = 100
+x2 = rnorm(n)
+x3 = rnorm(n)
+x4 = rnorm(n)
+lv1 = 0 + x3 - 2*x4
+lambda1 = exp(3 - 0.5 * (lv1-0)^2)
+lambda2 = exp(2 - 0.5 * (lv1-1)^2)
+lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2)
+y1 = rpois(n, lambda1)
+y2 = rpois(n, lambda2)
+y3 = rpois(n, lambda3)
+p1 = cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, EqualTol=FALSE,
+ ITol=FALSE)
+summary(p1) # Three dispersion parameters are all unity
+\dontrun{
+lvplot(p1, y=TRUE, lcol=2:4, pch=2:4, pcol=2:4, rug=FALSE)
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/polf.Rd b/man/polf.Rd
new file mode 100644
index 0000000..0b0f414
--- /dev/null
+++ b/man/polf.Rd
@@ -0,0 +1,161 @@
+\name{polf}
+\alias{polf}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Poisson-Ordinal Link Function }
+\description{
+ Computes the Poisson-ordinal transformation, including its inverse
+ and the first two derivatives.
+
+}
+\usage{
+polf(theta, earg = stop("'earg' must be given"), inverse = FALSE,
+ deriv = 0, short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Extra argument for passing in additional information.
+ This must be a list with a component named \code{cutpoint}.
+ If \code{polf()} is used as the link function in
+ \code{\link{cumulative}} then one should choose
+ \code{reverse=TRUE, parallel=TRUE, intercept.apply=TRUE}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The Poisson-ordinal link function (POLF) can be applied to a
+ parameter lying in the unit interval.
+ Its purpose is to link cumulative probabilities associated with
+ an ordinal response coming from an underlying Poisson distribution.
+
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+ See \code{\link{Links}} for general information about \pkg{VGAM}
+ link functions.
+
+}
+\value{
+ See Yee (2006) for details.
+
+}
+\references{
+ Yee, T. W. (2006)
+ \emph{Link functions for ordinal count data},
+ (submitted for publication).
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical values of \code{theta} too close to 0 or 1 or out of range
+ result in large positive or negative values, or maybe 0 depending on
+ the arguments.
+ Although measures have been taken to handle cases where
+ \code{theta} is too close to 1 or 0,
+ numerical instabilities may still arise.
+
+ In terms of the threshold approach with cumulative probabilities for
+ an ordinal response this link function corresponds to the
+ Poisson distribution (see \code{\link{poissonff}}) that has been
+ recorded as an ordinal response using known cutpoints.
+
+}
+\section{Warning }{
+ Prediction may not work on \code{\link{vglm}} or
+ \code{\link{vgam}} etc. objects if this link function is used.
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{poissonff}},
+ \code{\link{nbolf}},
+ \code{\link{golf}},
+ \code{\link{cumulative}}.
+}
+\examples{
+polf("prob", short=FALSE)
+polf("prob", tag=TRUE)
+
+p = seq(0.01, 0.99, by=0.01)
+earg = list(cutpoint=2)
+y = polf(p, earg=earg)
+y. = polf(p, earg=earg, deriv=1)
+max(abs(polf(y, earg=earg, inv=TRUE) - p)) # Should be 0
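+
+# Sketch: by the convention of VGAM links, deriv=1 with inverse=TRUE
+# is assumed to return the reciprocal of the first derivative
+max(abs(y. * polf(p, earg=earg, deriv=1, inverse=TRUE) - 1)) # Should be 0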
+
+\dontrun{
+par(mfrow=c(2,1), las=1)
+plot(p, y, type="l", col="blue", main="polf()")
+abline(h=0, v=0.5, col="red", lty="dashed")
+
+plot(p, y., type="l", col="blue",
+ main="(Reciprocal of) first POLF derivative")
+}
+
+
+# Rutherford and Geiger data
+ruge = c(57,203,383,525,532,408,273,139,45,27,10,4,0,1,1)
+y = 0:14
+yy = rep(y, times=ruge)
+length(yy) # 2608 1/8-minute intervals
+cutpoint = 5
+yy01 = ifelse(yy <= cutpoint, 0, 1)
+earg = list(cutpoint=cutpoint)
+fit = vglm(yy01 ~ 1, binomialff(link="polf", earg=earg))
+coef(fit, matrix=TRUE)
+exp(coef(fit))
+
+
+# Another example
+nn = 1000
+x2 = sort(runif(nn))
+x3 = runif(nn)
+mymu = exp( 3 + 1 * x2 - 2 * x3)
+y1 = rpois(nn, lambda=mymu)
+cutpoints = c(-Inf, 10, 20, Inf)
+cuty = Cut(y1, breaks=cutpoints)
+\dontrun{
+plot(x2, x3, col=cuty, pch=as.character(cuty))
+}
+table(cuty) / sum(table(cuty))
+fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="polf",
+ reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
+ mv=TRUE, earg=list(cutpoint=cutpoints[2:3])),
+ trace=TRUE)
+fit@y[1:5,]
+fitted(fit)[1:5,]
+predict(fit)[1:5,]
+coef(fit)
+coef(fit, matrix=TRUE)
+constraints(fit)
+fit@misc$earg
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/polonoUC.Rd b/man/polonoUC.Rd
new file mode 100644
index 0000000..8f03e05
--- /dev/null
+++ b/man/polonoUC.Rd
@@ -0,0 +1,87 @@
+\name{Polono}
+\alias{Polono}
+\alias{dpolono}
+%\alias{ppolono}
+%\alias{qpolono}
+\alias{rpolono}
+\title{The Poisson Lognormal Distribution}
+\description{
+ Density, and random
+ generation for the Poisson lognormal distribution.
+
+}
+\usage{
+dpolono(x, meanlog=0, sdlog=1, ...)
+rpolono(n, meanlog=0, sdlog=1)
+}
+\arguments{
+ \item{x}{vector of quantiles.}
+% \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{meanlog, sdlog }{
+ the mean and standard deviation of the normal distribution (on the
+ log scale).
+ They match the arguments in
+ \code{\link[stats:Lognormal]{Lognormal}}.
+
+ }
+ \item{...}{
+ Arguments passed into
+ \code{\link[stats]{integrate}}.
+
+ }
+}
+\value{
+ \code{dpolono} gives the density, and
+% \code{ppolono} gives the distribution function, and
+% \code{qpolono} gives the quantile function, and
+ \code{rpolono} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ The Poisson lognormal distribution is similar to the negative
+ binomial in that it can be motivated by a Poisson distribution whose
+ mean parameter comes from a right skewed distribution (gamma for the
+ negative binomial and lognormal for the Poisson lognormal distribution).
+
+
+% See zz code{link{polonozz}}, the \pkg{VGAM} family function
+% for estimating the parameters,
+% for the formula of the probability density function and other details.
+
+}
+\note{
+ \code{dpolono} involves numerical integration that is performed using
+ \code{\link[stats]{integrate}}. Consequently, computations may be very
+ slow. Also, numerical problems may occur, and if so, then the use of
+ \code{...} may be needed.
+
+ A \pkg{VGAM} family function (called \code{polono}, say) for maximum
+ likelihood estimation of the two parameters has not been written yet.
+
+}
+\seealso{
+ \code{\link{lognormal}},
+ \code{\link{poissonff}},
+ \code{\link{negbinomial}}.
+
+}
+\examples{
+\dontrun{
+meanlog = 0.5; sdlog = 0.5
+y = 0:19
+proby = dpolono(y, m=meanlog, sd=sdlog)
+plot(y, proby, type="h", col="blue", las=1, ylab="P[Y=y]", log="",
+ main=paste("Poisson lognormal(meanlog=",meanlog,", sdlog=",sdlog,")",
+ sep=""))
+sum(proby) # Should be 1
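+
+# Sketch: arguments in ... are passed to integrate(), e.g., its
+# rel.tol argument (a looser tolerance, for illustration only)
+proby2 = dpolono(y, meanlog=meanlog, sdlog=sdlog, rel.tol=1e-6)
+max(abs(proby2 - proby)) # Should be tiny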
+
+y = rpolono(n=1000, m=meanlog, sd=sdlog)
+table(y)
+hist(y, breaks=((-1):max(y))+0.5, prob=TRUE)
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
new file mode 100644
index 0000000..69217a0
--- /dev/null
+++ b/man/posbinomUC.Rd
@@ -0,0 +1,96 @@
+\name{Posbinom}
+\alias{Posbinom}
+\alias{dposbinom}
+\alias{pposbinom}
+\alias{qposbinom}
+\alias{rposbinom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive-Binomial Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the positive-binomial distribution.
+
+}
+\usage{
+dposbinom(x, size, prob, log = FALSE)
+pposbinom(q, size, prob, lower.tail = TRUE, log.p = FALSE)
+qposbinom(p, size, prob, lower.tail = TRUE, log.p = FALSE)
+rposbinom(n, size, prob)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{size}{number of trials. It is the \eqn{N} symbol in the formula
+ given in \code{\link{posbinomial}}. }
+ \item{prob}{probability of success on each trial. }
+ \item{log, log.p, lower.tail}{ Arguments that are passed on to
+ \code{\link[stats:Binomial]{pbinom}} etc.}
+}
+\details{
+ The positive-binomial distribution is a binomial distribution but with
+ the probability of a zero being zero. The other probabilities are scaled
+ to add to unity.
+ The mean therefore is
+ \deqn{N \mu / (1-(1-\mu)^N)}{%
+ N * mu / (1-(1-mu)^N)}
+ where \eqn{\mu}{mu} is the argument \code{prob} above.
+ As \eqn{\mu}{mu} increases, the positive-binomial and binomial
+ distributions become more similar.
+ Unlike similar functions for the binomial distribution, a zero value
+ of \code{prob} is not permitted here.
+
+}
+\value{
+ \code{dposbinom} gives the density,
+ \code{pposbinom} gives the distribution function,
+ \code{qposbinom} gives the quantile function, and
+ \code{rposbinom} generates random deviates.
+}
+%\references{
+%None.
+%}
+
+\author{ T. W. Yee }
+\note{
+ For \code{rposbinom}, the arguments of the function are fed into
+ \code{\link[stats:Binomial]{rbinom}} until \eqn{n} positive values
+ are obtained. This may take a long time if \code{prob} has values
+ close to 0.
+
+ The family function \code{\link{posbinomial}} estimates the parameters
+ by maximum likelihood estimation.
+
+}
+
+\seealso{
+ \code{\link{posbinomial}},
+ \code{\link[stats:Binomial]{rbinom}}.
+}
+\examples{
+prob = 0.2
+size = 10
+y = rposbinom(n=1000, size, prob)
+table(y)
+mean(y) # Sample mean
+size * prob / (1-(1-prob)^size) # Population mean
+
+(i = dposbinom(0:size, size, prob))
+cumsum(i) - pposbinom(0:size, size, prob) # Should be 0s
+table(rposbinom(100, size, prob))
+
+table(qposbinom(runif(1000), size, prob))
+round(dposbinom(1:10, size, prob) * 1000) # Should be similar
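+
+# A small arithmetic check of the mean formula given above
+sum((1:size) * dposbinom(1:size, size, prob)) # The population mean again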
+
+\dontrun{
+x = 0:size
+plot(x, dposbinom(x, size, prob), type="h", ylab="Probability",
+ main=paste("Positive-binomial(", size, ",", prob, ") (blue) vs",
+ " Binomial(", size, ",", prob, ") (red & shifted slightly)", sep=""),
+ lwd=2, col="blue", las=1)
+lines(x+0.05, dbinom(x, size, prob), type="h", lwd=2, col="red")
+}
+}
+\keyword{distribution}
+
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
new file mode 100644
index 0000000..4456e9b
--- /dev/null
+++ b/man/posbinomial.Rd
@@ -0,0 +1,79 @@
+\name{posbinomial}
+\alias{posbinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Binomial Distribution Family Function }
+\description{
+ Fits a positive binomial distribution.
+}
+\usage{
+posbinomial(link = "logit")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function for the usual probability parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+}
+\details{
+ The positive binomial distribution is the ordinary binomial distribution
+ but with the probability of zero being zero.
+ Thus the other probabilities are scaled up (i.e., divided by 1-P(Y=0)).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+Patil, G. P. (1962)
+Maximum likelihood estimation for
+generalised power series distributions and its application to a
+truncated binomial distribution.
+\emph{Biometrika},
+\bold{49}, 227--237.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+The input for this family function is the same as
+\code{\link{binomialff}}.
+
+Yet to be done: a \code{quasi.posbinomial} which estimates a dispersion
+parameter.
+
+}
+
+\section{Warning }{
+ Under- or over-flow may occur if the data is ill-conditioned.
+}
+\seealso{
+ \code{\link{binomialff}}.
+}
+
+\examples{
+# Number of albinotic children in families with 5 children
+# Data from Patil (1962)
+y = c(rep(1,25), rep(2,23), rep(3,10), 4, 5) # No zeros
+n = rep(5, 60)
+yprop = y / 5
+# Fit the same model in two ways; the MLE of p is 0.3088
+fit = vglm(yprop ~ 1, posbinomial, trace=TRUE, weights=n)
+fit2 = vglm(cbind(y, n-y) ~ 1, posbinomial, trace=TRUE)
+summary(fit)
+summary(fit2)
+Coef(fit2) # = MLE of p
+Coef(fit) # = MLE of p
+fitted(fit2)[1:2]
+fitted(fit)[1:2]
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
new file mode 100644
index 0000000..daf5047
--- /dev/null
+++ b/man/posnegbinomial.Rd
@@ -0,0 +1,156 @@
+\name{posnegbinomial}
+\alias{posnegbinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Negative Binomial Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the two parameters of a positive
+ negative binomial distribution.
+
+}
+\usage{
+posnegbinomial(lmunb = "loge", lk = "loge", ik = NULL,
+ zero = -2, cutoff = 0.995, method.init=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmunb}{
+ Link function applied to the \code{munb} parameter, which is the mean
+ \eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lk}{
+ Parameter link function applied to the index parameter,
+ called \code{k} (the value \code{1/k} is a dispersion parameter).
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ik}{
+ Optional initial value for \code{k}, an index parameter.
+ The value \code{1/k} is known as a dispersion parameter.
+ If failure to converge occurs try different values (and/or use
+ \code{method.init}).
+ If necessary this vector is recycled to length equal to the number
+ of responses.
+ A value \code{NULL} means an initial value for each response is
+ computed internally using a range of values.
+
+ }
+ \item{zero}{
+ Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if used
+ at all. Specifies which of the two linear/additive predictors are
+ modelled as an intercept only. By default, the \code{k} parameter
+ (after \code{lk} is applied) is modelled as a single unknown
+ number that is estimated. It can be modelled as a function of
+ the explanatory variables by setting \code{zero=NULL}. A negative
+ value means that the value is recycled, so setting \eqn{-2} means
+ all \code{k} are intercept only.
+
+ }
+ \item{cutoff}{
+ A numeric which is close to 1 but never exactly 1. Used to
+ specify how many terms of the infinite series are actually used.
+ The probabilities are summed until their total reaches this value
+ or more. It is like specifying \code{p} in an imaginary function
+ \code{qnegbin(p)}.
+
+ }
+ \item{method.init}{
+ See \code{\link{negbinomial}}.
+
+ }
+}
+\details{
+ The positive negative binomial distribution is an ordinary negative
+ binomial distribution but with the probability of a zero response
+ being zero. The other probabilities are scaled to sum to unity.
+
+ This family function is based on \code{\link{negbinomial}} and most
+ details can be found there. To avoid confusion, the parameter
+ \code{munb} here corresponds to the mean of an ordinary negative
+ binomial distribution \code{\link{negbinomial}}. The mean of
+ \code{posnegbinomial} is
+ \deqn{\mu_{nb} / (1-p(0))}{%
+ munb / (1-p(0))}
+ where \eqn{p(0) = (k/(k + \mu_{nb}))^k}{p(0) = (k/(k + munb))^k} is the
+ probability an ordinary negative binomial distribution has a zero value.
+
+ The parameters \code{munb} and \code{k} are not independent in the
+ positive negative binomial distribution, whereas they are in the
+ ordinary negative binomial distribution.
+
+ This function handles \emph{multivariate} responses, so that a matrix
+ can be used as the response. The number of columns is the number
+ of species, say, and setting \code{zero=-2} means that \emph{all}
+ species have a \code{k} equalling a (different) intercept only.
+
+}
+\section{Warning}{
+ The Poisson model corresponds to \code{k} equalling infinity.
+ If the data is Poisson or close to Poisson, numerical problems may
+ occur. Possibly a loglog link could be added in the future to help
+ handle this problem.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+}
+\references{
+Barry, S. C. and Welsh, A. H. (2002)
+Generalized additive modelling and zero inflated count data.
+\emph{Ecological Modelling},
+\bold{157},
+179--188.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ This family function can handle a multivariate response.
+
+}
+
+\seealso{
+ \code{\link{rposnegbin}},
+ \code{\link{pospoisson}},
+ \code{\link{negbinomial}},
+ \code{\link{zanegbinomial}},
+% \code{\link[MASS]{rnegbin}}.
+ \code{\link[stats:NegBinomial]{rnbinom}}.
+}
+
+\examples{
+\dontrun{
+x = runif(nn <- 2000)
+y1 = rnbinom(nn, mu=exp(0+2*x), size=exp(1)) # k is size in rnbinom()
+y2 = rnbinom(nn, mu=exp(1+2*x), size=exp(3))
+fit = vglm(cbind(y1,y2) ~ 1, posnegbinomial, subset=(y1>0)&(y2>0),
+ trace=TRUE)
+coef(fit, matrix=TRUE)
+dim(fit@y)
+fitted(fit)[1:5,]
+predict(fit)[1:5,]
+
+
+# Another artificial data example
+munb = exp(2); k = exp(3); n = 1000
+y = rposnegbin(n, munb=munb, k=k)
+table(y)
+fit = vglm(y ~ 1, posnegbinomial, trace=TRUE)
+coef(fit, matrix=TRUE)
+mean(y) # Sample mean
+munb / (1 - (k/(k+munb))^k) # Population mean
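+# A check of p(0) in the denominator against dnbinom()
+c((k/(k+munb))^k, dnbinom(0, mu=munb, size=k)) # Should be equal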
+fitted(fit)[1:5,]
+predict(fit)[1:5,]
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
+%library(MASS)
+%detach() # detach MASS
+%y1 = rnegbin(nn, mu=exp(0+2*x), theta=exp(1)) # k is theta in rnegbin()
+%y2 = rnegbin(nn, mu=exp(1+2*x), theta=exp(3))
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
new file mode 100644
index 0000000..ca5f1a3
--- /dev/null
+++ b/man/posnormUC.Rd
@@ -0,0 +1,64 @@
+\name{Posnorm}
+\alias{Posnorm}
+\alias{dposnorm}
+\alias{pposnorm}
+\alias{qposnorm}
+\alias{rposnorm}
+\title{The Positive-Normal Distribution}
+\description{
+ Density, distribution function, quantile function and random
+ generation for the univariate positive-normal distribution.
+
+}
+\usage{
+dposnorm(x, mean=0, sd=1)
+pposnorm(q, mean=0, sd=1)
+qposnorm(p, mean=0, sd=1)
+rposnorm(n, mean=0, sd=1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{mean, sd}{ see \code{\link[stats]{rnorm}}. }
+}
+\value{
+ \code{dposnorm} gives the density,
+ \code{pposnorm} gives the distribution function,
+ \code{qposnorm} gives the quantile function, and
+ \code{rposnorm} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{posnormal1}}, the \pkg{VGAM} family function
+ for estimating the parameters,
+ for the formula of the probability density function and other details.
+}
+\note{
+ \code{rposnorm()} may run very slowly if the mean is very negative.
+}
+\seealso{
+ \code{\link{posnormal1}}.
+}
+\examples{
+\dontrun{
+m = 0.8
+x = seq(-1, 4, len=501)
+plot(x, dposnorm(x, m=m), type="l", ylim=0:1, las=1,
+ ylab=paste("posnorm(m=",m,", sd=1)"), col="blue",
+ main="Blue is density, red is cumulative distribution function",
+ sub="Purple lines are the 10,20,...,90 percentiles")
+lines(x, pposnorm(x, m=m), col="red")
+abline(h=0)
+probs = seq(0.1, 0.9, by=0.1)
+Q = qposnorm(probs, m=m)
+lines(Q, dposnorm(Q, m=m), col="purple", lty=3, type="h")
+lines(Q, pposnorm(Q, m=m), col="purple", lty=3, type="h")
+abline(h=probs, col="purple", lty=3)
+pposnorm(Q, m=m) - probs # Should be all 0
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/posnormal1.Rd b/man/posnormal1.Rd
new file mode 100644
index 0000000..4b289d6
--- /dev/null
+++ b/man/posnormal1.Rd
@@ -0,0 +1,102 @@
+\name{posnormal1}
+\alias{posnormal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Normal Distribution Family Function }
+\description{
+ Fits a positive (univariate) normal distribution.
+}
+\usage{
+posnormal1(lmean="identity", lsd="loge",
+ imean=NULL, isd=NULL, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmean, lsd}{
+ Link functions for the mean and standard
+ deviation parameters of the usual univariate normal distribution.
+ They are \eqn{\mu}{mu} and \eqn{\sigma}{sigma} respectively.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{imean, isd}{
+ Optional initial values for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
+ A \code{NULL} means a value is computed internally.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2\} corresponding
+ respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}.
+ If \code{zero=NULL} then all linear/additive predictors are modelled as
+ a linear combination of the explanatory variables.
+ For many data sets having \code{zero=2} is a good idea.
+
+ }
+}
+\details{
+ The positive normal distribution is the ordinary normal distribution
+ but with the probability of zero or less being zero.
+ The rest of the probability density function is scaled up. Hence
+ the probability density function can be written
+ \deqn{f(y) = \frac{1}{\sqrt{2\pi} \sigma} \exp\left( -\frac12
+ (y-\mu)^2 / \sigma^2 \right) /
+ \left[ 1-\Phi(-\mu/ \sigma) \right]}{%
+ f(y) = (1/(sqrt(2*pi)*sigma)) * exp( -0.5 * (y-mu)^2/ sigma^2) /
+ [1-Phi(-mu/ sigma)] }
+ where \eqn{\Phi()}{Phi} is the cumulative distribution function of a
+ standard normal (\code{\link[stats]{pnorm}}).
+ Equivalently, this is
+ \deqn{f(y) = \frac{1}{\sigma} \frac{\phi((y-\mu) / \sigma)}{
+ 1-\Phi(-\mu/ \sigma)}.}{%
+ f(y) = (1/sigma) * dnorm((y-mu)/sigma) / [1-pnorm(-mu/ sigma)].}
+ where \eqn{\phi()}{dnorm()} is the probability density function of a
+ standard normal distribution (\code{\link[stats]{dnorm}}).
+
+ The mean of \eqn{Y} is
+ \deqn{E(Y) = \mu + \sigma \frac{\phi(-\mu/ \sigma)}{
+ 1-\Phi(-\mu/ \sigma)}. }{%
+ E(Y) = mu + sigma * dnorm(-mu/sigma) / [1-pnorm(-mu/sigma)]. }
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+
+ Documentation accompanying the \pkg{VGAM} package at
+ \url{http://www.stat.auckland.ac.nz/~yee}
+ contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response variable for this family function is the same as
+ \code{\link{normal1}} except positive values are required.
+ Reasonably good initial values are needed.
+ Fisher scoring is implemented.
+
+}
+
+\section{Warning }{
+ Under- or over-flow may occur if the data is ill-conditioned.
+
+}
+\seealso{
+ \code{\link{normal1}}.
+}
+
+\examples{
+m = 1.0; SD = exp(1.0)
+y = rposnorm(n <- 1000, m=m, sd=SD)
+\dontrun{hist(y, prob=TRUE, main=paste("posnorm(m=",m,", sd=",round(SD,2),")"))}
+fit = vglm(y ~ 1, fam=posnormal1, trace=TRUE)
+coef(fit, mat=TRUE)
+(Cfit = Coef(fit))
+mygrid = seq(min(y), max(y), len=200) # Add the fit to the histogram
+\dontrun{lines(mygrid, dposnorm(mygrid, Cfit[1], Cfit[2]), col="red")}
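+
+# A small check of the mean formula given in the details above
+mean(y) # Sample mean
+m + SD * dnorm(-m/SD) / (1 - pnorm(-m/SD)) # Population mean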
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
new file mode 100644
index 0000000..cba8c51
--- /dev/null
+++ b/man/pospoisUC.Rd
@@ -0,0 +1,92 @@
+\name{Pospois}
+\alias{Pospois}
+\alias{dpospois}
+\alias{ppospois}
+\alias{qpospois}
+\alias{rpospois}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive-Poisson Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the positive-Poisson distribution.
+
+}
+\usage{
+dpospois(x, lambda)
+ppospois(q, lambda)
+qpospois(p, lambda)
+rpospois(n, lambda)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{lambda}{ vector of positive means (of an ordinary Poisson distribution).
+ Short vectors are recycled. }
+}
+\details{
+ The positive-Poisson distribution is a Poisson distribution but with
+ the probability of a zero being zero. The other probabilities are scaled
+ to add to unity.
+ The mean therefore is
+ \deqn{\lambda / (1-\exp(-\lambda)).}{%
+ lambda / (1-exp(-lambda)).}
+ As \eqn{\lambda}{lambda} increases, the positive-Poisson and Poisson
+ distributions become more similar.
+ Unlike similar functions for the Poisson distribution, a zero value
+ of \code{lambda} is not permitted here.
+
+}
+\value{
+ \code{dpospois} gives the density,
+ \code{ppospois} gives the distribution function,
+ \code{qpospois} gives the quantile function, and
+ \code{rpospois} generates random deviates.
+}
+%\references{
+%None.
+%}
+
+\author{ T. W. Yee }
+\note{
+ For \code{rpospois}, the arguments of the function are fed into
+ \code{\link[stats:Poisson]{rpois}} until \eqn{n} positive values
+ are obtained. This may take a long time if \code{lambda} has values
+ close to 0.
+
+ The family function \code{\link{pospoisson}} estimates
+ \eqn{\lambda}{lambda} by maximum likelihood estimation.
+
+}
+
+\seealso{
+ \code{\link{pospoisson}},
+ \code{\link{zapoisson}},
+ \code{\link[stats:Poisson]{rpois}}.
+}
+\examples{
+lambda = 2
+y = rpospois(n=1000, lambda)
+table(y)
+mean(y) # Sample mean
+lambda / (1-exp(-lambda)) # Population mean
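+# A check of the mean via the density (series truncated at 100)
+sum((1:100) * dpospois(1:100, lambda)) # Should match the population mean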
+
+(i = dpospois(0:7, lambda))
+cumsum(i) - ppospois(0:7, lambda) # Should be 0s
+table(rpospois(100, lambda))
+
+table(qpospois(runif(1000), lambda))
+round(dpospois(1:10, lambda) * 1000) # Should be similar
+
+\dontrun{
+x = 0:7
+plot(x, dpospois(x, lambda), type="h", ylab="Probability",
+ main=paste("Positive Poisson(", lambda, ") (blue) vs",
+ " Poisson(", lambda, ") (red & shifted slightly)", sep=""),
+ lwd=2, col="blue", las=1)
+lines(x+0.05, dpois(x, lambda), type="h", lwd=2, col="red")
+}
+}
+\keyword{distribution}
+
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
new file mode 100644
index 0000000..9b06334
--- /dev/null
+++ b/man/pospoisson.Rd
@@ -0,0 +1,84 @@
+\name{pospoisson}
+\alias{pospoisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Poisson Distribution Family Function }
+\description{
+ Fits a positive Poisson distribution.
+}
+\usage{
+pospoisson(link = "loge")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function for the usual mean (lambda) parameter of
+ an ordinary Poisson distribution.
+ See \code{\link{Links}} for more choices.
+
+ }
+}
+\details{
+ The positive Poisson distribution is the ordinary Poisson
+ distribution but with the probability of zero being zero. Thus the
+ other probabilities are scaled up (i.e., divided by \eqn{1-P[Y=0]}).
+ The mean, \eqn{\lambda / (1 - \exp(-\lambda))}{lambda/(1-exp(-lambda))},
+ can be obtained by the extractor function \code{fitted} applied to
+ the object.
+
+ A related distribution is the zero-inflated Poisson, in which the
+ probability \eqn{P[Y=0]} involves another parameter \eqn{\phi}{phi}.
+ See \code{\link{zipoisson}}.
+
+}
+\section{Warning }{
+ Under- or over-flow may occur if the data is ill-conditioned.
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+}
+\references{
+Coleman, J. S. and James, J. (1961)
+The equilibrium size distribution of freely-forming groups.
+\emph{Sociometry}, \bold{24}, 36--45.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+}
+\author{ Thomas W. Yee }
+\note{
+ Yet to be done: a \code{quasi.pospoisson} which estimates a dispersion
+ parameter.
+
+ This family function can handle a multivariate response.
+
+}
+\seealso{
+\code{\link{Pospois}},
+\code{\link{posnegbinomial}},
+\code{\link{poissonff}},
+\code{\link{zipoisson}}.
+}
+\examples{
+y = 1:6
+w = c(1486, 694, 195, 37, 10, 1) # Data from Coleman and James (1961)
+fit = vglm(y ~ 1, pospoisson, weights=w)
+Coef(fit)
+summary(fit)
+fitted(fit)
+
+# Artificial data
+x = runif(n <- 1000)
+lambda = exp(2 + 3*x)
+y = rpospois(n, lambda)
+table(y)
+fit = vglm(y ~ x, pospoisson, trace=TRUE, crit="c")
+coef(fit, matrix=TRUE)
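+
+# Sketch: fitted() returns lambda/(1-exp(-lambda)), so these columns
+# should be roughly equal (lambda is the true value used above)
+cbind(fitted(fit), lambda/(1-exp(-lambda)))[1:5,]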
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/predict.vglm.Rd b/man/predict.vglm.Rd
new file mode 100644
index 0000000..a38f17c
--- /dev/null
+++ b/man/predict.vglm.Rd
@@ -0,0 +1,139 @@
+\name{predict.vglm}
+\alias{predict.vglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Predict Method for a VGLM fit}
+\description{
+ Predicted values based on a vector generalized linear model (VGLM)
+ object.
+}
+\usage{
+predict.vglm(object, newdata = NULL,
+ type = c("link", "response", "terms"),
+ se.fit = FALSE, deriv = 0, dispersion = NULL,
+             untransform=FALSE, extra = object@extra, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ Object of class inheriting from \code{"vlm"}.
+
+ }
+ \item{newdata}{
+ An optional data frame in which to look for variables with which
+ to predict. If omitted, the fitted linear predictors are used.
+
+ }
+ \item{type}{
+ the type of prediction required. The default is the first one,
+ meaning on the scale of the linear predictors. The alternative
+ \code{"response"} is on the scale of the response variable, and
+ depending on the family function, this may or may not be the mean.
+ The \code{"terms"} option returns a matrix giving the fitted values
+ of each term in the model formula on the linear predictor scale.
+
+ The value of this argument can be abbreviated.
+
+ }
+ \item{se.fit}{
+ logical: return standard errors?
+
+ }
+ \item{deriv}{
+ Non-negative integer. Currently this must be zero.
+ Later, this may be implemented for general values.
+
+ }
+ \item{dispersion}{
+ Dispersion parameter.
+ This may be inputted at this stage, but the default is to use
+ the dispersion parameter of the fitted model.
+
+ }
+ \item{extra}{
+ A list containing extra information.
+ This argument should be ignored.
+
+ }
+ \item{untransform}{
+ Logical. Reverses any parameter link function.
+ This argument only works if \code{type="link", se.fit=FALSE, deriv=0}.
+
+ }
+ \item{\dots}{Arguments passed into \code{predict.vlm}.
+ }
+}
+\details{
+ Obtains predictions and optionally estimates standard errors
+ of those
+ predictions from a fitted vector generalized linear model
+ (VGLM) object.
+
+ This code implements \emph{smart prediction}
+ (see \code{\link{smartpred}}).
+}
+\value{
+ If \code{se.fit = FALSE}, a vector or matrix of predictions.
+ If \code{se.fit = TRUE}, a list with components
+ \item{fitted.values}{Predictions}
+ \item{se.fit}{Estimated standard errors}
+ \item{df}{Degrees of freedom}
+ \item{sigma}{The square root of the dispersion parameter}
+
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Setting \code{se.fit=TRUE} and \code{type="response"}
+ will generate an error.
+
+}
+
+\section{Warning }{
+ This function may change in the future.
+}
+
+\seealso{
+ \code{\link[stats]{predict}},
+ \code{\link{vglm}},
+ \code{predict.vlm},
+ \code{\link{smartpred}}.
+}
+
+\examples{
+# Illustrates smart prediction
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
+ fam=cumulative(parallel=TRUE),
+ data=pneumo, trace=TRUE, x=FALSE)
+class(fit)
+
+(q0 = predict(fit)[1:3,])
+(q1 = predict(fit, newdata=pneumo)[1:3,])
+(q2 = predict(fit, newdata=pneumo[1:3,]))
+all.equal(q0, q1) # Should be TRUE
+all.equal(q1, q2) # Should be TRUE
+
+predict(fit)[1:3,]
+predict(fit, untransform=TRUE)[1:3,]
+
+p0 = predict(fit, type="res")[1:3,]
+p1 = predict(fit, type="res", newdata=pneumo)[1:3,]
+p2 = predict(fit, type="res", newdata=pneumo[1:3,])
+p3 = fitted(fit)[1:3,]
+all.equal(p0, p1) # Should be TRUE
+all.equal(p1, p2) # Should be TRUE
+all.equal(p2, p3) # Should be TRUE
+
+predict(fit, type="t", se=TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
new file mode 100644
index 0000000..af36476
--- /dev/null
+++ b/man/prentice74.Rd
@@ -0,0 +1,108 @@
+\name{prentice74}
+\alias{prentice74}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Prentice (1974) Log-gamma Distribution }
+\description{
+ Estimation of a 3-parameter log-gamma distribution described by
+ Prentice (1974).
+}
+\usage{
+prentice74(llocation="identity", lscale="loge", lshape="identity",
+ ilocation=NULL, iscale=NULL, ishape=NULL, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{llocation}{
+ Parameter link function applied to the
+ location parameter \eqn{a}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lscale}{
+ Parameter link function applied to the
+ positive scale parameter \eqn{b}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lshape}{
+ Parameter link function applied to
+ the shape parameter \eqn{q}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ilocation, iscale}{
+ Initial value for \eqn{a} and \eqn{b}, respectively.
+ The defaults mean an initial value is determined internally for each.
+
+ }
+ \item{ishape}{
+ Initial value for \eqn{q}.
+ If failure to converge occurs, try some other value.
+ The default means an initial value is determined internally.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,3\}.
+ The default value means none are modelled as intercept-only terms.
+
+ }
+}
+\details{
+ The probability density function is given by
+ \deqn{f(y;a,b,q) = |q| \exp(w/q^2 - e^w) / (b \Gamma(1/q^2)),}{%
+ f(y;a,b,q) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)),}
+for shape parameter \eqn{q \ne 0}{q != 0},
+positive scale parameter \eqn{b > 0},
+location parameter \eqn{a},
+and all real \eqn{y}.
+Here, \eqn{w = (y-a)q/b+\psi(1/q^2)}{w = (y-a)*q/b+psi(1/q^2)}
+where \eqn{\psi}{psi} is the digamma function.
+The mean of \eqn{Y} is \eqn{a} (returned as the fitted values).
+This is a different parameterization compared to \code{\link{lgamma3ff}}.
+
+Special cases:
+\eqn{q=0} is the normal distribution with standard deviation \eqn{b},
+\eqn{q=-1} is the extreme value distribution for maxima,
+\eqn{q=1} is the extreme value distribution for minima (Weibull).
+If \eqn{q>0} the distribution is left skew;
+if \eqn{q<0} it is right skew.
+
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Prentice, R. L. (1974)
+A log gamma model and its maximum likelihood estimation.
+\emph{Biometrika}, \bold{61}, 539--544.
+}
+\section{Warning }{
+The special case \eqn{q=0} is not handled, therefore
+estimates of \eqn{q} too close to zero may cause numerical problems.
+}
+\author{ T. W. Yee }
+\note{
+The notation used here differs from Prentice (1974):
+\eqn{\alpha=a}{alpha=a},
+\eqn{\sigma=b}{sigma=b}.
+Fisher scoring is used.
+}
+\seealso{
+\code{\link{lgamma3ff}},
+\code{\link[base:Special]{lgamma}}.
+}
+\examples{
+x = runif(n <- 5000)
+loc = -1 + 2*x
+Scale = exp(1+x)
+y = rlgamma(n, loc=loc, scale=Scale, k=1)
+fit = vglm(y ~ x, prentice74(zero=3), trace=TRUE)
+coef(fit, matrix=TRUE) # Note the coefficients for location
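+
+# Sketch: the fitted values are the location parameter a, so they
+# should track the true loc used in the simulation
+max(abs(fitted(fit) - loc)) # Should be small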
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/probit.Rd b/man/probit.Rd
new file mode 100644
index 0000000..782a874
--- /dev/null
+++ b/man/probit.Rd
@@ -0,0 +1,116 @@
+\name{probit}
+\alias{probit}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Probit Link Function }
+\description{
+ Computes the probit transformation, including its inverse and the
+ first two derivatives.
+}
+\usage{
+probit(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by 1 minus the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a \code{\link{vglmff-class}}
+ object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The probit link function is commonly used for parameters that
+ lie in the unit interval.
+ Numerical values of \code{theta} close to 0 or 1 or out of range
+ result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+ The arguments \code{short} and \code{tag} are used only if
+ \code{theta} is character.
+
+}
+\value{
+ For \code{deriv = 0}, the probit of \code{theta}, i.e.,
+ \code{qnorm(theta)} when \code{inverse = FALSE}, and if \code{inverse =
+ TRUE} then \code{pnorm(theta)}.
+
+ For \code{deriv = 1}, the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is close to 1 or 0.
+ One way of overcoming this is to use \code{earg}.
+
+ In terms of the threshold approach with cumulative probabilities for
+ an ordinal response this link function corresponds to the univariate
+ normal distribution (see \code{\link{normal1}}).
+
+}
+\seealso{
+ \code{\link{Links}},
+ \code{\link{logit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}}.
+}
+\examples{
+p = seq(0.01, 0.99, by=0.01)
+probit(p)
+max(abs(probit(probit(p), inverse=TRUE) - p)) # Should be 0
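+
+# As stated under Value: deriv=1 with inverse=TRUE gives the reciprocal
+max(abs(probit(p, deriv=1) * probit(p, deriv=1, inverse=TRUE) - 1)) # Should be 0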
+
+p = c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by=0.01))
+probit(p) # Has NAs
+probit(p, earg=list(bvalue= .Machine$double.eps)) # Has no NAs
+
+\dontrun{
+plot(p, logit(p), type="l", col="limegreen", ylab="transformation",
+ lwd=2, las=1, main="Some probability link functions")
+lines(p, probit(p), col="purple", lwd=2)
+lines(p, cloglog(p), col="chocolate", lwd=2)
+lines(p, cauchit(p), col="tan", lwd=2)
+abline(v=0.5, h=0, lty="dashed")
+legend(0.1, 4.0, c("logit", "probit", "cloglog", "cauchit"),
+ col=c("limegreen","purple","chocolate","tan"), lwd=2)
+}
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/put.smart.Rd b/man/put.smart.Rd
new file mode 100644
index 0000000..5922438
--- /dev/null
+++ b/man/put.smart.Rd
@@ -0,0 +1,71 @@
+\name{put.smart}
+\alias{put.smart}
+\title{ Adds a List to the End of the List ``.smart.prediction'' }
+\description{
+Adds a list to the end of the list \code{.smart.prediction}
+in
+\code{smartpredenv} (\R)
+or
+frame 1 (S-PLUS).
+}
+\usage{
+put.smart(smart)
+}
+\arguments{
+ \item{smart}{
+ a list containing parameters needed later for smart prediction.
+ }
+}
+\value{
+Nothing is returned.
+}
+\section{Side Effects}{
+ The variable \code{.smart.prediction.counter} in
+ \code{smartpredenv} (\R)
+ or
+ frame 1 (S-PLUS)
+ is incremented beforehand,
+ and \code{.smart.prediction[[.smart.prediction.counter]]} is
+ assigned the list \code{smart}.
+ If the list \code{.smart.prediction} in
+ \code{smartpredenv} (\R)
+ or
+ frame 1 (S-PLUS)
+ is not long enough
+ to hold \code{smart}, then it is made larger, and the variable
+ \code{.max.smart} in
+ \code{smartpredenv} (\R)
+ or
+ frame 1 (S-PLUS)
+ is adjusted accordingly.
+}
+\details{
+ \code{put.smart} is used in \code{"write"} mode within a smart function.
+ It saves parameters at the time of model fitting, which are
+ later used for prediction.
+ The function \code{put.smart} is the opposite of
+ \code{\link{get.smart}}, and both deal with the same contents.
+
+}
+\seealso{
+ \code{\link{get.smart}}.
+}
+\examples{
+"my1" <- function(x, minx=min(x)) { # Here is a smart function
+ x <- x # Needed for nested calls, e.g., bs(scale(x))
+ if(smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else
+ if(smart.mode.is("write"))
+ put.smart(list(minx=minx))
+ sqrt(x-minx)
+}
+attr(my1, "smart") <- TRUE
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
new file mode 100644
index 0000000..9d6bbbc
--- /dev/null
+++ b/man/qrrvglm.control.Rd
@@ -0,0 +1,484 @@
+\name{qrrvglm.control}
+\alias{qrrvglm.control}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Control function for QRR-VGLMs (CQO) }
+\description{
+ Algorithmic constants and parameters for a constrained quadratic
+ ordination (CQO), by fitting a \emph{quadratic reduced-rank vector
+ generalized linear model} (QRR-VGLM), are set using this function.
+ It is the control function for \code{\link{cqo}}.
+
+}
+\usage{
+qrrvglm.control(Rank = 1,
+ Bestof = if(length(Cinit)) 1 else 10,
+ checkwz=TRUE,
+ Cinit = NULL,
+ Crow1positive = TRUE,
+ epsilon = 1.0e-06,
+ EqualTolerances = ITolerances,
+ Etamat.colmax = 10,
+ FastAlgorithm = TRUE,
+ GradientFunction=TRUE,
+ Hstep = 0.001,
+ isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+ iKvector = 0.1,
+ iShape = 0.1,
+ ITolerances = TRUE,
+ maxitl = 40,
+ method.init = 1,
+ Maxit.optim = 250,
+ MUXfactor = rep(7, length=Rank),
+ Norrr = ~ 1,
+ optim.maxit = 20,
+ Parscale = if(ITolerances) 0.001 else 1.0,
+ SD.Cinit = 0.02,
+ SmallNo = 5.0e-13,
+ trace = TRUE,
+ Use.Init.Poisson.QO=TRUE,
+ wzepsilon = .Machine$double.eps^0.75, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ In the following, \eqn{R} is the \code{Rank}, \eqn{M} is the number
+ of linear predictors, and \eqn{S} is the number of responses
+ (species).
+ Thus \eqn{M=S} for binomial and Poisson responses, and
+ \eqn{M=2S} for the negative binomial and 2-parameter gamma distributions.
+
+ \item{Rank}{
+ The numerical rank \eqn{R} of the model, i.e., the
+ number of ordination axes. Must be an element from the set
+ \{1,2,\ldots,min(\eqn{M},\eqn{p_2}{p2})\} where the vector of explanatory
+ variables \eqn{x} is partitioned into (\eqn{x_1},\eqn{x_2}), which is
+ of dimension \eqn{p_1+p_2}{p1+p2}. The variables making up \eqn{x_1}
+ are given by the terms in the \code{Norrr} argument, and the rest
+ of the terms comprise \eqn{x_2}.
+
+ }
+ \item{Bestof}{
+ Integer. The best of \code{Bestof} fitted models is returned.
+ This argument helps guard against local solutions by (hopefully)
+ finding the global solution from many fits. The default value is
+ 1 if an initial value for \eqn{C} is supplied using \code{Cinit}.
+
+ }
+ \item{checkwz}{ Logical indicating whether the diagonal elements of
+ the working weight matrices should be checked to ensure that they are
+ sufficiently positive, i.e., greater than \code{wzepsilon}. If not,
+ any values less than \code{wzepsilon} are replaced with this value.
+
+ }
+ \item{Cinit}{
+ Optional initial \eqn{C} matrix, which must be a \eqn{p_2}{p2} by \eqn{R}
+ matrix. The default is to apply \code{.Init.Poisson.QO()} to obtain
+ initial values.
+
+ }
+ \item{Crow1positive}{
+ Logical vector of length \code{Rank} (recycled if necessary): are
+ the elements of the first row of \eqn{C} positive? For example,
+ if \code{Rank} is 4, then specifying \code{Crow1positive=c(FALSE,
+ TRUE)} will force \eqn{C[1,1]} and \eqn{C[1,3]} to be negative,
+ and \eqn{C[1,2]} and \eqn{C[1,4]} to be positive. This argument
+ allows for a reflection in the ordination axes because the
+ coefficients of the latent variables are unique up to a sign.
+
+ }
+ \item{epsilon}{
+ Positive numeric. Used to test for convergence for GLMs fitted
+ in FORTRAN. Larger values mean a loosening of the convergence
+ criterion.
+ If an error code of 3 is reported, try increasing this value.
+
+ }
+ \item{EqualTolerances}{
+ Logical indicating whether each (quadratic) predictor will
+ have equal tolerances. Setting \code{EqualTolerances=TRUE}
+ can help avoid numerical problems, especially with binary data.
+ Note that the estimated (common) tolerance matrix may or may
+ not be positive-definite. If it is, then it can be scaled to
+ the \eqn{R} by \eqn{R} identity matrix, i.e., made equivalent
+ to \code{ITolerances=TRUE}. Setting \code{ITolerances=TRUE}
+ will \emph{force} a common \eqn{R} by \eqn{R} identity matrix as
+ the tolerance matrix to the data even if it is not appropriate.
+ In general, setting \code{ITolerances=TRUE} is preferred over
+ \code{EqualTolerances=TRUE} because, if it works, it is much faster
+ and uses less memory. See \bold{Details} for more details.
+
+ }
+% \item{Eta.range}{ Numerical vector of length 2 or \code{NULL}.
+% Gives the lower and upper bounds on the values that can be taken
+% by the quadratic predictor (i.e., on the eta-scale).
+% Since \code{FastAlgorithm=TRUE}, this argument should be ignored.
+% }
+ \item{Etamat.colmax}{
+ Positive integer, no smaller than \code{Rank}. Controls the amount
+ of memory used by \code{.Init.Poisson.QO()}. It is the maximum
+ number of columns allowed for the pseudo-response and its weights.
+ In general, the larger the value, the better the initial value.
+ Used only if \code{Use.Init.Poisson.QO=TRUE}.
+
+ }
+
+ \item{FastAlgorithm}{
+ Logical. Whether a new fast algorithm is to be used. The fast
+ algorithm results in a large speed increase compared to Yee
+ (2004). Some details of the fast algorithm are found in
+ Appendix A of Yee (2006).
+ Setting \code{FastAlgorithm=FALSE} will give an error.
+
+ }
+ \item{GradientFunction}{
+ Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr}
+ is used or not, i.e., to compute gradient values. Used only if
+ \code{FastAlgorithm} is \code{TRUE}. The default value is usually
+ faster on most problems.
+
+ }
+ \item{Hstep}{
+ Positive value. Used as the step size in the finite difference
+ approximation to the derivatives by \code{\link[stats]{optim}}.
+
+% Used only if \code{FastAlgorithm} is \code{TRUE}.
+ }
+ \item{isdlv}{
+ Initial standard deviations for the latent variables (site scores).
+ Numeric, positive and of length \eqn{R} (recycled if necessary).
+ This argument is used only if \code{ITolerances=TRUE}. Used by
+ \code{.Init.Poisson.QO()} to obtain initial values for the constrained
+ coefficients \eqn{C} adjusted to a reasonable value. It adjusts the
+ spread of the site scores relative to a common species tolerance of 1
+ for each ordination axis. A value between 0.5 and 10 is recommended;
+ a value such as 10 means that the range of the environmental space is
+ very large relative to the niche width of the species. The successive
+ values should decrease because the first ordination axis should have
+ the most spread of site scores, followed by the second ordination
+ axis, etc.
+
+ }
+ \item{iKvector, iShape}{
+ Numeric, recycled to length \eqn{S} if necessary.
+ Initial values used for estimating the positive \eqn{k} and
+ \eqn{\lambda}{lambda} parameters of the negative binomial and
+ 2-parameter gamma distributions respectively. For further information
+ see \code{\link{negbinomial}} and \code{\link{gamma2}}.
+ These arguments override the \code{ik} and \code{ishape}
+ arguments in \code{\link{negbinomial}} and \code{\link{gamma2}}.
+
+ }
+ \item{ITolerances}{
+ Logical. If \code{TRUE} then the (common) tolerance matrix is the
+ \eqn{R} by \eqn{R} identity matrix by definition. Note that having
+ \code{ITolerances=TRUE} implies \code{EqualTolerances=TRUE}, but
+ not vice versa. Internally, the quadratic terms will be treated as
+ offsets (in GLM jargon) and so the models can potentially be fitted
+ very efficiently. \emph{However, it is a very good idea to center all
+ numerical variables in the \eqn{x_2} vector}. See \bold{Details}
+ for more details. The success of \code{ITolerances=TRUE} often
+ depends on suitable values for \code{isdlv} and/or \code{MUXfactor}.
+
+ }
+ \item{maxitl}{
+ Maximum number of times the optimizer is called or restarted.
+ Most users should ignore this argument.
+
+ }
+ \item{method.init}{
+ Method of initialization. A positive integer (1, 2, 3, etc.),
+ depending on the \pkg{VGAM} family function.
+ Currently it is used for \code{\link{negbinomial}} and
+ \code{\link{gamma2}} only, and used within the FORTRAN.
+
+ }
+ \item{Maxit.optim}{
+ Positive integer. Number of iterations given to the function
+ \code{\link[stats]{optim}} at each of the \code{optim.maxit}
+ iterations.
+
+ }
+
+ \item{MUXfactor}{
+ Multiplication factor for detecting large offset values. Numeric,
+ positive and of length \eqn{R} (recycled if necessary). This argument
+ is used only if \code{ITolerances=TRUE}. Offsets are \eqn{-0.5}
+ multiplied by the sum of the squares of all \eqn{R} latent variable
+ values. If the latent variable values are too large then this will
+ result in numerical problems. By too large, it is meant that the
+ standard deviation of the latent variable values is greater than
+ \code{MUXfactor[r] * isdlv[r]} for \code{r=1:Rank} (this is why
+ centering and scaling all the numerical predictor variables in
+ \eqn{x_2} is recommended). A value about 3 or 4 is recommended.
+ If failure to converge occurs, try a slightly lower value.
+
+}
+ \item{optim.maxit}{
+ Positive integer. Number of times \code{\link[stats]{optim}}
+ is invoked. At iteration \code{i}, the \code{i}th value of
+ \code{Maxit.optim} is fed into \code{\link[stats]{optim}}.
+
+ }
+
+ \item{Norrr}{
+ Formula giving terms that are \emph{not} to be included in the
+ reduced-rank regression (or formation of the latent variables),
+ i.e., those belonging to \eqn{x_1}.
+ Those variables which do not make up the latent variable (reduced-rank
+ regression) correspond to the \eqn{B_1}{B_1} matrix.
+ The default is to omit the intercept term from the latent variables.
+
+ }
+ \item{Parscale}{
+ Numerical and positive-valued vector of length \eqn{C}
+ (recycled if necessary).
+ Passed into \code{optim(..., control=list(parscale=Parscale))};
+ the elements of \eqn{C} become \eqn{C} / \code{Parscale}.
+ Setting \code{ITolerances=TRUE} results in line searches that
+ are very large, therefore \eqn{C} has to be scaled accordingly
+ to avoid large step sizes.
+ See \bold{Details} for more information.
+ It's probably best to leave this argument alone.
+ }
+ \item{SD.Cinit}{
+ Standard deviation of the initial values for the elements
+ of \eqn{C}.
+ These are normally distributed with mean zero.
+ This argument is used only if \code{Use.Init.Poisson.QO = FALSE}
+ and \eqn{C} is not inputted using \code{Cinit}.
+
+ }
+ \item{trace}{
+ Logical indicating if output should be produced for
+ each iteration. The default is \code{TRUE} because the
+ calculations are numerically intensive, meaning it may take
+ a long time, so that the user might think the computer has
+ locked up if \code{trace=FALSE}.
+
+}
+
+% \item{Kinit}{ Initial values for the index parameters \code{k} in the
+% negative binomial distribution (one per species).
+% In general, a smaller number is preferred over a larger number.
+% The vector is recycled to the number of responses (species).
+% The argument is currently not used.
+% }
+
+% \item{Dzero}{ Integer vector specifying which squared terms
+% are to be zeroed. These linear predictors will correspond to
+% a RR-VGLM.
+% The values must be elements from the set \{1,2,\ldots,\eqn{M}\}.
+% Used only if \code{Quadratic=TRUE} and \code{FastAlgorithm=FALSE}.
+% }
+\item{SmallNo}{
+ Positive numeric between \code{.Machine$double.eps} and \code{0.0001}.
+ Used to avoid under- or over-flow in the IRLS algorithm.
+ Used only if \code{FastAlgorithm} is \code{TRUE}.
+ }
+ \item{Use.Init.Poisson.QO}{
+ Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO()} is
+ used to obtain initial values for the canonical coefficients \eqn{C}.
+ If \code{FALSE} then random numbers are used instead.
+ }
+ \item{wzepsilon}{
+ Small positive number used to test whether the diagonals of the working
+ weight matrices are sufficiently positive.
+
+ }
+\item{\dots}{ Ignored at present. }
+}
+\details{
+ Recall that the central formula for CQO is
+ \deqn{\eta = B_1^T x_1 + A \nu +
+ \sum_{m=1}^M (\nu^T D_m \nu) e_m}{%
+ eta = B_1^T x_1 + A nu +
+ sum_{m=1}^M (nu^T D_m nu) e_m}
+ where \eqn{x_1}{x_1} is a vector (usually just a 1 for an intercept),
+ \eqn{x_2}{x_2} is a vector of environmental variables, \eqn{\nu=C^T
+ x_2}{nu=C^T x_2} is an \eqn{R}-vector of latent variables, \eqn{e_m} is
+ a vector of 0s but with a 1 in the \eqn{m}th position.
+ QRR-VGLMs are an extension of RR-VGLMs and allow for maximum
+ likelihood solutions to constrained quadratic ordination (CQO) models.
+
+% For the fitting of QRR-VGLMs, the default is that the \eqn{C} matrix
+% (containing the \emph{canonical} or \emph{constrained coefficients}
+% corresponding to \eqn{x_2})
+% is constrained by forcing the latent variables to have sample
+% variance-covariance matrix equalling \code{diag(Rank)}, i.e.,
+% unit variance and uncorrelated. The tolerance matrices are, in
+% general, diagonal under such a constraint.
+
+ Having \code{ITolerances=TRUE} means all the tolerance matrices
+ are the order-\eqn{R} identity matrix, i.e., it \emph{forces}
+ bell-shaped curves/surfaces on all species. This results in a
+ more difficult optimization problem (especially for 2-parameter
+ models such as the negative binomial and gamma) because of overflow
+ errors and it appears there are more local solutions. To help avoid
+ the overflow errors, scaling \eqn{C} by the factor \code{Parscale}
+ can help enormously. Even better, scaling \eqn{C} by specifying
+ \code{isdlv} is more understandable to humans. If failure to
+ converge occurs, try adjusting \code{Parscale}, or better, setting
+ \code{EqualTolerances=TRUE} (and hope that the estimated tolerance
+ matrix is positive-definite). To fit an equal-tolerances model, it
+ is best to first try setting \code{ITolerances=TRUE} and varying
+ \code{isdlv} and/or \code{MUXfactor} if it fails to converge.
+ If it still fails to converge after many attempts, try setting
+ \code{EqualTolerances=TRUE}, however this will usually be a lot slower
+ because it requires a lot more memory.
+
+ With an \eqn{R>1} model, the latent variables are always uncorrelated,
+ i.e., the variance-covariance matrix of the site scores is a diagonal
+ matrix.
+
+ If setting \code{EqualTolerances=TRUE} is used and the common
+ estimated tolerance matrix is positive-definite then that model is
+ effectively the same as the \code{ITolerances=TRUE} model (the two are
+ transformations of each other). In general, \code{ITolerances=TRUE}
+ is numerically more unstable and presents a more difficult problem
+ to optimize; the arguments \code{isdlv} and/or \code{MUXfactor} often
+ must be assigned some good value(s) (possibly found by trial and error)
+ in order for convergence to occur. Setting \code{ITolerances=TRUE}
+ \emph{forces} a bell-shaped curve or surface onto all the species data,
+ therefore this option should be used with deliberation. If unsuitable,
+ the resulting fit may be very misleading. Usually it is a good idea
+ for the user to set \code{EqualTolerances=FALSE} to see which species
+ appear to have a bell-shaped curve or surface. Improvements to the
+ fit can often be achieved using transformations, e.g., nitrogen
+ concentration to log nitrogen concentration.
+
+ Fitting a CAO model (see \code{\link{cao}}) first is a good idea for
+ pre-examining the data and checking whether it is appropriate to fit
+ a CQO model.
+
+%Suppose \code{FastAlgorithm = FALSE}. In theory (if
+%\code{Eta.range=NULL}), for QRR-VGLMs, the predictors have the values of
+%a quadratic form. However, when \code{Eta.range} is assigned a numerical
+%vector of length 2 (giving the endpoints of an interval), then those
+%values lying outside the interval are assigned the closest boundary
+%value. The \code{Eta.range} argument is provided to help avoid
+%numerical problems associated with the inner minimization problem. A
+%consequence of this is that the fitted values are bounded, e.g., between
+%\code{1/(1+exp(-Eta.range[1]))} and \code{1/(1+exp(-Eta.range[2]))} for
+%binary data (logit link), and greater than \code{exp(Eta.range[1])} for
+%Poisson data (log link). It is suggested that, for binary responses,
+%\code{c(-16, 16)} be used, and for Poisson responses, \code{c(-16, Inf)}
+%be used. The value \code{NULL} corresponds to \code{c(-Inf, Inf)}.
+
+}
+\value{
+ A list with components matching the input names.
+}
+\references{
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology},
+\bold{87}, 203--213.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ When \code{ITolerances=TRUE} it is a good idea to apply
+ \code{\link[base]{scale}} to all the numerical variables that make up
+ the latent variable, i.e., those of \eqn{x_2}{x_2}. This is to make
+ them have mean 0, and hence avoid large offset values which cause
+ numerical problems.
+
+ This function has many arguments that are common with
+ \code{\link{rrvglm.control}} and \code{\link{vglm.control}}.
+
+ It is usually a good idea to try fitting a model with
+ \code{ITolerances=TRUE} first, and if convergence is unsuccessful,
+ then try \code{EqualTolerances=TRUE} and \code{ITolerances=FALSE}.
+ Ordination diagrams with
+ \code{EqualTolerances=TRUE} have a natural interpretation, but
+ with \code{EqualTolerances=FALSE} they are more complicated and
+ require, e.g., contours to be overlaid on the ordination diagram
+ (see \code{\link{lvplot.qrrvglm}}).
+
+% and/or use the \code{Eta.range} argument.
+
+ In the example below, an equal-tolerances CQO model is fitted to the
+ hunting spiders data. Because \code{ITolerances=TRUE}, it is a good idea
+ to center all the \eqn{x_2} variables first. Upon fitting the model,
+ the actual standard deviation of the site scores is computed. Ideally,
+ the \code{isdlv} argument should have had this value for the best
+ chance of getting good initial values. For comparison, the model is
+ refitted with that value and it should run faster and more reliably.
+}
+
+\section{Warning }{
+
+The default value of \code{Bestof} is a bare minimum for many datasets,
+therefore it will be necessary to increase its value to increase the
+chances of obtaining the global solution.
+
+%Suppose \code{FastAlgorithm = FALSE}.
+%The fitted values of QRR-VGLMs can be restricted to lie between two values
+%in order to help make the computation numerically stable. For some data
+%sets, it may be necessary to use \code{Eta.range} to obtain convergence;
+%however, the fitted values etc. will no longer be accurate, especially at
+%small and/or large values. Convergence is slower when \code{Eta.range}
+%is used to restrict values.
+
+}
+
+\seealso{
+ \code{\link{cqo}},
+ \code{\link{rcqo}},
+ \code{\link{Coef.qrrvglm}},
+ \code{\link{Coef.qrrvglm-class}},
+% \code{\link{rrvglm}},
+% \code{\link{rrvglm.control}},
+% \code{\link{rrvglm.optim.control}},
+ \code{\link[stats]{optim}},
+ \code{\link{binomialff}},
+ \code{\link{poissonff}},
+ \code{\link{negbinomial}},
+ \code{\link{gamma2}},
+ \code{\link{gaussianff}}.
+}
+
+\examples{
+# Poisson CQO with equal tolerances
+data(hspider)
+set.seed(111) # This leads to the global solution
+hspider[,1:6]=scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ ITolerances = TRUE,
+ fam = quasipoissonff, data = hspider)
+sort(p1@misc$deviance.Bestof) # A history of all the iterations
+
+(isdlv = sd(lv(p1))) # Actual sd of the site scores; cf. the isdlv argument
+
+# Refit the model with better initial values
+set.seed(111) # This leads to the global solution
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ ITolerances = TRUE, isdlv = isdlv, # Note the use of isdlv here
+ fam = quasipoissonff, data = hspider)
+sort(p1@misc$deviance.Bestof) # A history of all the iterations
+
+# Negative binomial CQO; smallest deviance is about 275.389
+set.seed(111) # This leads to the global solution
+nb1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ ITol = FALSE, EqualTol = TRUE, # A good idea for negbinomial
+ fam = negbinomial, data = hspider)
+sort(nb1@misc$deviance.Bestof) # A history of all the iterations
+summary(nb1)
+\dontrun{
+lvplot(nb1, lcol=1:12, y=TRUE, pcol=1:12)
+}
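+
+# qrrvglm.control() can also be called directly; it returns a list of
+# the algorithmic constants described above (a quick check):
+names(qrrvglm.control(Rank=1))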
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/qtplot.gumbel.Rd b/man/qtplot.gumbel.Rd
new file mode 100644
index 0000000..5cb03ec
--- /dev/null
+++ b/man/qtplot.gumbel.Rd
@@ -0,0 +1,112 @@
+\name{qtplot.gumbel}
+\alias{qtplot.gumbel}
+\alias{qtplot.egumbel}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Quantile Plot for Gumbel Regression }
+\description{
+ Plots quantiles associated with a Gumbel model.
+}
+\usage{
+qtplot.gumbel(object, plot.it = TRUE,
+ y.arg = TRUE, spline.fit = FALSE, label = TRUE,
+ R = object@misc$R, percentiles = object@misc$percentiles,
+ add.arg = FALSE, mpv = object@misc$mpv,
+ xlab = NULL, ylab = "", main = "",
+ pch = par()$pch, pcol.arg = par()$col,
+ llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
+ tcol.arg = par()$col, tadj = 1, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A \pkg{VGAM} extremes model of the
+ Gumbel type, produced by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}} with a family function either
+ \code{"gumbel"} or \code{"egumbel"}. }
+ \item{plot.it}{
+ Logical. Plot it? If \code{FALSE} no plot will be done.
+
+ }
+ \item{y.arg}{ Logical. Add the raw data on to the plot? }
+ \item{spline.fit}{ Logical. Use a spline fit through the fitted
+ percentiles? This can be useful if there are large gaps
+ between some values along the covariate.
+ }
+ \item{label}{ Logical. Label the percentiles? }
+ \item{R}{ See \code{\link{gumbel}}. }
+ \item{percentiles}{ See \code{\link{gumbel}}. }
+ \item{add.arg}{ Logical. Add the plot to an existing plot? }
+ \item{mpv}{ See \code{\link{gumbel}}. }
+ \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. }
+ \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. }
+ \item{main}{ Title of the plot. See \code{\link[graphics]{title}}. }
+ \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. }
+ \item{pcol.arg}{ Color of the points.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{llty.arg}{ Line type.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{lcol.arg}{ Color of the lines.
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+ }
+ \item{llwd.arg}{ Line width.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}.
+ }
+ \item{tcol.arg}{ Color of the text
+ (if \code{label} is \code{TRUE}).
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+ }
+ \item{tadj}{ Text justification.
+ See the \code{adj} argument of \code{\link[graphics]{par}}.
+ }
+ \item{\dots}{ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{sub} and \code{las}.
+ }
+
+
+}
+\details{
+ There should be a single covariate such as time.
+ The quantiles specified by \code{percentiles} are plotted.
+}
+\value{
+ The object, with a list called \code{qtplot} placed in its \code{post}
+ slot.
+ (If \code{plot.it=FALSE} then just the list is returned.)
+ The list contains components
+ \item{fitted.values}{ The percentiles of the response,
+ possibly including the MPV. }
+ \item{percentiles }{The percentiles (small vector of values between
+ 0 and 100). }
+}
+%\references{ ~put references to the literature/web site here ~ }
+\author{ Thomas W. Yee }
+\note{
+ Unlike \code{\link{gumbel}}, one cannot have
+ \code{percentiles=NULL}.
+
+}
+\seealso{
+ \code{\link{gumbel}}.
+}
+\examples{
+data(venice)
+y = as.matrix(venice[,paste("r",1:10,sep="")])
+fit1 = vgam(y ~ s(year, df=3), gumbel(R=365, mpv=TRUE),
+ data=venice, trace=TRUE, na.action=na.pass)
+fitted(fit1)[1:4,]
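+
+# The defaults in the usage above come from the fit; e.g., the stored
+# percentiles (assuming gumbel() saved them in the misc slot):
+fit1@misc$percentiles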
+
+\dontrun{
+par(mfrow=c(1,1), bty="l", xpd=TRUE, las=1)
+qtplot(fit1, mpv=TRUE, lcol=c(1,2,5), tcol=c(1,2,5), lwd=2,
+ pcol="blue", tadj=0.4)
+
+qtplot(fit1, perc=97, mpv=FALSE, lcol=3, tcol=3,
+ lwd=2, tadj=0.4, add=TRUE) -> i
+i@post$qtplot$fitted[1:4,]
+}
+}
+\keyword{hplot}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
new file mode 100644
index 0000000..0781c1b
--- /dev/null
+++ b/man/qtplot.lmscreg.Rd
@@ -0,0 +1,82 @@
+\name{qtplot.lmscreg}
+\alias{qtplot.lmscreg}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Quantile Plot for LMS Quantile Regression }
+\description{
+ Plots quantiles associated with a LMS quantile regression.
+}
+\usage{
+qtplot.lmscreg(object, newdata = NULL,
+ percentiles = object@misc$percentiles,
+ plot.it = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A \pkg{VGAM} quantile regression model, i.e.,
+ an object produced by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}} with a family function beginning with
+ \code{"lms."}, e.g., \code{\link{lms.yjn}}.
+ }
+ \item{newdata}{ Optional data frame for computing the quantiles.
+ If missing, the original data is used.
+ }
+ \item{percentiles}{ Numerical vector with values between 0 and 100
+ that specify the percentiles (quantiles).
+ The default is the percentiles used when the model was fitted.
+ }
+ \item{plot.it}{ Logical. Plot it? If \code{FALSE} no plot will
+ be done. }
+ \item{\dots}{ Graphical parameters that are passed into
+ \code{\link{plotqtplot.lmscreg}}.
+ }
+}
+\details{
+The `primary' variable is defined as the main covariate upon which
+the regression or smoothing is performed. For example, in medical
+studies, it is often age.
+In \pkg{VGAM}, it is possible to handle more than one covariate;
+however, the primary variable must be the first term after the intercept.
+
+}
+\value{
+ A list with the following components.
+ \item{fitted.values }{A vector of fitted percentile values. }
+ \item{percentiles }{The percentiles used. }
+}
+
+\references{
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ \code{\link{plotqtplot.lmscreg}} does the actual plotting.
+}
+
+\seealso{
+\code{\link{plotqtplot.lmscreg}},
+\code{\link{deplot.lmscreg}},
+\code{\link{lms.bcn}},
+\code{\link{lms.bcg}},
+\code{\link{lms.yjn}}.
+}
+
+\examples{\dontrun{
+data(bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+qtplot(fit)
+qtplot(fit, perc=c(25,50,75,95), lcol="blue", tcol="blue", llwd=2)
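+
+# A sketch using plot.it=FALSE to return the list described in Value:
+qq = qtplot(fit, percentiles=c(25,50,75), plot.it=FALSE)
+qq$percentiles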
+}
+}
+\keyword{hplot}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
new file mode 100644
index 0000000..eef1c95
--- /dev/null
+++ b/man/quasibinomialff.Rd
@@ -0,0 +1,149 @@
+\name{quasibinomialff}
+%\alias{quasibinomial}
+\alias{quasibinomialff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Quasi-Binomial Family Function }
+\description{
+ Family function for fitting generalized linear models to binomial
+ responses, where the dispersion parameters are unknown.
+
+}
+\usage{
+quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
+ parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{ Link function. See \code{\link{Links}} for more choices.
+
+ }
+ \item{mv}{
+ Multivariate response? If \code{TRUE}, then the response is interpreted
+ as \eqn{M} binary responses, where \eqn{M} is the number of columns
+ of the response matrix. In this case, the response matrix should have
+ zero/one values only.
+
+ If \code{FALSE} and the response is a (2-column) matrix, then the
+ number of successes is given in the first column and the second column
+ is the number of failures.
+
+ }
+ \item{onedpar}{
+ One dispersion parameter? If \code{mv} is \code{TRUE}, then a separate
+ dispersion parameter will be computed for each response (column), by default.
+ Setting \code{onedpar=TRUE} will pool them so that there is only one
+ dispersion parameter to be estimated.
+
+ }
+ \item{parallel}{
+ A logical or formula. Used only if \code{mv} is \code{TRUE}. This
+ argument allows for the parallelism assumption whereby the regression
+ coefficients for a variable are constrained to be equal over the \eqn{M}
+ linear/additive predictors.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The values must be from the set
+ \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of
+ the matrix response.
+
+ }
+}
+\details{
+ The final model is not fully estimated by maximum likelihood since the
+ dispersion parameter is unknown (see pp.124--8 of McCullagh and Nelder
+ (1989) for more details).
+
+ A dispersion parameter that is less/greater than unity corresponds to
+ under-/over-dispersion relative to the binomial model. Over-dispersion
+ is more common in practice.
+
+ Setting \code{mv=TRUE} is necessary when fitting a Quadratic RR-VGLM
+ (see \code{\link{cqo}}) because the response will be a matrix of
+ \eqn{M} columns (e.g., one column per species). Then there will be
+ \eqn{M} dispersion parameters (one per column of the response).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ and \code{\link{cao}}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+\author{ Thomas W. Yee }
+\note{
+ If \code{mv} is \code{FALSE} (the default), then the response can be
+ of one of three formats: a factor (first level taken as success), a
+ vector of proportions of success, or a 2-column matrix (first column =
+ successes) of counts. The argument \code{weights} in the modelling
+ function can also be specified. In particular, for a general vector
+ of proportions, you will need to specify \code{weights} because the
+ number of trials is needed.
+
+ If \code{mv} is \code{TRUE}, then the matrix response can only be of
+ one format: a matrix of 1's and 0's (1=success).
+
+ This function is only a front-end to the \pkg{VGAM} family function
+ \code{binomialff()}; indeed, \code{quasibinomialff(...)} is equivalent
+ to \code{binomialff(..., dispersion=0)}. Here, the argument
+ \code{dispersion=0} signifies that the dispersion parameter is to
+ be estimated.
+
+ Regardless of whether the dispersion parameter is to be estimated or
+ not, its value can be seen from the output from the \code{summary()}
+ of the object.
+
+% With the introduction of name spaces for the \pkg{VGAM} package,
+% \code{"ff"} can be dropped for this family function.
+
+}
+\seealso{
+ \code{\link{binomialff}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ \code{\link{cao}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}},
+ \code{\link{poissonff}},
+ \code{\link{quasipoissonff}},
+ \code{\link[stats]{quasibinomial}}.
+}
+\examples{
+quasibinomialff()
+quasibinomialff(link="probit")
+
+# Nonparametric logistic regression
+data(hunua)
+hunua = transform(hunua, a.5 = sqrt(altitude)) # Transformation of altitude
+fit1 = vglm(agaaus ~ poly(a.5, 2), quasibinomialff, hunua)
+fit2 = vgam(agaaus ~ s(a.5), quasibinomialff, hunua)
+\dontrun{
+plot(fit2, se=TRUE, llwd=2, lcol="darkgreen", scol="darkgreen",
+ xlab="sqrt(altitude)",
+ main="GAM and quadratic GLM fitted to species data")
+plotvgam(fit1, se=TRUE, lcol="blue", scol="blue", add=TRUE, llwd=2)
+}
+fit1@misc$dispersion # dispersion parameter
+logLik(fit1)
+
+# Here, the dispersion parameter defaults to 1
+fit0 = vglm(agaaus ~ poly(a.5, 2), binomialff, hunua)
+fit0@misc$dispersion # dispersion parameter
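+
+# A sketch of the proportions-plus-weights format described in the
+# notes above (simulated data; 'weights' supplies the number of trials):
+nn = 100; trials = rep(10, nn)
+prop = rbinom(nn, size=trials, prob=0.4) / trials
+fitp = vglm(prop ~ 1, quasibinomialff, weights=trials)
+coef(fitp, matrix=TRUE)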
+}
+\keyword{models}
+\keyword{regression}
+
+
+%AIC(fit1)
+%AIC(fit0)
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
new file mode 100644
index 0000000..7f67c37
--- /dev/null
+++ b/man/quasipoissonff.Rd
@@ -0,0 +1,129 @@
+\name{quasipoissonff}
+%\alias{quasipoisson}
+\alias{quasipoissonff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Quasi-Poisson Family Function }
+\description{
+ Fits a generalized linear model to a Poisson response, where
+ the dispersion parameter is unknown.
+}
+\usage{
+quasipoissonff(link = "loge", onedpar = FALSE,
+ parallel = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function. See \code{\link{Links}} for more choices.
+
+ }
+ \item{onedpar}{
+ One dispersion parameter? If the response is a matrix,
+ then a separate
+ dispersion parameter will be computed for each response (column),
+ by default.
+ Setting \code{onedpar=TRUE} will pool them so that there is only
+ one dispersion parameter to be estimated.
+
+ }
+ \item{parallel}{
+ A logical or formula. Used only if the response is a matrix.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which linear/additive predictors
+ are modelled as intercepts only. The values must be from the set
+ \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
+ matrix response.
+
+ }
+}
+\details{
+ \eqn{M} defined above is the number of linear/additive predictors.
+
+ If the dispersion parameter is unknown, then the resulting estimate
+ is not fully a maximum likelihood estimate.
+
+ A dispersion parameter that is less/greater than unity corresponds to
+ under-/over-dispersion relative to the Poisson model. Over-dispersion
+ is more common in practice.
+
+ When fitting a Quadratic RR-VGLM, the response is a matrix of \eqn{M},
+ say, columns (e.g., one column per species). Then there will be \eqn{M}
+ dispersion parameters (one per column of the response matrix).
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as
+ \code{\link{vglm}},
+ \code{\link{vgam}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ and \code{\link{cao}}.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+
+\author{ Thomas W. Yee }
+
+\note{
+ This function will handle a matrix response automatically.
+
+ The call \code{poissonff(dispersion=0, ...)} is equivalent to
+ \code{quasipoissonff(...)}. The latter was written so that R users
+ of \code{quasipoisson()} would only need to add a ``\code{ff}''
+ to the end of the family function name.
+
+ Regardless of whether the dispersion parameter is to be estimated or
+ not, its value can be seen from the output from the \code{summary()}
+ of the object.
+
+% With the introduction of name spaces for the \pkg{VGAM} package,
+% \code{"ff"} can be dropped for this family function.
+
+}
+
+\seealso{
+ \code{\link{poissonff}},
+ \code{\link{loge}},
+ \code{\link{rrvglm}},
+ \code{\link{cqo}},
+ \code{\link{cao}},
+ \code{\link{binomialff}},
+ \code{\link{quasibinomialff}},
+ \code{\link[stats]{quasipoisson}}.
+}
+\examples{
+quasipoissonff()
+
+\dontrun{
+n = 200; p = 5; S = 5
+mydata = rcqo(n, p, S, fam="poisson", EqualTol=FALSE)
+myform = attr(mydata, "formula")
+p1 = cqo(myform, fam=quasipoissonff, EqualTol=FALSE, data=mydata)
+sort(p1@misc$deviance.Bestof) # A history of all the iterations
+lvplot(p1, y=TRUE, lcol=1:S, pch=1:S, pcol=1:S)
+summary(p1) # The dispersion parameters are estimated
+}
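+
+# A sketch comparing quasipoissonff() with stats::glm() and
+# quasipoisson() on simulated data; the estimates should agree closely:
+set.seed(123)
+qdata = data.frame(x2 = rnorm(100))
+qdata = transform(qdata, y = rpois(100, lambda=exp(1 + x2)))
+coef(vglm(y ~ x2, quasipoissonff, data=qdata))
+coef(glm(y ~ x2, quasipoisson, data=qdata))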
+}
+\keyword{models}
+\keyword{regression}
+
+%n = 100
+%x2 = rnorm(n)
+%x3 = rnorm(n)
+%x4 = rnorm(n)
+%lv1 = 0 + x3 - 2*x4
+%lambda1 = exp(3 - 0.5 * (lv1-0)^2)
+%lambda2 = exp(2 - 0.5 * (lv1-1)^2)
+%lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2)
+%y1 = rpois(n, lambda1)
+%y2 = rpois(n, lambda2)
+%y3 = rpois(n, lambda3)
+%p1 = cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, quasipoissonff)
+%lvplot(p1, y=TRUE, lcol=1:3, pch=1:3, pcol=1:3)
+%summary(p1) # Three dispersion parameters are estimated
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
new file mode 100644
index 0000000..2e792d6
--- /dev/null
+++ b/man/rayleigh.Rd
@@ -0,0 +1,92 @@
+\name{rayleigh}
+\alias{rayleigh}
+\alias{crayleigh}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Rayleigh Distribution Family Function }
+\description{
+ Estimating the parameter of the Rayleigh distribution by maximum
+ likelihood estimation. Right-censoring is allowed.
+
+}
+\usage{
+rayleigh(link = "loge")
+crayleigh(link ="loge", expected=FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the parameter \eqn{a}.
+ See \code{\link{Links}} for more choices.
+ A log link is the default because \eqn{a} is positive.
+
+ }
+ \item{expected}{
+ Logical. For censored data only, \code{FALSE}
+ means the Newton-Raphson algorithm is used, and \code{TRUE} means Fisher scoring.
+
+ }
+}
+\details{
+ The Rayleigh distribution, which is used in physics,
+ has a probability density function that can be written
+ \deqn{f(y) = y \exp(-0.5 (y/a)^2)/a^2}{%
+ f(y) = y*exp(-0.5*(y/a)^2)/a^2}
+ for \eqn{y>0} and \eqn{a>0}.
+ The mean of \eqn{Y} is
+ \eqn{a \sqrt{\pi / 2}}{a * sqrt(pi / 2)}
+ and its variance is
+ \eqn{a^2 (4-\pi)/2}{a^2 (4-pi)/2}.
+
+ The \pkg{VGAM} family function \code{crayleigh} handles right-censored
+ data (the true value is greater than the observed value). To indicate
+ which type of censoring, input \code{extra = list(rightcensored = vec2)}
+ where \code{vec2} is a logical vector the same length as the response.
+ If this component of the list is missing then the logical values are
+ taken to be \code{FALSE}. The fitted object has this component stored
+ in the \code{extra} slot.
+
+}
+\section{Warning}{
+ The theory behind the argument \code{expected} is not fully complete.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ T. W. Yee }
+\note{ A related distribution is the Maxwell distribution.
+}
+\seealso{
+ \code{\link{Rayleigh}},
+ \code{\link{maxwell}}.
+}
+\examples{
+n = 1000; a = exp(2)
+ystar = rrayleigh(n, a=a)
+fit = vglm(ystar ~ 1, rayleigh, trace=TRUE, crit="c")
+fitted(fit)[1:5]
+mean(ystar)
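+a * sqrt(pi/2) # Theoretical mean from the Details section, for comparison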
+coef(fit, matrix=TRUE)
+Coef(fit)
+
+# Censored data
+U = runif(n, 5, 15)
+y = pmin(U, ystar)
+\dontrun{ par(mfrow=c(1,2)); hist(ystar); hist(y); }
+extra = list(rightcensored = ystar > U)
+fit = vglm(y ~ 1, crayleigh, trace=TRUE, extra=extra)
+table(fit@extra$rightcensored)
+coef(fit, matrix=TRUE)
+fitted(fit)[1:4,]
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
new file mode 100644
index 0000000..dcb944d
--- /dev/null
+++ b/man/rcqo.Rd
@@ -0,0 +1,380 @@
+\name{rcqo}
+\alias{rcqo}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Constrained Quadratic Ordination }
+\description{
+ Random generation for constrained quadratic ordination (CQO).
+}
+\usage{
+rcqo(n, p, S, Rank = 1,
+ family = c("poisson", "negbinomial", "binomial-poisson",
+ "Binomial-negbinomial", "ordinal-poisson",
+ "Ordinal-negbinomial", "gamma2"),
+ EqualMaxima = FALSE, EqualTolerances = TRUE, ESOptima = FALSE,
+ loabundance = if (EqualMaxima) hiabundance else 10,
+ hiabundance = 100, sdlv = c(1.5/2^(0:3))[1:Rank],
+ sdOptima = ifelse(ESOptima, 1.5/Rank, 1) * sdlv,
+ sdTolerances = 0.25, Kvector = 1, Shape = 1,
+ sqrt = FALSE, Log = FALSE, rhox = 0.5, breaks = 4,
+ seed = NULL, Crow1positive=TRUE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{n}{
+ Number of sites. It is denoted by \eqn{n} below.
+
+ }
+ \item{p}{
+ Number of environmental variables, including an intercept term.
+ It is denoted by \eqn{p} below.
+ Must be no less than \eqn{1+R} in value.
+
+ }
+ \item{S}{
+ Number of species.
+ It is denoted by \eqn{S} below.
+
+ }
+ \item{Rank}{
+ The rank or the number of latent variables or true dimension
+ of the data on the reduced space.
+ This must be either 1, 2, 3 or 4.
+ It is denoted by \eqn{R}.
+
+ }
+ \item{family}{
+ What type of species data is to be returned.
+ The first choice is the default.
+ If binomial then a 0 means absence and 1 means presence.
+ If ordinal then the \code{breaks} argument is passed into
+ the \code{breaks} argument of \code{\link[base]{cut}}.
+ Note that either the Poisson or negative binomial distributions
+ are used to generate binomial and ordinal data, and that
+ an upper-case choice is used for the negative binomial distribution
+ (this makes it easier for the user).
+ If \code{"gamma2"} then this is the 2-parameter gamma distribution.
+
+% ,
+% and the resulting values are
+% 1,2,\ldots,\code{breaks} if \code{breaks} is a single integer zz
+% else zz.
+
+ }
+ \item{EqualMaxima}{
+ Logical. Does each species have the same maxima?
+ See arguments \code{loabundance} and \code{hiabundance}.
+
+ }
+ \item{EqualTolerances}{
+ Logical. Does each species have the
+ same tolerance? If \code{TRUE} then the common value is 1 along
+ every latent variable, i.e., all species' tolerance matrices are the
+ order-\eqn{R} identity matrix.
+
+ }
+ \item{ESOptima}{
+ Logical. Do the species have equally spaced optima?
+ If \code{TRUE} then the quantity
+ \eqn{S^{1/R}}{S^(1/R)} must be an
+ integer with value 2 or more. That is, there has to be an
+ appropriate number of species in total. This is so that a grid
+ of optimum values is possible in \eqn{R}-dimensional
+ latent variable space
+ in order to place the species' optima.
+ Also see the argument \code{sdTolerances}.
+
+ }
+ \item{loabundance, hiabundance}{
+ Numeric. These are recycled to a vector of length \eqn{S}.
+ The species have a maximum
+ between \code{loabundance} and \code{hiabundance}. That is,
+ at their optimal environment, the mean abundance of each
+ species is between the two componentwise values. If \code{EqualMaxima}
+ is \code{TRUE} then \code{loabundance} and \code{hiabundance}
+ must have the same values.
+ If \code{EqualMaxima} is \code{FALSE} then the
+ logarithms of the maxima are uniformly distributed between
+ \code{log(loabundance)} and \code{log(hiabundance)}.
+
+ }
+ \item{sdlv}{
+ Numeric, of length \eqn{R}
+ (recycled if necessary). Site scores along
+ each latent variable have these standard deviation values.
+ This must be a decreasing sequence of values because the first
+ ordination axis contains the greatest spread of the species'
+ site scores, followed by the second axis, followed by the third
+ axis, etc.
+
+ }
+ \item{sdOptima}{
+ Numeric, of length \eqn{R} (recycled if necessary).
+ If \code{ESOptima=FALSE} then,
+ for the \eqn{r}th latent variable axis,
+ the optima of the species are generated from a
+ normal distribution centered about 0.
+ If \code{ESOptima=TRUE} then the \eqn{S} optima
+ are equally spaced about 0 along every latent variable axis.
+ Regardless of the value of \code{ESOptima}, the optima
+ are then scaled to give standard deviation \code{sdOptima[r]}.
+
+ }
+ \item{sdTolerances}{
+ Numeric. If \code{EqualTolerances=FALSE} then, for the
+ \eqn{r}th latent variable, the
+ species' tolerances are
+ chosen from a normal distribution with mean 1 and
+ standard deviation
+ \code{sdTolerances[r]}.
+ However, the first species \code{y1} has its tolerance matrix
+ set equal to the order-\eqn{R} identity matrix.
+ All tolerance matrices for all species are diagonal in this function.
+ This argument is ignored if \code{EqualTolerances} is \code{TRUE},
+ otherwise it is recycled to length \eqn{R} if necessary.
+
+ }
+ \item{Kvector}{
+ A vector of positive \eqn{k} values (recycled to length \eqn{S}
+ if necessary) for the negative binomial distribution
+ (see \code{\link{negbinomial}} for details).
+ Note that a natural default value does not exist; however, the default
+ value here is probably a realistic one. For large values of \eqn{\mu}
+ one has \eqn{Var(Y) = \mu^2 / k}{Var(Y) = mu^2 / k} approximately.
+
+ }
+ \item{Shape}{
+ A vector of positive \eqn{\lambda}{lambda} values (recycled to length
+ \eqn{S} if necessary) for the 2-parameter gamma distribution (see
+ \code{\link{gamma2}} for details). A natural default value does
+ not exist; however, the default value here is probably a realistic
+ one. Note that \eqn{Var(Y) = \mu^2 / \lambda}{Var(Y) = mu^2 / lambda}.
+
+ }
+ \item{sqrt}{
+ Logical. Take the square-root of the negative binomial counts?
+ Assigning \code{sqrt=TRUE} when \code{family="negbinomial"} means
+ that the resulting species data can be considered very crudely to be
+ approximately Poisson distributed.
+ They will not be integers in general, but are much easier (fewer
+ numerical problems) to estimate using something like \code{cqo(..., family="poissonff")}.
+
+ }
+ \item{Log}{
+ Logical. Take the logarithm of the gamma random variates?
+ Assigning \code{Log=TRUE} when \code{family="gamma2"} means
+ that the resulting species data can be considered very crudely to be
+ approximately Gaussian distributed about its (quadratic) mean.
+ The result is that it is much easier (less numerical
+ problems) to estimate using something like \code{cqo(..., family="gaussianff")}.
+
+ }
+ \item{rhox}{
+ Numeric, less than 1 in absolute value.
+ The correlation between the environmental variables.
+ The correlation matrix is a matrix of 1's along the diagonal
+ and \code{rhox} in the off-diagonals.
+ Note that each environmental variable is normally distributed
+ with mean 0. The standard deviation of each environmental variable
+ is chosen so that the site scores have the determined standard
+ deviation, as given by argument \code{sdlv}.
+
+ }
+ \item{breaks}{
+ If \code{family} is assigned an ordinal value then this argument
+ is used to define the cutpoints. It is fed into the
+ \code{breaks} argument of \code{\link[base]{cut}}.
+
+ }
+ \item{seed}{
+ Optionally, a single value, interpreted as an integer.
+ If given, it is passed into \code{\link[base:Random]{set.seed}}.
+ This argument can be used to obtain reproducible results.
+
+ }
+ \item{Crow1positive}{
+ See \code{\link{qrrvglm.control}} for details.
+
+ }
+}
+\details{
+ This function generates data coming from a constrained quadratic
+ ordination (CQO) model. In particular,
+ data coming from a \emph{species packing model} can be generated
+ with this function.
+ The species packing model states that species have equal tolerances,
+ equal maxima, and optima which are uniformly distributed over
+ the latent variable space. This can be achieved by assigning
+ the arguments \code{ESOptima = TRUE}, \code{EqualMaxima = TRUE},
+ \code{EqualTolerances = TRUE}.
+
+ At present, the Poisson and negative binomial abundances are
+ generated first using \code{loabundance} and \code{hiabundance},
+ and if \code{family} is binomial or ordinal then it is converted into
+ these forms.
+
+ In CQO theory the \eqn{n \times p}{n * p}
+ matrix \eqn{X} is partitioned
+ into two parts \eqn{X_1} and \eqn{X_2}. The matrix
+ \eqn{X_2} contains the `real' environmental variables whereas
+ the variables in \eqn{X_1} are just for adjustment purposes;
+ they contain the intercept terms and other variables that one
+ wants to adjust for when (primarily) looking at the
+ variables in \eqn{X_2}.
+ This function has \eqn{X_1} only being a matrix of ones,
+ i.e., containing an intercept only.
+
+}
+\value{
+ An \eqn{n \times (p-1+S)}{n * (p-1+S)} data frame with
+ components and attributes.
+ In the following the attributes are labelled with double
+ quotes.
+ \item{x2, x3, x4, \ldots, xp}{
+ The environmental variables. This makes up the
+ \eqn{n \times (p-1)}{n * (p-1)} \eqn{X_2} matrix.
+ Note that \code{x1} is not present; it is effectively a vector
+ of ones since it corresponds to an intercept term when
+ \code{\link{cqo}} is applied to the data.
+
+ }
+ \item{y1, y2, y3, \ldots, yS}{
+ The species data. This makes up the
+ \eqn{n \times S}{n * S} matrix \eqn{Y}.
+ This will be of the form described by the argument
+ \code{family}.
+
+ }
+ \item{"ccoefficients"}{
+ The \eqn{(p-1) \times R}{(p-1) * R} matrix of
+ constrained coefficients
+ (or canonical coefficients).
+ These are also known as weights or loadings.
+
+ }
+ \item{"formula"}{
+ The formula involving the species and environmental variable names.
+ This can be used directly in the \code{formula} argument of
+ \code{\link{cqo}}.
+
+ }
+ \item{"logmaxima"}{
+ The \eqn{S}-vector of species' maxima, on a log scale.
+ These are uniformly distributed between
+ \code{log(loabundance)} and \code{log(hiabundance)}.
+
+
+ }
+ \item{"lv"}{
+ The \eqn{n \times R}{n * R} matrix of site scores.
+ Each successive column (latent variable) has
+ sample standard deviation
+ equal to successive values of \code{sdlv}.
+
+ }
+ \item{"optima"}{
+ The \eqn{S \times R}{S * R} matrix of species' optima.
+
+ }
+ \item{"tolerances"}{
+ The \eqn{S \times R}{S * R} matrix of species' tolerances.
+ These are the
+ square root of the diagonal elements of the tolerance matrices
+ (recall that all tolerance matrices are restricted to being
+ diagonal in this function).
+
+ }
+ Other attributes are \code{"break"},
+ \code{"family"}, \code{"Rank"},
+ \code{"loabundance"}, \code{"hiabundance"},
+ \code{"EqualTolerances"}, \code{"EqualMaxima"},
+ \code{"seed"}, as inputted.
+
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+ter Braak, C. J. F. and Prentice, I. C. (1988)
+A theory of gradient analysis.
+\emph{Advances in Ecological Research},
+\bold{18}, 271--317.
+
+}
+\author{ T. W. Yee }
+\note{
+ This function is under development and is not finished yet.
+ There may be a few bugs.
+
+ Yet to do: add an argument that allows absences to be equal
+ to the first level if ordinal data is requested.
+
+}
+\seealso{
+ \code{\link{cqo}},
+ \code{\link{qrrvglm.control}},
+ \code{\link[base]{cut}},
+ \code{\link{binomialff}},
+ \code{\link{poissonff}},
+ \code{\link{negbinomial}},
+ \code{\link{gamma2}},
+ \code{gaussianff}.
+}
+\examples{
+
+# Example 1: Species packing model:
+n = 100; p = 5; S = 5
+mydata = rcqo(n, p, S, ESOpt=TRUE, EqualMax=TRUE)
+names(mydata)
+myform = attr(mydata, "formula")
+fit = cqo(myform, fam=poissonff, ITol=TRUE, data=mydata,
+ Bestof=3) # Fit a CQO model to the data
+\dontrun{
+matplot(attr(mydata, "lv"), mydata[,-(1:(p-1))], col=1:S)
+persp(fit, col=1:S, add=TRUE)
+lvplot(fit, lcol=1:S, y=TRUE, pcol=1:S) # The same plot as above
+}
+
+# Compare the fitted model with the 'truth'
+ccoef(fit) # The fitted model
+attr(mydata, "ccoefficients") # The 'truth'
+
+c(sd(attr(mydata, "lv")), sd(lv(fit))) # Both values should be approx equal
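+
+# The remaining 'truth' quantities are also stored as attributes:
+attr(mydata, "optima")     # Species' optima
+attr(mydata, "tolerances") # Species' tolerances (all 1s here, by default)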
+
+
+# Example 2: negative binomial data fitted using a Poisson model:
+n = 200; p = 5; S = 5
+mydata = rcqo(n, p, S, fam="negbin", sqrt=TRUE)
+myform = attr(mydata, "formula")
+fit = cqo(myform, fam=poissonff, ITol=TRUE, dat=mydata)
+\dontrun{
+lvplot(fit, lcol=1:S, y=TRUE, pcol=1:S)
+}
+# Compare the fitted model with the 'truth'
+ccoef(fit) # The fitted model
+attr(mydata, "ccoefficients") # The 'truth'
+
+
+# Example 3: gamma2 data fitted using a Gaussian model:
+n = 200; p = 5; S = 3
+mydata = rcqo(n, p, S, fam="gamma2", Log=TRUE)
+fit = cqo(attr(mydata, "formula"), fam=gaussianff, ITol=TRUE, dat=mydata)
+\dontrun{
+matplot(attr(mydata, "lv"), exp(mydata[,-(1:(p-1))]), col=1:S) # 'raw' data
+lvplot(fit, lcol=1:S, y=TRUE, pcol=1:S) # Fitted model to transformed data
+}
+# Compare the fitted model with the 'truth'
+ccoef(fit) # The fitted model
+attr(mydata, "ccoefficients") # The 'truth'
+}
+\keyword{distribution}
+
+
diff --git a/man/rdiric.Rd b/man/rdiric.Rd
new file mode 100644
index 0000000..44fbd39
--- /dev/null
+++ b/man/rdiric.Rd
@@ -0,0 +1,61 @@
+\name{rdiric}
+\alias{rdiric}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ The Dirichlet distribution }
+\description{
+ Generates Dirichlet random variates.
+
+}
+\usage{
+rdiric(n, shape, dimension = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{n}{ number of observations. }
+ \item{shape}{
+ the shape parameters. These must be positive.
+ If \code{dimension} is specified, values
+ are recycled if necessary to length \code{dimension}.
+
+ }
+ \item{dimension}{
+ the dimension of the distribution.
+ If \code{dimension} is not numeric then it is taken to be
+ \code{length(shape)}.
+
+ }
+}
+\details{
+ This function is based on a relationship between the gamma and
+ Dirichlet distribution. Random gamma variates are generated, and
+ then Dirichlet random variates are formed from these.
+
+}
+\value{
+ An \code{n} by \code{dimension} matrix of Dirichlet random variates.
+ Each element is positive, and each row will sum to unity.
+
+}
+
+\references{
+Lange, K. (2002)
+\emph{Mathematical and Statistical Methods for Genetic Analysis},
+2nd ed.
+New York: Springer-Verlag.
+
+}
+\author{ Thomas W. Yee }
+\seealso{
+ \code{\link{dirichlet}} is a \pkg{VGAM} family function for
+ fitting a Dirichlet distribution to data.
+
+}
+
+\examples{
+y = rdiric(n=1000, shape=c(3, 1, 4))
+fit = vglm(y ~ 1, dirichlet, trace = TRUE, crit="c")
+Coef(fit)
+coef(fit, matrix=TRUE)
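+
+# A sketch of the gamma-to-Dirichlet relationship described in Details:
+g = cbind(rgamma(1000, shape=3), rgamma(1000, shape=1), rgamma(1000, shape=4))
+y2 = g / rowSums(g)  # Normalizing the gamma variates gives Dirichlet variates
+colMeans(y2)         # Approximately shape/sum(shape) = c(3,1,4)/8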
+}
+\keyword{distribution}
+
diff --git a/man/recexp1.Rd b/man/recexp1.Rd
new file mode 100644
index 0000000..e0de865
--- /dev/null
+++ b/man/recexp1.Rd
@@ -0,0 +1,77 @@
+\name{recexp1}
+\alias{recexp1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Upper Record Values from a 1-parameter Exponential Distribution }
+\description{
+ Maximum likelihood estimation of the rate parameter of a
+ 1-parameter exponential distribution when the observations are upper
+ record values.
+}
+\usage{
+recexp1(lrate="loge", irate=NULL, method.init=1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lrate}{
+ Link function applied to the rate parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{irate}{
+ Numeric. Optional initial values for the rate.
+ The default value \code{NULL} means they are computed internally,
+ with the help of \code{method.init}.
+
+ }
+ \item{method.init}{
+ Integer, either 1, 2 or 3. Initialization method; three algorithms
+ are implemented. Choose another value if convergence fails, or use
+ \code{irate}.
+
+ }
+}
+\details{
+ The response must be a vector or one-column matrix with strictly
+ increasing values.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+ Arnold, B. C. and Balakrishnan, N. and Nagaraja, H. N. (1998)
+ \emph{Records},
+ New York: John Wiley & Sons.
+
+}
+\author{ T. W. Yee }
+\note{
+ By default, this family function has the intercept-only MLE as the
+ initial value, therefore convergence may only take one iteration.
+ Fisher scoring is used.
+
+}
+
+\seealso{
+ \code{\link{exponential}}.
+}
+\examples{
+rawy = rexp(n <- 10000, rate=exp(1))
+
+# Keep only the records
+delete = c(FALSE, rep(TRUE, len=n-1))
+for(i in 2:length(rawy))
+ if(rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
+(y = rawy[!delete])
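+
+# A vectorized equivalent of the record-extraction loop above:
+stopifnot(all(rawy[rawy == cummax(rawy)] == y))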
+
+length(y) / y[length(y)] # MLE of rate
+
+fit = vglm(y ~ 1, recexp1, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/reciprocal.Rd b/man/reciprocal.Rd
new file mode 100644
index 0000000..494e7c0
--- /dev/null
+++ b/man/reciprocal.Rd
@@ -0,0 +1,102 @@
+\name{reciprocal}
+\alias{reciprocal}
+\alias{nreciprocal}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Reciprocal link function }
+\description{
+ Computes the reciprocal transformation, including its inverse and the
+ first two derivatives.
+}
+\usage{
+reciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+nreciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse.arg}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+ }
+}
+\details{
+ The \code{reciprocal} link function is a special case of the power link
+ function. Numerical values of \code{theta} close to 0 result in
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The arguments
+ \code{short} and \code{tag} are used only if \code{theta} is character.
+
+ The \code{nreciprocal} link function computes the negative reciprocal,
+ i.e., \eqn{-1/ \theta}{-1/theta}.
+
+}
+\value{
+ For \code{reciprocal}:
+ for \code{deriv = 0}, the reciprocal of \code{theta}, i.e.,
+ \code{1/theta} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then also
+ \code{1/theta} (the reciprocal link is its own inverse).
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+}
+%\section{Warning}{
+%}
+\author{ Thomas W. Yee }
+
+\note{ Numerical instability may occur when \code{theta} is
+close to 0.
+}
+
+\seealso{
+ \code{\link{identity}},
+ \code{powl}.
+ }
+\examples{
+reciprocal(1:5)
+reciprocal(1:5, inverse=TRUE, deriv=2)
+nreciprocal(1:5)
+nreciprocal(1:5, inverse=TRUE, deriv=2)
+
+x = (-3):3
+reciprocal(x) # Has Inf
+reciprocal(x, earg=list(bvalue= .Machine$double.eps)) # Has no Inf
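+
+# A quick sketch: the reciprocal link is its own inverse, so applying
+# it twice should recover the original values
+max(abs(reciprocal(reciprocal(1:5)) - (1:5))) # Should be near 0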
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
diff --git a/man/recnormal1.Rd b/man/recnormal1.Rd
new file mode 100644
index 0000000..9511207
--- /dev/null
+++ b/man/recnormal1.Rd
@@ -0,0 +1,97 @@
+\name{recnormal1}
+\alias{recnormal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Upper Record Values from a Univariate Normal Distribution }
+\description{
+ Maximum likelihood estimation of the two parameters of a
+ univariate normal distribution when the observations are upper
+ record values.
+}
+\usage{
+recnormal1(lmean="identity", lsd="loge",
+ imean=NULL, isd=NULL, method.init=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmean, lsd}{
+ Link functions applied to the mean and sd parameters.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{imean, isd}{
+ Numeric. Optional initial values for the mean and sd.
+ The default value \code{NULL} means they are computed internally,
+ with the help of \code{method.init}.
+
+ }
+ \item{method.init}{
+ Integer, either 1 or 2 or 3. Initial method, three algorithms are
+ implemented. Choose the another value if convergence fails, or use
+ \code{imean} and/or \code{isd}.
+
+ }
+ \item{zero}{
+ An integer vector, containing the value 1 or 2. If present, the mean or
+ standard deviation, respectively, is modelled as an intercept only.
+ Usually, setting \code{zero=2} will be used, if used at all.
+ The default value \code{NULL} means both linear/additive predictors
+ are modelled as functions of the explanatory variables.
+
+ }
+}
+\details{
+ The response must be a vector or one-column matrix with strictly
+ increasing values.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+ Arnold, B. C., Balakrishnan, N. and Nagaraja, H. N. (1998)
+ \emph{Records},
+ New York: John Wiley & Sons.
+
+}
+\author{ T. W. Yee }
+\note{
+ This family function tries to solve a difficult problem, and the
+ larger the data set the better.
+ Convergence failure can commonly occur, and
+ convergence may be very slow, so set \code{maxit=200, trace=TRUE}, say.
+ Inputting good initial values is advised.
+
+ This family function uses the BFGS quasi-Newton update formula for the
+ working weight matrices. Consequently the estimated variance-covariance
+ matrix may be inaccurate or simply wrong! The standard errors must
+ therefore be treated with caution; these are computed in functions such
+ as \code{vcov()} and \code{summary()}.
+
+}
+
+\seealso{
+ \code{\link{normal1}},
+ \code{\link{dcnormal1}}.
+}
+\examples{
+n = 10000
+mymean = 100
+# First value is reference value or trivial record
+rawy = c(mymean, rnorm(n, mean=mymean, sd=16))
+
+# Keep only observations that are records
+delete = c(FALSE, rep(TRUE, len=n))
+for(i in 2:length(rawy))
+ if(rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE
+(y = rawy[!delete])
+
+fit = vglm(y ~ 1, recnormal1, trace=TRUE, maxit=200)
+coef(fit, matrix=TRUE)
+Coef(fit)
+summary(fit)
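+
+# Good initial values may help convergence; a sketch using the crude
+# sample estimates via the arguments imean and isd:
+fit2 = vglm(y ~ 1, recnormal1(imean=mean(y), isd=sd(y)),
+            trace=TRUE, maxit=200)
+coef(fit2, matrix=TRUE)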
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/rhobit.Rd b/man/rhobit.Rd
new file mode 100644
index 0000000..133ac41
--- /dev/null
+++ b/man/rhobit.Rd
@@ -0,0 +1,111 @@
+\name{rhobit}
+\alias{rhobit}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Rhobit Link Function }
+\description{
+ Computes the rhobit link transformation, including its inverse and the
+ first two derivatives.
+}
+\usage{
+rhobit(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{earg}{
+ Optional list. Extra argument for passing in additional information.
+ Values of \code{theta} which are less than or equal to -1 can be
+ replaced by the \code{bminvalue} component of the list \code{earg}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by the \code{bmaxvalue} component of the list \code{earg}
+ before computing the link function value.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+ }
+ \item{inverse}{
+ Logical. If \code{TRUE} the inverse function is computed.
+
+ }
+ \item{deriv}{
+ Order of the derivative. Integer with value 0, 1 or 2.
+
+ }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}. }
+
+}
+\details{
+ The \code{rhobit} link function is commonly used for parameters that
+ lie between \eqn{-1} and \eqn{1}. Numerical values of \code{theta}
+ close to \eqn{-1} or \eqn{1} or out of range result in \code{Inf},
+ \code{-Inf}, \code{NA} or \code{NaN}. The arguments \code{short}
+ and \code{tag} are used only if \code{theta} is character.
+
+}
+\value{
+ For \code{deriv = 0}, the rhobit of \code{theta}, i.e., \code{log((1 +
+ theta)/(1 - theta))} when \code{inverse = FALSE}, and if \code{inverse =
+ TRUE} then \code{(exp(theta) - 1)/(exp(theta) + 1)}.
+
+ For \code{deriv = 1}, then the function returns \emph{d} \code{theta}
+ / \emph{d} \code{eta} as a function of \code{theta} if \code{inverse =
+ FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal.
+
+}
+\references{
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is close to \eqn{-1} or \eqn{1}.
+ One way of overcoming this is to use \code{earg}.
+
+ The correlation parameter of a standard bivariate normal distribution
+ lies between \eqn{-1} and \eqn{1}, therefore this function can be used
+ for modelling this parameter as a function of explanatory variables.
+
+ The link function \code{rhobit} is very similar to
+ \code{\link{fisherz}}; indeed, it is just twice the value of
+ \code{\link{fisherz}}.
+
+}
+\seealso{
+ \code{\link{Links}},
+ \code{\link{binom2.rho}},
+ \code{\link{fisherz}}.
+}
+
+\examples{
+theta = seq(-0.99, 0.99, by=0.01)
+y = rhobit(theta)
+\dontrun{
+plot(theta, y, type="l", las=1, ylab="", main="rhobit(theta)")
+abline(v=0, h=0, lty=2)
+}
+
+x = c(seq(-1.02, -0.98, by=0.01), seq(0.97, 1.02, by=0.01))
+rhobit(x) # Has NAs
+rhobit(x, earg=list(bminvalue= -1 + .Machine$double.eps,
+ bmaxvalue= 1 - .Machine$double.eps)) # Has no NAs
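+
+# Two quick checks (a sketch): the inverse undoes the link, and
+# rhobit() equals twice fisherz()
+theta0 = c(-0.5, 0, 0.5)
+rhobit(rhobit(theta0), inverse=TRUE) # Should return theta0
+max(abs(rhobit(theta0) - 2 * fisherz(theta0))) # Should be near 0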
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/rig.Rd b/man/rig.Rd
new file mode 100644
index 0000000..20a632a
--- /dev/null
+++ b/man/rig.Rd
@@ -0,0 +1,55 @@
+\name{rig}
+\alias{rig}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Reciprocal Inverse Gaussian distribution }
+\description{
+ Estimation of the parameters of a
+ reciprocal inverse Gaussian distribution.
+
+}
+\usage{
+rig(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu, llambda}{
+ Link functions for \code{mu} and \code{lambda}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{imu, ilambda}{
+ Initial values for \code{mu} and \code{lambda}.
+ A \code{NULL} means a value is computed internally.
+
+ }
+}
+\details{
+ See Jorgensen (1997) for details.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Jorgensen, B. (1997)
+\emph{The Theory of Dispersion Models}.
+London: Chapman & Hall.
+}
+\author{ T. W. Yee }
+\note{
+ This distribution is potentially useful for dispersion modelling.
+
+}
+\seealso{
+ \code{\link{simplex}}.
+}
+\examples{
+y = rchisq(n=100, df=14) # Not 'proper' data!!
+fit = vglm(y ~ 1, rig, trace=TRUE)
+fit = vglm(y ~ 1, rig, trace=TRUE, eps=1e-9, cri="c")
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/rlplot.egev.Rd b/man/rlplot.egev.Rd
new file mode 100644
index 0000000..2b8640f
--- /dev/null
+++ b/man/rlplot.egev.Rd
@@ -0,0 +1,142 @@
+\name{rlplot.egev}
+\alias{rlplot.egev}
+\alias{rlplot.gev}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Return Level Plot for GEV Fits }
+\description{
+ A return level plot is constructed for a GEV-type model.
+}
+\usage{
+rlplot.egev(object, plot.it = TRUE,
+ probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
+ add.arg = FALSE, xlab = "Return Period", ylab = "Return Level",
+ main = "Return Level Plot",
+ pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
+ llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
+ slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
+ ylim = NULL, Log = TRUE, CI = TRUE, epsilon = 1e-05, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ A \pkg{VGAM} extremes model of the
+ GEV-type, produced by \code{\link{vglm}}
+ with a family function either
+ \code{"gev"} or \code{"egev"}.
+
+ }
+ \item{plot.it}{
+ Logical. Plot it? If \code{FALSE} no plot will be done.
+
+ }
+
+ \item{probability}{
+ Numeric vector of probabilities used.
+
+ }
+ \item{add.arg}{ Logical. Add the plot to an existing plot? }
+ \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. }
+ \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. }
+ \item{main}{ Title of the plot. See \code{\link[graphics]{title}}. }
+ \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. }
+ \item{pcol.arg}{ Color of the points.
+ See the \code{col} argument of \code{\link[graphics]{par}}. }
+ \item{pcex}{ Character expansion of the points.
+ See the \code{cex} argument of \code{\link[graphics]{par}}. }
+ \item{llty.arg}{ Line type.
+ See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ \item{lcol.arg}{ Color of the lines.
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+ }
+ \item{llwd.arg}{ Line width.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}.
+ }
+ \item{slty.arg, scol.arg, slwd.arg}{
+ Corresponding arguments for the lines used for the
+ confidence intervals. Used only if \code{CI=TRUE}.
+ }
+ \item{ylim}{ Limits for the y-axis. Numeric of length 2. }
+ \item{Log}{ Logical. If \code{TRUE} then \code{log=""} otherwise
+ \code{log="x"}. This changes the labelling of the x-axis only.
+ }
+ \item{CI}{ Logical. Add in a 95 percent confidence interval? }
+ \item{epsilon}{
+ Numeric, close to zero. Used for the finite-difference
+ approximation to the first derivatives with respect to
+ each parameter. If too small, numerical problems will occur.
+
+ }
+ \item{\dots}{
+ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{sub} and \code{las}.
+
+ }
+}
+\details{
+ A return level plot plots \eqn{z_p}{zp} versus
+ \eqn{\log(y_p)}{log(yp)}. It is linear if the shape parameter
+ \eqn{\xi=0}{xi=0}. If \eqn{\xi<0}{xi<0} then the plot is convex
+ with asymptotic limit as \eqn{p} approaches zero at
+ \eqn{\mu-\sigma / \xi}{mu-sigma/xi}. If
+ \eqn{\xi>0}{xi>0} then the plot is concave and has no finite bound.
+ Here, \eqn{G(z_p) = 1-p}{G(zp) = 1-p} where \eqn{0<p<1}
+ (\eqn{p} corresponds to the argument \code{probability})
+ and \eqn{G} is the cumulative distribution function of the
+ GEV distribution. The quantity \eqn{z_p}{zp} is known as the
+ \emph{return level} associated with the \emph{return period}
+ \eqn{1/p}. For many applications, this means \eqn{z_p}{zp}
+ is exceeded by the annual
+ maximum in any particular year with probability \eqn{p}.
+
+ The points in the plot are the actual data.
+
+
+}
+\value{
+ In the \code{post} slot of the object is a list called
+ \code{rlplot} with list components
+ \item{yp}{\code{-log(probability)}, which is used on the x-axis. }
+ \item{zp}{values which are used for the y-axis}
+ \item{lower, upper}{lower and upper confidence limits for the
+ 95 percent confidence intervals evaluated at the values of
+ \code{probability} (if \code{CI=TRUE}). }
+}
+\references{
+
+Coles, S. (2001)
+\emph{An Introduction to Statistical Modeling of Extreme Values}.
+London: Springer-Verlag.
+}
+
+\author{ T. W. Yee }
+\note{
+ The confidence intervals are approximate, being
+ based on finite-difference approximations to derivatives.
+
+}
+
+\seealso{
+ \code{\link{egev}}.
+}
+
+\examples{
+y = rgev(n <- 100, scale=exp(1), shape = -0.1)
+fit = vglm(y ~ 1, egev, trace=TRUE)
+
+# Identity link for all parameters:
+fit2 = vglm(y ~ 1, egev(lsh=identity, lsc=identity, isc=10), trace=TRUE)
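+
+# Return levels may also be computed directly from the fitted GEV via
+# its quantile function; a sketch (the parameters are assumed to be in
+# the order location, scale, shape):
+cfit = Coef(fit)
+qgev(0.99, location=cfit[1], scale=cfit[2], shape=cfit[3]) # 100-year level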
+\dontrun{
+par(mfrow=c(1,2))
+rlplot(fit) -> i1
+rlplot(fit2, pcol="darkorange", lcol="blue", Log=FALSE,
+ scol="darkgreen", slty="dashed") -> i2
+range(i2@post$rlplot$upper - i1@post$rlplot$upper) # Should be near 0
+range(i2@post$rlplot$lower - i1@post$rlplot$lower) # Should be near 0
+}
+}
+\keyword{hplot}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/rposnegbin.Rd b/man/rposnegbin.Rd
new file mode 100644
index 0000000..3ff2fa8
--- /dev/null
+++ b/man/rposnegbin.Rd
@@ -0,0 +1,85 @@
+\name{rposnegbin}
+\alias{rposnegbin}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive-negative binomial distribution random variates }
+\description{
+ Generate random variates from a positive-negative binomial distribution.
+}
+\usage{
+rposnegbin(n, munb, k)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{n}{
+ number of random values to return.
+
+ }
+ \item{munb}{
+ vector of positive means (of an ordinary negative binomial distribution).
+
+ }
+ \item{k}{
+ vector of positive index parameters (of an ordinary negative binomial distribution).
+ This is called the \code{size} argument in
+ \code{\link[stats:NegBinomial]{rnbinom}}.
+
+% This is called \eqn{\theta}{theta} in the \code{\link[MASS]{rnegbin}}
+% function in the \code{MASS} library.
+
+ Short vectors are recycled.
+ The parameter \code{1/k} is known as a dispersion parameter;
+ as \code{k} approaches infinity, the negative binomial distribution
+ approaches a Poisson distribution.
+
+ }
+}
+\details{
+ The positive-negative binomial distribution is a negative binomial
+ distribution but with
+ the probability of a zero being zero. The other probabilities are scaled
+ to add to unity.
+ The mean therefore is
+ \deqn{\mu / (1-p(0))}{%
+ munb / (1-p(0))}
+ where \eqn{\mu}{munb} is the mean of an ordinary negative binomial distribution.
+ The arguments of the function are fed into
+ \code{\link[stats:NegBinomial]{rnbinom}} until \eqn{n} positive values
+ are obtained.
+
+}
+\value{
+ \eqn{n} random deviates are returned.
+
+}
+\references{
+Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
+D. B. (1996)
+Modelling the abundances of rare species: statistical models
+for counts with extra zeros.
+\emph{Ecological Modelling},
+\bold{88},
+297--308.
+
+}
+\author{ T. W. Yee }
+\note{
+The running time is slow when \code{munb} is very close to zero.
+
+}
+
+\seealso{
+% \code{\link[MASS]{rnegbin}},
+ \code{\link[stats:NegBinomial]{rnbinom}},
+ \code{\link{posnegbinomial}},
+ \code{\link{zanegbinomial}}.
+
+}
+\examples{
+munb = 2; k = 4; n = 1000
+y = rposnegbin(n, munb=munb, k=k)
+table(y)
+mean(y) # sample mean
+munb / (1 - (k/(k+munb))^k) # population mean
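+
+# Sanity check (a sketch): zeros are excluded by construction
+sum(y == 0) # Should be 0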
+}
+\keyword{distribution}
+
diff --git a/man/rrar.Rd b/man/rrar.Rd
new file mode 100644
index 0000000..0a427a6
--- /dev/null
+++ b/man/rrar.Rd
@@ -0,0 +1,105 @@
+\name{rrar}
+\alias{rrar}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Nested reduced-rank autoregressive models for multiple
+time series }
+\description{
+ Estimates the parameters of a
+ nested reduced-rank autoregressive model for multiple
+ time series.
+}
+\usage{
+rrar(Ranks = 1, coefstart = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{Ranks}{ Vector of integers: the ranks of the model.
+ Each value must be at least one and no more than \code{M},
+ where \code{M} is the number of response variables in the time series.
+ The length of \code{Ranks} is the \emph{lag}, which is often denoted by
+ the symbol \emph{L} in the literature. }
+ \item{coefstart}{ Optional numerical vector of initial values for the
+ coefficients.
+ By default, the family function chooses these automatically. }
+}
+\details{
+ Full details are given in Ahn and Reinsel (1988).
+ Convergence may be very slow, so setting \code{maxit=50}, say, may help.
+ If convergence is not obtained, you might like to try inputting different
+ initial values.
+
+ Setting \code{trace=TRUE} in \code{\link{vglm}} is useful for monitoring
+ the progress at each iteration.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Ahn, S. and Reinsel, G. C. (1988)
+Nested reduced-rank autoregressive models for multiple
+time series.
+\emph{Journal of the American Statistical Association},
+\bold{83}, 849--856.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ T. W. Yee }
+\note{
+This family function should
+be used within \code{\link{vglm}} and
+not with \code{\link{rrvglm}} because
+it does not fit into the RR-VGLM framework exactly. Instead, the
+reduced-rank model is formulated as a VGLM!
+
+A methods function \code{Coef.rrar}, say, has yet to be written.
+It would return the quantities
+\code{Ak1},
+\code{C},
+\code{D},
+\code{omegahat},
+\code{Phi},
+etc. as slots, and then \code{print.Coef.rrar} would also need to be
+written.
+
+}
+
+\seealso{
+\code{\link{vglm}},
+\code{\link{usagrain}}.
+}
+\examples{
+\dontrun{
+data(usagrain)
+year = seq(1961+1/12, 1972+10/12, by=1/12)
+par(mar=c(4,4,2,2)+0.1, mfrow=c(2,2))
+for(i in 1:4) {
+ plot(year, usagrain[,i], main=names(usagrain)[i], type="l", xlab="", ylab="")
+ points(year, usagrain[,i], pch="*")
+}
+apply(usagrain, 2, mean) # mu vector
+cgrain = scale(usagrain, scale=FALSE) # Center the time series only
+fit = vglm(cgrain ~ 1, rrar(Ranks=c(4,1)), trace=TRUE)
+summary(fit)
+
+print(fit@misc$Ak1, dig=2)
+print(fit@misc$Cmatrices, dig=3)
+print(fit@misc$Dmatrices, dig=3)
+print(fit@misc$omegahat, dig=3)
+print(fit@misc$Phimatrices, dig=2)
+
+par(mar=c(4,4,2,2)+0.1, mfrow=c(4,1))
+for(i in 1:4) {
+ plot(year, fit@misc$Z[,i], main=paste("Z", i, sep=""),
+ type="l", xlab="", ylab="")
+ points(year, fit@misc$Z[,i], pch="*")
+}
+}
+}
+\keyword{ts}
+\keyword{regression}
+\keyword{models}
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
new file mode 100644
index 0000000..4dd7506
--- /dev/null
+++ b/man/rrvglm-class.Rd
@@ -0,0 +1,250 @@
+\name{rrvglm-class}
+\docType{class}
+\alias{rrvglm-class}
+\title{Class ``rrvglm'' }
+\description{
+Reduced-rank vector generalized linear models.
+}
+\section{Objects from the Class}{
+Objects can be created by calls to \code{\link{rrvglm}}.
+}
+\section{Slots}{
+ \describe{
+ \item{\code{extra}:}{
+ Object of class \code{"list"};
+ the \code{extra} argument on entry to \code{vglm}. This
+ contains any extra information that might be needed
+ by the family function.
+ }
+ \item{\code{family}:}{
+ Object of class \code{"vglmff"}.
+ The family function. }
+ \item{\code{iter}:}{
+ Object of class \code{"numeric"}.
+ The number of IRLS iterations used.
+ }
+ \item{\code{predictors}:}{
+ Object of class \code{"matrix"}
+ with \eqn{M} columns which holds the \eqn{M} linear predictors.
+ }
+ \item{\code{assign}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ This named list gives information matching the columns and the
+ (LM) model matrix terms.
+ }
+ \item{\code{call}:}{
+ Object of class \code{"call"}, from class \code{ "vlm"}.
+ The matched call.
+ }
+ \item{\code{coefficients}:}{
+ Object of class
+ \code{"numeric"}, from class \code{ "vlm"}.
+ A named vector of coefficients.
+ }
+ \item{\code{constraints}:}{
+ Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ A named list of constraint matrices used in the fitting.
+ }
+ \item{\code{contrasts}:}{
+ Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ The contrasts used (if any).
+ }
+ \item{\code{control}:}{
+ Object of class \code{"list"}, from class
+ \code{ "vlm"}.
+ A list of parameters for controlling the fitting process.
+ See \code{\link{vglm.control}} for details.
+ }
+ \item{\code{criterion}:}{
+ Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ List of convergence criterion evaluated at the
+ final IRLS iteration.
+ }
+ \item{\code{df.residual}:}{
+ Object of class
+ \code{"numeric"}, from class \code{ "vlm"}.
+ The residual degrees of freedom.
+ }
+ \item{\code{df.total}:}{
+ Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+ The total degrees of freedom.
+ }
+ \item{\code{dispersion}:}{
+ Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+ The scaling parameter.
+ }
+ \item{\code{effects}:}{
+ Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+ The effects.
+ }
+ \item{\code{fitted.values}:}{
+ Object of class
+ \code{"matrix"}, from class \code{ "vlm"}.
+ The fitted values. This may be missing or consist entirely
+ of \code{NA}s, e.g., for the Cauchy model.
+ }
+ \item{\code{misc}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ A named list to hold miscellaneous parameters.
+ }
+ \item{\code{model}:}{
+ Object of class \code{"data.frame"},
+ from class \code{ "vlm"}.
+ The model frame.
+ }
+ \item{\code{na.action}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ A list holding information about missing values.
+ }
+ \item{\code{offset}:}{
+ Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+ If non-zero, a \eqn{M}-column matrix of offsets.
+ }
+ \item{\code{post}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}
+ where post-analysis results may be put.
+ }
+ \item{\code{preplot}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}
+ used by \code{\link{plotvgam}}; the plotting parameters
+ may be put here.
+ }
+ \item{\code{prior.weights}:}{
+ Object of class
+ \code{"numeric"}, from class \code{ "vlm"}
+ holding the initially supplied weights.
+ }
+ \item{\code{qr}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ QR decomposition at the final iteration.
+ }
+ \item{\code{R}:}{
+ Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+ The \bold{R} matrix in the QR decomposition used in the fitting.
+ }
+ \item{\code{rank}:}{
+ Object of class \code{"integer"},
+ from class \code{ "vlm"}.
+ Numerical rank of the fitted model.
+ }
+ \item{\code{residuals}:}{
+ Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+ The \emph{working} residuals at the final IRLS iteration.
+ }
+ \item{\code{rss}:}{
+ Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+ Residual sum of squares at the final IRLS iteration with
+ the adjusted dependent vectors and weight matrices.
+ }
+ \item{\code{smart.prediction}:}{
+ Object of class
+ \code{"list"}, from class \code{ "vlm"}.
+ A list of data-dependent parameters (if any)
+ that are used by smart prediction.
+ }
+ \item{\code{terms}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ The \code{\link[stats]{terms}} object used.
+ }
+ \item{\code{weights}:}{
+ Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+ The weight matrices at the final IRLS iteration.
+ This is in matrix-band form.
+ }
+ \item{\code{x}:}{
+ Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+ The model matrix (LM, not VGLM).
+ }
+ \item{\code{xlevels}:}{
+ Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ The levels of the factors, if any, used in fitting.
+ }
+ \item{\code{y}:}{
+ Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+ The response, in matrix form.
+ }
+ }
+}
+
+\section{Extends}{
+Class \code{"vglm"}, directly.
+Class \code{"vlm"}, by class "vglm".
+}
+\section{Methods}{
+ \describe{
+ \item{biplot}{\code{signature(x = "rrvglm")}: biplot. }
+ \item{Coef}{\code{signature(object = "rrvglm")}: more detailed
+ coefficients giving \bold{A},
+ \eqn{\bold{B}_1}{\bold{B}1}, \bold{C}, etc.
+ }
+ \item{print}{\code{signature(x = "rrvglm")}:
+ short summary of the object. }
+ \item{summary}{\code{signature(object = "rrvglm")}:
+ a more detailed summary of the object. }
+ }
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+\url{http://www.stat.auckland.ac.nz/~yee}
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ The slots of \code{"rrvglm"} objects are currently identical to those
+ of \code{"vglm"} objects.
+}
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+% zzz need to make sure this function matches \code{\link{vglm-class}},
+%where \code{\link{vglm-class}} is definitive.
+
+\seealso{
+ \code{\link{rrvglm}},
+ \code{\link{lvplot.rrvglm}},
+ \code{\link{vglmff-class}}.
+}
+
+\examples{
+# Rank-1 stereotype model of Anderson (1984)
+data(pneumo)
+set.seed(111)
+pneumo = transform(pneumo, let=log(exposure.time),
+ x1=runif(nrow(pneumo))) # x1 is some unrelated covariate
+fit = rrvglm(cbind(normal, mild, severe) ~ let + x1,
+ multinomial, pneumo, Rank=1)
+Coef(fit)
+}
+\keyword{classes}
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
new file mode 100644
index 0000000..43e9b58
--- /dev/null
+++ b/man/rrvglm.Rd
@@ -0,0 +1,289 @@
+\name{rrvglm}
+\alias{rrvglm}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Fitting Reduced-Rank Vector Generalized Linear Models (RR-VGLMs) }
+\description{
+ A \emph{reduced-rank vector generalized linear model} (RR-VGLM) is fitted.
+ RR-VGLMs are VGLMs but some of the constraint matrices are estimated.
+ In this documentation, \eqn{M} is the number of linear predictors.
+
+}
+\usage{
+rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
+ na.action = na.fail, etastart = NULL, mustart = NULL,
+ coefstart = NULL, control = rrvglm.control(...), offset = NULL,
+ method = "rrvglm.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL, constraints = NULL, extra = NULL,
+ qr.arg = FALSE, smart = TRUE, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+
+ \item{formula}{
+ a symbolic description of the model to be fit.
+ The RHS of the formula is applied to each linear predictor. Different
+ variables in each linear predictor can be chosen by specifying
+ constraint matrices.
+
+ }
+ \item{family}{
+ a function of class \code{"vglmff"} describing
+ what statistical model is to be fitted.
+
+ }
+ \item{data}{
+ an optional data frame containing the variables in the model.
+ By default the variables are taken from \code{environment(formula)},
+ typically the environment from which \code{rrvglm} is called.
+
+ }
+ \item{weights}{
+ an optional vector or matrix of (prior) weights
+ to be used in the fitting process.
+ If \code{weights} is a matrix, then it must be in
+ \emph{matrix-band} form, whereby the first \eqn{M}
+ columns of the matrix are the
+ diagonals, followed by the upper-diagonal band, followed by the
+ band above that, etc. In this case, there can be up to \eqn{M(M+1)/2}
+ columns, with the last column corresponding to the
+ (1,\eqn{M}) elements of the weight matrices.
+
+ }
+ \item{subset}{
+ an optional logical vector specifying a subset of observations to be
+ used in the fitting process.
+
+ }
+ \item{na.action}{
+ a function which indicates what should happen when the data contain
+ \code{NA}s.
+ The default is set by the \code{na.action} setting
+ of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
+ The ``factory-fresh'' default is \code{na.omit}.
+
+ }
+ \item{etastart}{
+ starting values for the linear predictors.
+ It is a \eqn{M}-column matrix.
+ If \eqn{M=1} then it may be a vector.
+ }
+ \item{mustart}{
+ starting values for the fitted values. It can be a vector or a matrix.
+ Some family functions do not make use of this argument.
+
+ }
+ \item{coefstart}{
+ starting values for the coefficient vector.
+
+ }
+ \item{control}{
+ a list of parameters for controlling the fitting process.
+ See \code{\link{rrvglm.control}} for details.
+
+ }
+ \item{offset}{
+ a vector or \eqn{M}-column matrix of offset values.
+ These are \emph{a priori} known and are
+ added to the linear predictors during fitting.
+
+ }
+ \item{method}{
+ the method to be used in fitting the model.
+ The default (and presently only) method \code{rrvglm.fit}
+ uses iteratively reweighted least squares (IRLS).
+
+ }
+ \item{model}{
+ a logical value indicating whether the \emph{model frame}
+ should be assigned in the \code{model} slot.
+
+ }
+ \item{x.arg, y.arg}{
+ logical values indicating whether
+ the model matrix and response vector/matrix used in the fitting
+ process should be assigned in the \code{x} and \code{y} slots.
+ Note the model matrix is the LM model matrix; to get the VGLM
+ model matrix type \code{model.matrix(vglmfit)} where
+ \code{vglmfit} is a \code{vglm} object.
+
+ }
+ \item{contrasts}{
+ an optional list. See the \code{contrasts.arg}
+ of \code{\link{model.matrix.default}}.
+
+ }
+ \item{constraints}{
+ an optional list of constraint matrices.
+ The components of the list must be named after the terms they
+ correspond to (and must match them in character format).
+ Each constraint matrix must have \eqn{M} rows, and be of
+ full-column rank.
+ By default, constraint matrices are the \eqn{M} by \eqn{M}
+ identity
+ matrix unless arguments in the family function itself override
+ these values.
+ If \code{constraints} is used it must contain \emph{all} the
+ terms; an incomplete list is not accepted.
+
+ }
+ \item{extra}{
+ an optional list with any extra information that might be needed
+ by the family function.
+
+ }
+ \item{qr.arg}{
+ logical value indicating whether
+ the slot \code{qr}, which returns the QR decomposition of the
+ VLM model matrix, is returned on the object.
+
+ }
+ \item{smart}{
+ logical value indicating whether smart prediction
+ (\code{\link{smartpred}}) will be used.
+
+ }
+ \item{\dots}{
+ further arguments passed into \code{\link{rrvglm.control}}.
+
+ }
+}
+\details{
+ The central formula is given by
+ \deqn{\eta = B_1^T x_1 + A \nu}{%
+ eta = B_1^T x_1 + A nu}
+ where \eqn{x_1}{x1} is a vector (usually just a 1 for an intercept),
+ \eqn{x_2}{x2} is another vector of explanatory variables, and
+ \eqn{\nu=C^T x_2}{nu=C^T x_2} is an \eqn{R}-vector of
+ latent variables.
+ Here, \eqn{\eta}{eta} is a vector of linear predictors, e.g., the
+ \eqn{m}th element is \eqn{\eta_m = \log(E[Y_m])}{eta_m = log(E[Y_m])}
+ for the \eqn{m}th Poisson response. The matrices \eqn{B_1}, \eqn{A}
+ and \eqn{C} are estimated from the data, i.e., contain the regression
+ coefficients. For ecologists, the central formula represents a
+ \emph{constrained linear ordination} (CLO) since it is linear in
+ the latent variables. It means that the response is a monotonically
+ increasing or decreasing function of the latent variables.
+
+ The underlying algorithm of RR-VGLMs is iteratively reweighted least
+ squares (IRLS) with an optimizing algorithm applied within each IRLS
+ iteration (e.g., alternating algorithm).
+
+ In theory, any \pkg{VGAM} family function that works for
+ \code{\link{vglm}} and \code{\link{vgam}} should work for \code{rrvglm}
+ too.
+
+ \code{rrvglm.fit} is the function that actually does the work. It is
+ \code{vglm.fit} with some extra code.
+
+}
+\value{
+ An object of class \code{"rrvglm"}, which has the same slots as
+ a \code{"vglm"} object. The only difference is that some of the
+ constraint matrices are estimated rather than known. But \pkg{VGAM}
+ stores the models the same way internally. The slots of \code{"vglm"}
+ stores the models the same internally. The slots of \code{"vglm"}
+ objects are described in \code{\link{vglm-class}}.
+
+}
+\references{
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Anderson, J. A. (1984)
+Regression and ordered categorical variables.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{46}, 1--30.
+
+}
+
+\author{ Thomas W. Yee }
+\note{
+ The smart prediction (\code{\link{smartpred}}) library is packed with
+ the \pkg{VGAM} library.
+
+ The arguments of \code{rrvglm} are the same as those of
+ \code{\link{vglm}} but with some extras in \code{\link{rrvglm.control}}.
+
+ In the example below, a rank-1 stereotype model of Anderson (1984)
+ is fitted to some car data. The reduced-rank regression is performed,
+ adjusting for two covariates. Setting a trivial constraint matrix for
+ each of the variables in \eqn{x_2}{x2} avoids a warning message
+ when it is overwritten by a (common) estimated constraint matrix.
+ It shows that German cars tend to be more expensive than American cars,
+ given a car of fixed weight and width.
+
+ If \code{fit <- rrvglm(..., data=mydata)} then \code{summary(fit)}
+ requires corner constraints and no missing values in \code{mydata}.
+ Often the estimated variance-covariance matrix of the parameters is
+ not positive-definite; if this occurs, try refitting the model with
+ a different value for \code{Index.corner}.
+
+ For \emph{constrained quadratic ordination} (CQO) see \code{\link{cqo}}
+ for more details about QRR-VGLMs.
+
+ With multivariate binary responses, one must use
+ \code{binomialff(mv=TRUE)} to indicate that the response (matrix)
+ is multivariate. Otherwise, it is interpreted as a single binary
+ response variable.
+
+}
+
+% zzz; arguments of \code{\link{vglm}} are definitive. They're copied here.
+
+\seealso{
+ \code{\link{rrvglm.control}},
+% \code{\link{qrrvglm.control}},
+ \code{\link{lvplot.rrvglm}}
+ (same as \code{\link{biplot.rrvglm}}),
+% \code{\link{vcovqrrvglm}},
+ \code{\link{rrvglm-class}},
+ \code{\link{grc}},
+ \code{\link{cqo}},
+ \code{\link{vglmff-class}},
+ \code{\link{vglm}},
+ \code{\link{vglm-class}},
+ \code{\link{smartpred}},
+ \code{rrvglm.fit}.
+ Methods functions include
+ \code{\link{Coef.rrvglm}},
+ \code{summary.rrvglm},
+ etc.
+}
+
+\examples{
+data(car.all)
+attach(car.all)
+index = Country == "Germany" | Country == "USA" |
+ Country == "Japan" | Country == "Korea"
+detach(car.all)
+scar = car.all[index, ] # standardized car data
+fcols = c(13,14,18:20,22:26,29:31,33,34,36) # These are factors
+scar[,-fcols] = scale(scar[,-fcols]) # Standardize all numerical vars
+ones = matrix(1, 3, 1)
+cms = list("(Intercept)"=diag(3), Width=ones, Weight=ones,
+ Disp.=diag(3), Tank=diag(3), Price=diag(3),
+ Frt.Leg.Room=diag(3))
+set.seed(111)
+fit = rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
+ multinomial, data = scar, Rank = 2, trace = TRUE,
+ constraints=cms, Norrr = ~ 1 + Width + Weight,
+ Uncor=TRUE, Corner=FALSE, Bestof=2)
+fit@misc$deviance # A history of the fits
+Coef(fit)
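+
+# A sketch of extracting the matrices in eta = B_1^T x_1 + A nu;
+# the slot names are as documented in Coef.rrvglm-class:
+cfit = Coef(fit)
+cfit@A  # M x Rank matrix A
+cfit@C  # Coefficients of the x2 variables
+cfit@B1 # Coefficients of the x1 (Norrr) variables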
+\dontrun{
+biplot(fit, chull=TRUE, scores=TRUE, clty=2, ccol="blue", scol="red",
+ Ccol="darkgreen", Clwd=2, Ccex=2,
+ main="1=Germany, 2=Japan, 3=Korea, 4=USA")
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
new file mode 100644
index 0000000..5b4f10e
--- /dev/null
+++ b/man/rrvglm.control.Rd
@@ -0,0 +1,215 @@
+\name{rrvglm.control}
+\alias{rrvglm.control}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Control function for rrvglm }
+\description{
+ Algorithmic constants and parameters for running \code{rrvglm} are set
+ using this function.
+}
+\usage{
+rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
+ Corner = TRUE, Uncor = FALSE, Wmat = NULL, Svd.arg = FALSE,
+ Index.corner = if (length(Structural.zero))
+ ((1:1000)[-Structural.zero])[1:Rank] else 1:Rank,
+ Alpha = 0.5, Bestof = 1, Cinit = NULL,
+ Etamat.colmax = 10,
+ SD.Cinit = 0.02, Structural.zero = NULL,
+ Norrr = ~1, trace = FALSE, Use.Init.Poisson.QO = FALSE,
+ checkwz = TRUE, wzepsilon = .Machine$double.eps^0.75, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ \item{Rank}{ The numerical rank \eqn{R} of the model.
+ Must be an element from the set \{1,2,\ldots,min(\eqn{M},\emph{p2})\}.
+ Here, the vector of explanatory variables \bold{x} is partitioned into
+ (\bold{x1},\bold{x2}), which is of dimension \emph{p1}+\emph{p2}.
+ The variables making up \bold{x1} are given by the terms in
+ \code{Norrr} argument, and the rest of the terms comprise \bold{x2}.
+ }
+ \item{Algorithm}{ Character string indicating what algorithm is
+ to be used. The default is the first one.
+
+ }
+ \item{Corner}{ Logical indicating whether corner constraints are
+ to be used. This is one method for ensuring a unique solution.
+ If \code{TRUE}, \code{Index.corner} specifies the \eqn{R} rows
+ of the constraint matrices that are used as the corner constraints,
+ i.e., they hold an order-\eqn{R} identity matrix.
+
+ }
+\item{Uncor}{ Logical indicating whether uncorrelated
+ latent variables are to be used. This is another normalization that
+ forces the variance-covariance matrix of the latent variables to be
+ \code{diag(Rank)}, i.e., unit variance and uncorrelated. This constraint
+ does not lead to a unique solution because it can be rotated.
+
+}
+ \item{Wmat}{ Yet to be done. }
+ \item{Svd.arg}{ Logical indicating whether a singular value decomposition
+ of the outer product is to be computed. This is another normalization
+ which ensures uniqueness. See the argument \code{Alpha} below.
+
+}
+ \item{Index.corner}{
+ Specifies the \eqn{R} rows of the constraint matrices that are
+ used for the corner constraints, i.e., they hold an order-\eqn{R}
+ identity matrix.
+
+ }
+ \item{Alpha}{
+ The exponent in the singular value decomposition that is used in
+ the first part: if the SVD is
+ \eqn{U D V^T}{ U \%*\% D \%*\% t(V) } then the
+ first and second parts are
+ \eqn{U D^{\alpha}}{ U \%*\% D^Alpha}
+ and
+ \eqn{D^{1-\alpha} V^T}{D^(1-Alpha) \%*\% t(V)} respectively.
+ A value of 0.5 is `symmetrical'.
+ This argument is used only when \code{Svd.arg=TRUE}.
+ }
+ \item{Bestof}{ Integer. The best of \code{Bestof} models fitted is
+ returned. This argument helps guard against local solutions by
+ (hopefully) finding the global solution from many fits. The
+ argument works only when the function generates its own initial value
+ for \bold{C},
+ i.e., when \bold{C} is \emph{not} passed in as initial values.
+ }
+ \item{Cinit}{
+ Initial \bold{C} matrix which may speed up convergence.
+ It must be of the correct dimension.
+ }
+ \item{Etamat.colmax}{
+ Positive integer, no smaller than \code{Rank}. Controls the amount
+ of memory used by \code{.Init.Poisson.QO()}. It is the maximum
+ number of columns allowed for the pseudo-response and its weights.
+ In general, the larger the value, the better the initial value.
+ Used only if \code{Use.Init.Poisson.QO=TRUE}.
+
+ }
+
+% \item{Quadratic}{
+% Logical indicating whether a \emph{Quadratic}
+% RR-VGLM is to be fitted. If \code{TRUE}, an object of class
+% \code{"qrrvglm"} will be returned, otherwise \code{"rrvglm"}.
+% }
+ \item{Structural.zero}{
+ Integer vector specifying which rows
+ of the constraint matrices are to be all zeros.
+ }
+ \item{SD.Cinit}{ Standard deviation of the initial values for the elements
+ of \bold{C}.
+ These are normally distributed with mean zero.
+ This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
+ }
+% \item{ppar}{ Ignore this. }
+ \item{Norrr}{
+ Formula giving terms that are not to be included in the reduced-rank
+ regression. These variables constitute the \eqn{\bold{B}_1}{\bold{B}1}
+ matrix in the Yee and Hastie paper. Those variables which
+ are subject to the reduced-rank regression correspond to the
+ \eqn{\bold{B}_2}{\bold{B}2} matrix.
+
+ }
+ \item{trace}{ Logical indicating if output should be produced for
+ each iteration.
+
+% Useful when \code{Quadratic=TRUE} because QRR-VGLMs are
+% computationally expensive and it's good to see that the program
+% is working!
+
+ }
+ \item{Use.Init.Poisson.QO}{
+ Logical indicating whether the \code{.Init.Poisson.QO()} should
+ be used to obtain initial values for the \bold{C}. The function
+ uses a new method that can work well if the data are Poisson counts
+ coming from an equal-tolerances QRR-VGLM (CQO). This option is less
+ realistic for RR-VGLMs compared to QRR-VGLMs.
+
+ }
+ \item{checkwz}{ logical indicating whether the diagonal elements of
+ the working weight matrices should be checked to see whether they are
+ sufficiently positive, i.e., greater than \code{wzepsilon}. If not,
+ any values less than \code{wzepsilon} are replaced with this value.
+
+ }
+ \item{wzepsilon}{
+ Small positive number used to test whether the diagonals of the working
+ weight matrices are sufficiently positive.
+
+ }
+ \item{\dots}{ Variables in \dots are passed into
+ \code{\link{vglm.control}}. If the derivative algorithm is used, then
+ \dots are also passed into \code{\link{rrvglm.optim.control}}.
+ }
+ In the above, \eqn{R} is the \code{Rank} and
+ \eqn{M} is the number of linear predictors.
+
+}
+\details{
+% QRR-VGLMs are an extension of RR-VGLMs and are useful for constrained
+% ordination. QRR-VGLMs fitted with \pkg{VGAM} allow a maximum
+% likelihood solution to constrained quadratic ordination (CQO;
+% formerly called canonical Gaussian ordination) models.
+
+% For QRR-VGLMs, if \code{EqualTolerances=TRUE} and
+% \code{ITolerances=FALSE} then the default is that the \bold{C}
+% matrix is constrained by forcing the latent variables to have sample
+% variance-covariance matrix equalling \code{diag(Rank)}, i.e., unit
+% variance and uncorrelated.
+
+ \pkg{VGAM} supports three normalizations to ensure a unique
+ solution. Of these, only corner constraints will work with
+ \code{summary} of RR-VGLM objects.
+
+}
+\value{
+ A list with components matching the input names.
+ Some error checking is done, but not much.
+}
+\references{
+ Yee, T. W. and Hastie, T. J. (2003)
+ Reduced-rank vector generalized linear models.
+ \emph{Statistical Modelling},
+ \bold{3}, 15--41.
+
+}
+\author{ Thomas W. Yee }
+\note{
+
+% The function call \code{cqo(...)} is equivalent to
+% \code{rrvglm(..., Quadratic=TRUE)}, and hence uses this function.
+% For QRR-VGLMs, the function \code{\link{qrrvglm.control}} is called too.
+
+ The arguments in this function begin with an upper case letter to help
+ avoid interference with those of \code{\link{vglm.control}}.
+
+ In the example below a rank-1 stereotype model (Anderson, 1984)
+ is fitted.
+
+}
+
+%- \section{Warning }{ }
+
+\seealso{
+ \code{\link{rrvglm}},
+ \code{\link{rrvglm.optim.control}},
+ \code{\link{rrvglm-class}},
+ \code{\link{vglm}},
+ \code{\link{vglm.control}},
+ \code{\link{cqo}}.
+}
+
+\examples{
+data(pneumo)
+set.seed(111)
+pneumo = transform(pneumo, let=log(exposure.time),
+ x1 = runif(nrow(pneumo))) # x1 is some unrelated covariate
+fit = rrvglm(cbind(normal, mild, severe) ~ let + x1,
+ multinomial, pneumo, Rank=1, Index.corner=2)
+constraints(fit)
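+
+# Under corner constraints, row Index.corner (= 2 here) of each
+# estimated constraint matrix holds an identity; a quick check
+# (the term name "let" comes from the formula above):
+constraints(fit)[["let"]]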
+vcov(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/rrvglm.optim.control.Rd b/man/rrvglm.optim.control.Rd
new file mode 100644
index 0000000..33380c6
--- /dev/null
+++ b/man/rrvglm.optim.control.Rd
@@ -0,0 +1,54 @@
+\name{rrvglm.optim.control}
+\alias{rrvglm.optim.control}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Control function for rrvglm() calling optim() }
+\description{
+ Algorithmic constants and parameters for running \code{optim} within
+ \code{rrvglm} are set using this function.
+}
+\usage{
+rrvglm.optim.control(Fnscale = 1, Maxit = 100,
+ Switch.optimizer = 3, Abstol = -Inf,
+ Reltol = sqrt(.Machine$double.eps), ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{Fnscale}{ Passed into \code{optim} as \code{fnscale}. }
+ \item{Maxit}{ Passed into \code{optim} as \code{maxit}. }
+ \item{Switch.optimizer}{ Iteration number when the "Nelder-Mead" method
+ of \code{optim} is switched to the quasi-Newton "BFGS" method.
+ Assigning \code{Switch.optimizer} a negative number
+ means always BFGS, while assigning \code{Switch.optimizer} a value
+ greater than \code{maxit} (the maximum number of IRLS iterations;
+ see \code{\link{vglm.control}}) means always use Nelder-Mead.
+ }
+ \item{Abstol}{ Passed into \code{optim} as \code{abstol}. }
+ \item{Reltol}{ Passed into \code{optim} as \code{reltol}. }
+ \item{\dots}{ Ignored. }
+}
+\details{
+See \code{\link[stats]{optim}} for more details.
+}
+\value{
+ A list with components equal to the arguments.
+}
+%\references{ ~put references to the literature/web site here ~ }
+\author{ Thomas W. Yee }
+\note{
+The transition between optimization methods may be unstable, so users
+may have to vary the value of \code{Switch.optimizer}.
+
+Practical experience with \code{Switch.optimizer} shows that setting
+it to too large a value may lead to a local solution, whereas setting
+it to a low value makes obtaining the global solution more likely.
+It appears that, if BFGS kicks in too late while the Nelder-Mead
+algorithm is already converging to a local solution, then switching
+to BFGS will not be sufficient to escape that local solution.
+}
+
+\seealso{ \code{\link{rrvglm.control}},
+\code{\link[stats]{optim}}.
+}
+%\examples{
+%}
+\keyword{models}
+\keyword{regression}
diff --git a/man/s.Rd b/man/s.Rd
new file mode 100644
index 0000000..4e3fa46
--- /dev/null
+++ b/man/s.Rd
@@ -0,0 +1,107 @@
+\name{s}
+\alias{s}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Defining smooths in VGAM formulae }
+\description{
+ \code{s} is used in the definition of (vector) smooth terms within
+ \code{vgam} formulae.
+}
+\usage{
+s(x, df = 4, spar = 0, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ In the following, \eqn{M} is the number of additive predictors
+ and \eqn{r} is the number of component functions to be
+ estimated (so that \eqn{r} is an element from the set
+ \{1,2,\ldots,\eqn{M}\}).
+ Also, if \eqn{n} is the number of \emph{distinct} abscissae, then
+ \code{s} will fail if \eqn{n < 7}.
+
+ \item{x}{
+ covariate (abscissae) to be smoothed.
+
+ }
+ \item{df}{
+ numerical vector of length \eqn{r}.
+ Effective degrees of freedom: must lie between 1 (linear fit)
+ and \eqn{n} (interpolation).
+ Recycling of values will be used if \code{df} is not of length \eqn{r}.
+
+ }
+ \item{spar}{ numerical vector of length \eqn{r}.
+ Positive smoothing parameters (after scaling).
+ Larger values mean more smoothing so that the solution approaches
+ a linear fit for that component function.
+ A zero value means that \code{df} is used.
+ Recycling of values will be used if \code{spar} is not of length
+ \eqn{r}.
+
+ }
+ \item{\dots}{ Ignored for now. }
+}
+\details{
+ \code{s}, which is symbolic and does not perform any smoothing itself,
+ only handles a single covariate.
+ It differs from the S-PLUS \code{s} and also the one from
+ the \code{mgcv} library. They should not be mixed together.
+
+ S-PLUS's \code{s} allowed \code{spar} to be negative;
+ \pkg{VGAM} does not allow this.
+
+}
+\value{
+ A vector with attributes that are used by \code{vgam}.
+
+}
+\references{
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The \code{x} argument of \code{s()} must be a single variable
+ and not a function of a variable.
+ For example, \code{s(x)} is fine but \code{s(log(x))} will fail.
+ In this case, let \code{logx <- log(x)}, say, and use
+ \code{s(logx)}.
+
+ The vector cubic smoothing spline which \code{s()} represents is
+ computationally demanding for large \eqn{M}. The cost is approximately
+ \eqn{O(M^3)}.
+
+}
+
+% ~Make other sections like WARNING with \section{WARNING }{....} ~
+
+\seealso{
+ \code{\link{vgam}},
+ \code{\link{vsmooth.spline}}.
+}
+
+\examples{
+# Nonparametric logistic regression
+data(hunua)
+fit = vgam(agaaus ~ s(altitude, df=3), binomialff, hunua)
+\dontrun{
+plot(fit, se=TRUE)}
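+
+# A rough sketch of the effect of df: a smaller value gives a smoother,
+# more nearly linear, component function
+fit2 = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
+\dontrun{
+plot(fit2, se=TRUE)}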
+
+
+# Bivariate logistic model with artificial data
+n = 300
+mydf = data.frame(x1=runif(n), x2=runif(n))
+mydf = transform(mydf,
+ y1 = ifelse(runif(n) < 1/(1+exp(-sin(2*x2))), 1, 0),
+ y2 = ifelse(runif(n) < 1/(1+exp(-sin(2*x2))), 1, 0))
+fit = vgam(cbind(y1,y2) ~ x1 + s(x2, 3), trace=TRUE,
+ binom2.or(exchangeable = TRUE ~ s(x2,3)), data=mydf)
+coef(fit, matrix=TRUE)
+\dontrun{
+plot(fit, se=TRUE, which.term= 2, scol="blue")}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
diff --git a/man/setup.smart.Rd b/man/setup.smart.Rd
new file mode 100644
index 0000000..1ee0d94
--- /dev/null
+++ b/man/setup.smart.Rd
@@ -0,0 +1,83 @@
+\name{setup.smart}
+\alias{setup.smart}
+\title{ Smart Prediction Setup }
+\description{
+ Sets up smart prediction in one of two modes:
+ \code{"write"} and \code{"read"}.
+}
+\usage{
+setup.smart(mode.arg, smart.prediction=NULL, max.smart=30)
+}
+\arguments{
+\item{mode.arg}{
+ \code{mode.arg} must be \code{"write"} or \code{"read"}. If in
+ \code{"read"} mode then \code{smart.prediction} must be assigned the
+ data structure \code{.smart.prediction} that was created while
+ fitting. This is stored in \code{object at smart.prediction} or
+ \code{object$smart.prediction} where
+ \code{object} is the name of the fitted object.
+}
+
+\item{smart.prediction}{
+If in \code{"read"} mode then \code{smart.prediction} must be assigned
+the list of data dependent parameters, which is stored
+on the fitted object.
+Otherwise, \code{smart.prediction} is ignored.
+}
+
+\item{max.smart}{
+\code{max.smart} is the initial length of the list \code{.smart.prediction}.
+It is not important because \code{.smart.prediction} is made larger if
+needed.
+}}
+\value{
+ Nothing is returned.
+}
+\section{Side Effects}{
+In \code{"write"} mode
+\code{.smart.prediction} in
+\code{smartpredenv} (\R) or frame 1 (S-PLUS)
+is assigned an empty list with \code{max.smart} components.
+In \code{"read"} mode
+\code{.smart.prediction} in
+\code{smartpredenv} (\R) or frame 1 (S-PLUS)
+is assigned \code{smart.prediction}.
+In both cases,
+\code{.smart.prediction.counter} in
+\code{smartpredenv} (\R) or
+frame 1 (S-PLUS)
+is assigned the value 0, and
+\code{.smart.prediction.mode} and \code{.max.smart} are written to
+\code{smartpredenv} (\R) or frame 1 (S-PLUS) too.
+}
+\details{
+This function is only required by programmers writing a modelling
+function such as \code{\link[stats]{lm}}
+and \code{\link[stats]{glm}}, or a prediction function for such,
+e.g., \code{\link[stats]{predict.lm}}.
+The function
+\code{setup.smart} operates by mimicking the operations of a
+first-in first-out data structure, better known as a \emph{queue}.
+}
+\seealso{
+ \code{\link[stats]{lm}},
+ \code{\link[stats]{predict.lm}}.
+
+}
+\examples{
+\dontrun{# Put at the beginning of lm
+setup.smart("write")
+}
+
+\dontrun{# Put at the beginning of predict.lm
+setup.smart("read", smart.prediction=object$smart.prediction)
+}
+
+
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
diff --git a/man/simplex.Rd b/man/simplex.Rd
new file mode 100644
index 0000000..40cd850
--- /dev/null
+++ b/man/simplex.Rd
@@ -0,0 +1,74 @@
+\name{simplex}
+\alias{simplex}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Simplex distribution }
+\description{
+ The two parameters of the univariate simplex distribution are estimated.
+}
+\usage{
+simplex(lmu = "logit", lsigma = "loge", imu = NULL, isigma = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lmu}{
+ Link function for \code{mu}.
+ See \code{\link{Links}} for more choices.
+ The parameter lies in the unit interval.
+
+ }
+ \item{lsigma}{
+ Link function for \code{sigma}.
+ See \code{\link{Links}} for more choices.
+ The parameter is positive, therefore the log link is the default.
+
+ }
+ \item{imu, isigma}{
+ Optional initial values for \code{mu} and \code{sigma}.
+ A \code{NULL} means a value is obtained internally.
+
+ }
+}
+\details{
+ See Jorgensen (1997) for details.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Jorgensen, B. (1997)
+\emph{The Theory of Dispersion Models}.
+London: Chapman & Hall.
+}
+\author{ T. W. Yee }
+\note{
+ This distribution is potentially useful for dispersion modelling.
+ This family function only works for intercept-only models, i.e.,
+ the formula should have \code{~ 1}.
+
+}
+
+\seealso{
+ \code{\link{rig}}.
+}
+\examples{
+x = runif(n <- 100)
+y = rbeta(n, shape1=3+x, shape2=4-x)
+fit = vglm(y ~ 1, simplex, trace=TRUE, eps=1e-9, cri="c")
+coef(fit, matrix=TRUE)
+Coef(fit)
+fit
+summary(fit)
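+
+# The fitted means should lie in the unit interval (a quick check)
+range(fitted(fit))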
+}
+\keyword{models}
+\keyword{regression}
+
+
+
+% zz fitted values and formulas needed here
+
+
+
+
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
new file mode 100644
index 0000000..9d147e5
--- /dev/null
+++ b/man/sinmad.Rd
@@ -0,0 +1,102 @@
+\name{sinmad}
+\alias{sinmad}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Singh-Maddala Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 3-parameter
+ Singh-Maddala distribution.
+}
+\usage{
+sinmad(link.a = "loge", link.scale = "loge", link.q = "loge",
+ init.a = NULL, init.scale = NULL, init.q = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.a, link.scale, link.q}{
+ Parameter link functions applied to the
+ (positive) parameters \code{a}, \code{scale}, and \code{q}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.a, init.scale, init.q}{
+ Optional initial values for \code{a}, \code{scale}, and \code{q}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ Here, the values must be from the set \{1,2,3\} which correspond to
+ \code{a}, \code{scale}, \code{q}, respectively.
+
+ }
+}
+\details{
+ The 3-parameter Singh-Maddala distribution is the 4-parameter
+ generalized beta II distribution with shape parameter \eqn{p=1}.
+It is known under various other names, such as the Burr XII (or
+just the Burr distribution), Pareto IV,
+beta-P, and generalized log-logistic distribution.
+ More details can be found in Kleiber and Kotz (2003).
+
+Some distributions which are special cases of the 3-parameter Singh-Maddala
+are the Lomax (\eqn{a=1}), Fisk (\eqn{q=1}), and
+paralogistic (\eqn{a=q}).
+
+The Singh-Maddala distribution has density
+ \deqn{f(y) = aq y^{a-1} / [b^a \{1 + (y/b)^a\}^{1+q}]}{%
+ f(y) = aq y^(a-1) / [b^a (1 + (y/b)^a)^(1+q)]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{q > 0}, \eqn{y > 0}.
+Here, \eqn{b} is the scale parameter \code{scale},
+and the others are shape parameters.
+The cumulative distribution function is
+ \deqn{F(y) = 1 - [1 + (y/b)^a]^{-q}.}{%
+ F(y) = 1 - [1 + (y/b)^a]^(-q).}
+The mean is
+ \deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(q - 1/a) / \Gamma(q)}{%
+ E(Y) = b gamma(1 + 1/a) gamma(q - 1/a) / gamma(q)}
+provided \eqn{-a < 1 < aq}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and
+ Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+}
+
+\author{ T. W. Yee }
+\note{
+If the self-starting initial values fail, try experimenting
+with the initial value arguments, especially those whose
+default value is not \code{NULL}.
+
+}
+
+\seealso{
+ \code{\link{Sinmad}},
+ \code{\link{genbetaII}},
+ \code{\link{betaII}},
+ \code{\link{dagum}},
+ \code{\link{fisk}},
+ \code{\link{invlomax}},
+ \code{\link{lomax}},
+ \code{\link{paralogistic}},
+ \code{\link{invparalogistic}}.
+}
+
+\examples{
+y = rsinmad(n=3000, 3, 5, 2)
+fit = vglm(y ~ 1, sinmad, trace=TRUE)
+fit = vglm(y ~ 1, sinmad, trace=TRUE, crit="c") # Coefficients as the convergence criterion
+coef(fit, mat=TRUE)
+Coef(fit)
+summary(fit)
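+# A quick check (a sketch): compare the sample mean of y with the
+# theoretical mean b*gamma(1+1/a)*gamma(q-1/a)/gamma(q) given in Details
+a = 3; b = 5; qq = 2
+mean(y)
+b * gamma(1 + 1/a) * gamma(qq - 1/a) / gamma(qq)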
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/skewnormal1.Rd b/man/skewnormal1.Rd
new file mode 100644
index 0000000..e98a103
--- /dev/null
+++ b/man/skewnormal1.Rd
@@ -0,0 +1,111 @@
+\name{skewnormal1}
+\alias{skewnormal1}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Univariate Skew-Normal Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the shape parameter of a univariate
+ skew-normal distribution.
+
+}
+\usage{
+skewnormal1(lshape = "identity", ishape = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape}{
+ Link function applied to the shape parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ishape}{
+ Optional initial value for the shape parameter.
+ The default is to choose one internally.
+ See the note below.
+
+ }
+}
+\details{
+ The univariate skew-normal distribution has a density
+ function that can be written
+ \deqn{f(y) = 2 \, \phi(y) \, \Phi(\alpha y)}{%
+ f(y) = 2 * phi(y) * Phi(alpha * y)}
+ where \eqn{\alpha}{alpha} is the shape parameter.
+ Here, \eqn{\phi}{phi} is the standard normal density and
+ \eqn{\Phi}{Phi} its cumulative distribution function.
+ When \eqn{\alpha=0}{alpha=0} the result is a standard normal distribution.
+ When \eqn{\alpha=1}{alpha=1} it models the distribution of the maximum of
+ two independent standard normal variates.
+ As the absolute value of the shape parameter
+ increases, the skewness of the distribution increases.
+ The limit as the shape parameter tends to positive infinity
+ results in the folded normal distribution or half-normal distribution.
+ When the shape parameter changes its sign, the density is reflected
+ about \eqn{y=0}.
+
+ The mean of the distribution is
+ \eqn{\mu=\alpha \sqrt{2/(\pi (1+\alpha^2))}}{mu=alpha*sqrt(2/(pi*(1+alpha^2)))}
+ and these are returned as the fitted values.
+ The variance of the distribution is \eqn{1-\mu^2}{1-mu^2}.
+ The Newton-Raphson algorithm is used.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+
+Azzalini, A. (1985).
+A class of distributions which includes the normal ones.
+\emph{Scandinavian Journal of Statistics},
+\bold{12}, 171--178.
+
+Azzalini, A. and Capitanio, A. (1999).
+Statistical applications of the multivariate skew-normal
+distribution.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{61}, 579--602.
+
+}
+
+\author{ Thomas W. Yee }
+\note{
+ It is a good idea to use several different initial values to ensure
+ that the global solution is obtained.
+
+ This family function will be modified (hopefully soon) to handle a
+ location and scale parameter too.
+
+}
+%\section{Warning }{
+% Numerical problems may occur.
+%}
+\seealso{
+ \code{\link{snorm}},
+ \code{\link{normal1}}.
+}
+
+\examples{
+y = rsnorm(n <- 1000, shape=5)
+fit = vglm(y ~ 1, skewnormal1, trace=TRUE)
+coef(fit, matrix=TRUE)
+fitted(fit)[1:4,]
+mean(y)
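+# A quick check (a sketch): the theoretical mean from Details,
+# alpha*sqrt(2/(pi*(1+alpha^2))), evaluated at the estimated shape
+alphahat = Coef(fit)
+alphahat * sqrt(2 / (pi * (1 + alphahat^2)))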
+\dontrun{
+hist(y, prob=TRUE)
+x = seq(min(y), max(y), len=200)
+lines(x, dsnorm(x, shape=Coef(fit)), col="blue")
+}
+
+x = runif(n)
+y = rsnorm(n, shape=1 + 2*x)
+fit = vglm(y ~ x, skewnormal1, trace=TRUE, crit="coef")
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
+
+
diff --git a/man/smart.expression.Rd b/man/smart.expression.Rd
new file mode 100644
index 0000000..c811152
--- /dev/null
+++ b/man/smart.expression.Rd
@@ -0,0 +1,36 @@
+\name{smart.expression}
+\alias{smart.expression}
+\title{ S Expression for Smart Functions }
+\description{
+ \code{smart.expression} is an S expression for
+ a smart function to call itself. It is best if you go through it line
+ by line, but most users will not need to know anything about it.
+ It requires the primary argument of the smart function to be called
+ \code{"x"}.
+
+ The list component \code{match.call} must be assigned the
+ value of \code{match.call()} in the smart function; this is so
+ that the smart function can call itself later.
+}
+\seealso{
+ \code{\link[base]{match.call}}.
+}
+\examples{
+"my2" <- function(x, minx=min(x)) { # Here is a smart function
+ x <- x # Needed for nested calls, e.g., bs(scale(x))
+ if(smart.mode.is("read")) {
+ return(eval(smart.expression))
+ } else if(smart.mode.is("write"))
+ put.smart(list(minx=minx, match.call=match.call()))
+ (x-minx)^2
+}
+attr(my2, "smart") <- TRUE
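+
+# In "neutral" mode my2() behaves like an ordinary function, e.g.,
+my2(1:5)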
+}
+%\keyword{smartpred}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
+% Edited manually 17/2/03, 9/7/03
diff --git a/man/smart.mode.is.Rd b/man/smart.mode.is.Rd
new file mode 100644
index 0000000..28a4c6a
--- /dev/null
+++ b/man/smart.mode.is.Rd
@@ -0,0 +1,57 @@
+\name{smart.mode.is}
+\alias{smart.mode.is}
+\title{ Determine What Mode the Smart Prediction is In }
+\description{
+Determine which of three modes the smart prediction is currently in.
+}
+\usage{
+smart.mode.is(mode.arg=NULL)
+}
+\arguments{
+\item{mode.arg}{
+ a character string, either \code{"read"}, \code{"write"} or \code{"neutral"}.
+}}
+\value{
+If \code{mode.arg} is given, then either \code{TRUE} or \code{FALSE} is returned.
+If \code{mode.arg} is not given, then the mode (\code{"neutral"},
+ \code{"read"} or \code{"write"})
+is returned. Usually, the mode is \code{"neutral"}.
+}
+\seealso{
+ \code{\link{put.smart}},
+ \code{\link[splines]{bs}},
+ \code{\link[stats]{poly}}.
+}
+\details{
+ Smart functions such as
+ \code{\link[splines]{bs}} and
+ \code{\link[stats]{poly}} need to know what mode
+ smart prediction is in. If it is in \code{"write"} mode
+ then the parameters are saved to \code{.smart.prediction}
+ using \code{\link{put.smart}}. If in \code{"read"} mode
+ then the parameters are read in using \code{\link{get.smart}}.
+ If in \code{"neutral"} mode then the smart function behaves like an
+ ordinary function.
+
+}
+\examples{
+my1 <- function(x, minx = min(x)) { # Here is a smart function
+ x <- x # Needed for nested calls, e.g., bs(scale(x))
+ if(smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else if(smart.mode.is("write"))
+ put.smart(list(minx = minx))
+ sqrt(x - minx)
+}
+attr(my1, "smart") <- TRUE
+
+smart.mode.is() # Returns "neutral"
+smart.mode.is(smart.mode.is()) # Returns TRUE
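+
+# In "neutral" mode my1() behaves like an ordinary function, e.g.,
+my1(1:5)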
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
diff --git a/man/smartpred.Rd b/man/smartpred.Rd
new file mode 100644
index 0000000..83a6069
--- /dev/null
+++ b/man/smartpred.Rd
@@ -0,0 +1,214 @@
+\name{smartpred}
+\alias{smartpred}
+\title{ Smart Prediction }
+\description{
+ Data-dependent parameters in formula terms
+ can cause problems when predicting.
+ The \pkg{smartpred} package for \R and S-PLUS saves
+ data-dependent parameters on the fitted object so that this problem is avoided.
+ The \code{\link[stats]{lm}} and \code{\link[stats]{glm}} functions have
+ been fixed properly. Note that the \pkg{VGAM} package by T. W. Yee
+ automatically comes with smart prediction.
+
+}
+%\usage{
+%lm()
+%glm()
+%ns()
+%bs()
+%poly()
+%scale()
+%vglm()
+%rrvglm()
+%vgam()
+%cao()
+%cqo()
+%uqo()
+%}
+\value{
+ Returns the usual object, but with one list/slot component called
+ \code{smart.prediction} containing any data-dependent parameters.
+}
+\section{Side Effects}{
+ The variables
+ \code{.max.smart},
+ \code{.smart.prediction} and
+ \code{.smart.prediction.counter}
+ are created while the model is being fitted.
+ In \R they are created in a new environment called \code{smartpredenv}.
+ In S-PLUS they are created in frame 1.
+ These variables are deleted after the model has been fitted.
+ However, in \R,
+ if there is an error in the model fitting function or the fitting
+ model is killed (e.g., by typing control-C) then these variables will
+ be left in \code{smartpredenv}. At the beginning of model fitting,
+ these variables are deleted if present in \code{smartpredenv}.
+
+ During prediction, the variables
+ \code{.smart.prediction} and
+ \code{.smart.prediction.counter}
+ are reconstructed and read by the smart functions when the model
+ frame is re-evaluated.
+ After prediction, these variables are deleted.
+
+ If the modelling function is used with argument \code{smart=FALSE}
+ (e.g., \code{vglm(..., smart=FALSE)}) then smart prediction will not
+ be used, and the results should match with the original \R or S-PLUS
+ functions.
+
+}
+\details{
+ \R version 1.6.0 introduced a fix for the prediction problem,
+ but it is only partial because it does not work all the time,
+ e.g., for terms such as
+ \code{I(poly(x, 3))},
+ \code{poly(c(scale(x)), 3)},
+ \code{bs(scale(x), 3)},
+ \code{scale(scale(x))}.
+ See the examples below.
+ Smart prediction, however, will always work.
+
+% albeit, not so elegantly.
+
+ The basic idea is that the functions in the formula are now smart, and the
+ modelling functions make use of these smart functions. Smart prediction
+ works in two ways: using \code{\link{smart.expression}}, or using a
+ combination of \code{\link{put.smart}} and \code{\link{get.smart}}.
+
+}
+
+\author{T. W. Yee and T. J. Hastie}
+\note{
+ In S-PLUS you will need to load in the \pkg{smartpred} library with
+ the argument \code{first=T}, e.g.,
+ \code{library(smartpred, lib="./mys8libs", first=T)}.
+ Here, \code{mys8libs} is the name of a directory of installed packages.
+ To install the smartpred package in Linux/Unix, type something like
+ \code{Splus8 INSTALL -l ./mys8libs ./smartpred_0.8-2.tar.gz}.
+
+}
+
+%\note{
+% In \R and
+% prior to the \pkg{VGAM} package using name spaces, the location of the
+% variables was the workspace. The present use of \code{smartpredenv}
+% is superior, and is somewhat similar to the S-PLUS implementation in
+% that the user is more oblivious to its existence.
+%
+%}
+
+\seealso{
+ \code{\link{get.smart.prediction}},
+ \code{\link{get.smart}},
+ \code{\link{put.smart}},
+ \code{\link{smart.expression}},
+ \code{\link{smart.mode.is}},
+ \code{\link{setup.smart}},
+ \code{\link{wrapup.smart}}.
+ Commonly used data-dependent functions include
+ \code{\link[base]{scale}},
+ \code{\link[base]{poly}},
+ \code{\link[splines]{bs}},
+ \code{\link[splines]{ns}}.
+ In \R,
+ the functions \code{\link[splines]{bs}}
+ and \code{\link[splines]{ns}} are in the
+ \pkg{splines} package, which is automatically
+ loaded because it contains compiled code that
+ \code{\link[splines]{bs}} and \code{\link[splines]{ns}} call.
+
+ The website \url{http://www.stat.auckland.ac.nz/~yee}
+ contains more information such as how to write a
+ smart function, and other technical details.
+
+ The functions \code{\link[VGAM]{vglm}},
+ \code{\link[VGAM]{vgam}},
+ \code{\link[VGAM]{rrvglm}}
+ and
+ \code{\link[VGAM]{cqo}}
+ in T. W. Yee's \pkg{VGAM}
+ package are examples of modelling functions that employ smart prediction.
+
+}
+\section{WARNING }{
+ In S-PLUS,
+ if the \code{"bigdata"} library is loaded then it is
+ \code{detach()}'ed. This is done because
+ \code{scale} cannot be made smart if \code{"bigdata"} is loaded
+ (it is loaded by default in the Windows version of
+ S-PLUS 8.0, but not in Linux/Unix).
+ The function \code{\link[base]{search}} tells what is
+ currently attached.
+
+ In \R and S-PLUS the functions
+ \code{\link[splines]{predict.bs}} and
+ \code{predict.ns}
+ are not smart.
+ That is because they operate on objects that contain attributes only
+ and do not have list components or slots.
+ In \R the function
+ \code{\link[stats:poly]{predict.poly}} is not smart.
+
+}
+
+\examples{
+# Create some data first
+n = 20
+set.seed(86) # For reproducibility of the random numbers
+x = sort(runif(n))
+y = sort(runif(n))
+\dontrun{if(is.R()) library(splines) # To get ns() in R
+}
+
+# This will work for R 1.6.0 and later, but fail for S-PLUS
+fit = lm(y ~ ns(x, df=5))
+\dontrun{plot(x, y)
+lines(x, fitted(fit))
+newx = seq(0, 1, len=n)
+points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
+}
+
+# The following fails for R 1.6.x and later but works with smart prediction
+\dontrun{fit = lm(y ~ ns(scale(x), df=5))
+fit$smart.prediction
+plot(x, y)
+lines(x, fitted(fit))
+newx = seq(0, 1, len=n)
+points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
+}
+
+# The following requires the VGAM package to be loaded
+\dontrun{library(VGAM)
+fit = vlm(y ~ ns(scale(x), df=5))
+fit@smart.prediction
+plot(x, y)
+lines(x, fitted(fit))
+newx = seq(0, 1, len=n)
+points(newx, predict(fit, data.frame(x=newx)), type="b", col=2, err=-1)
+}
+}
+%\keyword{smart}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+
+
+%lm(..., smart=TRUE)
+%glm(..., smart=TRUE)
+%ns()
+%bs()
+%poly()
+%scale()
+%vglm(..., smart=TRUE)
+%rrvglm(..., smart=TRUE)
+%vgam(..., smart=TRUE)
+%cao(..., smart=TRUE)
+%cqo(..., smart=TRUE)
+%uqo(..., smart=TRUE)
+
+%library(smartpred, lib="./mys8libs", first=T)
+
+
+
+
diff --git a/man/snormUC.Rd b/man/snormUC.Rd
new file mode 100644
index 0000000..df1767e
--- /dev/null
+++ b/man/snormUC.Rd
@@ -0,0 +1,81 @@
+\name{snorm}
+\alias{snorm}
+\alias{dsnorm}
+%\alias{psnorm}
+%\alias{qsnorm}
+\alias{rsnorm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Skew-Normal Distribution }
+\description{
+ Density and
+% , distribution function, quantile function and
+ random generation
+ for the skew-normal distribution.
+
+}
+\usage{
+dsnorm(x, location = 0, scale = 1, shape = 0)
+%psnorm(q, lambda)
+%qsnorm(p, lambda)
+rsnorm(n, location = 0, scale = 1, shape = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{vector of quantiles.}
+% \item{x, q}{vector of quantiles.}
+% \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{location}{
+ The location parameter \eqn{\xi}{xi}. A vector.
+ }
+ \item{scale}{
+ The scale parameter \eqn{\omega}{w}. A positive vector.
+ }
+ \item{shape}{
+ The shape parameter. It is called \eqn{\alpha}{alpha} in
+ \code{\link{skewnormal1}}.
+ }
+}
+\details{
+ See \code{\link{skewnormal1}}, which currently only estimates the shape
+ parameter.
+ More generally here, \eqn{Z = \xi + \omega Y}{Z = xi + w * Y} where
+ \eqn{Y} has a standard skew-normal distribution (see \code{\link{skewnormal1}}),
+ \eqn{\xi}{xi} is the location parameter and
+ \eqn{\omega}{w} is the scale parameter.
+
+}
+\value{
+ \code{dsnorm} gives the density,
+% \code{psnorm} gives the distribution function,
+% \code{qsnorm} gives the quantile function, and
+ \code{rsnorm} generates random deviates.
+}
+\references{
+ \url{http://tango.stat.unipd.it/SN}.
+}
+
+\author{ T. W. Yee }
+\note{
+ The default values of all three parameters correspond to the skew-normal
+ being the standard normal distribution.
+
+}
+
+\seealso{
+ \code{\link{skewnormal1}}.
+}
+\examples{
+\dontrun{
+n = 200 # grid resolution
+shape = 7
+x = seq(-4, 4, len=n)
+plot(x, dsnorm(x, shape=shape), type="l", col="blue", las=1, ylab="")
+abline(v=0, h=0, lty="dashed", col="darkgreen")
+lines(x, dnorm(x), col="red")
+legend(-3.5, 0.6, leg=c(paste("Blue=dsnorm(x, ", shape,")", sep=""),
+ "standard normal density"), lty=1, col=c("blue","red"))
+}
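+
+# Location-scale check (a sketch): by the transformation in Details,
+# dsnorm(x, xi, w, shape) should equal dsnorm((x-xi)/w, shape=shape)/w
+xx = c(-1, 0, 2)
+dsnorm(xx, location=2, scale=3, shape=7)
+dsnorm((xx - 2)/3, shape=7) / 3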
+}
+\keyword{distribution}
+
diff --git a/man/sratio.Rd b/man/sratio.Rd
new file mode 100644
index 0000000..7c9975b
--- /dev/null
+++ b/man/sratio.Rd
@@ -0,0 +1,124 @@
+\name{sratio}
+\alias{sratio}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Ordinal Regression with Stopping Ratios }
+\description{
+ Fits a stopping ratio logit/probit/cloglog/cauchit/...
+ regression model to an ordered (preferably) factor response.
+}
+\usage{
+sratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ In the following, the response \eqn{Y} is assumed to be a factor
+ with ordered values \eqn{1,2,\dots,M+1}, so that
+ \eqn{M} is the number of linear/additive predictors
+ \eqn{\eta_j}{eta_j}.
+
+ \item{link}{
+ Link function applied to the \eqn{M}
+ stopping ratio probabilities.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{parallel}{
+ A logical, or formula specifying which terms have
+ equal/unequal coefficients.
+
+ }
+ \item{reverse}{
+ Logical.
+ By default, the stopping ratios used are
+ \eqn{\eta_j = logit(P[Y=j|Y \geq j])}{eta_j = logit(P[Y=j|Y>=j])}
+ for \eqn{j=1,\dots,M}.
+ If \code{reverse} is \code{TRUE}, then
+ \eqn{\eta_j = logit(P[Y=j+1|Y \leq j+1])}{eta_j = logit(P[Y=j+1|Y<=j+1])}
+ will be used.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+ The default value means none are modelled as intercept-only terms.
+
+ }
+}
+\details{
+ There are a number of definitions for the \emph{continuation ratio}
+ in the literature. To make life easier, in the \pkg{VGAM} package,
+ we use \emph{continuation} ratios (see \code{\link{cratio}})
+ and \emph{stopping} ratios.
+ Stopping ratios deal with quantities such as
+ \code{logit(P[Y=j|Y>=j])}, whereas continuation ratios deal with
+ quantities such as \code{logit(P[Y>j|Y>=j])}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+Simonoff, J. S. (2003)
+\emph{Analyzing Categorical Data},
+New York: Springer-Verlag.
+
+McCullagh, P. and Nelder, J. A. (1989)
+\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The response should be either a matrix of counts (with row sums that
+ are all positive), or a factor. In both cases, the \code{y} slot
+ returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
+ of counts.
+
+ For a nominal (unordered) factor response, the multinomial
+ logit model (\code{\link{multinomial}}) is more appropriate.
+
+ Here is an example of the usage of the \code{parallel} argument.
+ If there are covariates \code{x1}, \code{x2} and \code{x3}, then
+ \code{parallel = TRUE ~ x1 + x2 -1} and
+ \code{parallel = FALSE ~ x3} are equivalent. This would constrain
+ the regression coefficients for \code{x1} and \code{x2} to be
+ equal; those of the intercepts and \code{x3} would be different.
+}
+\section{Warning }{
+ No check is made to verify that the response is ordinal.
+}
+
+\seealso{
+ \code{\link{cratio}},
+ \code{\link{acat}},
+ \code{\link{cumulative}},
+ \code{\link{multinomial}},
+ \code{\link{pneumo}},
+ \code{\link{logit}},
+ \code{\link{probit}},
+ \code{\link{cloglog}},
+ \code{\link{cauchit}}.
+}
+
+\examples{
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+(fit = vglm(cbind(normal,mild,severe) ~ let, sratio(parallel=TRUE), pneumo))
+coef(fit, matrix=TRUE)
+constraints(fit)
+predict(fit)
+predict(fit, untransform=TRUE)
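+# The fitted values are the M+1 category probabilities, so rows should sum to 1
+max(abs(rowSums(fitted(fit)) - 1)) # Should be approximately 0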
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/studentt.Rd b/man/studentt.Rd
new file mode 100644
index 0000000..8874460
--- /dev/null
+++ b/man/studentt.Rd
@@ -0,0 +1,75 @@
+\name{studentt}
+\alias{studentt}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Student t Distribution }
+\description{
+ Estimation of the degrees of freedom for a Student t distribution.
+}
+\usage{
+studentt(link.df = "loglog")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.df}{
+ Parameter link function for the degrees of freedom \eqn{\nu}{nu}.
+ See \code{\link{Links}} for more choices.
+ The default ensures the parameter is greater than unity.
+
+ }
+}
+\details{
+ The density function is
+ \deqn{f(y) = \frac{\Gamma((\nu+1)/2)}{\sqrt{\nu \pi} \Gamma(\nu/2)}
+ \left(1 + \frac{y^2}{\nu} \right)^{-(\nu+1)/2}}{%
+ f(y) = (gamma((nu+1)/2) / (sqrt(nu*pi) gamma(nu/2))) *
+ (1 + y^2 / nu)^{-(nu+1)/2} }
+ for all real \eqn{y}.
+ Then \eqn{E(Y)=0} if \eqn{\nu>1}{nu>1} (returned as the fitted values),
+ and \eqn{Var(Y)= \nu/(\nu-2)}{Var(Y)= nu/(nu-2)}
+ for \eqn{\nu > 2}{nu > 2}.
+ When \eqn{\nu=1}{nu=1} then the Student \eqn{t}-distribution
+ corresponds to the standard Cauchy distribution.
+ The degrees of freedom is treated as a parameter to be estimated,
+ and as a real value rather than an integer.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+3rd ed. New York: Wiley-Interscience.
+
+Student (1908)
+The probable error of a mean.
+\emph{Biometrika}, \bold{6}, 1--25.
+
+}
+
+\author{ T. W. Yee }
+\note{
+ A standard normal distribution corresponds to a \emph{t} distribution
+ with infinite degrees of freedom. Consequently, if the data is close
+ to normal, there may be convergence problems.
+
+}
+\seealso{
+ \code{\link{normal1}},
+ \code{\link{loglog}},
+ \code{\link[stats]{TDist}}.
+
+
+}
+\examples{
+n = 200
+y = rt(n, df=exp(2))
+fit = vglm(y ~ 1, studentt)
+coef(fit, matrix=TRUE)
+Coef(fit)
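+# A quick check (a sketch): sample variance vs. the theoretical nu/(nu-2)
+nuhat = Coef(fit)
+var(y)
+nuhat / (nuhat - 2)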
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
new file mode 100644
index 0000000..b882cd9
--- /dev/null
+++ b/man/tikuv.Rd
@@ -0,0 +1,114 @@
+\name{tikuv}
+\alias{tikuv}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Short-tailed Symmetric Distribution Family Function }
+\description{
+ Fits the short-tailed symmetric distribution of Tiku and Vaughan (1999).
+
+}
+\usage{
+tikuv(d, lmean="identity", lsigma="loge", isigma=NULL, zero=2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{d}{
+ The \eqn{d} parameter. It must be a single numeric value less than 2.
+ Then \eqn{h=2-d>0} is another parameter.
+
+ }
+ \item{lmean, lsigma}{
+ Link functions for the mean and standard
+ deviation parameters of the usual univariate normal distribution
+ (see \bold{Details} below).
+ They are \eqn{\mu}{mu} and \eqn{\sigma}{sigma} respectively.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{isigma}{
+ Optional initial value for \eqn{\sigma}{sigma}.
+ A \code{NULL} means a value is computed internally.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The values must be from the set \{1,2\} corresponding
+ respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}.
+ If \code{zero=NULL} then all linear/additive predictors are modelled as
+ a linear combination of the explanatory variables.
+ For many data sets having \code{zero=2} is a good idea.
+
+ }
+}
+\details{
+ The short-tailed symmetric distribution of Tiku and Vaughan (1999)
+ has a probability density function that can be written
+ \deqn{f(y) = \frac{K}{\sqrt{2\pi} \sigma}
+ \left[ 1 + \frac{1}{2h}
+ \left( \frac{y-\mu}{\sigma} \right)^2
+ \right]^2
+ \exp\left( -\frac12
+ (y-\mu)^2 / \sigma^2 \right) }{%
+ f(y) = (K/(sqrt(2*pi)*sigma)) *
+ [1 + (1/(2*h)) * ((y-mu)/sigma)^2]^2 *
+ exp( -0.5 * (y-mu)^2/ sigma^2) }
+ where \eqn{h=2-d>0},
+ \eqn{K} is a function of \eqn{h},
+ \eqn{-\infty < y < \infty}{-Inf < y < Inf},
+ \eqn{\sigma > 0}{sigma > 0}.
+ The mean of \eqn{Y} is
+ \eqn{E(Y) = \mu}{E(Y) = mu} and this is returned as the fitted values.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+
+ Akkaya, A. D. and Tiku, M. L. (2006)
+ Short-tailed distributions and inliers.
+ \emph{Test}, \bold{15}(2), in press.
+
+ Tiku, M. L. and Vaughan, D. C. (1999)
+ A family of short-tailed symmetric distributions.
+ \emph{Technical report, McMaster University, Canada}.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ The density function is the product of a univariate normal
+ density and a polynomial in the response \eqn{y}.
+ The distribution is bimodal if \eqn{d>0}, otherwise it is unimodal.
+ A normal distribution arises as the limit as \eqn{d} approaches
+ \eqn{-\infty}{-Inf}, i.e., as \eqn{h} approaches \eqn{\infty}{Inf}.
+ Fisher scoring is implemented.
+ After fitting, the value of \code{d} is stored in \code{@misc\$d}.
+
+}
+
+\section{Warning }{
+ Under- or over-flow may occur if the data is ill-conditioned,
+ e.g., when \eqn{d} is very close to 2 or approaches \eqn{-\infty}{-Inf}.
+
+}
+\seealso{
+ \code{\link{dtikuv}},
+ \code{\link{normal1}}.
+}
+
+\examples{
+m = 1.0; sigma = exp(0.5)
+sy = sort(y <- rtikuv(n=1000, d=1, mean=m, sigma=sigma))
+fit = vglm(y ~ 1, fam=tikuv(d=1), trace=TRUE)
+coef(fit, mat=TRUE)
+(Cfit = Coef(fit))
+mean(y)
+\dontrun{
+hist(y, prob=TRUE)
+lines(sy, dtikuv(sy, d=1, mean=Cfit[1], sigma=Cfit[2]), col="red")
+}
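+# Numerical check (a sketch): the density should integrate to 1
+integrate(dtikuv, lower=-Inf, upper=Inf, d=1)$value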
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
new file mode 100644
index 0000000..d2873f5
--- /dev/null
+++ b/man/tikuvUC.Rd
@@ -0,0 +1,87 @@
+\name{Tikuv}
+\alias{Tikuv}
+\alias{dtikuv}
+\alias{ptikuv}
+\alias{qtikuv}
+\alias{rtikuv}
+\title{A Short-tailed Symmetric Distribution }
+\description{
+ Density, cumulative distribution function, quantile function and
+ random generation for
+ the short-tailed symmetric distribution of Tiku and Vaughan (1999).
+
+}
+\usage{
+dtikuv(x, d, mean=0, sigma=1)
+ptikuv(q, d, mean=0, sigma=1)
+qtikuv(p, d, mean=0, sigma=1, ...)
+rtikuv(n, d, mean=0, sigma=1, Smallno=1.0e-6)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ Must be a positive integer of length 1.}
+ \item{d, mean, sigma }{
+ arguments for the parameters of the distribution.
+ See \code{\link{tikuv}} for more details.
+ For \code{rtikuv}, arguments \code{mean} and \code{sigma} must be of
+ length 1.
+
+ }
+ \item{Smallno}{
+ Numeric, a small value used by the rejection method for determining
+ the lower and upper limits of the distribution.
+ That is, \code{ptikuv(L) < Smallno} and \code{ptikuv(U) > 1-Smallno}
+ where \code{L} and \code{U} are the lower and upper limits respectively.
+
+ }
+ \item{\ldots}{
+ Arguments that can be passed into \code{\link[stats]{uniroot}}.
+
+ }
+}
+\value{
+ \code{dtikuv} gives the density,
+ \code{ptikuv} gives the cumulative distribution function,
+ \code{qtikuv} gives the quantile function, and
+ \code{rtikuv} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{tikuv}} for more details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{tikuv}}.
+
+}
+\examples{
+\dontrun{
+x = seq(-5, 5, len=401)
+plot(x, dnorm(x), type="l", col="black", ylab="",
+ main="Black is standard normal, others are dtikuv(x, d)")
+lines(x, dtikuv(x, d=-10), col="red")
+lines(x, dtikuv(x, d=-1 ), col="blue")
+lines(x, dtikuv(x, d= 1 ), col="green")
+legend(2, 0.35, col=c("red","blue","green"), lty=rep(1, len=3),
+ legend=paste("d =", c(-10, -1, 1)))
+
+plot(x, pnorm(x), type="l", col="black", ylab="",
+ main="Black is standard normal, others are ptikuv(x, d)")
+lines(x, ptikuv(x, d=-10), col="red")
+lines(x, ptikuv(x, d=-1 ), col="blue")
+lines(x, ptikuv(x, d= 1 ), col="green")
+legend(2, 0.35, col=c("red","blue","green"), lty=rep(1, len=3),
+ legend=paste("d =", c(-10, -1, 1)))
+
+probs = seq(0.1, 0.9, by=0.1)
+ptikuv(qtikuv(p=probs, d= 1), d=1) - probs # Should be all 0
+}
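+# Symmetry check (a sketch): the density is symmetric about `mean',
+# so the cdf evaluated there should equal 0.5
+ptikuv(0, d=1) # Should be 0.5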
+}
+\keyword{distribution}
+
+
diff --git a/man/tobit.Rd b/man/tobit.Rd
new file mode 100644
index 0000000..d08890b
--- /dev/null
+++ b/man/tobit.Rd
@@ -0,0 +1,127 @@
+\name{tobit}
+\alias{tobit}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Tobit Model }
+\description{
+ Fits a Tobit model to a univariate response.
+}
+\usage{
+tobit(Lower = 0, Upper = Inf, lmu="identity", lsd="loge",
+ imethod=1, zero=2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{Lower}{
+ Numeric of length 1; it is the value \eqn{L} described below.
+ Any value of the linear model
+ \eqn{x_i^T \beta}{x_i^T beta} that
+ is less than this value is assigned this value.
+ Hence this should be the smallest possible value in the response variable.
+
+ }
+ \item{Upper}{
+ Numeric of length 1, it is the value \eqn{U} described below.
+ Any value of the linear model
+ \eqn{x_i^T \beta}{x_i^T beta} that
+ is greater than this value is assigned this value.
+ Hence this should be the largest possible value in the response variable.
+
+ }
+ \item{lmu, lsd}{
+ Parameter link functions applied to the mean and
+ standard deviation parameters.
+ See \code{\link{Links}} for more choices.
+ The standard deviation is a positive quantity, so a log link
+ is its default.
+
+ }
+ \item{imethod}{
+ Initialization method. Either 1 or 2, this specifies
+ two methods for obtaining initial values for the parameters.
+
+ }
+ \item{zero}{
+ An integer vector, containing the value 1 or 2. The mean or the
+ standard deviation respectively is then modelled as an intercept only.
+ Setting \code{zero=NULL} means both linear/additive predictors
+ are modelled as functions of the explanatory variables.
+
+ }
+}
+\details{
+ The Tobit model can be written
+ \deqn{y_i^* = x_i^T \beta + \varepsilon_i}{%
+ y_i^* = x_i^T beta + e_i}
+ where the \eqn{e_i \sim N(0,\sigma^2)}{e_i ~ N(0,sigma^2)} independently and
+ \eqn{i=1,\ldots,n}{i=1,...,n}.
+ However, we measure \eqn{y_i = y_i^*} only if \eqn{y_i^* > L} and
+ \eqn{y_i^* < U} for some
+ cutpoints \eqn{L} and \eqn{U}. Otherwise we let \eqn{y_i=L} or
+ \eqn{y_i=U}, whichever is closer.
+ The Tobit model is thus a multiple linear regression with responses
+ censored below \eqn{L} and above \eqn{U}.
+
+ The defaults for \code{Lower} and \code{Upper} correspond to the
+ classical Tobit model. By default, the mean \eqn{x_i^T \beta}{x_i^T
+ beta} is the first linear/additive predictor, and the log of the
+ standard deviation is the second linear/additive predictor. The Fisher
+ information matrix for uncensored data is diagonal.
+ The fitted values are the estimates of \eqn{x_i^T \beta}{x_i^T beta}.
+
+}
+%\section{Warning }{
+% The working weight matrices for most observations
+% are not positive-definite. These responses would otherwise have a
+% fitted value much less than the cutpoint.
+%}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+}
+\references{
+Tobin, J. (1958).
+Estimation of relationships for limited dependent variables.
+\emph{Econometrica} \bold{26}, 24--36.
+}
+
+\author{ Thomas W. Yee }
+\note{
+ The response must be univariate. If there is no censoring then
+ \code{\link{normal1}} is recommended instead. Any value of the
+ response less than \code{Lower} or greater than \code{Upper} will
+ be assigned the value \code{Lower} and \code{Upper} respectively,
+ and a warning will be issued.
+ The fitted object has components \code{censoredL} and \code{censoredU}
+ in the \code{extra} slot which specifies whether observations
+ are censored in that direction.
+
+}
+\seealso{
+ \code{\link{normal1}},
+ \code{\link{cnormal1}},
+ \code{\link{dcnormal1}}.
+}
+\examples{\dontrun{
+n = 1000
+x = seq(-1, 1, len=n)
+f = function(x) 1 + 4*x
+ystar = f(x) + rnorm(n)
+Lower = 1
+Upper = 4
+y = pmax(ystar, Lower)
+y = pmin(y, Upper)
+plot(x, y, main="Tobit model", las=1)
+legend(-0.9, 3, c("Truth", "Estimate"), col=c("Blue", "Red"), lwd=2)
+table(y==Lower | y==Upper) # How many censored values?
+fit = vglm(y ~ x, tobit(Lower=Lower, Upper=Upper), trace=TRUE)
+table(fit@extra$censoredL)
+table(fit@extra$censoredU)
+coef(fit, matrix=TRUE)
+lines(x, f(x), col="blue", lwd=2) # The truth
+lines(x, fitted(fit), col="red", lwd=2, lty="dashed") # The estimate
+summary(fit)
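+# For comparison (a sketch): a naive linear model that ignores the
+# censoring gives attenuated estimates of the slope
+coef(lm(y ~ x))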
+}
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/tparetoUC.Rd b/man/tparetoUC.Rd
new file mode 100644
index 0000000..b25d3ec
--- /dev/null
+++ b/man/tparetoUC.Rd
@@ -0,0 +1,77 @@
+\name{Tpareto}
+\alias{Tpareto}
+\alias{dtpareto}
+\alias{ptpareto}
+\alias{qtpareto}
+\alias{rtpareto}
+\title{The Truncated Pareto Distribution}
+\description{
+ Density, distribution function, quantile function and random generation
+ for the upper truncated Pareto(I) distribution with parameters
+ \code{lower}, \code{upper} and \code{shape}.
+
+}
+\usage{
+dtpareto(x, lower, upper, shape)
+ptpareto(q, lower, upper, shape)
+qtpareto(p, lower, upper, shape)
+rtpareto(n, lower, upper, shape)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{lower, upper, shape}{
+ the lower, upper and shape (\eqn{k}) parameters.
+ If necessary, values are recycled.
+ }
+}
+\value{
+ \code{dtpareto} gives the density,
+ \code{ptpareto} gives the distribution function,
+ \code{qtpareto} gives the quantile function, and
+ \code{rtpareto} generates random deviates.
+}
+\references{
+ Aban, I. B., Meerschaert, M. M. and Panorska, A. K. (2006).
+ Parameter estimation for the truncated Pareto distribution,
+ \emph{Journal of the American Statistical Association},
+ \bold{101}(473),
+ 270--277.
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{tpareto1}}, the \pkg{VGAM} family function
+ for estimating the parameter \eqn{k} by maximum likelihood estimation,
+ for the formula of the probability density function and the
+ range restrictions imposed on the parameters.
+
+}
+%%\note{
+%% The truncated Pareto distribution is
+%%}
+\seealso{
+ \code{\link{tpareto1}}.
+
+}
+\examples{
+lower = 3; upper = 8; k = exp(0.5)
+\dontrun{
+x = seq(lower, upper, len=200)
+plot(x, dtpareto(x, lo=lower, up=upper, shape=k), type="l",
+ main="Truncated Pareto density split into 10 equal areas")
+abline(h=0, col="blue", lty=2)
+qq = qtpareto(seq(0.1, 0.9, by=0.1), lo=lower, up=upper, shape=k)
+lines(qq, dtpareto(qq, lo=lower, up=upper, shape=k),
+ col="purple", lty=3, type="h")
+}
+pp = seq(0.1,0.9,by=0.1)
+qq = qtpareto(pp, lo=lower, up=upper, shape=k)
+ptpareto(qq, lo=lower, up=upper, shape=k)
+qtpareto(ptpareto(qq, lo=lower, up=upper, shape=k),
+ lo=lower, up=upper, shape=k) - qq # Should be all 0
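+
+# Numerical check (a sketch): the truncated density integrates to 1
+integrate(function(x) dtpareto(x, lower=lower, upper=upper, shape=k),
+ lower=lower, upper=upper)$value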
+}
+\keyword{distribution}
+
+
diff --git a/man/trplot.Rd b/man/trplot.Rd
new file mode 100644
index 0000000..f2ce821
--- /dev/null
+++ b/man/trplot.Rd
@@ -0,0 +1,81 @@
+\name{trplot}
+\alias{trplot}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Trajectory Plot }
+\description{
+ Generic function for a trajectory plot.
+}
+\usage{
+trplot(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which a trajectory plot is meaningful.
+ }
+ \item{\dots}{ Other arguments fed into the specific
+ methods function of the model. They usually are graphical
+ parameters, and sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+ }
+}
+\details{
+ Trajectory plots can be defined in different ways for different
+ models.
+ Many models have no such notion or definition.
+
+ For quadratic and additive ordination models they plot the
+ fitted values of two species against each other (more than
+ two is theoretically possible, but not implemented in this software
+ yet).
+}
+\value{
+ The value returned depends specifically on the methods
+ function invoked.
+}
+\references{
+
+Yee, T. W. (2005)
+On constrained and unconstrained
+quadratic ordination.
+\emph{Manuscript in preparation}.
+
+}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+
+
+\seealso{
+ \code{\link{trplot.qrrvglm}},
+ \code{\link{persp.qrrvglm}},
+ \code{\link{lvplot}}.
+}
+
+\examples{
+\dontrun{
+data(hspider)
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+set.seed(123)
+
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+
+nos = ncol(p1@y)
+clr = 1:nos # OR (1:(nos+1))[-7] to omit yellow
+
+trplot(p1, whichSpecies=1:3, log="xy",
+ col=c("blue","red","green"), lwd=2, label=TRUE) -> ii
+legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
+ lwd=2, lty=1, col=c("blue","red","green"))
+abline(a=0, b=1, lty="dashed")
+
+
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
new file mode 100644
index 0000000..4f0f553
--- /dev/null
+++ b/man/trplot.qrrvglm.Rd
@@ -0,0 +1,167 @@
+\name{trplot.qrrvglm}
+\alias{trplot.qrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Trajectory plot for QRR-VGLMs }
+\description{
+Produces a trajectory plot for
+\emph{quadratic reduced-rank vector generalized linear models}
+(QRR-VGLMs).
+It is only applicable for rank-1 models with argument
+\code{Norrr = ~ 1}.
+}
+\usage{
+trplot.qrrvglm(object, whichSpecies = NULL,
+ add=FALSE, plot.it=TRUE,
+ label.sites = FALSE,
+ sitenames = dimnames(object@y)[[1]],
+ axes.equal = TRUE, cex = par()$cex,
+ col = 1:(nos * (nos - 1)/2), log = "",
+ lty = rep(par()$lty, len = nos * (nos - 1)/2),
+ lwd = rep(par()$lwd, len = nos * (nos - 1)/2),
+ tcol = rep(par()$col, len = nos * (nos - 1)/2),
+ xlab = NULL, ylab = NULL,
+ main = "", type = "b", check.ok=TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ Object of class \code{"qrrvglm"}, i.e., a CQO object. }
+ \item{whichSpecies}{ Integer or character vector specifying the
+ species to be plotted. If integer, these are the columns of the
+ response matrix. If character, these must match exactly with the
+ species' names.
+ The default is to use all species.
+ }
+ \item{add}{ Logical. Add to an existing plot? If \code{FALSE} (default),
+ a new plot is made. }
+ \item{plot.it}{ Logical. Plot it? }
+ \item{label.sites}{ Logical. If \code{TRUE}, the points on the
+ curves/trajectories are labelled with the \code{sitenames}. }
+ \item{sitenames}{ Character vector. The names of the sites. }
+ \item{axes.equal}{ Logical. If \code{TRUE}, the x- and y-axes
+ will be on the same scale.
+ }
+ \item{cex}{ Character expansion of the labelling of the site names.
+ Used only if \code{label.sites} is \code{TRUE}.
+ See the \code{cex} argument in \code{\link[graphics]{par}}.
+ }
+ \item{col}{Color of the lines.
+ See the \code{col} argument in \code{\link[graphics]{par}}.
+ Here, \code{nos} is the number of species.
+ }
+ \item{log}{ Character, specifying which (if any) of the x- and
+ y-axes are to be on a logarithmic scale.
+ See the \code{log} argument in \code{\link[graphics]{par}}.
+ }
+ \item{lty}{ Line type.
+ See the \code{lty} argument of \code{\link[graphics]{par}}.
+ }
+ \item{lwd}{ Line width.
+ See the \code{lwd} argument of \code{\link[graphics]{par}}.
+ }
+ \item{tcol}{Color of the text for the site names.
+ See the \code{col} argument in \code{\link[graphics]{par}}.
+ Used only if \code{label.sites} is \code{TRUE}.
+ }
+ \item{xlab}{Character caption for the x-axis.
+ By default, a suitable caption is found.
+ See the \code{xlab} argument in \code{\link[graphics]{plot}}
+ or \code{\link[graphics]{title}}.
+ }
+ \item{ylab}{Character caption for the y-axis.
+ By default, a suitable caption is found.
+ See the \code{xlab} argument in \code{\link[graphics]{plot}}
+ or \code{\link[graphics]{title}}.
+ }
+ \item{main}{ Character, giving the title of the plot.
+ See the \code{main} argument in \code{\link[graphics]{plot}}
+ or \code{\link[graphics]{title}}.
+ }
+ \item{type}{ Character, giving the type of plot. A common
+ option is to use \code{type="l"} for lines only.
+ See the \code{type} argument of \code{\link[graphics]{plot}}.
+ }
+ \item{check.ok}{ Logical. Whether a check is performed to see
+ that \code{Norrr = ~ 1} was used.
+ It doesn't make sense to have a trajectory plot unless this is so.
+ }
+ \item{\dots}{ Arguments passed into the \code{plot} function
+ when setting up the entire plot. Useful arguments here include
+ \code{xlim} and \code{ylim}.
+ }
+}
+\details{
+ A trajectory plot plots the fitted values of a `second' species
+ against a `first' species. The argument \code{whichSpecies} must
+ therefore contain at least two species. By default, all of the
+ species that were fitted in \code{object} are plotted.
+ With more than a few species
+ the resulting plot will be very congested, and so it is recommended
+ that only a few species be selected for plotting.
+
+In the above, \eqn{M} is the number of species selected for plotting,
+so there will be \eqn{M(M-1)/2}{M*(M-1)/2} curves/trajectories in total.
+
+A trajectory plot will be fitted only if \code{Norrr = ~ 1} because
+otherwise the trajectory will not be a smooth function of the latent
+variables.
+
+}
+\value{
+ A list with the following components.
+ \item{species.names}{
+ A matrix of characters giving the `first' and `second' species. The
+ number of different combinations of species is given by the number
+ of rows. This is useful for creating a legend.
+
+ }
+ \item{sitenames}{A character vector of site names, sorted by
+ the latent variable (from low to high).
+ }
+}
+\references{
+
+Yee, T. W. (2005)
+On constrained and unconstrained
+quadratic ordination.
+\emph{Manuscript in preparation}.
+
+}
+
+\author{ Thomas W. Yee }
+
+\note{
+ Plotting the axes on a log scale is often a good idea.
+ The use of \code{xlim} and \code{ylim} to control the axis limits
+ is also a good idea, so as to limit the extent of the curves at low
+ abundances or probabilities.
+ Setting \code{label.sites=TRUE} is a good idea only if the number of
+ sites is small, otherwise there is too much clutter.
+
+}
+
+\seealso{
+\code{\link{cqo}},
+\code{\link[graphics]{par}},
+\code{\link[graphics]{title}}.
+}
+
+\examples{\dontrun{
+data(hspider)
+set.seed(111) # This leads to the global solution
+# hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam = poissonff, data = hspider, trace=FALSE)
+
+trplot(p1, whichSpecies=1:3, log="xy", type="b", lty=1,
+ main="Trajectory plot of three hunting spiders species",
+ col=c("blue","red","green"), lwd=2, label=TRUE) -> ii
+legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
+ lwd=2, lty=1, col=c("blue","red","green"))
+abline(a=0, b=1, lty="dashed") # Useful reference line
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{hplot}
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
new file mode 100644
index 0000000..362a9fb
--- /dev/null
+++ b/man/undocumented-methods.Rd
@@ -0,0 +1,222 @@
+\name{undocumented-methods}
+\docType{methods}
+%\alias{ccoef,ANY-method}
+%\alias{ccoef-method}
+%
+%
+%
+\alias{attrassign,lm-method}
+\alias{calibrate,qrrvglm-method}
+\alias{calibrate,cao-method}
+\alias{calibrate,uqo-method}
+\alias{cdf,vglm-method}
+\alias{cdf,vgam-method}
+\alias{coefficients,cao-method}
+\alias{coefficients,vlm-method}
+\alias{coefficients,vglm-method}
+\alias{coefficients,qrrvglm-method}
+\alias{coefficients,uqo-method}
+\alias{coefficients,vsmooth.spline-method}
+\alias{coefficients,vsmooth.spline.fit-method}
+\alias{Coefficients,vlm-method}
+\alias{coef,cao-method}
+\alias{coef,vlm-method}
+\alias{coef,vglm-method}
+\alias{coef,qrrvglm-method}
+\alias{coef,uqo-method}
+\alias{coef,vsmooth.spline-method}
+\alias{coef,vsmooth.spline.fit-method}
+\alias{Coef,cao-method}
+\alias{Coef,vlm-method}
+\alias{Coef,qrrvglm-method}
+\alias{Coef,rrvglm-method}
+\alias{Coef,uqo-method}
+\alias{constraints,vlm-method}
+\alias{deplot,vglm-method}
+\alias{deplot,vgam-method}
+\alias{deviance,qrrvglm-method}
+\alias{deviance,vlm-method}
+\alias{deviance,vglm-method}
+\alias{deviance,uqo-method}
+\alias{df.residual,vlm-method}
+\alias{effects,vlm-method}
+\alias{fitted.values,qrrvglm-method}
+\alias{fitted.values,vlm-method}
+\alias{fitted.values,vglm-method}
+\alias{fitted.values,uqo-method}
+\alias{fitted.values,vsmooth.spline-method}
+\alias{fitted,qrrvglm-method}
+\alias{fitted,vlm-method}
+\alias{fitted,vglm-method}
+\alias{fitted,uqo-method}
+\alias{fitted,vsmooth.spline-method}
+\alias{guplot,numeric-method}
+\alias{guplot,vlm-method}
+\alias{model.frame,vlm-method}
+\alias{plot,cao,ANY-method}
+\alias{plot,vlm,ANY-method}
+\alias{plot,vglm,ANY-method}
+\alias{plot,vgam,ANY-method}
+\alias{plot,qrrvglm,ANY-method}
+\alias{predictors,vglm-method}
+\alias{rlplot,vglm-method}
+\alias{terms,vlm-method}
+\alias{is.bell,uqo-method}
+\alias{is.bell,qrrvglm-method}
+\alias{is.bell,rrvglm-method}
+\alias{is.bell,vlm-method}
+\alias{is.bell,cao-method}
+\alias{is.bell,Coef.qrrvglm-method}
+\alias{logLik,vlm-method}
+\alias{logLik,summary.vglm-method}
+\alias{lvplot,cao-method}
+\alias{lvplot,qrrvglm-method}
+\alias{lvplot,rrvglm-method}
+\alias{lvplot,uqo-method}
+\alias{lv,cao-method}
+\alias{lv,Coef.cao-method}
+\alias{lv,rrvglm-method}
+\alias{lv,qrrvglm-method}
+\alias{lv,Coef.rrvglm-method}
+\alias{lv,Coef.qrrvglm-method}
+\alias{lv,uqo-method}
+\alias{Max,qrrvglm-method}
+\alias{Max,Coef.qrrvglm-method}
+\alias{Max,uqo-method}
+\alias{meplot,numeric-method}
+\alias{meplot,vlm-method}
+\alias{model.matrix,qrrvglm-method}
+\alias{model.matrix,vlm-method}
+\alias{Opt,qrrvglm-method}
+\alias{Opt,Coef.qrrvglm-method}
+\alias{Opt,uqo-method}
+\alias{persp,cao-method}
+\alias{persp,qrrvglm-method}
+\alias{persp,uqo-method}
+\alias{plot,uqo,ANY-method}
+\alias{plot,vsmooth.spline,ANY-method}
+\alias{predict,cao-method}
+\alias{predict,qrrvglm-method}
+\alias{predict,vgam-method}
+\alias{predict,vglm-method}
+\alias{predict,rrvglm-method}
+\alias{predict,vlm-method}
+\alias{predict,uqo-method}
+\alias{predict,vsmooth.spline-method}
+\alias{predict,vsmooth.spline.fit-method}
+\alias{print,vglmff-method}
+\alias{print,Coef.cao-method}
+\alias{print,summary.cao-method}
+\alias{print,qrrvglm-method}
+\alias{print,Coef.qrrvglm-method}
+\alias{print,rrvglm-method}
+\alias{print,summary.qrrvglm-method}
+\alias{print,Coef.rrvglm-method}
+\alias{print,vlm-method}
+\alias{print,vglm-method}
+\alias{print,vgam-method}
+\alias{print,summary.rrvglm-method}
+\alias{print,summary.vgam-method}
+\alias{print,summary.vglm-method}
+\alias{print,summary.vlm-method}
+\alias{print,uqo-method}
+\alias{print,Coef.uqo-method}
+\alias{print,summary.uqo-method}
+\alias{print,vsmooth.spline-method}
+\alias{qtplot,vglm-method}
+\alias{qtplot,vgam-method}
+\alias{residuals,qrrvglm-method}
+\alias{residuals,vlm-method}
+\alias{residuals,vglm-method}
+\alias{residuals,vgam-method}
+\alias{residuals,uqo-method}
+\alias{residuals,vsmooth.spline-method}
+\alias{resid,vlm-method}
+\alias{resid,vglm-method}
+\alias{resid,vgam-method}
+\alias{resid,uqo-method}
+\alias{resid,vsmooth.spline-method}
+\alias{show,vglmff-method}
+\alias{show,Coef.cao-method}
+\alias{show,summary.cao-method}
+\alias{show,qrrvglm-method}
+\alias{show,Coef.qrrvglm-method}
+\alias{show,rrvglm-method}
+\alias{show,summary.qrrvglm-method}
+\alias{show,Coef.rrvglm-method}
+\alias{show,vlm-method}
+\alias{show,vglm-method}
+\alias{show,vgam-method}
+\alias{show,summary.rrvglm-method}
+\alias{show,summary.vgam-method}
+\alias{show,summary.vglm-method}
+\alias{show,summary.vlm-method}
+\alias{show,uqo-method}
+\alias{show,Coef.uqo-method}
+\alias{show,summary.uqo-method}
+\alias{show,vsmooth.spline-method}
+\alias{summary,grc-method}
+\alias{summary,cao-method}
+\alias{summary,qrrvglm-method}
+\alias{summary,rrvglm-method}
+\alias{summary,vgam-method}
+\alias{summary,vglm-method}
+\alias{summary,vlm-method}
+\alias{summary,uqo-method}
+\alias{Tol,cao-method}
+\alias{Tol,qrrvglm-method}
+\alias{Tol,Coef.qrrvglm-method}
+\alias{Tol,uqo-method}
+\alias{Tol,Coef.uqo-method}
+\alias{trplot,qrrvglm-method}
+\alias{trplot,uqo-method}
+\alias{vcov,rrvglm-method}
+\alias{vcov,qrrvglm-method}
+\alias{vcov,vlm-method}
+\alias{vcov,vglm-method}
+\alias{vplot,factor-method}
+\alias{vplot,list-method}
+\alias{vplot,matrix-method}
+\alias{vplot,numeric-method}
+\alias{weights,vlm-method}
+\alias{weights,vglm-method}
+%
+%
+% This does not work (need one line for each one):
+%\alias{trplot,qrrvglm,uqo-method}
+%
+%
+%
+\title{ Undocumented Methods Functions }
+\description{
+ Lots of undocumented methods functions are aliased here.
+ In the \pkg{VGAM} package there are currently many
+ objects/methods/classes which are internal and/or
+ undocumented. This help file suppresses the warnings when the
+ package is 'CHECK'ed.
+
+}
+%\usage{
+% \S4method{ccoef}{cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm}(object, ...)
+%}
+
+\section{Methods}{
+ There are many methods and these will be documented over time.
+
+\describe{
+
+\item{object}{
+ This argument is often used, and it is the primary object
+ on which the function operates.
+
+}
+}
+}
+\keyword{methods}
+\keyword{classes}
+%\keyword{ ~~ other possible keyword(s)}
+\keyword{models}
+\keyword{regression}
+
+
+
diff --git a/man/uqo.Rd b/man/uqo.Rd
new file mode 100644
index 0000000..77d4fa7
--- /dev/null
+++ b/man/uqo.Rd
@@ -0,0 +1,269 @@
+\name{uqo}
+\alias{uqo}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Fitting Unconstrained Quadratic Ordination (UQO)}
+\description{
+ An \emph{unconstrained quadratic ordination} (UQO)
+ (equivalently, noncanonical Gaussian ordination) model
+ is fitted using the
+ \emph{quadratic unconstrained vector generalized linear model}
+ (QU-VGLM) framework.
+ In this documentation, \eqn{M} is the number of linear predictors
+ or species.
+
+}
+\usage{
+uqo(formula, family, data = list(), weights = NULL, subset = NULL,
+ na.action = na.fail, etastart = NULL, mustart = NULL,
+ coefstart = NULL, control = uqo.control(...), offset = NULL,
+ method = "uqo.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL, constraints = NULL, extra = NULL,
+ qr.arg = FALSE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{formula}{ a symbolic description of the model to be fit.
+ Since there is no \eqn{x_2} vector by definition, the RHS of
+ the formula has all terms belonging to the \eqn{x_1} vector.
+
+ }
+ \item{family}{ a function of class \code{"vglmff"} describing
+ what statistical model is to be fitted. Currently two families
+ are supported: Poisson and binomial.
+ }
+ \item{data}{ an optional data frame containing the variables
+ in the model. By default the variables are taken from
+ \code{environment(formula)}, typically the environment from
+ which \code{uqo} is called.
+ }
+ \item{weights}{ an optional vector or matrix of (prior) weights
+ to be used in the fitting process.
+ This argument should not be used.
+
+}
+ \item{subset}{ an optional logical vector specifying a subset of
+ observations to
+ be used in the fitting process.
+ }
+ \item{na.action}{
+ a function which indicates what should happen when
+ the data contain \code{NA}s.
+ The default is set by the \code{na.action} setting
+ of \code{\link[base]{options}}, and is \code{na.fail}
+ if that is unset.
+ The ``factory-fresh'' default is \code{na.omit}.
+ }
+ \item{etastart}{ starting values for the linear predictors.
+ It is a \eqn{M}-column matrix. If \eqn{M=1} then it may be a vector.
+ }
+ \item{mustart}{ starting values for the
+ fitted values. It can be a vector or a matrix.
+ Some family functions do not make use of this argument.
+ }
+ \item{coefstart}{ starting values for the
+ coefficient vector. }
+ \item{control}{ a list of parameters for controlling the fitting process.
+ See \code{\link{uqo.control}} for details.
+ }
+ \item{offset}{ a vector or \eqn{M}-column matrix of offset values.
+ This argument should not be used.
+ }
+ \item{method}{
+ the method to be used in fitting the model.
+ The default (and presently only) method \code{uqo.fit}
+ uses iteratively reweighted least squares (IRLS).
+ }
+ \item{model}{ a logical value indicating whether the
+ \emph{model frame}
+ should be assigned in the \code{model} slot. }
+ \item{x.arg, y.arg}{ logical values indicating whether
+ the model matrix and response matrix used in the fitting
+ process should be assigned in the \code{x} and \code{y} slots.
+ Note the model matrix is the LM model matrix.
+
+ }
+ \item{contrasts}{ an optional list. See the \code{contrasts.arg}
+ of \code{\link{model.matrix.default}}. }
+ \item{constraints}{ an optional list of constraint matrices.
+ This argument should not be used.
+ }
+ \item{extra}{ an optional list with any extra information that
+ might be needed by the family function.
+ }
+ \item{qr.arg}{ logical value indicating whether
+ the slot \code{qr}, which returns the QR decomposition of the
+ VLM model matrix, is returned on the object.
+ This argument should not be set \code{TRUE}.
+ }
+ \item{\dots}{ further arguments passed into \code{\link{uqo.control}}. }
+
+}
+
+\details{
+ \emph{Unconstrained quadratic ordination} models fit symmetric bell-shaped
+ response curves/surfaces to response data, but the latent variables
+ are largely free parameters and are not constrained to be linear
+ combinations of the environmental variables. This poses a
+ difficult optimization problem. The current algorithm is very simple
+ and will often fail (even for \code{Rank=1}) but hopefully this will
+ be improved in the future.
+
+ The central formula is given by
+ \deqn{\eta = B_1^T x_1 + A \nu +
+ \sum_{m=1}^M (\nu^T D_m \nu) e_m}{%
+ eta = B_1^T x_1 + A nu +
+ sum_{m=1}^M (nu^T D_m nu) e_m}
+ where \eqn{x_1}{x_1} is a vector (usually just a 1 for an intercept),
+ \eqn{\nu}{nu} is an \eqn{R}-vector of latent variables, \eqn{e_m} is
+ a vector of 0s but with a 1 in the \eqn{m}th position.
+ The \eqn{\eta}{eta} are a vector of linear/additive predictors,
+ e.g., the \eqn{m}th element is \eqn{\eta_m = \log(E[Y_m])}{eta_m =
+ log(E[Y_m])} for the \eqn{m}th species. The matrices \eqn{B_1},
+ \eqn{A}, and \eqn{D_m} are estimated from the data, i.e.,
+ contain the regression coefficients. Also, \eqn{\nu}{nu} is
+ estimated.
+ The tolerance matrices satisfy \eqn{T_s = -\frac12 D_s^{-1}}{T_s =
+ -0.5 * D_s^(-1)}. Many important UQO details are directly related to
+ arguments in \code{\link{uqo.control}};
+ see also \code{\link{cqo}} and \code{\link{qrrvglm.control}}.
+
+Currently, only Poisson and binomial \pkg{VGAM} family functions are
+implemented for this function, and dispersion parameters for these are
+assumed known. Thus the Poisson is catered for by
+\code{\link{poissonff}}, and the binomial by \code{\link{binomialff}}.
+Those beginning with \code{"quasi"} have dispersion parameters that are
+estimated for each species, hence will give an error message here.
+
+}
+\value{
+ An object of class \code{"uqo"}
+ (this may change to \code{"quvglm"} in the future).
+}
+\references{
+
+Yee, T. W. (2004)
+A new technique for maximum-likelihood
+canonical Gaussian ordination.
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
+
+Yee, T. W. (2005)
+On constrained and unconstrained
+quadratic ordination.
+\emph{Manuscript in preparation}.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{Thomas W. Yee}
+
+\note{
+
+ The site scores are centered.
+ When \eqn{R>1}, they are uncorrelated and should be unique up
+ to a rotation.
+
+The argument \code{Bestof} in \code{\link{uqo.control}} controls
+the number of models fitted (each uses different starting values) to
+the data. This argument is important because convergence may be to a
+\emph{local} solution rather than the \emph{global} solution. Using more
+starting values increases the chances of finding the global solution.
+Local solutions arise because the optimization problem is highly
+nonlinear.
+
+In the example below, a CQO model is fitted and used for providing
+initial values for a UQO model.
+
+}
+\section{Warning }{
+
+ Local solutions are not uncommon when fitting UQO models. To increase
+ the chances of obtaining the global solution, set
+ \code{ITolerances=TRUE} or \code{EqualTolerances=TRUE} and increase
+ the value of the argument \code{Bestof} in \code{\link{uqo.control}}.
+ For reproducibility of the results, it pays to set a random
+ number seed before calling \code{uqo} (the function
+ \code{\link[base:Random]{set.seed}} does this).
+
+The function \code{uqo} is very sensitive to initial values, and there
+is a lot of room for improvement here.
+
+UQO is computationally expensive. It pays to keep the rank to no more
+than 2, and 1 is much preferred over 2.
+The data needs to conform closely to the statistical model.
+
+Currently there is a bug with the argument \code{Crow1positive}
+in \code{\link{uqo.control}}. This argument might be interpreted
+as controlling the sign of the first site score, but currently
+this is not done.
+
+}
+
+\seealso{
+ \code{\link{uqo.control}},
+ \code{\link{cqo}},
+ \code{\link{qrrvglm.control}},
+ \code{\link{rcqo}},
+% \code{\link{cao}},
+\code{\link{poissonff}},
+\code{\link{binomialff}},
+ \code{Coef.uqo},
+ \code{lvplot.uqo},
+ \code{persp.uqo},
+ \code{trplot.uqo},
+ \code{vcov.uqo},
+ \code{\link[base:Random]{set.seed}},
+ \code{\link{hspider}}.
+}
+\examples{
+data(hspider)
+set.seed(123) # This leads to the global solution
+hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ ITolerances = TRUE, fam = poissonff, data = hspider,
+ Crow1positive=TRUE, Bestof=3, trace=FALSE)
+if(deviance(p1) > 1589.0) stop("suboptimal fit obtained")
+
+set.seed(111)
+up1 = uqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~ 1,
+ family = poissonff, data = hspider,
+ ITolerances = TRUE,
+ Crow1positive = TRUE, lvstart = lv(p1))
+if(deviance(up1) > 1310.0) stop("suboptimal fit obtained")
+
+\dontrun{
+nos = ncol(up1@y) # Number of species
+clr = (1:(nos+1))[-7] # to omit yellow
+lvplot(up1, las=1, y=TRUE, pch=1:nos, scol=clr, lcol=clr,
+ pcol=clr, llty=1:nos, llwd=2)
+legend(x=2, y=135, dimnames(up1@y)[[2]], col=clr, lty=1:nos,
+ lwd=2, merge=FALSE, ncol=1, x.inter=4.0, bty="l", cex=0.9)
+
+# Compare the site scores between the two models
+plot(lv(p1), lv(up1), xlim=c(-3,4), ylim=c(-3,4), las=1)
+abline(a=0, b=-1, lty=2, col="blue", xpd=FALSE)
+cor(lv(p1, ITol=TRUE), lv(up1))
+
+# Another comparison between the constrained & unconstrained models
+# The signs are not right so they are similar when reflected about 0
+par(mfrow=c(2,1))
+persp(up1, main="Red/Blue are the constrained/unconstrained models",
+ label=TRUE, col="blue", las=1)
+persp(p1, add=FALSE, col="red")
+1-pchisq(deviance(p1) - deviance(up1), df=52-30)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+% 6/10/06; when the bug is fixed:
+%persp(p1, add=TRUE, col="red")
+
+
diff --git a/man/uqo.control.Rd b/man/uqo.control.Rd
new file mode 100644
index 0000000..7d93e41
--- /dev/null
+++ b/man/uqo.control.Rd
@@ -0,0 +1,266 @@
+\name{uqo.control}
+\alias{uqo.control}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Control Function for UQO models }
+\description{
+ Algorithmic constants and parameters for an
+ unconstrained quadratic ordination (UQO) model, which is fitted as a
+ \emph{quadratic unconstrained vector generalized linear model}
+ (QU-VGLM), are set using this function.
+ It is the control function of \code{\link{uqo}}.
+
+}
+\usage{
+uqo.control(Rank = 1,
+            Bestof = if(length(lvstart) && !jitter.sitescores) 1 else 10,
+            CA1 = FALSE, Crow1positive = TRUE, epsilon = 1.0e-07,
+            EqualTolerances = ITolerances, Etamat.colmax = 10,
+            GradientFunction = TRUE, Hstep = 0.001,
+            isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+            ITolerances = FALSE, lvstart = NULL,
+            jitter.sitescores = FALSE, maxitl = 40, Maxit.optim = 250,
+            MUXfactor = rep(3, length=Rank), optim.maxit = 20, nRmax = 250,
+            SD.sitescores = 1.0, SmallNo = 5.0e-13, trace = TRUE,
+            Use.Init.Poisson.QO = TRUE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{Rank}{ The numerical rank \eqn{R} of the model,
+ i.e., the number of latent variables or ordination axes.
+ Currently only \eqn{R=1} is recommended.
+ }
+ \item{Bestof}{ Integer. The best of \code{Bestof} models fitted is
+ returned. This argument helps guard against local solutions by
+ (hopefully) finding the global solution from many fits.
+ This argument defaults to 1 if initial values for the site scores
+ are supplied via \code{lvstart}; see also the illustrative call
+ after this argument list.
+
+ }
+ \item{CA1}{
+ Logical. If \code{TRUE} the site scores from a correspondence analysis
+ (CA) are computed and used on the first axis as initial values.
+ \code{CA1} and \code{Use.Init.Poisson.QO} cannot both be
+ \code{TRUE}.
+
+ }
+ \item{Crow1positive}{
+ Logical vector of length \code{Rank} (recycled if necessary):
+ are the elements of the first row of the latent variable matrix
+ \eqn{\nu}{nu} positive?
+ For example, if \code{Rank} is 2, then specifying
+ \code{Crow1positive=c(FALSE, TRUE)} will force the first
+ site score's first element to be negative, and the first site
+ score's second element to be positive. Note that there is no
+ \eqn{C} matrix with UQO, but the argument's name comes from
+ \code{\link{qrrvglm.control}} and is left unchanged for convenience.
+
+ }
+ \item{epsilon}{
+ Positive numeric. Used to test for convergence for GLMs fitted
+ in FORTRAN. Larger values mean a loosening of the convergence
+ criterion.
+
+ }
+ \item{EqualTolerances}{
+ Logical indicating whether each (quadratic) predictor will have
+ equal tolerances. Setting \code{EqualTolerances=TRUE} can
+ help avoid numerical problems, especially with binary data.
+ Note that the estimated (common) tolerance matrix may or may not be
+ positive-definite. If it is, then it can be scaled to the \eqn{R}
+ x \eqn{R} identity matrix. Setting \code{ITolerances=TRUE} will
+ fit a common \eqn{R} x \eqn{R} identity matrix as the tolerance
+ matrix to the data, but this is model-driven rather than being
+ data-driven because it \emph{forces} bell-shaped curves/surfaces
+ onto the data. If the estimated (common) tolerance matrix happens
+ to be positive-definite, then this model is essentially equivalent
+ to the model with \code{ITolerances=TRUE}.
+ See \bold{Details} in \code{\link{cqo}} and \code{\link{qrrvglm.control}}
+ for more details.
+
+ }
+ \item{Etamat.colmax}{
+ Positive integer, no smaller than \code{Rank}. Controls the amount
+ of memory used by \code{.Init.Poisson.QO()}. It is the maximum
+ number of columns allowed for the pseudo-response and its weights.
+ In general, the larger the value, the better the initial value.
+ Used only if \code{Use.Init.Poisson.QO=TRUE}.
+
+ }
+
+ \item{GradientFunction}{
+ Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr} is
+ used or not, i.e., to compute gradient values. The default value
+ is faster on most problems.
+
+ }
+ \item{Hstep}{
+ Positive value. Used as the step size in the finite difference
+ approximation to the derivatives by \code{\link[stats]{optim}}.
+
+ }
+ \item{isdlv}{
+ Initial standard deviations for the latent variables (site scores).
+ Numeric, positive and of length \eqn{R} (recycled if necessary).
+ This argument is used only if \code{ITolerances=TRUE}. Used by
+ \code{.Init.Poisson.QO()} to obtain initial values for the site
+ scores that are adjusted to a reasonable scale. It adjusts the
+ spread of the site scores relative to a common species tolerance of 1
+ for each ordination axis. A value between 0.5 and 10 is recommended;
+ a value such as 10 means that the range of the environmental space is
+ very large relative to the niche width of the species. The successive
+ values should decrease because the first ordination axis should have
+ the most spread of site scores, followed by the second ordination
+ axis, etc.
+
+ }
+ \item{ITolerances}{
+ Logical. If \code{TRUE} then the (common) tolerance matrix is
+ the \eqn{R} x \eqn{R} identity matrix by definition. Note that
+ \code{ITolerances=TRUE} implies \code{EqualTolerances=TRUE}, but
+ not vice versa. Internally, the quadratic terms will be treated
+ as offsets (in GLM jargon) and so the models can potentially be
+ fitted very efficiently.
+ See \bold{Details} in \code{\link{cqo}} and \code{\link{qrrvglm.control}}
+ for more details. The success of \code{ITolerances=TRUE} often depends
+ on suitable values for \code{isdlv} and/or \code{MUXfactor}.
+
+ }
+ \item{lvstart}{
+ Optional matrix of initial values of the site scores. If given, the
+ matrix must be \eqn{n} by \eqn{R}, where \eqn{n} is the number
+ of sites and \eqn{R} is the rank. This argument overrides the
+ arguments \code{Use.Init.Poisson.QO} and \code{CA1}.
+ Good possibilities for \code{lvstart} are the site scores from a
+ constrained ordination, e.g., from \code{\link{cqo}}.
+
+ }
+ \item{jitter.sitescores}{ Logical.
+ If \code{TRUE} the initial values for the site scores are jittered
+ to add a random element to the starting values.
+
+ }
+
+ \item{maxitl}{
+ Positive integer. Number of iterations allowed for the IRLS algorithm
+ implemented in the compiled code.
+
+ }
+ \item{Maxit.optim}{
+ Positive integer. Number of iterations given to the function
+ \code{\link[stats]{optim}} at each of the \code{optim.maxit}
+ iterations.
+
+ }
+ \item{MUXfactor}{
+ Multiplication factor for detecting large offset values. Numeric,
+ positive and of length \eqn{R} (recycled if necessary). This argument
+ is used only if \code{ITolerances=TRUE}. Offsets are \eqn{-0.5}
+ multiplied by the sum of the squares of all \eqn{R} latent variable
+ values. If the latent variable values are too large then this will
+ result in numerical problems. By too large, it is meant that the
+ standard deviation of the latent variable values is greater than
+ \code{MUXfactor[r] * isdlv[r]} for \code{r=1:Rank} (this is why
+ centering and scaling all the numerical predictor variables in
+ \eqn{x_2} is recommended). A value about 3 or 4 is recommended.
+ If failure to converge occurs, try a slightly lower value.
+
+}
+ \item{optim.maxit}{
+ Positive integer. Number of times \code{\link[stats]{optim}}
+ is invoked.
+
+% At iteration \code{i}, the \code{i}th value of \code{Maxit.optim}
+% is fed into \code{\link[stats]{optim}}.
+
+ }
+ \item{nRmax}{
+ Positive integer. If the number of parameters making up the latent
+ variable values (\eqn{n} multiplied by \eqn{R}) is greater than this
+ argument then a conjugate-gradients algorithm is used, otherwise a
+ quasi-Newton algorithm is used by \code{\link[stats]{optim}}. The
+ conjugate-gradients method is more suitable when the number of
+ parameters is large because it requires less memory.
+
+ }
+ \item{SD.sitescores}{ Numeric. Standard deviation of the
+ initial values of the site scores, which are generated from
+ a normal distribution.
+ }
+% \item{Dzero}{ Integer vector specifying which squared terms
+% are to be zeroed. These linear predictors will correspond to
+% a RR-VGLM.
+% The values must be elements from the set \{1,2,\ldots,\eqn{M}\}.
+% Used only if \code{Quadratic=TRUE} and \code{FastAlgorithm=FALSE}.
+% }
+ \item{SmallNo}{ Positive numeric between \code{.Machine$double.eps} and
+ \code{0.0001}.
+ Used to avoid under- or over-flow in the IRLS algorithm.
+ }
+ \item{trace}{ Logical indicating if output should be produced for
+ each iteration.
+ }
+% \item{Kinit}{ Initial values for the index parameters \code{k} in the
+% negative binomial distribution (one per species).
+% In general, a smaller number is preferred over a larger number.
+% The vector is recycled to the number of responses (species).
+% }
+ \item{Use.Init.Poisson.QO}{
+ Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO()} is
+ used to obtain initial values for the site scores. If \code{FALSE}
+ then random numbers are used instead. \code{CA1} and
+ \code{Use.Init.Poisson.QO} cannot both be \code{TRUE}.
+
+ }
+ \item{\dots}{ Ignored at present. }
+}
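+
+% A hedged sketch of a typical call (the argument values are illustrative
+% only):
+% ctrl <- uqo.control(Rank = 1, Bestof = 10, ITolerances = TRUE,
+%                     isdlv = 2, MUXfactor = 3, trace = FALSE)
+% ctrl$Bestof  # the returned list holds the checked argument values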
+\details{
+ The algorithm currently used by \code{\link{uqo}} is unsophisticated
+ and fails often. Improvements will hopefully be made soon.
+
+ See \code{\link{cqo}} and \code{\link{qrrvglm.control}} for more details
+ that are equally pertinent to UQO.
+
+% zz site scores are centered. Possibly uncorrelated too?
+
+ To reduce the number of parameters being estimated, setting
+ \code{ITolerances = TRUE} or \code{EqualTolerances = TRUE} is advised.
+
+}
+\value{
+ A list with the components corresponding to its arguments, after
+ some basic error checking.
+}
+\references{
+
+Yee, T. W. (2005)
+On constrained and unconstrained
+quadratic ordination.
+\emph{Manuscript in preparation}.
+
+Yee, T. W. (2006)
+Constrained additive ordination.
+\emph{Ecology}, \bold{87}, 203--213.
+
+}
+\author{T. W. Yee}
+\note{
+ This is a difficult optimization problem, and the current
+ algorithm needs to be improved.
+}
+\seealso{
+ \code{\link{uqo}}.
+}
+
+\section{Warning }{
+ This function is currently very sensitive to initial values. Setting
+ \code{Bestof} to some reasonably large integer is recommended.
+
+
+}
+
+\examples{
+uqo.control()
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/usagrain.Rd b/man/usagrain.Rd
new file mode 100644
index 0000000..53e53d3
--- /dev/null
+++ b/man/usagrain.Rd
@@ -0,0 +1,42 @@
+\name{usagrain}
+\alias{usagrain}
+\non_function{}
+\title{USA grain prices}
+\usage{data(usagrain)}
+\description{
+A 4-column matrix.
+}
+\format{
+ The columns are:
+ \describe{
+ \item{wheat.flour}{numeric}
+ \item{corn}{numeric}
+ \item{wheat}{numeric}
+ \item{rye}{numeric}
+ }
+}
+\details{
+Monthly averages of grain prices
+in the United States for wheat flour, corn, wheat, and rye for the
+period January 1961 through October 1972.
+The units are US dollars per 100 pound sack for wheat flour, and
+per bushel for corn, wheat and rye.
+}
+\source{
+Ahn and Reinsel (1988)
+}
+\references{
+Ahn, S. K. and Reinsel, G. C. (1988)
+Nested reduced-rank autoregressive models for multiple time series.
+\emph{Journal of the American Statistical Association},
+\bold{83}, 849--856.
+}
+\examples{
+data(usagrain)
+cgrain = scale(usagrain, scale=FALSE) # Center the time series only
+fit = vglm(cgrain ~ 1, rrar(Rank=c(4,1)),
+ eps=1e-3, step=0.5, trace=TRUE, maxit=40)
+summary(fit)
+}
+\keyword{datasets}
+
diff --git a/man/venice.Rd b/man/venice.Rd
new file mode 100644
index 0000000..2e11efb
--- /dev/null
+++ b/man/venice.Rd
@@ -0,0 +1,72 @@
+\name{venice}
+\alias{venice}
+\docType{data}
+\title{ Venice Maximum Sea Levels }
+\description{
+ The maximum heights of sea levels recorded at Venice, Italy, between
+ 1931 and 1981.
+}
+\usage{data(venice)}
+\format{
+ A data frame with 51 observations on the following 11 variables.
+ \describe{
+ \item{year}{a numeric vector. }
+ \item{r1}{a numeric vector; the highest recorded value. }
+ \item{r2}{a numeric vector; the second highest recorded value. }
+ \item{r3}{a numeric vector; the third highest recorded value. }
+ \item{r4}{a numeric vector; the fourth highest recorded value. }
+ \item{r5}{a numeric vector; the fifth highest recorded value. }
+ \item{r6}{a numeric vector; the sixth highest recorded value. }
+ \item{r7}{a numeric vector; the seventh highest recorded value. }
+ \item{r8}{a numeric vector; the eighth highest recorded value. }
+ \item{r9}{a numeric vector; the ninth highest recorded value. }
+ \item{r10}{a numeric vector; the tenth highest recorded value. }
+ }
+}
+\details{
+ The units are cm.
+ In 1935 only the top six values were recorded.
+}
+\source{
+Pirazzoli, P. (1982)
+Maree estreme a Venezia (periodo 1872--1981).
+\emph{Acqua Aria}, \bold{10}, 1023--1039.
+
+}
+\references{
+ Smith, R. L. (1986)
+ Extreme value theory based on the \emph{r} largest annual events.
+ \emph{Journal of Hydrology},
+ \bold{86}, 27--43.
+
+ Rosen, O. and Cohen, A. (1996)
+ Extreme Percentile Regression.
+ In: Haerdle, W. and Schimek, M. G. (eds.)
+ \emph{Statistical Theory and Computational Aspects of
+ Smoothing: Proceedings of the COMPSTAT '94 Satellite
+ Meeting held in Semmering, Austria, 27--28 August 1994},
+ Physica-Verlag: Heidelberg,
+ pp.200--214.
+
+}
+\examples{
+data(venice)
+\dontrun{
+matplot(venice[["year"]], venice[,-1], xlab="Year", ylab="Sea level (cm)")
+}
+
+y = as.matrix(venice[,paste("r",1:10,sep="")])
+fit1 = vgam(y ~ s(year, df=3), gumbel(R=365, mpv=TRUE),
+ data=venice, trace=TRUE, na.action=na.pass)
+fitted(fit1)[1:4,]
+
+\dontrun{
+par(mfrow=c(2,1), xpd=TRUE)
+plot(fit1, se=TRUE, lcol="blue", llwd=2, slty="dashed")
+
+par(mfrow=c(1,1), bty="l", xpd=TRUE, las=1)
+qtplot(fit1, mpv=TRUE, lcol=c(1,2,5), tcol=c(1,2,5),
+ llwd=2, pcol="blue", tadj=0.1)
+}
+}
+\keyword{datasets}
diff --git a/man/vgam-class.Rd b/man/vgam-class.Rd
new file mode 100644
index 0000000..ce3bcf6
--- /dev/null
+++ b/man/vgam-class.Rd
@@ -0,0 +1,237 @@
+\name{vgam-class}
+\docType{class}
+\alias{vgam-class}
+\title{Class ``vgam'' }
+\description{ Vector generalized additive models. }
+\section{Objects from the Class}{
+Objects can be created by calls of the form \code{vgam(...)}.
+% ~~ describe objects here ~~
+}
+\section{Slots}{
+ \describe{
+ \item{\code{nl.chisq}:}{Object of class \code{"numeric"}.
+Nonlinear chi-squared values. }
+ \item{\code{nl.df}:}{Object of class \code{"numeric"}.
+Nonlinear chi-squared degrees of freedom values. }
+ \item{\code{spar}:}{Object of class \code{"numeric"}
+containing the (scaled) smoothing parameters. }
+ \item{\code{s.xargument}:}{Object of class \code{"character"}
+holding the variable name of any \code{s()} terms. }
+ \item{\code{var}:}{Object of class \code{"matrix"} holding
+ approximate pointwise standard error information. }
+ \item{\code{Bspline}:}{Object of class \code{"list"}
+ holding the scaled (internal and boundary) knots, and the
+ fitted B-spline coefficients. These are used
+for prediction. }
+ \item{\code{extra}:}{Object of class \code{"list"};
+the \code{extra} argument on entry to \code{vglm}. This
+contains any extra information that might be needed
+ by the family function. }
+ \item{\code{family}:}{Object of class \code{"vglmff"}.
+The family function. }
+ \item{\code{iter}:}{Object of class \code{"numeric"}.
+The number of IRLS iterations used. }
+ \item{\code{predictors}:}{Object of class \code{"matrix"}
+with \eqn{M} columns which holds the \eqn{M} linear predictors. }
+ \item{\code{assign}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ This named list gives information matching the columns and the
+ (LM) model matrix terms.
+ }
+ \item{\code{call}:}{Object of class \code{"call"}, from class
+ \code{ "vlm"}.
+ The matched call.
+ }
+ \item{\code{coefficients}:}{Object of class
+\code{"numeric"}, from class \code{ "vlm"}.
+A named vector of coefficients.
+ }
+ \item{\code{constraints}:}{Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ A named list of constraint matrices used in the fitting.
+ }
+ \item{\code{contrasts}:}{Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ The contrasts used (if any).
+ }
+ \item{\code{control}:}{Object of class \code{"list"}, from class
+ \code{ "vlm"}.
+ A list of parameters for controlling the fitting process.
+ See \code{\link{vglm.control}} for details.
+ }
+ \item{\code{criterion}:}{Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ A list containing the convergence criterion evaluated at the
+ final IRLS iteration.
+ }
+ \item{\code{df.residual}:}{Object of class
+\code{"numeric"}, from class \code{ "vlm"}.
+The residual degrees of freedom.
+ }
+ \item{\code{df.total}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+The total degrees of freedom.
+ }
+ \item{\code{dispersion}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+The scaling parameter.
+ }
+ \item{\code{effects}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+The effects.
+ }
+ \item{\code{fitted.values}:}{Object of class
+\code{"matrix"}, from class \code{ "vlm"}.
+The fitted values. This may be missing or consist entirely
+of \code{NA}s, e.g., the Cauchy model.
+ }
+ \item{\code{misc}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+A named list to hold miscellaneous parameters.
+ }
+ \item{\code{model}:}{Object of class \code{"data.frame"},
+ from class \code{ "vlm"}.
+The model frame.
+ }
+ \item{\code{na.action}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+A list holding information about missing values.
+ }
+ \item{\code{offset}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+If non-zero, an \eqn{M}-column matrix of offsets.
+ }
+ \item{\code{post}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}
+where post-analysis results may be put.
+ }
+ \item{\code{preplot}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}
+used by \code{\link{plotvgam}}; the plotting parameters
+ may be put here.
+ }
+ \item{\code{prior.weights}:}{Object of class
+\code{"numeric"}, from class \code{ "vlm"}
+holding the initially supplied weights.
+ }
+ \item{\code{qr}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+QR decomposition at the final iteration.
+ }
+ \item{\code{R}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The \bold{R} matrix in the QR decomposition used in the fitting.
+ }
+ \item{\code{rank}:}{Object of class \code{"integer"},
+ from class \code{ "vlm"}.
+Numerical rank of the fitted model.
+ }
+ \item{\code{residuals}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The \emph{working} residuals at the final IRLS iteration.
+ }
+ \item{\code{rss}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+Residual sum of squares at the final IRLS iteration with
+the adjusted dependent vectors and weight matrices.
+ }
+ \item{\code{smart.prediction}:}{Object of class
+\code{"list"}, from class \code{ "vlm"}.
+A list of data-dependent parameters (if any)
+ that are used by smart prediction.
+ }
+ \item{\code{terms}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+The \code{\link[stats]{terms}} object used.
+ }
+ \item{\code{weights}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The weight matrices at the final IRLS iteration.
+ This is in matrix-band form.
+ }
+ \item{\code{x}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The model matrix (LM, not VGLM).
+ }
+ \item{\code{xlevels}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+The levels of the factors, if any, used in fitting.
+ }
+ \item{\code{y}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The response, in matrix form.
+ }
+ }
+}
+
+
+\section{Extends}{
+Class \code{"vglm"}, directly.
+Class \code{"vlm"}, by class "vglm".
+}
+\section{Methods}{
+ \describe{
+ \item{cdf}{\code{signature(object = "vglm")}:
+cumulative distribution function.
+Useful for quantile regression and extreme value data models.}
+ \item{deplot}{\code{signature(object = "vglm")}:
+ density plot.
+ Useful for quantile regression models.}
+ \item{deviance}{\code{signature(object = "vglm")}:
+deviance of the model (where applicable). }
+ \item{plot}{\code{signature(x = "vglm")}:
+diagnostic plots. }
+ \item{predict}{\code{signature(object = "vglm")}:
+extract the additive predictors or
+predict the additive predictors at a new data frame.}
+ \item{print}{\code{signature(x = "vglm")}:
+short summary of the object. }
+ \item{qtplot}{\code{signature(object = "vglm")}:
+quantile plot (only applicable to some models). }
+ \item{resid}{\code{signature(object = "vglm")}:
+residuals. There are various types of these. }
+ \item{residuals}{\code{signature(object = "vglm")}:
+residuals. Shorthand for \code{resid}. }
+ \item{rlplot}{\code{signature(object = "vglm")}: return level plot.
+Useful for extreme value data models.}
+ \item{summary}{\code{signature(object = "vglm")}:
+a more detailed summary of the object. }
+ }
+}
+\references{
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+\url{http://www.stat.auckland.ac.nz/~yee}
+}
+\author{ Thomas W. Yee }
+\note{
+ VGAMs have all the slots that \code{\link{vglm}} objects
+ have (\code{\link{vglm-class}}), plus the first few slots
+ described in the section above.
+}
+
+%~Make other sections like WARNING with \section{WARNING }{....} ~
+
+\seealso{
+\code{\link{vgam.control}},
+\code{\link{vglm}},
+\code{\link[VGAM]{s}},
+\code{\link{vglm-class}},
+\code{\link{vglmff-class}}.
+}
+
+\examples{
+# Fit a nonparametric proportional odds model
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+vgam(cbind(normal, mild, severe) ~ s(let),
+ cumulative(parallel=TRUE), pneumo)
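+
+# A hedged peek at some of the class-specific slots described above
+# (assumes the fit converges):
+fit = vgam(cbind(normal, mild, severe) ~ s(let),
+           cumulative(parallel=TRUE), pneumo)
+fit@nl.df  # nonlinear degrees of freedom of the s(let) term
+fit@iter   # number of IRLS iterations used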
+}
+\keyword{classes}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
diff --git a/man/vgam.Rd b/man/vgam.Rd
new file mode 100644
index 0000000..6103466
--- /dev/null
+++ b/man/vgam.Rd
@@ -0,0 +1,289 @@
+\name{vgam}
+\alias{vgam}
+%\alias{vgam.fit}
+\title{ Fitting Vector Generalized Additive Models }
+% 15/2/03; based a lot from vglm.Rd
+\description{
+ Fit a vector generalized additive model (VGAM). This is a large class
+ of models that includes generalized additive models (GAMs) and vector
+ generalized linear models (VGLMs) as special cases.
+
+}
+\usage{
+vgam(formula, family, data = list(), weights = NULL, subset = NULL,
+ na.action = na.fail, etastart = NULL, mustart = NULL,
+ coefstart = NULL, control = vgam.control(...), offset = NULL,
+ method = "vgam.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL, constraints = NULL,
+ extra = list(), qr.arg = FALSE, smart = TRUE, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ % The following comes from vglm.Rd but with minor tweaks
+
+ In the following, \eqn{M} is the number of additive predictors.
+
+ \item{formula}{
+ a symbolic description of the model to be fit.
+ The RHS of the formula is applied to each linear/additive predictor.
+ Different
+ variables in each linear/additive predictor can be chosen by specifying
+ constraint matrices.
+
+ }
+ \item{family}{
+ a function of class \code{"vglmff"}
+ (see \code{\link{vglmff-class}}) describing
+ what statistical model is to be fitted.
+ These are called ``\pkg{VGAM} family functions''.
+
+ }
+ \item{data}{
+ an optional data frame containing the variables in the model.
+ By default the variables are taken from
+ \code{environment(formula)}, typically the environment from which
+ \code{vgam} is called.
+
+ }
+ \item{weights}{
+ an optional vector or matrix of (prior) weights
+ to be used in the fitting process.
+ If \code{weights} is a matrix, then it must be in
+ \emph{matrix-band} form, whereby the first \eqn{M}
+ columns of the matrix are the
+ diagonals, followed by the upper-diagonal band, followed by the
+ band above that, etc. In this case, there can be up to \eqn{M(M+1)/2}
+ columns, with the last column corresponding to the (1,\eqn{M}) elements
+ of the weight matrices.
+
+ }
+ \item{subset}{
+ an optional logical vector specifying a subset of
+ observations to
+ be used in the fitting process.
+
+ }
+ \item{na.action}{
+ a function which indicates what should happen when
+ the data contain \code{NA}s.
+ The default is set by the \code{na.action} setting
+ of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
+ The ``factory-fresh'' default is \code{na.omit}.
+
+ }
+ \item{etastart}{
+ starting values for the linear/additive predictors.
+ It is an \eqn{M}-column matrix. If \eqn{M=1} then it may be a vector.
+
+ }
+ \item{mustart}{
+ starting values for the
+ fitted values. It can be a vector or a matrix.
+ Some family functions do not make use of this argument.
+
+ }
+ \item{coefstart}{
+ starting values for the coefficient vector.
+
+ }
+ \item{control}{
+ a list of parameters for controlling the fitting process.
+ See \code{\link{vgam.control}} for details.
+
+ }
+ \item{offset}{
+ a vector or \eqn{M}-column matrix of offset values.
+ These are \emph{a priori} known and are added to the linear/additive
+ predictors during fitting.
+
+ }
+ \item{method}{
+ the method to be used in fitting the model.
+ The default (and presently only) method \code{vgam.fit}
+ uses iteratively reweighted least squares (IRLS).
+
+ }
+ \item{model}{
+ a logical value indicating whether the \emph{model frame} should be
+ assigned in the \code{model} slot.
+
+ }
+ \item{x.arg, y.arg}{
+ logical values indicating whether the model matrix and response
+ vector/matrix used in the fitting process should be assigned in the
+ \code{x} and \code{y} slots. Note the model matrix is the LM model
+ matrix; to get the VGAM model matrix type \code{model.matrix(vgamfit)}
+ where \code{vgamfit} is a \code{vgam} object.
+
+ }
+ \item{contrasts}{
+ an optional list. See the \code{contrasts.arg} of
+ \code{\link{model.matrix.default}}.
+
+ }
+ \item{constraints}{
+ an optional list of constraint matrices. Each component of the list
+ must be named after the term it corresponds to (and the name must
+ match it exactly, as a character string). Each constraint matrix must have \eqn{M} rows, and
+ be of full-column rank. By default, constraint matrices are the \eqn{M}
+ by \eqn{M} identity matrix unless arguments in the family function
+ itself override these values. If \code{constraints} is used it must
+ contain \emph{all} the terms; an incomplete list is not accepted.
+
+ }
+ \item{extra}{
+ an optional list with any extra information that might be needed by
+ the \pkg{VGAM} family function.
+
+ }
+ \item{qr.arg}{
+ logical value indicating whether the slot \code{qr}, which returns
+ the QR decomposition of the VLM model matrix, is returned on the object.
+
+ }
+ \item{smart}{
+ logical value indicating whether smart prediction
+ (\code{\link{smartpred}}) will be used.
+
+ }
+ \item{\dots}{
+ further arguments passed into \code{\link{vgam.control}}.
+
+ }
+
+}
+\details{
+ A vector generalized additive model (VGAM) is loosely defined
+ as a statistical model that is a function of \eqn{M} additive
+ predictors.
+ The central formula is given by
+ \deqn{\eta_j = \sum_{k=1}^p f_{(j)k}(x_k)}{%
+ eta_j = sum_{k=1}^p f_{(j)k}(x_k)}
+ where \eqn{x_k}{x_k} is the \eqn{k}th explanatory variable
+ (almost always \eqn{x_1=1} for the intercept term),
+ and
+ \eqn{f_{(j)k}} are smooth functions of \eqn{x_k} that are estimated
+ by smoothers. The first term in the summation is just the intercept.
+ Currently only one type of smoother is
+ implemented and this is called a \emph{vector (cubic smoothing spline)
+ smoother}.
+ Here, \eqn{j=1,\ldots,M} where \eqn{M} is finite.
+ If all the functions are constrained to be linear then the resulting
+ model is a vector generalized linear model (VGLM).
+ VGLMs are best fitted with \code{\link{vglm}}.
+
+ Vector (cubic smoothing spline) smoothers are represented
+ by \code{s()} (see \code{\link[VGAM]{s}}).
+ Local regression via \code{lo()} is \emph{not}
+ supported. The results of \code{vgam} will differ from the S-PLUS and \R
+ \code{gam} functions (the latter in the \pkg{gam} \R package) because \code{vgam}
+ uses a different knot selection algorithm. In general, fewer knots
+ are chosen because the computation becomes expensive when the number
+ of additive predictors \eqn{M} is large.
+
+ The underlying algorithm of VGAMs is iteratively
+ reweighted least squares (IRLS) and modified vector backfitting
+ using vector splines. B-splines are used as the basis functions
+ for the vector (smoothing) splines.
+ \code{vgam.fit} is the function that actually does the work.
+ The smoothing code is based on F. O'Sullivan's BART code.
+
+% If more than one of \code{etastart}, \code{start} and \code{mustart}
+% is specified, the first in the list will be used.
+
+ A closely related methodology based on VGAMs called
+ \emph{constrained additive ordination} (CAO)
+ first forms a linear combination of the explanatory variables
+ (called \emph{latent variables}) and then fits a GAM to these.
+ This is implemented in the function \code{\link{cao}} for a very
+ limited choice of family functions.
+
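+% A hedged sketch contrasting a smooth fit with its linear (VGLM)
+% analogue; df = 3 is an illustrative choice:
+% data(hunua)
+% fit.s <- vgam(agaaus ~ s(altitude, df = 3), binomialff, hunua)
+% fit.l <- vglm(agaaus ~ altitude, binomialff, hunua)
+% deviance(fit.l) - deviance(fit.s)  # deviance gained by the smooth term
+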
+}
+\value{
+ An object of class \code{"vgam"}
+ (see \code{\link{vgam-class}} for further information).
+}
+\references{
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+\url{http://www.stat.auckland.ac.nz/~yee}
+}
+
+\author{ Thomas W. Yee }
+\note{
+ This function can fit a wide variety of statistical models. Some of
+ these are harder to fit than others because of inherent numerical
+ difficulties associated with some of them. Successful model fitting
+ benefits from cumulative experience. Varying the values of arguments
+ in the \pkg{VGAM} family function itself is a good first step if
+ difficulties arise, especially if initial values can be inputted.
+ A second, more general step is to vary the values of arguments in
+ \code{\link{vgam.control}}.
+ A third step is to make use of arguments such as \code{etastart},
+ \code{coefstart} and \code{mustart}.
+
+ Some \pkg{VGAM} family functions end in \code{"ff"} to avoid
+ interference with other functions, e.g., \code{\link{binomialff}},
+ \code{\link{poissonff}}, \code{\link{gaussianff}},
+ \code{gammaff}. This is because \pkg{VGAM} family
+ functions are incompatible with \code{\link[stats]{glm}}
+ (and also \code{\link[gam]{gam}} in the \pkg{gam} library and
+ \code{\link[mgcv]{gam}} in the \pkg{mgcv} library).
+
+ The smart prediction (\code{\link{smartpred}}) library is packed with
+ the \pkg{VGAM} library.
+
+ The theory behind the scaling parameter is currently being made more
+ rigorous, but it should give the same value as the scale parameter
+ for GLMs.
+
+}
+
+
+
+%~Make other sections like WARNING with \section{WARNING }{....} ~
+
+\seealso{
+\code{\link{vgam.control}},
+\code{\link{vgam-class}},
+\code{\link{vglmff-class}},
+\code{\link{plotvgam}},
+\code{\link{vglm}},
+\code{\link[VGAM]{s}},
+\code{\link{vsmooth.spline}},
+\code{\link{cao}}.
+}
+
+\examples{
+# Nonparametric proportional odds model
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+vgam(cbind(normal,mild,severe) ~ s(let), cumulative(par=TRUE), pneumo)
+
+# Nonparametric logistic regression
+data(hunua)
+fit = vgam(agaaus ~ s(altitude), binomialff, hunua)
+\dontrun{
+plot(fit, se=TRUE)
+}
+
+# Fit two species simultaneously
+fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude), binomialff(mv=TRUE), hunua)
+coef(fit2, mat=TRUE) # Not really interpretable
+\dontrun{
+plot(fit2, se=TRUE, overlay=TRUE, lcol=1:2, scol=1:2)
+attach(hunua)
+o = order(altitude)
+matplot(altitude[o], fitted(fit2)[o,], type="l", lwd=2, las=1,
+ xlab="Altitude (m)", ylab="Probability of presence",
+ main="Two plant species' response curves", ylim=c(0,.8))
+rug(altitude)
+detach(hunua)
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
new file mode 100644
index 0000000..788dc32
--- /dev/null
+++ b/man/vgam.control.Rd
@@ -0,0 +1,172 @@
+\name{vgam.control}
+\alias{vgam.control}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Control function for vgam }
+\description{
+ Algorithmic constants and parameters for running \code{\link{vgam}}
+ are set using this function.
+}
+\usage{
+vgam.control(all.knots = FALSE, backchat = if (is.R()) FALSE else TRUE,
+ bf.epsilon = 1e-07, bf.maxit = 30,
+ checkwz=TRUE,
+ criterion = names(.min.criterion.VGAM),
+ epsilon = 1e-07, maxit = 30, na.action = na.fail,
+ nk = NULL, save.weight = FALSE, se.fit = TRUE,
+ trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
+ xij = NULL, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+% zz na.action differs from vglm
+
+In the following, we let \eqn{d} be the number of \code{\link{s}} terms
+in the formula.
+
+ \item{all.knots}{
+ logical indicating if all distinct points of
+ the smoothing variables are to be used as knots.
+ By default, \code{all.knots=TRUE} for
+ \eqn{n \leq 40}{n <= 40}, and
+ for \eqn{n > 40},
+ the number of knots is approximately
+ \eqn{40 + (n-40)^{0.25}}{40 + (n-40)^0.25}.
+ This increases very slowly with \eqn{n}
+ so that the number of knots is approximately between 50 and 60
+ for large \eqn{n}; see the numeric illustration after this argument list.
+ }
+ \item{backchat}{
+ logical indicating if a backchat is to be used (not
+ applicable in \R).
+ }
+ \item{bf.epsilon}{
+ tolerance used by the modified vector
+ backfitting algorithm for testing convergence.
+ Must be a positive number.
+ }
+ \item{bf.maxit}{
+ maximum number of iterations allowed in
+ the modified vector
+ backfitting algorithm. Must be a positive integer.
+ }
+ \item{checkwz}{
+ logical indicating whether the diagonal elements of
+ the working weight matrices should be checked to see whether they are
+ sufficiently positive, i.e., greater than \code{wzepsilon}. If not,
+ any values less than \code{wzepsilon} are replaced with this value.
+
+ }
+ \item{criterion}{
+ character variable describing what criterion is to
+ be used to test for convergence.
+ The possibilities are listed in \code{.min.criterion.VGAM}, but
+ most family functions only implement a few of these.
+ }
+ \item{epsilon}{
+ positive convergence tolerance epsilon. Roughly
+ speaking, the Newton-Raphson/Fisher-scoring/local-scoring iterations
+ are assumed to have
+ converged when two successive \code{criterion} values are within
+ \code{epsilon} of each other.
+ }
+ \item{maxit}{
+ maximum number of
+ Newton-Raphson/Fisher-scoring/local-scoring iterations allowed.
+ }
+ \item{na.action}{
+ how to handle missing values. Unlike the
+ S-PLUS \code{gam} function, \code{\link{vgam}} cannot handle
+ \code{NA}s when smoothing.
+ }
+ \item{nk}{
+ vector of length \eqn{d} containing positive integers.
+ Recycling is used if necessary.
+ The \eqn{i}th value is the number of B-spline coefficients to be
+ estimated for each component function of the \eqn{i}th
+ \code{s()} term.
+ \code{nk} differs from the number of knots by some constant.
+ If specified, \code{nk} overrides the automatic knot selection
+ procedure.
+ }
+ \item{save.weight}{
+ logical indicating whether the \code{weights} slot
+ of a \code{"vglm"} object will be saved on the object. If not, it will
+ be reconstructed when needed, e.g., by \code{summary}.
+ }
+ \item{se.fit}{
+ logical indicating whether approximate
+ pointwise standard errors are to be saved on the object.
+ If \code{TRUE}, then these can be plotted with \code{plot(..., se=TRUE)}.
+
+ }
+ \item{trace}{
+ logical indicating if output should be produced for
+ each iteration.
+ }
+ \item{wzepsilon}{
+ Small positive number used to test whether the diagonals of the working
+ weight matrices are sufficiently positive.
+
+ }
+ \item{xij}{
+ formula giving terms making up a covariate-dependent term.
+
+ }
+ \item{\dots}{
+ other parameters that may be picked up from control
+ functions that are specific to the \pkg{VGAM} family function.
+% zz see later.
+
+ }
+}
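+
+% The knot-count rule described under \code{all.knots}, evaluated at a
+% few sample sizes (a hedged numeric illustration):
+% n <- c(50, 500, 5000)
+% 40 + (n - 40)^0.25  # approximately 41.8, 44.6 and 48.4 knots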
+\details{
+ Most of the control parameters are used within \code{vgam.fit} and
+ you will have to look at that to understand the full details. Many of
+ the control parameters are used in a similar manner by \code{vglm.fit}
+ (\code{\link{vglm}}) because the algorithm (IRLS) is very similar.
+
+ Setting \code{save.weight=FALSE} is useful for some models because the
+ \code{weights} slot of the object is often the largest and so less
+ memory is used to store the object. However, for some \pkg{VGAM}
+ family functions, it is necessary to set \code{save.weight=TRUE} because
+ the \code{weights} slot cannot be reconstructed later.
+
+}
+\value{
+ A list with components matching the input names. A little error
+ checking is done, but not much.
+ The list is assigned to the \code{control} slot of \code{\link{vgam}} objects.
+}
+\references{
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+\url{http://www.stat.auckland.ac.nz/~yee}
+}
+\author{ Thomas W. Yee}
+
+\note{
+ \code{\link{vgam}} does not implement half-stepsizing; therefore,
+ parametric models should be fitted with \code{\link{vglm}}.
+ Also, \code{\link{vgam}} is slower than \code{\link{vglm}}.
+}
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link{vgam}},
+ \code{\link{vsmooth.spline}},
+ \code{\link{vglm}}.
+}
+
+\examples{
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+vgam(cbind(normal, mild, severe) ~ s(let, df=3), multinomial,
+ pneumo, trace=TRUE, eps=1e-4, maxit=10)
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
diff --git a/man/vglm-class.Rd b/man/vglm-class.Rd
new file mode 100644
index 0000000..9193b5a
--- /dev/null
+++ b/man/vglm-class.Rd
@@ -0,0 +1,217 @@
+\name{vglm-class}
+\docType{class}
+\alias{vglm-class}
+\title{Class ``vglm'' }
+\description{ Vector generalized linear models. }
+\section{Objects from the Class}{
+Objects can be created by calls of the form \code{vglm(...)}.
+% ~~ describe objects here ~~
+}
+\section{Slots}{
+
+In the following, \eqn{M} is the number of linear predictors.
+
+ \describe{
+ \item{\code{extra}:}{Object of class \code{"list"};
+the \code{extra} argument on entry to \code{vglm}. This
+contains any extra information that might be needed
+ by the family function. }
+ \item{\code{family}:}{Object of class \code{"vglmff"}.
+The family function. }
+ \item{\code{iter}:}{Object of class \code{"numeric"}.
+The number of IRLS iterations used. }
+ \item{\code{predictors}:}{Object of class \code{"matrix"}
+with \eqn{M} columns which holds the \eqn{M} linear predictors. }
+ \item{\code{assign}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+ This named list gives information matching the columns and the
+ (LM) model matrix terms.
+ }
+ \item{\code{call}:}{Object of class \code{"call"}, from class
+ \code{ "vlm"}.
+ The matched call.
+ }
+ \item{\code{coefficients}:}{Object of class
+\code{"numeric"}, from class \code{ "vlm"}.
+A named vector of coefficients.
+ }
+ \item{\code{constraints}:}{Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ A named list of constraint matrices used in the fitting.
+ }
+ \item{\code{contrasts}:}{Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ The contrasts used (if any).
+ }
+ \item{\code{control}:}{Object of class \code{"list"}, from class
+ \code{ "vlm"}.
+ A list of parameters for controlling the fitting process.
+ See \code{\link{vglm.control}} for details.
+ }
+ \item{\code{criterion}:}{Object of class \code{"list"}, from
+ class \code{ "vlm"}.
+ A list containing the convergence criterion evaluated at the
+ final IRLS iteration.
+ }
+ \item{\code{df.residual}:}{Object of class
+\code{"numeric"}, from class \code{ "vlm"}.
+The residual degrees of freedom.
+ }
+ \item{\code{df.total}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+The total degrees of freedom.
+ }
+ \item{\code{dispersion}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+The scaling parameter.
+ }
+ \item{\code{effects}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+The effects.
+ }
+ \item{\code{fitted.values}:}{Object of class
+\code{"matrix"}, from class \code{ "vlm"}.
+The fitted values. This may be missing or consist entirely
+of \code{NA}s, e.g., the Cauchy model.
+ }
+ \item{\code{misc}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+A named list to hold miscellaneous parameters.
+ }
+ \item{\code{model}:}{Object of class \code{"data.frame"},
+ from class \code{ "vlm"}.
+The model frame.
+ }
+ \item{\code{na.action}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+A list holding information about missing values.
+ }
+ \item{\code{offset}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+If non-zero, an \eqn{M}-column matrix of offsets.
+ }
+ \item{\code{post}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}
+where post-analysis results may be put.
+ }
+ \item{\code{preplot}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}
+used by \code{\link{plotvgam}}; the plotting parameters
+ may be put here.
+ }
+ \item{\code{prior.weights}:}{Object of class
+\code{"numeric"}, from class \code{ "vlm"}
+holding the initially supplied weights.
+ }
+ \item{\code{qr}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+QR decomposition at the final iteration.
+ }
+ \item{\code{R}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The \bold{R} matrix in the QR decomposition used in the fitting.
+ }
+ \item{\code{rank}:}{Object of class \code{"integer"},
+ from class \code{ "vlm"}.
+Numerical rank of the fitted model.
+ }
+ \item{\code{residuals}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The \emph{working} residuals at the final IRLS iteration.
+ }
+ \item{\code{rss}:}{Object of class \code{"numeric"},
+ from class \code{ "vlm"}.
+Residual sum of squares at the final IRLS iteration with
+the adjusted dependent vectors and weight matrices.
+ }
+ \item{\code{smart.prediction}:}{Object of class
+\code{"list"}, from class \code{ "vlm"}.
+A list of data-dependent parameters (if any)
+ that are used by smart prediction.
+ }
+ \item{\code{terms}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+The \code{\link[stats]{terms}} object used.
+ }
+ \item{\code{weights}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The weight matrices at the final IRLS iteration.
+ This is in matrix-band form.
+ }
+ \item{\code{x}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The model matrix (LM, not VGLM).
+ }
+ \item{\code{xlevels}:}{Object of class \code{"list"},
+ from class \code{ "vlm"}.
+The levels of the factors, if any, used in fitting.
+ }
+ \item{\code{y}:}{Object of class \code{"matrix"},
+ from class \code{ "vlm"}.
+The response, in matrix form.
+ }
+ }
+}
+\section{Extends}{
+Class \code{"vlm"}, directly.
+}
+\section{Methods}{
+ \describe{
+ \item{cdf}{\code{signature(object = "vglm")}:
+cumulative distribution function.
+Applicable to, e.g., quantile regression and extreme value data models.}
+ \item{deplot}{\code{signature(object = "vglm")}:
+density plot. Applicable to, e.g., quantile regression.}
+ \item{deviance}{\code{signature(object = "vglm")}:
+deviance of the model (where applicable). }
+ \item{plot}{\code{signature(x = "vglm")}:
+diagnostic plots. }
+ \item{predict}{\code{signature(object = "vglm")}:
+extract the linear predictors or
+predict the linear predictors at a new data frame.}
+ \item{print}{\code{signature(x = "vglm")}:
+short summary of the object. }
+ \item{qtplot}{\code{signature(object = "vglm")}:
+quantile plot (only applicable to some models). }
+ \item{resid}{\code{signature(object = "vglm")}:
+residuals. There are various types of these. }
+ \item{residuals}{\code{signature(object = "vglm")}:
+residuals. Shorthand for \code{resid}. }
+ \item{rlplot}{\code{signature(object = "vglm")}: return level plot.
+Useful for extreme value data models.}
+ \item{summary}{\code{signature(object = "vglm")}:
+a more detailed summary of the object. }
+ }
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+\url{http://www.stat.auckland.ac.nz/~yee}
+
+}
+\author{ Thomas W. Yee }
+%\note{ ~~further notes~~ }
+
+%~Make other sections like WARNING with \section{WARNING }{....} ~
+
+\seealso{
+\code{\link{vglm}},
+\code{\link{vglmff-class}},
+\code{\link{vgam-class}}.
+}
+
+\examples{
+# Multinomial logit model
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+vglm(cbind(normal,mild,severe) ~ let, multinomial, pneumo)
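+
+# A hedged peek at a few of the slots described above:
+fit = vglm(cbind(normal,mild,severe) ~ let, multinomial, pneumo)
+fit@iter              # number of IRLS iterations used
+head(fit@predictors)  # the M linear predictors, one column each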
+}
+\keyword{classes}
diff --git a/man/vglm.Rd b/man/vglm.Rd
new file mode 100644
index 0000000..3e7a8f4
--- /dev/null
+++ b/man/vglm.Rd
@@ -0,0 +1,370 @@
+\name{vglm}
+\alias{vglm}
+%\alias{vglm.fit}
+\title{Fitting Vector Generalized Linear Models }
+\description{
+ \code{vglm} is used to fit vector generalized linear models (VGLMs).
+ This is a large class of models that includes
+ generalized linear models (GLMs) as special cases.
+
+}
+\usage{
+vglm(formula, family, data = list(), weights = NULL, subset = NULL,
+ na.action = na.fail, etastart = NULL, mustart = NULL,
+ coefstart = NULL, control = vglm.control(...), offset = NULL,
+ method = "vglm.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL, constraints = NULL, extra = list(),
+ qr.arg = FALSE, smart = TRUE, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ In the following, \eqn{M} is the number of linear predictors.
+
+ \item{formula}{
+ a symbolic description of the model to be fit.
+ The RHS of the formula is applied to each linear predictor. Different
+ variables in each linear predictor can be chosen by specifying
+ constraint matrices.
+
+ }
+ \item{family}{
+ a function of class \code{"vglmff"} describing
+ what statistical model is to be fitted.
+ These are called ``\pkg{VGAM} family functions''.
+
+ }
+ \item{data}{
+ an optional data frame containing the variables in the model.
+ By default the variables are taken from
+ \code{environment(formula)}, typically the environment from which
+ \code{vglm} is called.
+
+ }
+ \item{weights}{
+ an optional vector or matrix of (prior) weights
+ to be used in the fitting process.
+ If \code{weights} is a matrix, then it must be in
+ \emph{matrix-band} form, whereby the first \eqn{M}
+ columns of the matrix are the
+ diagonals, followed by the upper-diagonal band, followed by the
+ band above that, etc. In this case, there can be up to \eqn{M(M+1)/2}
+ columns, with the last column corresponding to the (1,\eqn{M}) elements
+ of the weight matrices; a small sketch is given after this argument list.
+
+ }
+ \item{subset}{
+ an optional logical vector specifying a subset of
+ observations to
+ be used in the fitting process.
+ }
+ \item{na.action}{
+ a function which indicates what should happen when
+ the data contain \code{NA}s.
+ The default is set by the \code{na.action} setting
+ of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
+ The ``factory-fresh'' default is \code{na.omit}.
+
+ }
+ \item{etastart}{
+ starting values for the linear predictors.
+ It is an \eqn{M}-column matrix. If \eqn{M=1} then it may be a vector.
+
+ }
+ \item{mustart}{
+ starting values for the
+ fitted values. It can be a vector or a matrix.
+ Some family functions do not make use of this argument.
+
+ }
+ \item{coefstart}{
+ starting values for the coefficient vector.
+
+ }
+ \item{control}{
+ a list of parameters for controlling the fitting process.
+ See \code{\link{vglm.control}} for details.
+
+ }
+ \item{offset}{
+ a vector or \eqn{M}-column matrix of offset values. These are \emph{a
+ priori} known and are added to the linear predictors during fitting.
+
+ }
+ \item{method}{
+ the method to be used in fitting the model. The default (and
+ presently only) method \code{vglm.fit} uses iteratively reweighted
+ least squares (IRLS).
+
+ }
+ \item{model}{
+ a logical value indicating whether the
+ \emph{model frame}
+ should be assigned in the \code{model} slot.
+
+ }
+ \item{x.arg, y.arg}{
+ logical values indicating whether
+ the model matrix and response vector/matrix used in the fitting
+ process should be assigned in the \code{x} and \code{y} slots.
+ Note the model matrix is the LM model matrix; to get the VGLM
+ model matrix type \code{model.matrix(vglmfit)} where
+ \code{vglmfit} is a \code{vglm} object.
+
+ }
+ \item{contrasts}{
+ an optional list. See the \code{contrasts.arg}
+ of \code{\link{model.matrix.default}}.
+
+ }
+ \item{constraints}{
+ an optional list of constraint matrices.
+ Each component of the list must be named after the term it
+ corresponds to (and the name must match it exactly, as a character string).
+ Each constraint matrix must have \eqn{M} rows, and be of full-column
+ rank. By default, constraint matrices are the \eqn{M} by \eqn{M}
+ identity
+ matrix unless arguments in the family function itself override
+ these values.
+ If \code{constraints} is used it must contain \emph{all} the
+ terms; an incomplete list is not accepted.
+
+ }
+ \item{extra}{
+ an optional list with any extra information that might be needed by
+ the \pkg{VGAM} family function.
+
+ }
+ \item{qr.arg}{
+ logical value indicating whether
+ the slot \code{qr}, which returns the QR decomposition of the
+ VLM model matrix, is returned on the object.
+
+ }
+ \item{smart}{
+ logical value indicating whether smart prediction
+ (\code{\link{smartpred}}) will be used.
+
+ }
+ \item{\dots}{
+ further arguments passed into \code{\link{vglm.control}}.
+ }
+
+}
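+
+% A small sketch of matrix-band form for M = 2, with hypothetical prior
+% weights and one row per observation: the first two columns hold the
+% (1,1) and (2,2) diagonal elements, the third the (1,2) band:
+% n <- 3
+% wz <- cbind(rep(1, n), rep(1, n), rep(0.5, n))  # M(M+1)/2 = 3 columns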
+\details{
+ A vector generalized linear model (VGLM) is loosely defined
+ as a statistical model that is a function of \eqn{M} linear
+ predictors.
+ The central formula is given by
+ \deqn{\eta_j = \beta_j^T x}{%
+ eta_j = beta_j^T x}
+ where \eqn{x}{x} is a vector of explanatory variables
+ (sometimes just a 1 for an intercept),
+ and
+ \eqn{\beta_j}{beta_j} is a vector of regression coefficients
+ to be estimated.
+ Here, \eqn{j=1,\ldots,M} where \eqn{M} is finite.
+ Then one can write
+ \eqn{\eta=(\eta_1,\ldots,\eta_M)^T}{eta=(eta_1,\ldots,\eta_M)^T}
+ as a vector of linear predictors.
+
+ Most users will find \code{vglm} similar in flavour to
+ \code{\link[stats]{glm}}. The function \code{vglm.fit} actually does
+ the work.
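+
+% A hedged numerical check of the central formula on a fitted model;
+% it assumes identity constraint matrices, as with multinomial:
+% fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+% max(abs(fit@x %*% coef(fit, matrix = TRUE) - predict(fit)))  # ~ 0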
+
+% If more than one of \code{etastart}, \code{start} and \code{mustart}
+% is specified, the first in the list will be used.
+
+}
+\value{
+ An object of class \code{"vglm"}, which has the
+ following slots. Some of these may not be assigned to save
+ space, and will be recreated if necessary later.
+ \item{extra}{the list \code{extra} at the end of fitting.}
+ \item{family}{the family function (of class \code{"vglmff"}).}
+ \item{iter}{the number of IRLS iterations used.}
+ \item{predictors}{an \eqn{M}-column matrix of linear predictors.}
+ \item{assign}{a named list which matches the columns and the
+ (LM) model matrix terms.}
+ \item{call}{the matched call.}
+ \item{coefficients}{a named vector of coefficients.}
+ \item{constraints}{
+ a named list of constraint matrices used in the fitting.
+ }
+ \item{contrasts}{the contrasts used (if any).}
+ \item{control}{list of control parameter used in the fitting.}
+ \item{criterion}{a list containing the convergence criterion evaluated
+ at the final IRLS iteration.}
+ \item{df.residual}{the residual degrees of freedom.}
+ \item{df.total}{the total degrees of freedom.}
+ \item{dispersion}{the scaling parameter.}
+ \item{effects}{the effects.}
+ \item{fitted.values}{the fitted values, as a matrix.
+ This may be missing or consist entirely
+ of \code{NA}s, e.g., the Cauchy model.
+ }
+ \item{misc}{a list to hold miscellaneous parameters.}
+ \item{model}{the model frame.}
+ \item{na.action}{a list holding information about missing values.}
+ \item{offset}{if non-zero, a \eqn{M}-column matrix of offsets.}
+ \item{post}{a list where post-analysis results may be put.}
+ \item{preplot}{used by \code{\link{plotvgam}}, the plotting parameters
+ may be put here.}
+ \item{prior.weights}{initially supplied weights.}
+ \item{qr}{the QR decomposition used in the fitting.}
+ \item{R}{the \bold{R} matrix in the QR decomposition used in the fitting.}
+ \item{rank}{numerical rank of the fitted model.}
+ \item{residuals}{the \emph{working} residuals at the final IRLS iteration.}
+ \item{rss}{residual sum of squares at the final IRLS iteration with
+ the adjusted dependent vectors and weight matrices.}
+ \item{smart.prediction}{
+ a list of data-dependent parameters (if any)
+ that are used by smart prediction.
+
+ }
+ \item{terms}{the \code{\link[stats]{terms}} object used.}
+ \item{weights}{the weight matrices at the final IRLS iteration.
+ This is in matrix-band form.}
+ \item{x}{the model matrix (linear model LM, not VGLM).}
+ \item{xlevels}{the levels of the factors, if any, used in fitting.}
+ \item{y}{the response, in matrix form.}
+
+ This slot information is repeated at \code{\link{vglm-class}}.
+}
+\references{
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+The \pkg{VGAM} library can be downloaded
+starting from \url{http://www.stat.auckland.ac.nz/~yee}.
+Other \pkg{VGAM} resources and documentation can be found there.
+}
+
+
+\author{ Thomas W. Yee }
+\note{
+ This function can fit a wide variety of statistical models. Some of
+ these are harder to fit than others because of inherent numerical
+ difficulties associated with some of them. Successful model fitting
+ benefits from cumulative experience. Varying the values of arguments
+ in the \pkg{VGAM} family function itself is a good first step if
+ difficulties arise, especially if initial values can be inputted.
+ A second, more general step, is to vary the values of arguments in
+ \code{\link{vglm.control}}.
+ A third step is to make use of arguments such as \code{etastart},
+ \code{coefstart} and \code{mustart}.
+
+ Some \pkg{VGAM} family functions end in \code{"ff"} to avoid
+ interference with other functions, e.g., \code{\link{binomialff}},
+ \code{\link{poissonff}}, \code{\link{gaussianff}},
+ \code{gammaff}. This is because \pkg{VGAM} family
+ functions are incompatible with \code{\link[stats]{glm}}
+ (and also \code{\link[gam]{gam}} in the \pkg{gam} library and
+ \code{\link[mgcv]{gam}} in the \pkg{mgcv} library).
+
+ The smart prediction (\code{\link{smartpred}}) library is packaged with
+ the \pkg{VGAM} library.
+
+ The theory behind the scaling parameter is currently being made more
+ rigorous, but it should give the same value as the scale parameter
+ for GLMs.
+
+ In Example 5 below, the \code{xij} argument is used to illustrate covariates
+ that are specific to a linear predictor. Here, \code{lop}/\code{rop} are
+ the ocular pressures of the left/right eye (artificial data). Variables
+ \code{leye} and \code{reye} might be the presence/absence of a particular
+ disease on the LHS/RHS eye respectively. See \code{\link{fill}}
+ for more details and examples.
+
+}
+
+%~Make other sections like WARNING with \section{WARNING }{....} ~
+
+\seealso{
+ \code{\link{vglm.control}},
+ \code{\link{vglm-class}},
+ \code{\link{vglmff-class}},
+ \code{\link{smartpred}},
+ \code{vglm.fit},
+ \code{\link{fill}},
+ \code{\link{rrvglm}},
+ \code{\link{vgam}}.
+ Methods functions include
+ \code{coef.vlm},
+ \code{\link{predict.vglm}},
+ \code{summary.vglm},
+ etc.
+}
+
+\examples{
+# Example 1. Dobson (1990) Page 93: Randomized Controlled Trial :
+counts = c(18,17,15,20,10,20,25,13,12)
+outcome = gl(3,1,9)
+treatment = gl(3,3)
+print(d.AD <- data.frame(treatment, outcome, counts))
+vglm.D93 = vglm(counts ~ outcome + treatment, family=poissonff)
+summary(vglm.D93)
+
+
+# Example 2. Multinomial logit model
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+
+
+# Example 3. Proportional odds model
+fit = vglm(cbind(normal,mild,severe) ~ let, cumulative(par=TRUE), pneumo)
+coef(fit, matrix=TRUE)
+constraints(fit)
+fit@x # LM model matrix
+model.matrix(fit) # Larger VGLM model matrix
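+
+# A hedged sketch (not from the original examples): rebuild the linear
+# predictors from the central formula eta_j = beta_j^T x, using the big
+# VLM model matrix and the coefficient vector. This assumes the VLM
+# matrix rows are ordered with the eta index varying fastest.
+M = ncol(predict(fit))
+etamat = matrix(model.matrix(fit) \%*\% coef(fit), ncol=M, byrow=TRUE)
+max(abs(etamat - predict(fit))) # Should be close to 0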
+
+
+# Example 4. Bivariate logistic model
+data(coalminers)
+fit = vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers, trace=TRUE)
+coef(fit, matrix=TRUE)
+fit@y
+
+
+# Example 5. The use of the xij argument
+n = 1000
+eyes = data.frame(lop = runif(n), rop = runif(n))
+eyes = transform(eyes,
+ leye = ifelse(runif(n) < logit(-1+2*lop, inverse=TRUE), 1, 0),
+ reye = ifelse(runif(n) < logit(-1+2*rop, inverse=TRUE), 1, 0))
+fit = vglm(cbind(leye,reye) ~ lop + rop + fill(lop),
+ binom2.or(exchangeable=TRUE, zero=3),
+ xij = op ~ lop + rop + fill(lop), data=eyes)
+coef(fit)
+coef(fit, matrix=TRUE)
+coef(fit, matrix=TRUE, compress=FALSE)
+
+# Here's one method to handle the xij argument with a term that
+# produces more than one column in the model matrix.
+POLY3 = function(x, ...) {
+ # A cubic
+ poly(c(x,...), 3)[1:length(x),]
+}
+
+fit = vglm(cbind(leye,reye) ~ POLY3(lop,rop) + POLY3(rop,lop) + fill(POLY3(lop,rop)),
+ binom2.or(exchangeable=TRUE, zero=3), data=eyes,
+ xij = POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
+ fill(POLY3(lop,rop)))
+coef(fit)
+coef(fit, matrix=TRUE)
+coef(fit, matrix=TRUE, compress=FALSE)
+predict(fit)[1:4,]
+}
+\keyword{models}
+\keyword{regression}
+
+%eyes$leye = ifelse(runif(n) < 1/(1+exp(-1+2*eyes$lop)), 1, 0)
+%eyes$reye = ifelse(runif(n) < 1/(1+exp(-1+2*eyes$rop)), 1, 0)
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
new file mode 100644
index 0000000..f4b84a1
--- /dev/null
+++ b/man/vglm.control.Rd
@@ -0,0 +1,153 @@
+\name{vglm.control}
+\alias{vglm.control}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Control function for vglm }
+\description{
+ Algorithmic constants and parameters for running \code{vglm} are set
+ using this function.
+}
+\usage{
+vglm.control(backchat = if (is.R()) FALSE else TRUE,
+ checkwz=TRUE, criterion = names(.min.criterion.VGAM),
+ epsilon = 1e-07, half.stepsizing = TRUE,
+ maxit = 30, stepsize = 1, save.weight = FALSE,
+ trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
+ xij = NULL, ...)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+ \item{backchat}{ logical indicating if a backchat is to be used (not
+ applicable in \R). }
+ \item{checkwz}{ logical indicating whether the diagonal elements of
+    the working weight matrices should be checked to see whether they are
+ sufficiently positive, i.e., greater than \code{wzepsilon}. If not,
+ any values less than \code{wzepsilon} are replaced with this value.
+
+ }
+ \item{criterion}{ character variable describing what criterion is to
+ be used to test for convergence.
+ The possibilities are listed in \code{.min.criterion.VGAM}, but
+ most family functions only implement a few of these.
+ }
+ \item{epsilon}{ positive convergence tolerance epsilon. Roughly
+ speaking, the Newton-Raphson/Fisher-scoring iterations
+ are assumed to have
+ converged when two successive \code{criterion} values are within
+ \code{epsilon} of each other.
+ }
+ \item{half.stepsizing}{ logical indicating if half-stepsizing is
+ allowed. For example, in maximizing a log-likelihood, if the
+ next iteration has a log-likelihood that is less than the current
+ value of the log-likelihood, then a half step will be taken.
+ If the log-likelihood is still less than at the current position,
+ a quarter-step will be taken etc. Eventually a step will be taken
+ so that an improvement is made to the convergence criterion.
+ \code{half.stepsizing} is ignored if
+ \code{criterion=="coefficients"}.
+ }
+ \item{maxit}{ maximum number of
+ Newton-Raphson/Fisher-scoring iterations allowed. }
+ \item{stepsize}{ usual step size to be taken between each
+ Newton-Raphson/Fisher-scoring iteration. It should be a value
+ between 0 and 1, where
+ a value of unity corresponds to an ordinary step.
+ A value of 0.5 means half-steps are taken.
+ Setting a value near zero will cause convergence
+ to be generally slow but may help increase the chances of
+ successful convergence for some family functions.
+ }
+ \item{save.weight}{ logical indicating whether the \code{weights} slot
+ of a \code{"vglm"} object will be saved on the object. If not, it will
+    be reconstructed when needed, e.g., by \code{summary}.
+ Some family functions have \code{save.weight=TRUE} and others have
+ \code{save.weight=FALSE} in their control functions.
+}
+ \item{trace}{ logical indicating if output should be produced for
+ each iteration. }
+ \item{wzepsilon}{
+ Small positive number used to test whether the diagonals of the working
+ weight matrices are sufficiently positive.
+
+ }
+ \item{xij}{
+ formula giving terms making up a covariate-dependent term (a variable
+  that takes on different values for each linear/additive predictor;
+  for example, the ocular pressure of each eye).
+ There should be \eqn{M} unique terms; use
+ \code{\link{fill1}}, \code{fill2}, \code{fill3}, etc. if necessary.
+ Each formula should have a response which is taken as the name of
+ that variable, and the terms are enumerated in sequential order.
+ With more than one formula, use a list of formulas.
+ See Example 2 below.
+
+}
+ \item{\dots}{ other parameters that may be picked up from control
+ functions that are specific to the \pkg{VGAM} family function. }
+}
+\details{
+ Most of the control parameters are used within \code{vglm.fit} and you
+ will have to look at that to understand the full details.
+
+ Setting \code{save.weight=FALSE} is useful for some models because the
+ \code{weights} slot of the object is the largest and so less memory
+  is used to store the object. However, for some \pkg{VGAM} family functions,
+ it is necessary to set \code{save.weight=TRUE} because the \code{weights}
+ slot cannot be reconstructed later.
+ }
+\value{
+ A list with components matching the input names. A little error
+ checking is done, but not much.
+ The list is assigned to the \code{control} slot of \code{vglm} objects.
+ }
+ \references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+ }
+\author{ Thomas W. Yee}
+\note{
+In Example 2 below there are two covariates that have linear/additive
+predictor specific values.
+These are handled using the \code{xij} argument.
+
+}
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link{vglm}},
+ \code{\link{fill}}.
+}
+
+\examples{
+# Example 1.
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+vglm(cbind(normal,mild,severe) ~ let, multinomial, pneumo,
+ crit="coef", step=0.5, trace=TRUE, eps=1e-8, maxit=40)
+
+
+# Example 2. The use of the xij argument
+set.seed(111)
+n = 1000
+ymat = rdiric(n, shape=c(4,7,3,1))
+mydat = data.frame(x1=runif(n), x2=runif(n), x3=runif(n), x4=runif(n),
+ z1=runif(n), z2=runif(n), z3=runif(n), z4=runif(n))
+mydat = round(mydat, dig=2)
+fit = vglm(ymat ~ x1 + x2 + x3 + x4 + z1 + z2 + z3 + z4,
+ fam = dirichlet, data=mydat, crit="c",
+ xij = list(z ~ z1 + z2 + z3 + z4,
+ x ~ x1 + x2 + x3 + x4))
+model.matrix(fit, type="lm")[1:7,] # LM model matrix
+model.matrix(fit, type="vlm")[1:7,] # Big VLM model matrix
+coef(fit)
+coef(fit, matrix=TRUE)
+coef(fit, matrix=TRUE, compress=FALSE)
+max(abs(predict(fit)-predict(fit, new=mydat))) # Predicts correctly
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
new file mode 100644
index 0000000..ed64716
--- /dev/null
+++ b/man/vglmff-class.Rd
@@ -0,0 +1,233 @@
+\name{vglmff-class}
+\docType{class}
+\alias{vglmff-class}
+\title{Class ``vglmff'' }
+\description{ Family functions for the \pkg{VGAM} package }
+\section{Objects from the Class}{
+Objects can be created by calls of the form \code{new("vglmff", ...)}.
+
+}
+\section{Slots}{
+ In the following, \eqn{M} is the number of linear/additive
+ predictors.
+
+ \describe{
+ \item{\code{blurb}:}{
+ Object of class \code{"character"} giving
+ a small description of the model. Important arguments such as
+ parameter link functions can be expressed here.
+
+ }
+ \item{\code{constraints}:}{
+ Object of class \code{"expression"}
+ which sets up any constraint matrices defined by arguments in the
+ family function. A \code{zero} argument is always fed into
+ \code{cm.zero.vgam}, whereas other constraints are fed into
+ \code{cm.vgam}.
+
+ }
+ \item{\code{deviance}:}{
+ Object of class \code{"function"}
+ returning the deviance of the model. This slot is optional.
+ If present, the function must have arguments
+ \code{function(mu, y, w, residuals = FALSE, eta, extra=NULL)}.
+ Deviance residuals are returned if \code{residuals = TRUE}.
+
+ }
+ \item{\code{fini}:}{
+ Object of class \code{"expression"} to insert
+ code at a special position in \code{vglm.fit} or
+ \code{vgam.fit}.
+ This code is evaluated immediately after the fitting.
+
+ }
+ \item{\code{first}:}{
+ Object of class \code{"expression"} to insert
+ code at a special position in \code{vglm} or
+ \code{vgam}.
+
+ }
+ \item{\code{initialize}:}{
+ Object of class \code{"expression"} used
+ to perform error checking (especially for the variable \code{y})
+ and obtain starting values for the model.
+ In general, \code{etastart} or
+ \code{mustart} are assigned values based on the variables \code{y},
+ \code{x} and \code{w}.
+
+ }
+ \item{\code{inverse}:}{
+ Object of class \code{"function"} which
+ returns the fitted values, given the linear/additive predictors.
+ The function must have arguments
+ \code{function(eta, extra=NULL)}.
+
+ }
+ \item{\code{last}:}{
+ Object of class \code{"expression"} to insert
+ code at a special position (at the very end) of \code{vglm.fit} or
+ \code{vgam.fit}.
+ This code is evaluated after the fitting.
+ The list \code{misc} is often assigned components in this slot,
+ which becomes the \code{misc} slot on the fitted object.
+
+ }
+ \item{\code{link}:}{
+ Object of class \code{"function"} which,
+ given the fitted values, returns the linear/additive predictors.
+ If present, the function must have arguments
+ \code{function(mu, extra=NULL)}.
+
+ }
+ \item{\code{loglikelihood}:}{
+ Object of class \code{"function"}
+ returning the log-likelihood of the model. This slot is optional.
+ If present, the function must have arguments
+ \code{function(mu, y, w, residuals = FALSE, eta, extra=NULL)}.
+ The argument \code{residuals} can be ignored because
+ log-likelihood residuals aren't defined.
+
+ }
+ \item{\code{middle}:}{
+ Object of class \code{"expression"} to insert
+ code at a special position in \code{vglm.fit} or
+ \code{vgam.fit}.
+
+ }
+ \item{\code{middle2}:}{
+ Object of class \code{"expression"} to insert
+ code at a special position in \code{vglm.fit} or
+ \code{vgam.fit}.
+
+ }
+ \item{\code{summary.dispersion}:}{
+ Object of class \code{"logical"}
+ indicating whether the general VGLM formula (based on a residual
+ sum of squares) can be used for computing the scaling/dispersion
+ parameter. It is \code{TRUE} for most models except for nonlinear
+ regression models.
+
+ }
+ \item{\code{vfamily}:}{
+ Object of class \code{"character"}
+ giving class information about the family function. Although
+ not developed at this stage, more flexible classes are planned
+ in the future. For example, family functions
+ \code{sratio}, \code{cratio}, \code{cumulative}, and \code{acat}
+    all operate on categorical data and will therefore have a special class
+    called \code{"VGAMcat"}, say. Then if \code{fit} were
+    a \code{vglm} object, \code{coef(fit)} would print
+ out the \code{vglm} coefficients plus \code{"VGAMcat"}
+ information as well.
+
+ }
+ \item{\code{deriv}:}{
+ Object of class \code{"expression"} which
+ returns a \eqn{M}-column matrix of first derivatives of the
+ log-likelihood function
+ with respect to the linear/additive predictors, i.e., the
+ score vector. In Yee and Wild (1996) this is the
+ \eqn{\bold{d}_i}{\bold{d}i} vector. Thus each row of the
+ matrix returned by this slot is such a vector.
+
+ }
+ \item{\code{weight}:}{
+ Object of class \code{"expression"} which
+ returns the second derivatives of the log-likelihood function
+ with respect to the linear/additive predictors.
+ This can be either the observed or expected information matrix, i.e.,
+ Newton-Raphson or Fisher-scoring respectively.
+ In Yee and Wild (1996) this is the
+ \eqn{\bold{W}_i}{\bold{W}i} matrix. Thus each row of the
+ matrix returned by this slot is such a matrix.
+ Like the \code{weights} slot of \code{vglm}/\code{vgam}, it is
+ stored in
+ \emph{matrix-band} form, whereby the first \eqn{M}
+ columns of the matrix are the
+ diagonals, followed by the upper-diagonal band, followed by the
+    band above that, etc. In this case, there can be up to \eqn{M(M+1)/2}
+    columns, with the last column corresponding to the (1,\eqn{M}) elements
+    of the weight matrices. A small sketch of this storage format is
+    given in the examples below.
+
+ }
+ }
+}
+
+\section{Methods}{
+ \describe{
+ \item{print}{\code{signature(x = "vglmff")}:
+ short summary of the family function.
+ }
+ }
+}
+\references{
+
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+\url{http://www.stat.auckland.ac.nz/~yee} contains further
+information on how to write \pkg{VGAM} family functions.
+The file is amongst other \pkg{VGAM} PDF documentation.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ With link functions etc., one must use \code{substitute} to
+ embed the options into the code. There are two different forms:
+ \code{eval(substitute(expression({...}), list(...)))}
+ for expressions, and
+ \code{eval(substitute( function(...) { ... }, list(...) )) }
+ for functions.
+
+ A unified method of handling arguments is to use
+ \code{match.arg}. This allows, for example,
+ \code{vglm(..., family = cratio(link=logit))}
+ and
+ \code{vglm(..., family = cratio(link="logi"))}
+ to be equivalent (Nb. there is a \code{logit} function).
+
+ The \code{extra} argument in
+ \code{inverse}, \code{link}, \code{deviance}, \code{loglikelihood}, etc.
+ matches with the argument \code{extra}
+ in \code{vglm}, \code{vgam} and \code{rrvglm}. This allows input
+ to be fed into all slots of a \pkg{VGAM} family function.
+
+  The expression \code{deriv} is evaluated immediately prior to
+ \code{weight}, so there is provision for re-use of variables etc.
+ Programmers must be careful to choose variable names that do not
+ interfere with \code{vglm.fit}, \code{vgam.fit} etc.
+
+ Programmers of \pkg{VGAM} family functions are encouraged
+ to keep to previous conventions regarding the naming of arguments,
+ e.g.,
+ \code{link} is the argument for parameter link functions,
+ \code{zero} for allowing some of the
+ linear/additive predictors to be an intercept term only, etc.
+
+ In general, Fisher-scoring is recommended over Newton-Raphson where
+ tractable. Although usually slightly slower in convergence,
+ the weight matrices from
+ using the expected information are positive-definite over a larger
+ parameter space.
+
+ }
+
+\section{Warning }{
+ \pkg{VGAM} family functions are not compatible with
+ \code{\link[stats]{glm}}, nor \code{gam} (from either
+ \pkg{gam} or \pkg{mgcv} packages).
+}
+
+\seealso{
+\code{\link{vglm}},
+\code{\link{vgam}},
+\code{\link{rrvglm}}.
+}
+\examples{
+cratio()
+cratio(link="cloglog")
+cratio(link=cloglog, reverse=TRUE)
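+
+# A hedged base-R sketch of matrix-band storage (for illustration only;
+# the numbers are invented): for M = 3, the diagonals come first, then
+# the upper bands, giving up to M*(M+1)/2 = 6 columns.
+M = 3
+W = matrix(c(4, 1, 0.5,
+             1, 5, 2,
+             0.5, 2, 6), M, M) # One symmetric working weight matrix
+c(diag(W), W[1,2], W[2,3], W[1,3]) # (1,1) (2,2) (3,3) (1,2) (2,3) (1,3)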
+}
+\keyword{classes}
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
new file mode 100644
index 0000000..5cfce4a
--- /dev/null
+++ b/man/vonmises.Rd
@@ -0,0 +1,120 @@
+\name{vonmises}
+\alias{vonmises}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ von Mises Distribution Family Function }
+\description{
+ Estimates the location and scale parameters of the
+ von Mises distribution by maximum likelihood estimation.
+}
+\usage{
+vonmises(lscale="loge", ilocation=NULL,
+ iscale=NULL, method.init=1, zero=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lscale}{
+ Parameter link function applied to the scale parameter \eqn{k}.
+ See \code{\link{Links}} for more choices.
+ A log link is the default because the parameter is positive.
+
+ }
+ \item{ilocation}{
+ Initial value for the location \eqn{a} parameter.
+ By default, an initial value is chosen internally using
+ \code{method.init}. Assigning a value will override
+ the argument \code{method.init}.
+
+ }
+ \item{iscale}{
+ Initial value for the scale \eqn{k} parameter.
+ By default, an initial value is chosen internally using
+ \code{method.init}. Assigning a value will override
+ the argument \code{method.init}.
+
+ }
+ \item{method.init}{
+ An integer with value \code{1} or \code{2} which
+ specifies the initialization method. If failure to converge occurs
+ try the other value, or else specify a value for
+ \code{ilocation} and \code{iscale}.
+
+ }
+ \item{zero}{
+ An integer-valued vector specifying which
+ linear/additive predictors are modelled as intercepts only.
+ The default is none of them.
+ If used, choose one value from the set \{1,2\}.
+
+ }
+% \item{hstep}{ Positive numeric. The \eqn{h} used for the finite difference
+% approximation, e.g., in \eqn{(f(x+h)-f(x))/h} for the first
+% derivative estimate of the modified Bessel function values.
+% If too small, some half stepsizing may occur;
+% if too large, numerical problems might occur.
+% }
+
+}
+\details{
+ The (two-parameter) von Mises distribution
+ has a density that can be written as
+ \deqn{f(y;a,k) = \frac{\exp[k\cos(y-a)]}{
+ 2\pi I_0(k)}}{%
+ f(y;a,k) = exp[k*cos(y-a)] / (2*pi*I0(k))}
+ where \eqn{0 \leq y < 2\pi}{0 <= y < 2*pi},
+ \eqn{k>0} is the scale parameter,
+ \eqn{a} is the location parameter, and
+ \eqn{I_0(k)}{I0(k)} is the modified Bessel
+ function of order 0 evaluated at \eqn{k}.
+ The mean of \eqn{Y} (which is the fitted value) is \eqn{a}
+ and the circular variance is
+ \eqn{1 - I_1(k) / I_0(k)}{1 - I1(k) / I0(k)}
+ where \eqn{I_1(k)}{I1(k)} is the modified Bessel
+ function of order 1.
+ By default, \eqn{\eta_1=a}{eta1=a} and
+ \eqn{\eta_2=\log(k)}{eta2=log(k)} for this family function, but
+ later an extended logit link \eqn{\eta_1=\log(a/(2\pi-a))}{eta1=log(a/(2*pi-a))}
+ might be provided for \eqn{\eta_1}{eta1}.
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+\references{
+Evans, M., Hastings, N. and Peacock, B. (2000)
+\emph{Statistical Distributions},
+New York: Wiley-Interscience, Third edition.
+}
+\author{ T. W. Yee }
+\note{
+ The response and the fitted values are scaled so that
+ \eqn{0\leq y< 2\pi}{0<=y<2*pi}.
+ The linear/additive predictors are left alone.
+ Fisher scoring is used.
+}
+\section{Warning }{
+  Numerically, the von Mises distribution can be difficult to fit
+  because the log-likelihood has multiple maxima.
+ The user is therefore encouraged to try different starting values,
+ i.e., make use of \code{ilocation} and \code{iscale}.
+}
+
+\seealso{
+ \code{\link[base]{Bessel}}.
+
+ \pkg{CircStats} and \pkg{circular} currently have a lot more
+ R functions for circular data than the \pkg{VGAM} package.
+}
+\examples{
+x = runif(n <- 1000)
+y = rnorm(n, m=2+x, sd=exp(0.2)) # Not von Mises data!!
+fit = vglm(y ~ x, vonmises(zero=2), trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+range(y) # original data
+range(fit@y) # processed data is in [0,2*pi)
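+
+# A hedged check (base R only): evaluate the density formula directly
+# with besselI(), and the circular variance 1 - I1(k)/I0(k).
+# The values of a, k and yy here are arbitrary assumptions.
+a = 2; k = 1.5; yy = c(0.5, 1, 4)
+exp(k * cos(yy - a)) / (2 * pi * besselI(k, nu=0)) # f(y; a, k)
+1 - besselI(k, nu=1) / besselI(k, nu=0) # circular variance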
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/vsmooth.spline.Rd b/man/vsmooth.spline.Rd
new file mode 100644
index 0000000..86b10b5
--- /dev/null
+++ b/man/vsmooth.spline.Rd
@@ -0,0 +1,185 @@
+\name{vsmooth.spline}
+\alias{vsmooth.spline}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Vector cubic smoothing spline }
+\description{
+ Fits a vector cubic smoothing spline.
+}
+\usage{
+vsmooth.spline(x, y, w, df = rep(5, M), spar = NULL,
+ all.knots = FALSE,
+ iconstraint = diag(M), xconstraint = diag(M),
+ constraints = list("(Intercepts)" = diag(M), x = diag(M)),
+ tol.nl = 0.01, var.arg = FALSE, scale.w = TRUE, nk = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{ A vector, matrix or a list.
+If a list, the \code{x} component is used.
+If a matrix, the first column is used.
+\code{x} may also be a complex vector, in which case
+the real part is used, and the imaginary part is used for the response.
+In this help file, \code{n} is the number of unique values of \code{x}.
+}
+ \item{y}{ A vector, matrix or a list.
+If a list, the \code{y} component is used.
+If a matrix, all but the first column is used.
+In this help file, \code{M} is the number of columns of \code{y} if
+there are no constraints on the functions.
+}
+ \item{w}{
+Either the weight matrices or the number of observations.
+If weight matrices, then this must be an \code{n}-row matrix
+with the elements in matrix-band form (see \code{iam}).
+If a vector, then its elements are the numbers of observations.
+By default, each weight matrix is the \code{M} by \code{M} identity
+matrix, which in matrix-band form is \code{matrix(1, n, M)}.
+}
+ \item{df}{ Numerical vector containing the degrees of
+freedom for each component function (smooth).
+If necessary, the vector is recycled to have length equal
+to the number of component functions to be estimated
+(\code{M} if there are no constraints), which
+equals the number of columns of the \code{x}-constraint matrix.
+A value of 2 means a linear fit, and each element of
+\code{df} should lie between 2 and \code{n}.
+The larger the values of \code{df} the more wiggly the
+smooths.
+}
+ \item{spar}{
+Numerical vector containing the non-negative smoothing
+parameters for each component function (smooth).
+If necessary, the vector is recycled to have length equal
+to the number of component functions to be estimated
+(\code{M} if there are no constraints), which
+equals the number of columns of the \code{x}-constraint matrix.
+A value of zero means the smooth goes through the data and hence
+is wiggly.
+A value of \code{Inf} may be assigned, meaning the smooth will
+be linear.
+By default, the \code{NULL} value of \code{spar} means
+\code{df} is used to determine the smoothing
+parameters.
+}
+ \item{all.knots}{ Logical. If \code{TRUE} then each distinct
+value of \code{x} will be a knot. By default, only a subset of
+the unique values of \code{x} are used; typically, the number
+of knots is \code{O(n^0.25)} for \code{n} large,
+but if \code{n <= 40} then all the unique values of \code{x} are used.
+}
+ \item{iconstraint}{ An \code{M}-row constraint matrix for the
+intercepts. It must be of full column rank.
+By default, the constraint matrix for the intercepts is the
+\code{M} by \code{M} identity matrix, meaning no constraints.
+}
+ \item{xconstraint}{ An \code{M}-row constraint matrix for \code{x}.
+It must be of full column rank.
+By default, the constraint matrix for \code{x} is the
+\code{M} by \code{M} identity matrix, meaning no constraints.
+}
+ \item{constraints}{
+An alternative to specifying \code{iconstraint} and \code{xconstraint},
+this is a list with two components corresponding to the
+intercept and \code{x} respectively. Each must be an
+\code{M}-row constraint matrix with full column rank.
+}
+ \item{tol.nl}{ Tolerance for testing nonlinearity for the
+component functions. If \code{df} is within \code{tol.nl} of
+2 then the function is treated as linear.
+}
+ \item{var.arg}{ Logical: return the pointwise variances
+of the fit?
+Currently, this corresponds only to the nonlinear part of the
+fit, and may be wrong.
+}
+ \item{scale.w}{
+Logical.
+By default, the weights \code{w} are scaled so that the
+diagonal elements have mean 1.
+}
+ \item{nk}{ Number of knots.
+If used, this argument overrides \code{all.knots}, and
+must lie between 6 and \code{n}+2 inclusive.
+}
+}
+\details{
+ The algorithm implemented is detailed in Yee (2000).
+ It involves decomposing the component functions into a linear and
+ nonlinear part, and using B-splines.
+ The cost of the computation is \code{O(n M^3)}.
+
+ The argument \code{spar} contains \emph{scaled} smoothing parameters.
+
+}
+\value{
+ An object of class \code{"vsmooth.spline"} (see
+\code{vsmooth.spline-class}).
+}
+\references{
+Yee, T. W. (2000)
+Vector Splines and Other Vector Smoothers.
+Pages 529--534.
+In: Bethlehem, J. G. and van der Heijde, P. G. M.
+\emph{Proceedings in Computational Statistics COMPSTAT 2000}.
+Heidelberg: Physica-Verlag.
+
+}
+\author{ Thomas W. Yee }
+\note{
+ This function is quite similar to \code{\link[stats]{smooth.spline}}.
+ For \code{M=1}, the results will be generally different, mainly due to
+ the different way the knots are selected.
+
+ The vector cubic smoothing spline which \code{s()} represents is
+ computationally demanding for large \eqn{M}. The cost is approximately
+ \eqn{O(M^3)}.
+
+ Yet to be done: return the \emph{unscaled} smoothing parameters.
+
+}
+
+\seealso{
+\code{vsmooth.spline-class},
+\code{plot.vsmooth.spline},
+\code{predict.vsmooth.spline},
+\code{iam},
+\code{\link[VGAM]{s}},
+\code{\link[stats]{smooth.spline}}.
+}
+\examples{
+n = 20
+x = 2 + 5*(n:1)/n
+x[2:4] = x[5:7] # Allow duplication
+y1 = sin(x) + rnorm(n, sd=0.13)
+y2 = cos(x) + rnorm(n, sd=0.13)
+y3 = 1 + sin(x) + rnorm(n, sd=0.13) # Run this for constraints
+y = cbind(y1, y2, y3)
+ww = cbind(rep(3,n), 4, (1:n)/n)
+
+(fit = vsmooth.spline(x, y, w=ww, df=5))
+\dontrun{
+plot(fit) # The 1st and 3rd functions do not differ by a constant
+}
+
+mat = matrix(c(1,0,1, 0,1,0), 3, 2)
+(fit2 = vsmooth.spline(x, y, w=ww, df=5, iconstr=mat, xconstr=mat))
+# The 1st and 3rd functions do differ by a constant:
+mycols = c("red","blue","red")
+\dontrun{
+plot(fit2, lcol=mycols, pcol=mycols, las=1)
+}
+
+p = predict(fit, x=fit at x, deriv=0)
+max(abs(fit at y - p$y)) # Should be zero
+
+par(mfrow=c(3,1))
+ux = seq(1, 8, len=100)
+for(d in 1:3) {
+ p = predict(fit, x=ux, deriv=d)
+\dontrun{
+ matplot(p$x, p$y, type="l", main=paste("deriv =", d), lwd=2)
+}
+}
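+
+# A hedged sketch: df=2 requests a linear fit for each component
+# function (a df within tol.nl of 2 is treated as exactly linear).
+(fitlin = vsmooth.spline(x, y, w=ww, df=2))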
+}
+\keyword{regression}
+\keyword{smooth}
diff --git a/man/waitakere.Rd b/man/waitakere.Rd
new file mode 100644
index 0000000..68ae1bb
--- /dev/null
+++ b/man/waitakere.Rd
@@ -0,0 +1,58 @@
+\name{waitakere}
+\alias{waitakere}
+\non_function{}
+\title{Waitakere Ranges data}
+\usage{data(waitakere)}
+\description{
+ The \code{waitakere} data frame has 579 rows and 18 columns.
+ Altitude is explanatory, and there are binary responses
+ (presence/absence = 1/0 respectively) for 17 plant species.
+
+}
+\format{
+ This data frame contains the following columns:
+ \describe{
+ \item{agaaus}{Agathis australis, or Kauri}
+ \item{beitaw}{Beilschmiedia tawa, or Tawa}
+ \item{corlae}{Corynocarpus laevigatus}
+ \item{cyadea}{Cyathea dealbata}
+ \item{cyamed}{Cyathea medullaris}
+ \item{daccup}{Dacrydium cupressinum}
+ \item{dacdac}{Dacrycarpus dacrydioides}
+    \item{eladen}{Elaeocarpus dentatus}
+ \item{hedarb}{Hedycarya arborea}
+ \item{hohpop}{Species name unknown}
+ \item{kniexc}{Knightia excelsa, or Rewarewa}
+ \item{kuneri}{Kunzea ericoides}
+ \item{lepsco}{Leptospermum scoparium}
+ \item{metrob}{Metrosideros robusta}
+ \item{neslan}{Nestegis lanceolata}
+ \item{rhosap}{Rhopalostylis sapida}
+ \item{vitluc}{Vitex lucens, or Puriri}
+ \item{altitude}{meters above sea level}
+ }
+}
+\details{
+ These were collected from the Waitakere Ranges, a small forest in northern
+ Auckland, New Zealand. At 579 sites in the forest, the presence/absence
+ of 17 plant species was recorded, as well as the altitude.
+  Each site had an area of 200\eqn{m^2}{m^2}.
+
+}
+\source{
+ Dr Neil Mitchell, University of Auckland.
+}
+%\references{
+%None.
+%}
+\seealso{
+ \code{\link{hunua}}.
+}
+\examples{
+data(waitakere)
+fit = vgam(agaaus ~ s(altitude), binomialff, waitakere)
+\dontrun{
+plot(fit, se=TRUE, lcol="red", scol="blue") }
+predict(fit, waitakere, type="response")[1:3]
+}
+\keyword{datasets}
diff --git a/man/wald.Rd b/man/wald.Rd
new file mode 100644
index 0000000..8a857d6
--- /dev/null
+++ b/man/wald.Rd
@@ -0,0 +1,75 @@
+\name{wald}
+\alias{wald}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Wald Distribution Family Function }
+\description{
+Estimates the parameter of the standard Wald distribution
+by maximum likelihood estimation.
+}
+\usage{
+wald(link.lambda="loge", init.lambda=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.lambda}{
+ Parameter link function for the \eqn{\lambda}{lambda} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.lambda}{
+ Initial value for the \eqn{\lambda}{lambda} parameter.
+ The default means an initial value is chosen internally.
+
+ }
+}
+\details{
+ The standard Wald distribution is a special case of the
+ inverse Gaussian distribution with \eqn{\mu=1}{mu=1}.
+ It has a density that can be written as
+ \deqn{f(y;\lambda) = \sqrt{\lambda/(2\pi y^3)}
+ \; \exp\left(-\lambda (y-1)^2/(2 y)\right)}{%
+        f(y;lambda) = sqrt(lambda/(2*pi*y^3)) *
+ exp(-lambda*(y-1)^2/(2*y))
+ }
+ where \eqn{y>0} and \eqn{\lambda>0}{lambda>0}.
+ The mean of \eqn{Y} is \eqn{1} (returned as the fitted values) and its variance is
+ \eqn{1/\lambda}{1/lambda}.
+ By default, \eqn{\eta=\log(\lambda)}{eta=log(lambda)}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
+\emph{Continuous Univariate Distributions},
+2nd edition,
+Volume 1,
+New York: Wiley.
+}
+\author{ T. W. Yee }
+\note{
+ The \pkg{VGAM} family function \code{\link{inv.gaussianff}}
+ estimates the location parameter \eqn{\mu}{mu} too.
+}
+
+
+\seealso{
+ \code{\link{inv.gaussianff}}.
+
+}
+\examples{
+set.seed(123)
+shape = 1
+y = rgamma(n=1000, shape=shape) # Not inverse Gaussian!!
+fit = vglm(y ~ 1, wald(init=0.2), trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+summary(fit)
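+
+# A hedged check (base R): the standard Wald density evaluated directly
+# from the formula; lam and the yy values are arbitrary assumptions.
+lam = 2; yy = c(0.5, 1, 2)
+sqrt(lam / (2 * pi * yy^3)) * exp(-lam * (yy - 1)^2 / (2 * yy))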
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/weibull.Rd b/man/weibull.Rd
new file mode 100644
index 0000000..b97e1d1
--- /dev/null
+++ b/man/weibull.Rd
@@ -0,0 +1,148 @@
+\name{weibull}
+\alias{weibull}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Weibull Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter Weibull distribution.
+ Allows for Type-I right censored data.
+
+}
+\usage{
+weibull(lshape = "logoff", lscale = "loge",
+ eshape=if(lshape == "logoff") list(offset=-2) else list(),
+ escale=list(),
+ ishape = NULL, iscale = NULL, imethod=1, zero = 2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Parameter link functions applied to the
+ (positive) shape parameter (called \eqn{a} below) and
+ (positive) scale parameter (called \eqn{b} below).
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{eshape, escale}{
+ Extra argument for the respective links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{ishape, iscale}{
+ Optional initial values for the shape and scale parameters.
+ }
+ \item{imethod}{
+ Initialization method used if there are censored observations.
+ Currently only the values 1 and 2 are allowed.
+ }
+ \item{zero}{
+ An integer specifying which linear/additive predictor is to be modelled
+ as an intercept only. The value must be from the set \{1,2\},
+ which correspond to the shape and scale parameters respectively.
+ Setting \code{zero=NULL} means none of them.
+
+ }
+}
+\details{
+ The Weibull density for a response \eqn{Y} is
+ \deqn{f(y;a,b) = a y^{a-1} \exp[-(y/b)^a] / (b^a)}{%
+ f(y;a,b) = a y^(a-1) * exp(-(y/b)^a) / [b^a]}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+ The cumulative distribution function is
+ \deqn{F(y;a,b) = 1 - \exp[-(y/b)^a].}{%
+ F(y;a,b) = 1 - exp(-(y/b)^a).}
+ The mean of \eqn{Y} is \eqn{b \, \Gamma(1+ 1/a)}{b * gamma(1+ 1/a)}
+ (returned as the fitted values),
+ and the mode is at \eqn{b\,(1-1/a)^{1/a}}{b * (1- 1/a)^(1/a)} when
+ \eqn{a>1}.
+ The density is unbounded for \eqn{a<1}.
+ The \eqn{k}th moment about the origin is
+ \eqn{E(Y^k) = b^k \, \Gamma(1+ k/a)}{E(Y^k) = b^k * gamma(1+ k/a)}.
+
+ This \pkg{VGAM} family function handles Type-I right censored data as
+ well as complete data.
+ Fisher scoring is used to estimate the two parameters.
+  The Fisher information matrices used here are only valid
+  if \eqn{a>2}; this is the region where the regularity conditions
+  for maximum likelihood estimation are satisfied.
+ For this reason, the default link function for the shape parameter is
+ a log-link with an offset value of \eqn{-2}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+}
+\references{
+Kleiber, C. and Kotz, S. (2003)
+\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
+Hoboken, NJ: Wiley-Interscience.
+
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
+\emph{Continuous Univariate Distributions},
+2nd edition, Volume 1, New York: Wiley.
+
+Gupta, R. D. and Kundu, D. (2006)
+On the comparison of Fisher information of the
+Weibull and GE distributions,
+\emph{Journal of Statistical Planning and Inference},
+\bold{136},
+3130--3144.
+
+}
+\author{ T. W. Yee }
+\note{
+ Successful convergence depends on having reasonably good initial
+ values. If the initial values chosen by this function are not good,
+  make use of the two initial value arguments.
+ For censored data, numerical integration is used to compute the
+ expected working weight matrices; this may fail if the data is
+ censored `too much' and/or may be quite slow.
+ See the example below on how to input censored data.
+
+ The Weibull distribution is often an alternative to the lognormal
+ distribution. The inverse Weibull distribution, which is that of
+ \eqn{1/Y} where \eqn{Y} has a Weibull(\eqn{a,b}) distribution, is
+ known as the log-Gompertz distribution.
+
+}
+\section{Warning}{
+ If the shape parameter is less than two then numerical problems may
+ occur during the fitting and/or misleading inference may result in
+ the \code{summary} of the object.
+
+}
+
+\seealso{
+ \code{\link[stats:Weibull]{dweibull}},
+ \code{\link{gev}},
+ \code{\link{lognormal}},
+ \code{\link{expexp}}.
+}
+\examples{
+# Complete data
+x = runif(n <- 1000)
+y = rweibull(n, shape=2+exp(1+x), scale = exp(-0.5))
+fit = vglm(y ~ x, weibull, tra=TRUE)
+coef(fit, mat=TRUE)
+Coef(fit)
+
+# Type-I right censored data
+cutpt = 0.6 # Making this too small results in numerical problems
+rcensored = y > cutpt
+cy = ifelse(rcensored, cutpt, y)
+table(rcensored)
+\dontrun{
+par(mfrow=1:2)
+hist(y, xlim=range(y))
+hist(cy, xlim=range(y), main="Censored y")
+}
+cfit = vglm(cy ~ x, weibull, trace=TRUE, crit="l",
+ extra=list(rightcensored=rcensored))
+coef(cfit, mat=TRUE)
+summary(cfit)
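+
+# A hedged check (base R): the mean formula b * gamma(1 + 1/a) against
+# a simulation; the values of a and b here are arbitrary assumptions.
+a = 3; b = 2
+mean(rweibull(10000, shape=a, scale=b)) # Empirical
+b * gamma(1 + 1/a) # Theoretical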
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
new file mode 100644
index 0000000..e088731
--- /dev/null
+++ b/man/weightsvglm.Rd
@@ -0,0 +1,134 @@
+\name{weightsvglm}
+\alias{weightsvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Prior and Working Weights of a VGLM fit }
+\description{
+ Returns either the prior weights or working weights of a VGLM object.
+
+}
+\usage{
+weightsvglm(object, type = c("prior", "working"),
+ matrix.arg = TRUE, ignore.slot = FALSE,
+ deriv.arg = FALSE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ a model object from the \pkg{VGAM} \R package
+ that inherits from a \emph{vector generalized linear model} (VGLM),
+ e.g., a model of class \code{"vglm"}.
+
+ }
+ \item{type}{
+ Character, which type of weight is to be returned?
+ The default is the first one.
+
+ }
+ \item{matrix.arg}{
+ Logical, whether the answer is returned as a
+ matrix. If not, it will be a vector.
+
+ }
+ \item{ignore.slot}{
+ Logical. If \code{TRUE} then
+    \code{object@weights} is ignored even if it has been assigned,
+    and the long calculation for \code{object@weights}
+    is repeated. This may give a slightly different answer because
+    the final IRLS step at convergence may or may not assign
+    the latest values of quantities such as the mean and weights.
+
+ }
+ \item{deriv.arg}{
+ Logical. If \code{TRUE} then
+ a list with components \code{deriv} and \code{weights}
+ is returned. See below for more details.
+
+ }
+ \item{\dots}{
+ Currently ignored.
+
+ }
+}
+\details{
+ Prior weights are usually inputted with the \code{weights} argument in
+ functions such as \code{\link{vglm}} and \code{\link{vgam}}. It may
+ refer to frequencies of the individual data or be weight matrices
+ specified beforehand.
+
+ Working weights are used by the IRLS algorithm. They correspond to the
+ second derivatives of the log-likelihood function with respect to the
+ linear predictors. The working weights correspond to positive-definite
+ weight matrices and are returned in \emph{matrix-band} form, e.g.,
+ the first \eqn{M} columns correspond to the diagonals, etc.
+
+}
+\value{
+ If \code{type="working"} and \code{deriv=TRUE} then a list is returned
+ with the two components described below. Otherwise the prior or
+ working weights are returned depending on the value of \code{type}.
+
+ \item{deriv}{
+ Typically the first derivative of the
+ log-likelihood with respect to the linear predictors.
+ For example, this is the variable \code{deriv.mu} in
+ \code{vglm.fit()}, or equivalently, the matrix returned in the
+ \code{"deriv"} slot of a \pkg{VGAM} family function.
+
+ }
+ \item{weights }{
+ The working weights.
+
+ }
+}
+\references{
+ Yee, T. W. and Hastie, T. J. (2003)
+ Reduced-rank vector generalized linear models.
+ \emph{Statistical Modelling},
+ \bold{3}, 15--41.
+
+ Chambers, J. M. and T. J. Hastie (eds) (1992)
+ \emph{Statistical Models in S}.
+ Wadsworth & Brooks/Cole.
+}
+
+\author{ Thomas W. Yee }
+
+\note{
+ This function is intended to be similar to
+ \code{weights.glm} (see \code{\link[stats]{glm}}).
+
+}
+
+% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link[stats]{glm}},
+ \code{\link{vglmff-class}},
+ \code{\link{vglm}}.
+}
+\examples{
+data(pneumo)
+pneumo = transform(pneumo, let=log(exposure.time))
+(fit = vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel=TRUE, reverse=TRUE), pneumo))
+fit@y # These are sample proportions
+weights(fit, type="prior", matrix=FALSE) # Number of observations
+
+# Look at the working residuals
+n = nrow(model.matrix(fit, type="lm"))
+M = ncol(predict(fit))
+
+temp = weights(fit, type="working", deriv=TRUE)
+wz = m2adefault(temp$weights, M=M) # In array format
+wzinv = array(apply(wz, 3, solve), c(M,M,n))
+wresid = matrix(NA, n, M) # working residuals
+for(i in 1:n)
+ wresid[i,] = wzinv[,,i,drop=TRUE] \%*\% temp$deriv[i,]
+max(abs(c(resid(fit, type="w")) - c(wresid))) # Should be 0
+
+z = predict(fit) + wresid # Adjusted dependent vector
+z
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/wrapup.smart.Rd b/man/wrapup.smart.Rd
new file mode 100644
index 0000000..9830957
--- /dev/null
+++ b/man/wrapup.smart.Rd
@@ -0,0 +1,37 @@
+\name{wrapup.smart}
+\alias{wrapup.smart}
+\title{ Cleans Up After Smart Prediction }
+\description{
+\code{wrapup.smart} deletes any variables used by smart prediction.
+Needed by both the modelling function and the prediction function.
+}
+\usage{
+wrapup.smart()
+}
+\details{
+ The variables to be deleted are \code{.smart.prediction},
+ \code{.smart.prediction.counter}, and \code{.smart.prediction.mode}.
+ The function \code{wrapup.smart} is useful in \R because
+ these variables are held in \code{smartpredenv}. In S-PLUS,
+ \code{wrapup.smart} is not really necessary because the variables are
+ placed in frame 1, which disappears when finished anyway.
+
+}
+\references{
+ See the technical help file at \url{http://www.stat.auckland.ac.nz/~yee}
+ for details.
+
+}
+\seealso{
+\code{\link{setup.smart}}.
+}
+\examples{
+\dontrun{# Place this inside modelling functions such as lm, glm, vglm.
+wrapup.smart() # Put at the end of lm
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+% Converted by Sd2Rd version 1.10.
diff --git a/man/yeo.johnson.Rd b/man/yeo.johnson.Rd
new file mode 100644
index 0000000..a0ef366
--- /dev/null
+++ b/man/yeo.johnson.Rd
@@ -0,0 +1,79 @@
+\name{yeo.johnson}
+\alias{yeo.johnson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Yeo-Johnson Transformation}
+\description{
+ Computes the Yeo-Johnson transformation, which is a
+ normalizing transformation.
+}
+\usage{
+yeo.johnson(y, lambda, derivative = 0,
+ epsilon = sqrt(.Machine$double.eps), inverse = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{y}{Numeric, a vector or matrix. }
+ \item{lambda}{Numeric. It is recycled to the same length as
+ \code{y} if necessary. }
+ \item{derivative}{Non-negative integer. The default is
+ the ordinary function evaluation, otherwise the derivative
+ with respect to \code{lambda}.}
+ \item{epsilon}{ Numeric and positive value. The tolerance given
+    to values of \code{lambda} when comparing them to 0 or 2. }
+ \item{inverse}{ Logical.
+ Return the inverse transformation? }
+}
+\details{
+ The Yeo-Johnson transformation can be thought of as an extension
+ of the Box-Cox transformation. It handles both positive and
+ negative values, whereas the Box-Cox transformation only handles
+ positive values. Both can be used to transform the data so
+ as to improve normality. They can be used to perform LMS
+ quantile regression.
+}
+\value{
+ The Yeo-Johnson transformation or its inverse, or its
+ derivatives with respect to \code{lambda}, of \code{y}.
+}
+\references{
+Yeo, I.-K. and Johnson, R. A. (2000)
+A new family of power transformations to improve normality or symmetry.
+\emph{Biometrika},
+\bold{87}, 954--959.
+
+Yee, T. W. (2004)
+Quantile regression via vector generalized additive models.
+\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+}
+
+\author{ Thomas W. Yee }
+\note{
+If \code{inverse=TRUE} then the argument \code{derivative=0} is required.
+}
+
+\seealso{
+\code{\link{lms.yjn}},
+\code{\link[MASS]{boxcox}}.
+}
+\examples{
+n = 200
+y = seq(-4, 4, len=n)
+ltry = c(0, 0.5, 1, 1.5, 2) # Try these values of lambda
+lltry = length(ltry)
+psi = matrix(NA, n, lltry)
+for(i in 1:lltry) {
+ psi[,i] = yeo.johnson(y, lambda=ltry[i])
+}
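+
+# A hedged cross-check: the transformation coded directly from
+# Yeo and Johnson (2000), valid here for 0 < lambda < 2:
+yj = function(y, lambda)
+  ifelse(y >= 0, ((y + 1)^lambda - 1) / lambda,
+                -((1 - y)^(2 - lambda) - 1) / (2 - lambda))
+max(abs(yj(y, lambda=0.5) - yeo.johnson(y, lambda=0.5))) # Should be ~0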
+
+\dontrun{
+matplot(y, psi, type="l", ylim=c(-4, 4), lwd=2, lty=1:lltry,
+ ylab="Yeo-Johnson transformation", col=1:lltry, las=1,
+ main="Yeo-Johnson transformation with some values of lambda")
+abline(v=0, h=0)
+legend(x=c(1), y=c(-0.5), lty=1:lltry, legend=as.character(ltry),
+ lwd=2, col=1:lltry)
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/yip88.Rd b/man/yip88.Rd
new file mode 100644
index 0000000..a784410
--- /dev/null
+++ b/man/yip88.Rd
@@ -0,0 +1,147 @@
+\name{yip88}
+\alias{yip88}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Inflated Poisson Distribution (Yip (1988) algorithm) }
+\description{
+ Fits a zero-inflated Poisson distribution based on Yip (1988).
+
+}
+\usage{
+yip88(link.lambda = "loge", n.arg = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link.lambda}{
+ Link function for the usual \eqn{\lambda}{lambda} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{n.arg}{
+ The total number of observations in the data set. Needed when the
+ response variable has all the zeros deleted from it, so that the
+ number of zeros can be determined.
+
+ }
+}
+\details{
+ The method implemented here, Yip (1988), maximizes a \emph{conditional}
+ likelihood. Consequently, the methodology used here deletes the
+ zeros from the data set, and is thus related to the positive Poisson
+ distribution (where \eqn{P(Y=0) = 0}).
+
+ The probability function of \eqn{Y} is 0 with probability
+ \eqn{\phi}{phi}, and
+ Poisson(\eqn{\lambda}{lambda}) with probability \eqn{1-\phi}{1-phi}.
+ Thus
+ \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
+ P(Y=0) = phi + (1-phi) * P(W=0)}
+ where \eqn{W} is Poisson(\eqn{\lambda}{lambda}).
+ The mean, \eqn{(1-\phi) \lambda}{(1-phi) * lambda}, can be obtained
+ by the extractor function \code{fitted} applied to the object.
+
+ This family function treats \eqn{\phi}{phi} as a scalar. If you want
+ to model both \eqn{\phi}{phi} and \eqn{\lambda}{lambda} as a function
+ of covariates, try \code{\link{zipoisson}}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+}
+\references{
+
+Yip, P. (1988)
+Inference about the mean of a Poisson distribution in the
+presence of a nuisance parameter.
+\emph{The Australian Journal of Statistics},
+\bold{30},
+299--306.
+
+Angers, J-F. and Biswas, A. (2003)
+A Bayesian analysis of zero-inflated generalized Poisson
+model.
+\emph{Computational Statistics & Data Analysis},
+\bold{42}, 37--46.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+}
+\author{ Thomas W. Yee }
+\note{
+ The data may be inputted in two ways. The first is when the response is
+ a vector of positive values, with the argument \code{n} in \code{yip88}
+ specifying the total number of observations. The second is simply
+ include all the data in the response. In this case, the zeros are
+ trimmed off during the computation, and the \code{x} and \code{y}
+ slots of the object, if assigned, will reflect this.
+
+ The estimate of \eqn{\phi}{phi} is placed in the \code{misc} slot as
+ \code{@misc$phi}. However, this estimate is computed only for intercept
+ models, i.e., the formula is of the form \code{y ~ 1}.
+
+}
+\section{Warning }{
+ Under- or over-flow may occur if the data is ill-conditioned.
+ Yip (1988) only considered \eqn{\phi}{phi} being a scalar and not
+ modelled as a function of covariates. To get around this limitation,
+ try \code{\link{zipoisson}}.
+
+ Inference obtained from \code{summary.vglm} and \code{summary.vgam}
+ may or may not be correct. In particular, the p-values, standard
+ errors and degrees of freedom may need adjustment. Use simulation on
+ artificial data to check that these are reasonable.
+
+}
+\seealso{
+ \code{\link{zipoisson}},
+ \code{\link{Zipois}},
+ \code{\link{zapoisson}},
+ \code{\link{pospoisson}},
+ \code{\link{poissonff}},
+ \code{\link{dzipois}}.
+}
+
+\examples{
+# Generate some artificial data
+n = 1000
+phi = 0.35
+lambda = 2
+y = rzipois(n, lambda, phi)
+table(y)
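+
+# A hedged check (base R): under the ZIP model,
+# P(Y=0) = phi + (1-phi) * exp(-lambda).
+phi + (1 - phi) * exp(-lambda) # Theoretical
+mean(y == 0) # Empirical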
+
+# Two equivalent ways of fitting the same model
+fit1 = vglm(y ~ 1, yip88(n=length(y)), subset=y>0, trace=TRUE, crit="c")
+fit2 = vglm(y ~ 1, yip88, trace=TRUE, crit="c")
+true.mean = (1-phi) * lambda
+true.mean
+mean(y)
+fitted(fit1)[1:5,]
+fit1@misc$phi # The estimate of phi
+
+# Compare the ZIP with the positive Poisson distribution
+pp = vglm(y ~ 1, pospoisson, subset=y>0, tr=TRUE, crit="c")
+coef(pp)
+Coef(pp)
+(coef(fit1) - coef(pp)) # Same
+((fitted(fit1) - fitted(pp))[1:5,]) # Different
+
+# Another example ------------------------------
+y = 0:7 # Data from Angers and Biswas (2003)
+w = c(182, 41, 12, 2, 2, 0, 0, 1)
+y = y[w>0]
+w = w[w>0]
+yy = rep(y,w)
+
+fit3 = vglm(yy ~ 1, yip88(n=length(yy)), subset=yy>0, trace=TRUE, crit="c")
+fit3@misc$phi # Estimate of phi (they get 0.5154 with standard error 0.0707)
+coef(fit3, matrix=TRUE)
+Coef(fit3) # Estimate of lambda (they get 0.6997 with standard error 0.1520)
+fitted(fit3)[1:5]
+mean(yy) # compare this with fitted(fit3)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
new file mode 100644
index 0000000..fba2b95
--- /dev/null
+++ b/man/zanegbinomial.Rd
@@ -0,0 +1,156 @@
+\name{zanegbinomial}
+\alias{zanegbinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Altered Negative Binomial Distribution }
+\description{
+ Fits a zero-altered negative binomial distribution based on
+ a conditional model involving a binomial distribution and a
+ positive-negative binomial distribution.
+
+}
+\usage{
+zanegbinomial(lp0="logit", lmunb = "loge", lk = "loge",
+ ik = 1, zero = -3, cutoff = 0.995, method.init=3)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lp0}{
+ Link function for the parameter \eqn{p_0}{p0}, called \code{p0} here.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lmunb}{
+ Link function applied to the \code{munb} parameter, which is the mean
+ \eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lk}{
+ Parameter link function applied to the reciprocal of the dispersion
+ parameter, called \code{k}. That is, as \code{k} increases, the
+ variance of the response decreases.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{ik}{
+  Initial values for \code{k}. They must be positive, with one value
+ for each response/species.
+
+ }
+ \item{zero}{
+ Integer valued vector, usually assigned \eqn{-3} or \eqn{3} if
+ used at all. Specifies which of the three linear predictors are
+ modelled as an intercept only. By default, the \code{k} parameter
+ (after \code{lk} is applied) for each response is modelled as
+ a single unknown number that is estimated. It can be modelled as a
+ function of the explanatory variables by setting \code{zero=NULL}.
+ A negative value means that the value is recycled, so setting \eqn{-3}
+ means all \code{k} are intercept only.
+
+ }
+ \item{cutoff}{
+ A numeric which is close to 1 but never exactly 1. Used to
+ specify how many terms of the infinite series are actually used.
+    The probabilities are summed until their total reaches this value
+ or more. It is like specifying \code{p} in an imaginary function
+ \code{qnegbin(p)}.
+
+ }
+ \item{method.init}{
+ See \code{\link{negbinomial}}.
+
+ }
+}
+
+\details{
+ The response \eqn{Y} is zero with probability \eqn{p_0}{p0},
+ or \eqn{Y} has a positive-negative binomial distribution with
+ probability \eqn{1-p_0}{1-p0}. Thus \eqn{0 < p_0 < 1}{0 < p0 < 1},
+ which is modelled as a function of the covariates. The zero-altered
+ negative binomial distribution differs from the zero-inflated negative
+ binomial distribution in that the former has zeros coming from one
+ source, whereas the latter has zeros coming from the negative binomial
+ distribution too. The zero-inflated negative binomial distribution
+ is currently not implemented in the \pkg{VGAM} package. Some people
+ call the zero-altered negative binomial a \emph{hurdle} model.
+
+ For one response/species, by default, the three linear/additive
+ predictors are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(p0),
+ log(munb), log(k))^T}. This vector is recycled for multiple species.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+ The \code{fitted.values} slot of the fitted object,
+ which should be extracted by the generic function \code{fitted}, returns
+ the mean \eqn{\mu}{mu} which is given by
+ \deqn{\mu = (1-p_0) \mu_{nb} / [1 - (k/(k+\mu_{nb}))^k].}{%
+ mu = (1-p0) * munb / [1 - (k/(k+munb))^k].}
+
+}
+\references{
+Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
+D. B. (1996)
+Modelling the abundances of rare species: statistical models
+for counts with extra zeros.
+\emph{Ecological Modelling},
+\bold{88},
+297--308.
+
+}
+\section{Warning }{
+ Convergence for this \pkg{VGAM} family function seems to depend quite
+ strongly on providing good initial values.
+
+ Inference obtained from \code{summary.vglm} and \code{summary.vgam}
+ may or may not be correct. In particular, the p-values, standard errors
+ and degrees of freedom may need adjustment. Use simulation on artificial
+ data to check that these are reasonable.
+
+}
+
+\author{ T. W. Yee }
+\note{
+
+ Note this family function allows \eqn{p_0}{p0} to be modelled as
+  a function of the covariates. It is a conditional model, not a mixture
+ model.
+
+ This family function effectively combines
+ \code{\link{posnegbinomial}} and \code{\link{binomialff}} into
+ one family function.
+
+ This family function can handle a multivariate response, e.g., more
+ than one species.
+}
+
+\seealso{
+ \code{\link{posnegbinomial}},
+ \code{\link{binomialff}},
+ \code{\link{rposnegbin}},
+ \code{\link{zipoisson}}.
+}
+
+\examples{
+\dontrun{
+x = runif(n <- 2000)
+p0 = logit(-1 + 2*x, inverse=TRUE)
+y1 = rposnegbin(n, munb=exp(0+2*x), k=exp(1)) # With covariates
+y2 = rposnegbin(n, munb=exp(1+2*x), k=exp(1)) # With covariates
+y1 = ifelse(runif(n) < p0, 0, y1)
+y2 = ifelse(runif(n) < p0, 0, y2)
+table(y1)
+table(y2)
+
+fit = vglm(cbind(y1,y2) ~ x, zanegbinomial, trace=TRUE)
+coef(fit, matrix=TRUE)
+fitted(fit)[1:9,]
+predict(fit)[1:9,]
+}
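+
+# A hedged check (base R): the mean of the positive-NB part equals
+# munb / (1 - (k/(k+munb))^k); munb and k here are arbitrary assumptions.
+munb = 3; k = 2
+w = rnbinom(20000, size=k, mu=munb)
+mean(w[w > 0]) # Empirical
+munb / (1 - (k/(k+munb))^k) # Theoretical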
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
new file mode 100644
index 0000000..1f19e6e
--- /dev/null
+++ b/man/zapoisson.Rd
@@ -0,0 +1,137 @@
+\name{zapoisson}
+\alias{zapoisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Altered Poisson Distribution }
+\description{
+ Fits a zero-altered Poisson distribution based on a conditional
+ model involving a binomial distribution
+ and a positive-Poisson distribution.
+}
+\usage{
+zapoisson(lp0 = "logit", llambda = "loge")
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lp0}{
+ Link function for the parameter \eqn{p_0}{p0}, called \code{p0} here.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{llambda}{
+ Link function for the usual \eqn{\lambda}{lambda} parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+}
+\details{
+ The response \eqn{Y} is zero with probability \eqn{p_0}{p0}, or \eqn{Y}
+ has a positive-Poisson(\eqn{\lambda}{lambda}) distribution with
+ probability \eqn{1-p_0}{1-p0}. Thus \eqn{0 < p_0 < 1}{0 < p0 < 1},
+ and \eqn{p_0}{p0} is modelled as a function of the covariates. The zero-altered
+ Poisson distribution differs from the zero-inflated Poisson distribution
+ in that the former has zeros coming from one source, whereas the latter
+ has zeros coming from the Poisson distribution too. Some people call
+ the zero-altered Poisson a \emph{hurdle} model.
+
+ For one response/species, by default, the two linear/additive predictors
+ are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(p0), log(lambda))^T}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+ The \code{fitted.values} slot of the fitted object,
+ which should be extracted by the generic function \code{fitted}, returns
+ the mean \eqn{\mu}{mu} which is given by
+ \deqn{\mu = (1-p_0) \lambda / [1 - \exp(-\lambda)].}{%
+ mu = (1-p0) * lambda / [1 - exp(-lambda)].}
+
+}
+\references{
+Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
+D. B. (1996)
+Modelling the abundances of rare species: statistical models
+for counts with extra zeros.
+\emph{Ecological Modelling},
+\bold{88},
+297--308.
+
+Angers, J-F. and Biswas, A. (2003)
+A Bayesian analysis of zero-inflated generalized Poisson
+model.
+\emph{Computational Statistics & Data Analysis},
+\bold{42}, 37--46.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\section{Warning }{
+Inference obtained from \code{summary.vglm} and
+\code{summary.vgam} may or may not be correct.
+In particular, the p-values, standard errors and degrees of freedom
+may need adjustment. Use simulation on artificial data to check
+that these are reasonable.
+}
+
+\author{ T. W. Yee }
+\note{
+ There are subtle differences between this family function and
+ \code{\link{yip88}} and \code{\link{zipoisson}}.
+ In particular, \code{\link{zipoisson}} is a
+ \emph{mixture} model whereas \code{zapoisson} and \code{\link{yip88}}
+ are \emph{conditional} models.
+
+ Note this family function allows \eqn{p_0}{p0} to be modelled
+ as a function of the covariates. It can be thought of as an extension
+ of \code{\link{yip88}}, which is also a conditional model but its
+ \eqn{\phi}{phi} parameter is a scalar only.
+
+ This family function effectively combines \code{\link{pospoisson}}
+ and \code{\link{binomialff}} into one family function.
+
+ This family function can handle a multivariate response, e.g.,
+ more than one species.
+}
+
+\seealso{
+ \code{\link{zipoisson}},
+ \code{\link{yip88}},
+ \code{\link{pospoisson}},
+ \code{\link{posnegbinomial}},
+ \code{\link{binomialff}},
+ \code{\link{rpospois}}.
+}
+
+\examples{
+x = runif(n <- 1000)
+p0 = logit(-1 + 1*x, inverse=TRUE)
+lambda = loge(-0.3 + 2*x, inverse=TRUE)
+y = ifelse(runif(n) < p0, 0, rpospois(n, lambda))
+table(y)
+fit = vglm(y ~ x, zapoisson, trace=TRUE)
+fit = vglm(y ~ x, zapoisson, trace=TRUE, crit="c")
+fitted(fit)[1:5]
+predict(fit)[1:5,]
+predict(fit, untransform=TRUE)[1:5,]
+coef(fit, matrix=TRUE)
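+
+# An extra check (assumes the untransformed predictors are p0 and
+# lambda, in that order): the fitted values should equal
+# (1-p0) * lambda / (1 - exp(-lambda))
+eta = predict(fit, untransform=TRUE)
+max(abs((1-eta[,1]) * eta[,2] / (1-exp(-eta[,2])) - fitted(fit))) # ~0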
+
+
+# Another example ------------------------------
+# Data from Angers and Biswas (2003)
+y = 0:7; w = c(182, 41, 12, 2, 2, 0, 0, 1)
+y = y[w>0]
+w = w[w>0]
+yy = rep(y,w)
+fit3 = vglm(yy ~ 1, zapoisson, trace=TRUE, crit="c")
+coef(fit3, matrix=TRUE)
+Coef(fit3) # Estimate of lambda (they get 0.6997 with standard error 0.1520)
+fitted(fit3)[1:5]
+mean(yy) # compare this with fitted(fit3)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/zero.Rd b/man/zero.Rd
new file mode 100644
index 0000000..8320eb6
--- /dev/null
+++ b/man/zero.Rd
@@ -0,0 +1,104 @@
+\name{zero}
+% \alias{zeroarg}
+\alias{zero}
+\title{ The zero Argument in VGAM Family Functions }
+\description{
+ The \code{zero} argument allows users to conveniently
+ model certain linear/additive predictors as intercepts
+ only.
+}
+% \usage{
+% VGAMfamilyFunction(zero=3)
+% }
+\value{
+ Nothing is returned.
+ It is simply a convenient argument for constraining
+ certain linear/additive predictors to be intercepts only.
+
+}
+\section{Warning }{
+ The use of other arguments may conflict with the \code{zero}
+ argument. For example, using \code{constraints} to input constraint
+ matrices may conflict with the \code{zero} argument.
+ Another example is the argument \code{parallel}.
+ In general, users
+ should not assume any particular order of precedence when
+ arguments potentially conflict.
+ Currently, no consistency checking is performed.
+
+ The argument \code{zero} may be renamed in the future to
+ something better.
+
+}
+\section{Side Effects}{
+ The argument creates the appropriate constraint matrices
+ internally.
+}
+\details{
+ Often a certain parameter needs to be modelled simply while other
+ parameters in the model may be more complex, for example, the
+ \eqn{\lambda}{lambda} parameter in LMS-Box-Cox quantile regression
+ should be modelled more simply than its \eqn{\mu}{mu} parameter.
+ Another example is the \eqn{\xi}{xi} parameter in a GEV distribution,
+ which should be modelled more simply than its \eqn{\mu}{mu} parameter.
+ Using the \code{zero} argument allows this to be fitted conveniently
+ without having to input all the constraint matrices explicitly.
+
+ The \code{zero} argument should be assigned an integer vector from the
+ set \{\code{1:M}\} where \code{M} is the number of linear/additive
+ predictors. Full details about constraint matrices can be found in
+ the references.
+
+}
+
+\author{T. W. Yee }
+\note{
+ In all \pkg{VGAM} family functions \code{zero=NULL} means
+ none of the linear/additive predictors are modelled as
+ intercepts-only.
+ Almost all \pkg{VGAM} family functions have \code{zero=NULL}
+ as the default, but there are some exceptions, e.g.,
+ \code{\link{binom2.or}}.
+
+ Typing something like \code{coef(fit, matrix=TRUE)} is a useful
+ way to ensure that the \code{zero} argument has worked as expected.
+
+}
+\references{
+Yee, T. W. and Wild, C. J. (1996)
+Vector generalized additive models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{58}, 481--493.
+
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+\url{http://www.stat.auckland.ac.nz/~yee}
+}
+
+\seealso{
+ \code{\link{constraints}}.
+}
+
+\examples{
+args(multinomial)
+args(binom2.or)
+args(gpd)
+
+
+#LMS quantile regression example
+data(bminz)
+fit = vglm(BMI ~ bs(age, df=4), fam=lms.bcg(zero=c(1,3)),
+ data=bminz, trace=TRUE)
+coef(fit, matrix=TRUE)
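+
+# Inspect the constraint matrices created by zero=c(1,3): the matrix
+# for the bs(age, df=4) term should affect only the second
+# linear/additive predictor
+constraints(fit)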
+}
+\keyword{models}
+\keyword{regression}
+\keyword{programming}
+
+
+%zz Here is a conflict which is not picked up (no internal checking done)
+
+
diff --git a/man/zeta.Rd b/man/zeta.Rd
new file mode 100644
index 0000000..ee73695
--- /dev/null
+++ b/man/zeta.Rd
@@ -0,0 +1,118 @@
+\name{zeta}
+\alias{zeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Riemann's Zeta Function }
+\description{
+ Computes Riemann's zeta function and its first two derivatives.
+
+}
+\usage{
+zeta(x, deriv = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{
+ A complex-valued vector/matrix whose real values must be \eqn{\geq
+ 1}{>= 1}; otherwise, \code{x} may be real. If \code{deriv} is 1
+ or 2 then \code{x} must be real and positive.
+
+ }
+ \item{deriv}{
+ An integer equalling 0, 1 or 2, which is the order of the derivative.
+ The default of 0 means the function itself is returned.
+
+ }
+}
+\details{
+ While the usual definition involves an infinite series, more efficient
+ methods have been devised to compute the value. In particular,
+ this function uses Euler-Maclaurin summation. Theoretically, the
+ zeta function can be computed over the whole complex plane because of
+ analytic continuation.
+
+ The formula used here for analytic continuation is
+ \deqn{\zeta(s) = 2^s \pi^{s-1} \sin(\pi s/2) \Gamma(1-s) \zeta(1-s).}{%
+ zeta(s) = 2^s * pi^(s-1) * sin(pi*s/2) * gamma(1-s) * zeta(1-s).}
+ This is actually one of several formulas, but this one was discovered
+ by Riemann himself and is called the \emph{functional equation}.
+
+}
+
+\section{Warning}{
+ This function has not been fully tested, especially the derivatives.
+ In particular, analytic continuation does not work here for
+ complex \code{x} with \code{Re(x)<1} because currently the
+ \code{\link[base:Special]{gamma}} function does not handle complex
+ arguments.
+
+}
+
+\value{
+ A vector/matrix of computed values.
+
+% The derivative is attached as an attribute zz.
+}
+\references{
+
+Riemann, B. (1859)
+Ueber die Anzahl der Primzahlen unter einer gegebenen Grosse.
+\emph{Monatsberichte der Berliner Akademie, November 1859}.
+
+Edwards, H. M. (1974)
+\emph{Riemann's Zeta Function}.
+Academic Press: New York.
+
+Markman, B. (1965)
+The Riemann zeta function.
+\emph{BIT},
+\bold{5},
+138--141.
+
+Abramowitz, M. and Stegun, I. A. (1972)
+\emph{Handbook of Mathematical Functions with Formulas, Graphs, and
+Mathematical Tables},
+New York: Dover Publications Inc.
+
+}
+\author{ T. W. Yee, with the help of G. J. Tee. }
+\note{
+ Estimation of the parameter of the zeta distribution can be achieved
+ with \code{\link{zetaff}}.
+
+}
+
+\seealso{
+ \code{\link{zetaff}},
+ \code{\link{lerch}},
+ \code{\link[base:Special]{gamma}}.
+}
+\examples{
+zeta(2:10)
+
+\dontrun{
+x = seq(1.2, 10, len=100)
+plot(x, zeta(x), type="l", las=1, xlim=c(-12,10), ylim=c(-1,4), col="red")
+x = seq(-12, 0.8, len=100)
+lines(x, zeta(x), col="red")
+abline(v=0, h=c(0,1), lty="dashed")
+
+# Close up plot
+x = seq(-14, -0.4, len=100)
+plot(x, zeta(x), type="l", las=1, col="red")
+abline(v=0, h=0, lty="dashed")
+
+# Plot of the first derivative
+x = seq(0.04, 0.8, len=100)
+plot(x, zeta(x, deriv=1), type="l", las=1, col="blue", xlim=c(0.04,3), ylim=c(-6,0))
+x = seq(1.2, 3, len=100)
+lines(x, zeta(x, deriv=1), col="blue")
+abline(v=0, h=0, lty="dashed") }
+
+zeta(2) - pi^2 / 6 # Should be zero
+zeta(4) - pi^4 / 90 # Should be zero
+zeta(6) - pi^6 / 945 # Should be 0
+zeta(8) - pi^8 / 9450 # Should be 0
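+
+# A further check via analytic continuation (real x < 1, as used in
+# the plots above); zeta(-1) equals -1/12:
+zeta(-1) + 1/12 # Should be 0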
+# zeta(0, deriv=1) + 0.5 * log(2*pi) # Should be 0
+}
+\keyword{math}
+
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
new file mode 100644
index 0000000..9789188
--- /dev/null
+++ b/man/zetaff.Rd
@@ -0,0 +1,101 @@
+\name{zetaff}
+\alias{zetaff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zeta Distribution Family Function }
+\description{
+ Estimates the parameter of the zeta distribution.
+}
+\usage{
+zetaff(link = "loge", init.p = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Parameter link function applied to the (positive) parameter \eqn{p}.
+ See \code{\link{Links}} for more choices.
+ Choosing \code{\link{loglog}} constrains \eqn{p>1}, but
+ may fail if the maximum likelihood estimate is less than one.
+
+ }
+ \item{init.p}{
+ Optional initial value for the parameter \eqn{p}.
+ The default is to choose an initial value internally.
+ If convergence failure occurs, use this argument to input a value.
+
+ }
+}
+\details{
+In this long-tailed distribution
+the response must be a positive integer.
+The probability function for a response \eqn{Y} is
+\deqn{P(Y=y) = 1/[y^{p+1} \zeta(p+1)],\ \ \ p>0,\ \ \ y=1,2,...}{%
+ P(Y=y) = 1/(y^(p+1) zeta(p+1)), p>0, y=1,2,...}
+where \eqn{\zeta}{zeta} is Riemann's zeta function.
+The parameter \eqn{p} is positive, therefore a log link
+is the default.
+The mean of \eqn{Y} is
+\eqn{\mu = \zeta(p) / \zeta(p+1)}{mu = zeta(p)/zeta(p+1)}
+provided \eqn{p>1}.
+The variance of \eqn{Y} is
+\eqn{\zeta(p-1) / \zeta(p+1) - \mu^2}{zeta(p-1) / zeta(p+1) - mu^2}
+provided \eqn{p>2}.
+
+It appears that good initial values are needed for successful
+convergence. If convergence is not obtained, try several initial
+values ranging from near 0 to about 10 or more.
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+}
+
+%Lindsey, J. K. (1995)
+%\emph{Modelling Frequency and Count Data}.
+%Oxford: Clarendon Press.
+
+\references{
+Johnson, N. L., Kotz, S. and Kemp, A. W. (1993)
+\emph{Univariate Discrete Distributions},
+2nd ed. (Chapter 11, pp.465--471).
+New York: Wiley.
+
+Knight, K. (2000)
+\emph{Mathematical Statistics}.
+Boca Raton: Chapman & Hall/CRC Press.
+}
+\author{ T. W. Yee }
+\note{ The \code{\link{zeta}} function may be used to
+compute values of the zeta function.
+}
+
+\seealso{
+\code{\link{zeta}},
+\code{\link{dzeta}},
+\code{\link{hzeta}},
+\code{\link{zipf}}.
+
+Documentation accompanying the \pkg{VGAM} package at
+\url{http://www.stat.auckland.ac.nz/~yee}
+contains further information and examples.
+
+}
+\examples{
+y = 1:5 # Knight, p.304
+w = c(63, 14, 5, 1, 2)
+fit = vglm(y ~ 1, zetaff, trace=TRUE, wei=w, crit="c")
+(phat = Coef(fit)) # 1.682557
+cbind(dzeta(y, phat) * sum(w), w)
+
+weighted.mean(y, w)
+fitted(fit, mat=FALSE)
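+# The mean formula computed directly (valid since phat > 1 here);
+# this should match fitted(fit):
+zeta(phat) / zeta(phat+1)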
+predict(fit)
+
+# MLE should satisfy the following:
+mean(log(rep(y, w))) + zeta(1+phat, deriv=1)/zeta(1+phat) # Should be 0
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
new file mode 100644
index 0000000..80affc2
--- /dev/null
+++ b/man/zibinomUC.Rd
@@ -0,0 +1,83 @@
+\name{Zibinom}
+\alias{Zibinom}
+\alias{dzibinom}
+\alias{pzibinom}
+\alias{qzibinom}
+\alias{rzibinom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Inflated Binomial Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the zero-inflated binomial distribution with parameter \code{phi}.
+
+}
+\usage{
+dzibinom(x, size, prob, log = FALSE, phi = 0)
+pzibinom(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0)
+qzibinom(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0)
+rzibinom(n, size, prob, phi = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{size}{number of trials. It is the \eqn{N} symbol in the formula
+ given in \code{\link{zibinomial}}. }
+ \item{prob}{probability of success on each trial. }
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{log, log.p, lower.tail}{ Arguments that are passed on to
+ \code{\link[stats:Binomial]{pbinom}}.}
+ \item{phi}{
+ Probability of zero (ignoring the binomial distribution), called
+ \eqn{\phi}{phi}. The default value of \eqn{\phi=0}{phi=0} corresponds
+ to the response having an ordinary binomial distribution.
+
+ }
+}
+\details{
+ The random variable \eqn{Y} equals 0 with probability
+ \eqn{\phi}{phi}; with probability \eqn{1-\phi}{1-phi} it has a
+ \eqn{Binomial(size, prob)}{Binomial(size, prob)} distribution. Thus
+ \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
+ P(Y=0) = phi + (1-phi) * P(W=0)}
+ where \eqn{W} is distributed \eqn{Binomial(size, prob)}{Binomial(size, prob)}.
+}
+\value{
+ \code{dzibinom} gives the density,
+ \code{pzibinom} gives the distribution function,
+ \code{qzibinom} gives the quantile function, and
+ \code{rzibinom} generates random deviates.
+}
+%\references{ }
+\author{ Thomas W. Yee }
+\note{
+ The argument \code{phi} is recycled to the required length, and
+ must have values which lie in the interval [0,1].
+
+}
+
+\seealso{
+ \code{\link{zibinomial}},
+ \code{\link[stats:Binomial]{dbinom}}.
+}
+\examples{
+prob = 0.2
+size = 10
+phi = 0.5
+(i = dzibinom(0:size, size, prob, phi=phi))
+cumsum(i) - pzibinom(0:size, size, prob, phi=phi) # Should be 0s
+table(rzibinom(100, size, prob, phi=phi))
+
+table(qzibinom(runif(100), size, prob, phi=phi))
+round(dzibinom(0:10, size, prob, phi=phi) * 100) # Should be similar
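+
+# Check the formula for P(Y=0) given in the details section:
+dzibinom(0, size, prob, phi=phi) -
+  (phi + (1-phi)*dbinom(0, size, prob)) # Should be 0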
+
+\dontrun{
+x = 0:size
+plot(x, dzibinom(x, size, prob, phi=phi), type="h", ylab="Probability",
+ main=paste("ZIB(", size, ", ", prob, ", phi=", phi, ") (blue) vs",
+ " Binomial(", size, ", ", prob, ") (red & shifted slightly)", sep=""),
+ lwd=2, col="blue", las=1)
+lines(x+0.05, dbinom(x, size, prob), type="h", lwd=2, col="red")
+}
+}
+\keyword{distribution}
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
new file mode 100644
index 0000000..3b503f1
--- /dev/null
+++ b/man/zibinomial.Rd
@@ -0,0 +1,123 @@
+\name{zibinomial}
+\alias{zibinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Inflated Binomial Distribution Family Function }
+\description{
+ Fits a zero-inflated binomial distribution by maximum likelihood
+ estimation.
+
+}
+\usage{
+zibinomial(lphi="logit", link.mu="logit", iphi=NULL, zero=1, mv=FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lphi}{
+ Link function for the parameter \eqn{\phi}{phi}.
+ See \code{\link{Links}} for more choices.
+
+}
+ \item{link.mu}{
+ Link function for the usual binomial probability \eqn{\mu}{mu} parameter.
+ See \code{\link{Links}} for more choices.
+
+}
+\item{iphi}{
+ Optional initial value for \eqn{\phi}{phi}, whose value must lie
+ between 0 and 1. The default is to compute an initial value internally.
+
+}
+\item{zero}{
+ An integer specifying which linear/additive predictor is modelled
+ as intercepts only. If given, the value must be either 1 or 2,
+ and the default is the first. Setting \code{zero=NULL} enables both
+ \eqn{\phi}{phi} and \eqn{\mu}{mu} to be modelled as a function of
+ the explanatory variables.
+
+}
+\item{mv}{
+ Logical. Currently it must be \code{FALSE}, meaning the function does
+ not handle multivariate responses. The argument is retained for compatibility with
+ the same argument in \code{\link{binomialff}}.
+
+}
+}
+\details{
+ This function uses Fisher scoring and is based on
+ \deqn{P(Y=0) = \phi + (1-\phi) (1-\mu)^N,}{%
+ P(Y=0) = phi + (1-phi) * (1-mu)^N,}
+ for \eqn{y=0}, and
+ \deqn{P(Y=y) = (1-\phi) {N \choose Ny} \mu^{Ny} (1-\mu)^{N(1-y)},}{%
+ P(Y=y) = (1-phi) * choose(N,Ny) * mu^(N*y) * (1-mu)^(N*(1-y)),}
+ for \eqn{y=1/N,2/N,\ldots,1}. That is, the response is a sample
+ proportion out of \eqn{N} trials, and the argument \code{size} in
+ \code{\link{rzibinom}} is \eqn{N} here.
+ The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 <
+ phi < 1}. The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) \mu}{E(Y)
+ = (1-phi) * mu} and these are returned as the fitted values.
+ By default, the two linear/additive predictors are \eqn{(logit(\phi),
+ logit(\mu))^T}{(logit(phi), logit(mu))^T}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+%\references{
+
+%}
+\author{ T. W. Yee }
+\note{
+ The response variable must have one of the formats described by
+ \code{\link{binomialff}}, e.g., a factor or two column matrix or a
+ vector of sample proportions with the \code{weights} argument
+ specifying the values of \eqn{N}.
+
+ To work well, one needs \eqn{N>1} and \eqn{\mu>0}{mu>0}, i.e.,
+ the larger \eqn{N} and \eqn{\mu}{mu} are, the better.
+
+ For intercept-models and constant \eqn{N} over the \eqn{n} observations,
+ the \code{misc} slot has a component called \code{p0} which is the
+ estimate of \eqn{P(Y=0)}. This family function currently cannot handle
+ a multivariate response (only \code{mv=FALSE} can be handled).
+
+% The zero-\emph{deflated} binomial distribution cannot be handled with
+% this family function. It can be handled with the zero-altered binomial
+% distribution; see \code{\link{zabinomial}}.
+
+}
+
+\section{Warning }{
+ Numerical problems can occur.
+ Half-stepping is not uncommon.
+ If failure to converge occurs, make use of the argument \code{iphi}.
+
+}
+
+\seealso{
+ \code{\link{rzibinom}},
+ \code{\link{binomialff}},
+ \code{\link{posbinomial}},
+ \code{\link[stats:Binomial]{rbinom}}.
+}
+\examples{
+size = 10 # number of trials; N in the notation above
+n = 200
+phi = 0.50
+mubin = 0.3 # Mean of an ordinary binomial distribution
+sv = rep(size, len=n)
+y = rzibinom(n=n, size=sv, prob=mubin, phi=phi) / sv # A proportion
+table(y)
+fit = vglm(y ~ 1, zibinomial, weight=sv, trace=TRUE)
+coef(fit, matrix=TRUE)
+Coef(fit)
+fit@misc$p0 # Estimate of P(Y=0)
+fitted(fit)[1:4,]
+mean(y) # Compare this with fitted(fit)
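+# The fitted mean via E(Y) = (1-phi) * mu; this assumes Coef(fit)
+# returns phi then mu, matching the order of the linear predictors:
+(1 - Coef(fit)[1]) * Coef(fit)[2] # Should match fitted(fit)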
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/zipf.Rd b/man/zipf.Rd
new file mode 100644
index 0000000..b2db490
--- /dev/null
+++ b/man/zipf.Rd
@@ -0,0 +1,89 @@
+\name{zipf}
+\alias{zipf}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zipf Distribution Family Function }
+\description{
+ Estimates the parameter of the Zipf distribution.
+
+}
+\usage{
+zipf(N=NULL, link="loge", init.s=NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{N}{
+ Number of elements, an integer satisfying \code{1 < N < Inf}.
+ The default is to use the maximum value of the response.
+ If given, \code{N} must be no less than the largest response value.
+ If \code{N=Inf} and \eqn{s>1} then this is the zeta distribution
+ (use \code{\link{zetaff}} instead).
+
+ }
+ \item{link}{
+ Parameter link function applied to the (positive) parameter \eqn{s}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{init.s}{
+ Optional initial value for the parameter \eqn{s}.
+ The default is to choose an initial value internally.
+ If convergence failure occurs, use this argument to input a value.
+
+ }
+}
+\details{
+ The probability function for a response \eqn{Y} is
+ \deqn{P(Y=y) = y^{-s} / \sum_{i=1}^N i^{-s},\ \ \ s>0,\ \ \ y=1,2,\ldots,N,}{%
+ P(Y=y) = (y^(-s)) / sum((1:N)^(-s)), s>0, y=1,2,...,N,}
+ where \eqn{s} is the exponent characterizing the distribution.
+ The mean of \eqn{Y}, which is returned as the fitted values,
+ is \eqn{\mu = H_{N,s-1} / H_{N,s}}{mu = H(N,s-1) / H(N,s)}
+ where \eqn{H_{n,m}= \sum_{i=1}^n i^{-m}}{H(n,m)=sum((1:n)^(-m))} is the
+ \eqn{n}th generalized harmonic number.
+
+ Zipf's law is an experimental law which is often applied to the study
+ of the frequency of words in a corpus of natural language utterances.
+ It states that the frequency of any word is inversely proportional to
+ its rank in the frequency table.
+ For example, "the" and "of" are the two most common words, and
+ Zipf's law states that "the" is twice as common as "of".
+ Many other natural phenomena conform to Zipf's law.
+
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+}
+
+\references{
+Johnson, N. L., Kotz, S. and Kemp, A. W. (1993)
+\emph{Univariate Discrete Distributions},
+2nd ed. (Chapter 11, pp.465--471).
+New York: Wiley.
+
+}
+\author{ T. W. Yee }
+\note{
+ Upon convergence, \code{N} is stored as \code{@misc$N}.
+
+}
+
+\seealso{
+ \code{\link{dzipf}},
+ \code{\link{zetaff}}.
+
+}
+\examples{
+y = 1:5; w = c(63, 14, 5, 1, 2)
+fit = vglm(y ~ 1, zipf, trace=TRUE, weight=w)
+fit = vglm(y ~ 1, zipf(link=identity, init=3.4), tra=TRUE, weight=w, cri="c")
+fit@misc$N
+(shat = Coef(fit))
+weighted.mean(y, w)
+fitted(fit, mat=FALSE)
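+# The mean formula computed directly: H(N, shat-1) / H(N, shat), where
+# H is the generalized harmonic number; this should match fitted(fit)
+N = fit@misc$N
+sum((1:N)^(1-shat)) / sum((1:N)^(-shat))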
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
new file mode 100644
index 0000000..6344d03
--- /dev/null
+++ b/man/zipfUC.Rd
@@ -0,0 +1,60 @@
+\name{Zipf}
+\alias{Zipf}
+\alias{dzipf}
+\alias{pzipf}
+%\alias{qzipf}
+%\alias{rzipf}
+\title{The Zipf Distribution}
+\description{
+ Density and cumulative distribution function
+ for the Zipf distribution.
+
+}
+\usage{
+dzipf(x, N, s)
+pzipf(q, N, s)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+% \item{p}{vector of probabilities.}
+% \item{n}{number of observations.
+% Must be a positive integer of length 1.}
+ \item{N, s }{
+ the number of elements, and the exponent characterizing the
+ distribution.
+ See \code{\link{zipf}} for more details.
+
+ }
+}
+\value{
+ \code{dzipf} gives the density, and
+ \code{pzipf} gives the cumulative distribution function.
+% \code{qzipf} gives the quantile function, and
+% \code{rzipf} generates random deviates.
+}
+\author{ T. W. Yee }
+\details{
+ This is a finite version of the zeta distribution.
+ See \code{\link{zipf}} for more details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{zipf}}.
+
+}
+\examples{
+\dontrun{
+N = 10; s=0.5; y = 1:N
+proby = dzipf(y, N=N, s=s)
+plot(y, proby, type="h", col="blue", ylab="P[Y=y]", ylim=c(0,0.2),
+ main=paste("Zipf(N=",N,", s=",s,")", sep=""))
+sum(proby) # Should be 1
+max(abs(cumsum(proby) - pzipf(y, N=N, s=s))) # Should be 0
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/zipoisUC.Rd b/man/zipoisUC.Rd
new file mode 100644
index 0000000..3836fe3
--- /dev/null
+++ b/man/zipoisUC.Rd
@@ -0,0 +1,78 @@
+\name{Zipois}
+\alias{Zipois}
+\alias{dzipois}
+\alias{pzipois}
+\alias{qzipois}
+\alias{rzipois}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Inflated Poisson Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the zero-inflated Poisson distribution with parameter \code{phi}.
+
+}
+\usage{
+dzipois(x, lambda, phi = 0)
+pzipois(q, lambda, phi = 0)
+qzipois(p, lambda, phi = 0)
+rzipois(n, lambda, phi = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. Must be a single positive integer. }
+ \item{lambda}{ Vector of positive means. }
+ \item{phi}{
+ Probability of zero (ignoring the Poisson distribution), called
+ \eqn{\phi}{phi}. The default value of \eqn{\phi=0}{phi=0} corresponds
+ to the response having an ordinary Poisson distribution.
+
+ }
+}
+\details{
+ The random variable \eqn{Y} equals 0 with probability
+ \eqn{\phi}{phi}; with probability \eqn{1-\phi}{1-phi} it has a
+ \eqn{Poisson(\lambda)}{Poisson(lambda)} distribution. Thus
+ \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
+ P(Y=0) = phi + (1-phi) * P(W=0)}
+ where \eqn{W} is distributed \eqn{Poisson(\lambda)}{Poisson(lambda)}.
+}
+\value{
+ \code{dzipois} gives the density,
+ \code{pzipois} gives the distribution function,
+ \code{qzipois} gives the quantile function, and
+ \code{rzipois} generates random deviates.
+}
+%\references{ }
+\author{ Thomas W. Yee }
+\note{
+ The argument \code{phi} is recycled to the required length, and
+ must have values which lie in the interval [0,1].
+
+}
+
+\seealso{
+ \code{\link[stats:Poisson]{dpois}},
+ \code{\link{zipoisson}}.
+}
+\examples{
+lambda = 4
+phi = 0.8
+(i = dzipois(0:7, lambda, phi))
+cumsum(i) - pzipois(0:7, lambda, phi) # Should be 0s
+table(rzipois(100, lambda, phi))
+
+table(qzipois(runif(100), lambda, phi))
+round(dzipois(0:10, lambda, phi) * 100) # Should be similar
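+
+# Check the formula for P(Y=0) given in the details section:
+dzipois(0, lambda, phi) - (phi + (1-phi)*dpois(0, lambda)) # Should be 0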
+
+\dontrun{
+x = 0:10
+plot(x, dzipois(x, lambda, phi), type="h", ylab="Probability",
+ main=paste("ZIP(", lambda, ", phi=", phi, ") (blue) vs",
+ " Poisson(", lambda, ") (red & shifted slightly)", sep=""),
+ lwd=2, col="blue", las=1)
+lines(x+0.05, dpois(x, lambda), type="h", lwd=2, col="red")
+}
+}
+\keyword{distribution}
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
new file mode 100644
index 0000000..1103bf4
--- /dev/null
+++ b/man/zipoisson.Rd
@@ -0,0 +1,132 @@
+\name{zipoisson}
+\alias{zipoisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Inflated Poisson Distribution Family Function }
+\description{
+ Fits a zero-inflated Poisson distribution.
+}
+\usage{
+zipoisson(lphi="logit", llambda = "loge", iphi = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lphi}{
+ Link function for the parameter \eqn{\phi}{phi}.
+ See \code{\link{Links}} for more choices.
+
+}
+ \item{llambda}{
+ Link function for the usual \eqn{\lambda}{lambda} parameter.
+ See \code{\link{Links}} for more choices.
+
+}
+\item{iphi}{
+ Optional initial value for \eqn{\phi}{phi}, whose value must lie
+ between 0 and 1. The default is to compute an initial value internally.
+
+}
+\item{zero}{
+ An integer specifying which linear/additive predictor is modelled as
+ intercepts only. If given, the value must be either 1 or 2; the
+ default, \code{NULL}, constrains neither. Setting \code{zero=1} makes
+ \eqn{\phi}{phi} a single (intercept-only) parameter.
+
+}
+}
+\details{
+ This function uses Fisher scoring and is based on
+ \deqn{P(Y=0) = \phi + (1-\phi) \exp(-\lambda),}{%
+ P(Y=0) = phi + (1-phi) * exp(-lambda),}
+ and for \eqn{y=1,2,\ldots},
+ \deqn{P(Y=y) = (1-\phi) \exp(-\lambda) \lambda^y / y!.}{%
+ P(Y=y) = (1-phi) * exp(-lambda) * lambda^y / y!.}
+ The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
+ The mean of \eqn{Y} is \eqn{(1-\phi) \lambda}{(1-phi)*lambda} and these
+ are returned as the fitted values. By default, the two linear/additive
+ predictors are \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi),
+ log(lambda))^T}.
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+}
+\references{
+ Thas, O. and Rayner, J. C. W. (2005)
+ Smooth tests for the zero-inflated Poisson distribution.
+ \emph{Biometrics},
+ \bold{61}, 808--815.
+
+ Data: Angers, J-F. and Biswas, A. (2003)
+ A Bayesian analysis of zero-inflated generalized Poisson model.
+ \emph{Computational Statistics & Data Analysis},
+ \bold{42}, 37--46.
+
+}
+\author{ T. W. Yee }
+\note{
+ For intercept-models, the \code{misc} slot has a component called
+ \code{p0} which is the estimate of \eqn{P(Y=0)}. Note that \eqn{P(Y=0)}
+ is not the parameter \eqn{\phi}{phi}. This family function currently
+ cannot handle a multivariate response.
+
+ This family function is now recommended over \code{\link{yip88}}.
+
+ The zero-\emph{deflated} Poisson distribution cannot be handled with
+ this family function. It can be handled with the zero-altered Poisson
+ distribution; see \code{\link{zapoisson}}.
+
+}
+
+\section{Warning }{
+ Numerical problems can occur.
+ Half-stepping is not uncommon.
+ If failure to converge occurs, try using \code{iphi} and/or
+ \code{zero=1} if there are explanatory variables.
+
+}
+
+\seealso{
+ \code{\link{zapoisson}},
+ \code{\link{Zipois}},
+ \code{\link{yip88}},
+ \code{\link[stats:Poisson]{rpois}}.
+}
+\examples{
+n = 5000
+x = runif(n)
+phi = logit(-0.5 + 1*x, inverse=TRUE)
+lambda = loge(0.5 + 2*x, inverse=TRUE)
+y = rzipois(n, lambda, phi)
+table(y)
+fit = vglm(y ~ x, zipoisson, trace=TRUE)
+coef(fit, matrix=TRUE) # These should agree with the above values
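+
+# An extra check (assumes the untransformed predictors are phi and
+# lambda, in that order): fitted values should equal (1-phi) * lambda
+eta = predict(fit, untransform=TRUE)
+max(abs((1-eta[,1]) * eta[,2] - fitted(fit))) # Should be about 0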
+
+
+# Another example: data from McKendrick (1926).
+y = 0:4 # Number of cholera cases per household in an Indian village
+w = c(168, 32, 16, 6, 1) # Frequencies; there are 223=sum(w) households
+fit = vglm(y ~ 1, zipoisson(iphi=0.3), wei=w, trace=TRUE)
+coef(fit, matrix=TRUE)
+cbind(actual=w, fitted=
+ dzipois(y, lambda=Coef(fit)[2], phi=Coef(fit)[1]) * sum(w))
+
+
+# Another example: data from Angers and Biswas (2003)
+y = 0:7
+w = c(182, 41, 12, 2, 2, 0, 0, 1)
+y = y[w>0]
+w = w[w>0]
+fit = vglm(y ~ 1, zipoisson(lphi=probit, iphi=0.3), wei=w, tra=TRUE)
+fit@misc$prob0 # Estimate of P(Y=0)
+coef(fit, matrix=TRUE)
+Coef(fit) # Estimate of phi and lambda
+fitted(fit)
+weighted.mean(y,w) # Compare this with fitted(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/src/cqof.f b/src/cqof.f
new file mode 100644
index 0000000..ae2a0ad
--- /dev/null
+++ b/src/cqof.f
@@ -0,0 +1,2057 @@
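+c     Annotation (inferred from the code; identifiers in this file are
+c     machine-generated): nw22ca returns in ghry8z the standard normal
+c     cdf of te4qac, via piecewise rational approximations to erf(y)
+c     with y = te4qac/sqrt(2).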
+ subroutine nw22ca(te4qac, ghry8z)
+ implicit logical (a-z)
+ double precision te4qac, ghry8z
+ integer sn
+ double precision r1, r2, y, y2, y3, y4, y5, y6, y7
+ double precision erf, yhg7o7, z, z2, z3, z4
+ double precision gln1k1, hqt8l8, a66epi, p10,p11,p12,p13, q10,q11,
+ &q12,q13
+ double precision p20,p21,p22,p23,p24,p25,p26,p27
+ double precision q20,q21,q22,q23,q24,q25,q26,q27
+ double precision p30,p31,p32,p33,p34
+ double precision q30,q31,q32,q33,q34
+ gln1k1 = 1.414213562373095049d0
+ hqt8l8 = 1.772453850905516027d0
+ a66epi = 20.0d0
+ p10 = 242.66795523053175d0
+ p11 = 21.979261618294152d0
+ p12 = 6.9963834886191355d0
+ p13 = -.035609843701815385d0
+ q10 = 215.05887586986120d0
+ q11 = 91.164905404514901d0
+ q12 = 15.082797630407787d0
+ q13 = 1.0d0
+ p20 = 300.4592610201616005d0
+ p21 = 451.9189537118729422d0
+ p22 = 339.3208167343436870d0
+ p23 = 152.9892850469404039d0
+ p24 = 43.16222722205673530d0
+ p25 = 7.211758250883093659d0
+ p26 = .5641955174789739711d0
+ p27 = -.0000001368648573827167067d0
+ q20 = 300.4592609569832933d0
+ q21 = 790.9509253278980272d0
+ q22 = 931.3540948506096211d0
+ q23 = 638.9802644656311665d0
+ q24 = 277.5854447439876434d0
+ q25 = 77.00015293522947295d0
+ q26 = 12.78272731962942351d0
+ q27 = 1.0d0
+ p30 = -.00299610707703542174d0
+ p31 = -.0494730910623250734d0
+ p32 = -.226956593539686930d0
+ p33 = -.278661308609647788d0
+ p34 = -.0223192459734184686d0
+ q30 = .0106209230528467918d0
+ q31 = .191308926107829841d0
+ q32 = 1.05167510706793207d0
+ q33 = 1.98733201817135256d0
+ q34 = 1.0d0
+ if(.not.(te4qac .lt. -a66epi))goto 23000
+ ghry8z = 2.753624d-89
+ return
+23000 continue
+ if(.not.(te4qac .gt. a66epi))goto 23002
+ ghry8z = 1.0d0
+ return
+23002 continue
+ y = te4qac / gln1k1
+ if(.not.(y .lt. 0.0d0))goto 23004
+ y = -y
+ sn = -1
+ goto 23005
+23004 continue
+ sn = 1
+23005 continue
+ y2 = y * y
+ y4 = y2 * y2
+ y6 = y4 * y2
+ if(.not.(y .lt. 0.46875d0))goto 23006
+ r1 = p10 + p11 * y2 + p12 * y4 + p13 * y6
+ r2 = q10 + q11 * y2 + q12 * y4 + q13 * y6
+ erf = y * r1 / r2
+ if(.not.(sn .eq. 1))goto 23008
+ ghry8z = 0.5d0 + 0.5*erf
+ goto 23009
+23008 continue
+ ghry8z = 0.5d0 - 0.5*erf
+23009 continue
+ goto 23007
+23006 continue
+ if(.not.(y .lt. 4.0d0))goto 23010
+ y3 = y2 * y
+ y5 = y4 * y
+ y7 = y6 * y
+ r1 = p20 + p21 * y + p22 * y2 + p23 * y3 + p24 * y4 + p25 * y5 +
+ &p26 * y6 + p27 * y7
+ r2 = q20 + q21 * y + q22 * y2 + q23 * y3 + q24 * y4 + q25 * y5 +
+ &q26 * y6 + q27 * y7
+ yhg7o7 = dexp(-y2) * r1 / r2
+ if(.not.(sn .eq. 1))goto 23012
+ ghry8z = 1.0 - 0.5*yhg7o7
+ goto 23013
+23012 continue
+ ghry8z = 0.5*yhg7o7
+23013 continue
+ goto 23011
+23010 continue
+ z = y4
+ z2 = z * z
+ z3 = z2 * z
+ z4 = z2 * z2
+ r1 = p30 + p31 * z + p32 * z2 + p33 * z3 + p34 * z4
+ r2 = q30 + q31 * z + q32 * z2 + q33 * z3 + q34 * z4
+ yhg7o7 = (dexp(-y2)/y) * (1.0 / hqt8l8 + r1 / (r2 * y2))
+ if(.not.(sn .eq. 1))goto 23014
+ ghry8z = 1.0d0 - 0.5*yhg7o7
+ goto 23015
+23014 continue
+ ghry8z = 0.5*yhg7o7
+23015 continue
+23011 continue
+23007 continue
+ return
+ end
+ subroutine pnm1ow(te4qac, ghry8z, nfiumb4)
+ implicit logical (a-z)
+ integer nfiumb4, w3gohz
+ double precision te4qac(nfiumb4), ghry8z(nfiumb4)
+ do 23016 w3gohz=1,nfiumb4
+ call nw22ca(te4qac(w3gohz), ghry8z(w3gohz))
+23016 continue
+ return
+ end
+ subroutine q4cgho(te4qac, dwgkz6, ghry8z)
+ implicit logical (a-z)
+ double precision te4qac, dwgkz6, ghry8z
+ double precision mu4ygka
+ if(.not.(1.0d0 - te4qac .ge. 1.0d0))goto 23018
+ ghry8z = -8.12589d0 / (3.0*dsqrt(dwgkz6))
+ goto 23019
+23018 continue
+ if(.not.(1.0d0 - te4qac .le. 0.0d0))goto 23020
+ ghry8z = 8.12589d0 / (3.0*dsqrt(dwgkz6))
+ goto 23021
+23020 continue
+ call nw22ca(1.0d0-te4qac, mu4ygka)
+ mu4ygka = mu4ygka / (3.0*dsqrt(dwgkz6))
+ ghry8z = -3.0d0 * dlog(1.0d0 + mu4ygka)
+23021 continue
+23019 continue
+ return
+ end
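+c     wgf0al: complementary log-log transform, log(-log(1 - te4qac)),
+c     with the result clamped at the boundaries te4qac <= 0 and >= 1.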
+ subroutine wgf0al(te4qac, ghry8z)
+ implicit logical (a-z)
+ double precision te4qac, ghry8z
+ if(.not.(1.0d0 - te4qac .ge. 1.0d0))goto 23022
+ ghry8z = -35.0d0
+ goto 23023
+23022 continue
+ if(.not.(1.0d0 - te4qac .le. 0.0d0))goto 23024
+ ghry8z = 3.542106d0
+ goto 23025
+23024 continue
+ ghry8z = dlog(-dlog(1.0d0 - te4qac))
+23025 continue
+23023 continue
+ return
+ end
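+c     u10e3o: logit transform, log(te4qac / (1 - te4qac)), with the
+c     result clamped at the boundaries te4qac <= 0 and >= 1.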
+ subroutine u10e3o(te4qac, ghry8z)
+ implicit logical (a-z)
+ double precision te4qac, ghry8z
+ if(.not.(1.0d0 - te4qac .ge. 1.0d0))goto 23026
+ ghry8z = -34.53958d0
+ goto 23027
+23026 continue
+ if(.not.(1.0d0 - te4qac .le. 0.0d0))goto 23028
+ ghry8z = 34.53958d0
+ goto 23029
+23028 continue
+ ghry8z = dlog(te4qac / (1.0d0 - te4qac))
+23029 continue
+23027 continue
+ return
+ end
+ subroutine pjw1l(ur73jo, lq8reh, go0l1q, nfiumb4, lku8xq, vi231l,
+ &zxiwf1, dyt0pg, lir0o1, zvxw1l, h3mrfq, q121lc)
+ implicit logical (a-z)
+ integer nfiumb4, lku8xq, vi231l, zxiwf1, dyt0pg, lir0o1, zvxw1l,
+ &h3mrfq
+ double precision ur73jo(vi231l,zxiwf1), lq8reh(zxiwf1), go0l1q(
+ &lku8xq,nfiumb4), q121lc(nfiumb4)
+ integer w3gohz, d9rjek, nd6mep, opf6cv, c3qxjo
+ double precision nqvu3e
+ if(.not.(dyt0pg .eq. 1))goto 23030
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23032
+ c3qxjo = 2*lir0o1-1
+ do 23034 w3gohz=1,nfiumb4
+ nqvu3e = 0.0d0
+ do 23036 nd6mep=1,zxiwf1
+ nqvu3e = nqvu3e + ur73jo(2*w3gohz-1,nd6mep) * lq8reh(nd6mep)
+23036 continue
+ go0l1q(c3qxjo,w3gohz) = nqvu3e
+23034 continue
+ c3qxjo = 2*lir0o1
+ do 23038 w3gohz=1,nfiumb4
+ nqvu3e = 0.0d0
+ do 23040 nd6mep=1,zxiwf1
+ nqvu3e = nqvu3e + ur73jo(2*w3gohz ,nd6mep) * lq8reh(nd6mep)
+23040 continue
+ go0l1q(c3qxjo,w3gohz) = nqvu3e
+23038 continue
+ goto 23033
+23032 continue
+ do 23042 w3gohz=1,vi231l
+ nqvu3e = 0.0d0
+ do 23044 nd6mep=1,zxiwf1
+ nqvu3e = nqvu3e + ur73jo(w3gohz,nd6mep) * lq8reh(nd6mep)
+23044 continue
+ go0l1q(lir0o1,w3gohz) = nqvu3e
+23042 continue
+23033 continue
+ goto 23031
+23030 continue
+ opf6cv = 1
+ do 23046 w3gohz=1,nfiumb4
+ do 23048 d9rjek=1,lku8xq
+ nqvu3e = 0.0d0
+ do 23050 nd6mep=1,zxiwf1
+ nqvu3e = nqvu3e + ur73jo(opf6cv,nd6mep) * lq8reh(nd6mep)
+23050 continue
+ opf6cv = opf6cv + 1
+ go0l1q(d9rjek,w3gohz) = nqvu3e
+23048 continue
+23046 continue
+23031 continue
+ if(.not.(h3mrfq .eq. 1))goto 23052
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23054
+ do 23056 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = go0l1q(2*lir0o1-1,w3gohz) + q121lc(
+ &w3gohz)
+23056 continue
+ goto 23055
+23054 continue
+ do 23058 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = go0l1q(lir0o1,w3gohz) + q121lc(w3gohz)
+23058 continue
+23055 continue
+23052 continue
+ return
+ end
+ subroutine o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ & lir0o1)
+ implicit logical (a-z)
+ integer nfiumb4, lku8xq, aqk377, zvxw1l, lir0o1
+ double precision go0l1q(lku8xq,nfiumb4), w5poyv(aqk377,nfiumb4)
+ integer w3gohz, d9rjek
+ double precision xkwp2m0
+ if(.not.(lir0o1 .eq. 0))goto 23060
+ if(.not.(zvxw1l .eq. 1))goto 23062
+ do 23064 w3gohz=1,nfiumb4
+ do 23066 d9rjek=1,lku8xq
+ xkwp2m0 = dexp(go0l1q(d9rjek,w3gohz))
+ w5poyv(d9rjek,w3gohz) = xkwp2m0 / (1.0d0 + xkwp2m0)
+23066 continue
+23064 continue
+23062 continue
+ if(.not.(zvxw1l .eq. 2))goto 23068
+ do 23070 w3gohz=1,nfiumb4
+ do 23072 d9rjek=1,lku8xq
+ w5poyv(d9rjek,w3gohz) = dexp(go0l1q(d9rjek,w3gohz))
+23072 continue
+23070 continue
+23068 continue
+ if(.not.(zvxw1l .eq. 4))goto 23074
+ do 23076 w3gohz=1,nfiumb4
+ do 23078 d9rjek=1,lku8xq
+ w5poyv(d9rjek,w3gohz) = 1.0d0-dexp(-dexp(go0l1q(d9rjek,w3gohz)))
+23078 continue
+23076 continue
+23074 continue
+ if(.not.(zvxw1l .eq. 5))goto 23080
+ do 23082 w3gohz=1,nfiumb4
+ do 23084 d9rjek=1,aqk377
+ w5poyv(d9rjek,w3gohz) = dexp(go0l1q(2*d9rjek-1,w3gohz))
+23084 continue
+23082 continue
+23080 continue
+ if(.not.(zvxw1l .eq. 3))goto 23086
+ do 23088 w3gohz=1,nfiumb4
+ do 23090 d9rjek=1,aqk377
+ w5poyv(d9rjek,w3gohz) = dexp(go0l1q(2*d9rjek-1,w3gohz))
+23090 continue
+23088 continue
+23086 continue
+ if(.not.(zvxw1l .eq. 8))goto 23092
+ do 23094 w3gohz=1,nfiumb4
+ do 23096 d9rjek=1,lku8xq
+ w5poyv(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz)
+23096 continue
+23094 continue
+23092 continue
+ goto 23061
+23060 continue
+ if(.not.(zvxw1l .eq. 1))goto 23098
+ do 23100 w3gohz=1,nfiumb4
+ xkwp2m0 = dexp(go0l1q(lir0o1,w3gohz))
+ w5poyv(lir0o1,w3gohz) = xkwp2m0 / (1.0d0 + xkwp2m0)
+23100 continue
+23098 continue
+ if(.not.(zvxw1l .eq. 2))goto 23102
+ do 23104 w3gohz=1,nfiumb4
+ w5poyv(lir0o1,w3gohz) = dexp(go0l1q(lir0o1,w3gohz))
+23104 continue
+23102 continue
+ if(.not.(zvxw1l .eq. 4))goto 23106
+ do 23108 w3gohz=1,nfiumb4
+ w5poyv(lir0o1,w3gohz) = 1.0d0 - dexp(-dexp(go0l1q(lir0o1,w3gohz)))
+23108 continue
+23106 continue
+ if(.not.(zvxw1l .eq. 5))goto 23110
+ do 23112 w3gohz=1,nfiumb4
+ w5poyv(lir0o1,w3gohz) = dexp(go0l1q(2*lir0o1-1,w3gohz))
+23112 continue
+23110 continue
+ if(.not.(zvxw1l .eq. 3))goto 23114
+ do 23116 w3gohz=1,nfiumb4
+ w5poyv(lir0o1,w3gohz) = dexp(go0l1q(2*lir0o1-1,w3gohz))
+23116 continue
+23114 continue
+ if(.not.(zvxw1l .eq. 8))goto 23118
+ do 23120 w3gohz=1,nfiumb4
+ w5poyv(lir0o1,w3gohz) = go0l1q(lir0o1,w3gohz)
+23120 continue
+23118 continue
+23061 continue
+ return
+ end
+ subroutine kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ & aqk377, xhe4cg, go0l1q, dev, lir0o1, fiumb4, wlkaa3, cll)
+ implicit logical (a-z)
+ integer zvxw1l, nfiumb4, lku8xq, aqk377, xhe4cg, lir0o1, cll
+ double precision jmwo0z(nfiumb4, aqk377), w8xfic(nfiumb4, xhe4cg),
+ & w5poyv(aqk377, nfiumb4), go0l1q(lku8xq,nfiumb4), dev, fiumb4,
+ &wlkaa3
+ integer w3gohz, d9rjek
+ double precision qe3jcd, ue1phr, mu4ygk, ig5cma, j0izmn, smu,
+ &mqs3rp, mdk7tp, oesul2
+ double precision gq815b, aho01l, dh3rio
+ logical xyiu19
+ qe3jcd = 0.0d0
+ if(.not.(lir0o1 .eq. 0))goto 23122
+ if(.not.((zvxw1l .eq. 1) .or. (zvxw1l .eq. 4)))goto 23124
+ do 23126 d9rjek=1,lku8xq
+ do 23128 w3gohz=1,nfiumb4
+ if(.not.(jmwo0z(w3gohz,d9rjek) .gt. 0.0d0))goto 23130
+ mdk7tp = jmwo0z(w3gohz,d9rjek) * dlog(jmwo0z(w3gohz,d9rjek))
+ goto 23131
+23130 continue
+ mdk7tp = 0.0d0
+23131 continue
+ if(.not.(jmwo0z(w3gohz,d9rjek) .lt. 1.0d0))goto 23132
+ mdk7tp = mdk7tp + (1.0d0 - jmwo0z(w3gohz,d9rjek)) * dlog(1.0d0 -
+ &jmwo0z(w3gohz,d9rjek))
+23132 continue
+ mu4ygk = w5poyv(d9rjek,w3gohz) * (1.0d0 - w5poyv(d9rjek,w3gohz))
+ if(.not.(mu4ygk .lt. fiumb4))goto 23134
+ smu = w5poyv(d9rjek,w3gohz)
+ if(.not.(smu .lt. fiumb4))goto 23136
+ oesul2 = jmwo0z(w3gohz,d9rjek) * wlkaa3
+ goto 23137
+23136 continue
+ oesul2 = jmwo0z(w3gohz,d9rjek) * dlog(smu)
+23137 continue
+ mqs3rp = 1.0d0 - smu
+ if(.not.(mqs3rp .lt. fiumb4))goto 23138
+ oesul2 = oesul2 + (1.0d0 - jmwo0z(w3gohz,d9rjek)) * wlkaa3
+ goto 23139
+23138 continue
+ oesul2 = oesul2 + (1.0d0 - jmwo0z(w3gohz,d9rjek)) * dlog(mqs3rp)
+23139 continue
+ goto 23135
+23134 continue
+ oesul2 = (jmwo0z(w3gohz,d9rjek) * dlog(w5poyv(d9rjek,w3gohz)) + (
+ &1.0d0 - jmwo0z(w3gohz,d9rjek)) * dlog(1.0d0 - w5poyv(d9rjek,
+ &w3gohz)))
+23135 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * (mdk7tp - oesul2)
+23128 continue
+23126 continue
+23124 continue
+ if(.not.(zvxw1l .eq. 2))goto 23140
+ do 23142 d9rjek=1,lku8xq
+ do 23144 w3gohz=1,nfiumb4
+ if(.not.(jmwo0z(w3gohz,d9rjek) .gt. 0.0d0))goto 23146
+ mu4ygk = w5poyv(d9rjek,w3gohz) - jmwo0z(w3gohz,d9rjek) + jmwo0z(
+ &w3gohz,d9rjek) * dlog(jmwo0z(w3gohz,d9rjek) / w5poyv(d9rjek,
+ &w3gohz))
+ goto 23147
+23146 continue
+ mu4ygk = w5poyv(d9rjek,w3gohz) - jmwo0z(w3gohz,d9rjek)
+23147 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk
+23144 continue
+23142 continue
+23140 continue
+ if(.not.(zvxw1l .eq. 5))goto 23148
+ do 23150 d9rjek=1,aqk377
+ do 23152 w3gohz=1,nfiumb4
+ dh3rio = dexp(go0l1q(2*d9rjek,w3gohz))
+ call atez9d(dh3rio, ig5cma)
+ if(.not.(jmwo0z(w3gohz,d9rjek) .gt. 0.0d0))goto 23154
+ mu4ygk = (dh3rio - 1.0d0) * dlog(jmwo0z(w3gohz,d9rjek)) + (dlog(
+ &dh3rio)-jmwo0z(w3gohz,d9rjek) / w5poyv(d9rjek,w3gohz) - dlog(
+ &w5poyv(d9rjek,w3gohz)) ) * dh3rio - ig5cma
+ goto 23155
+23154 continue
+ mu4ygk = -1000.0d0
+23155 continue
+ mu4ygk = -mu4ygk
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk
+23152 continue
+23150 continue
+23148 continue
+ if(.not.(zvxw1l .eq. 3))goto 23156
+ if(.not.(cll .eq. 0))goto 23158
+ aho01l = 34.0d0
+ do 23160 d9rjek=1,aqk377
+ do 23162 w3gohz=1,nfiumb4
+ if(.not.(go0l1q(2*d9rjek,w3gohz) .gt. aho01l))goto 23164
+ gq815b = dexp(aho01l)
+ xyiu19 = .true.
+ goto 23165
+23164 continue
+ if(.not.(go0l1q(2*d9rjek,w3gohz) .lt. -aho01l))goto 23166
+ gq815b = dexp(-aho01l)
+ xyiu19 = .true.
+ goto 23167
+23166 continue
+ gq815b = dexp(go0l1q(2*d9rjek,w3gohz))
+ xyiu19 = .false.
+23167 continue
+23165 continue
+ if(.not.(jmwo0z(w3gohz,d9rjek) .lt. 1.0d0))goto 23168
+ mu4ygk = 1.0d0
+ goto 23169
+23168 continue
+ mu4ygk = jmwo0z(w3gohz,d9rjek)
+23169 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * (jmwo0z(w3gohz,d9rjek) *
+ &dlog(mu4ygk/w5poyv(d9rjek,w3gohz)) + (jmwo0z(w3gohz,d9rjek) +
+ &gq815b) * dlog((w5poyv(d9rjek,w3gohz)+gq815b) / (gq815b+ jmwo0z(
+ &w3gohz,d9rjek))))
+23162 continue
+23160 continue
+ goto 23159
+23158 continue
+ aho01l = 34.0d0
+ do 23170 d9rjek=1,aqk377
+ do 23172 w3gohz=1,nfiumb4
+ if(.not.(go0l1q(2*d9rjek,w3gohz) .gt. aho01l))goto 23174
+ gq815b = dexp(aho01l)
+ xyiu19 = .true.
+ goto 23175
+23174 continue
+ if(.not.(go0l1q(2*d9rjek,w3gohz) .lt. -aho01l))goto 23176
+ gq815b = dexp(-aho01l)
+ xyiu19 = .true.
+ goto 23177
+23176 continue
+ gq815b = dexp(go0l1q(2*d9rjek,w3gohz))
+ xyiu19 = .false.
+23177 continue
+23175 continue
+ if(.not.( xyiu19 ))goto 23178
+ ig5cma = 0.0d0
+ j0izmn = 0.0d0
+ goto 23179
+23178 continue
+ call atez9d(gq815b + jmwo0z(w3gohz,d9rjek), ig5cma)
+ call atez9d(gq815b, j0izmn)
+23179 continue
+ call atez9d(1.0d0 + jmwo0z(w3gohz,d9rjek), ue1phr)
+ mu4ygk = gq815b * dlog(gq815b / (gq815b + w5poyv(d9rjek,w3gohz)))
+ &+ ig5cma - j0izmn - ue1phr
+ if(.not.(jmwo0z(w3gohz,d9rjek) .gt. 0.0d0))goto 23180
+ mu4ygk = mu4ygk + jmwo0z(w3gohz,d9rjek) * dlog(w5poyv(d9rjek,
+ &w3gohz) / (gq815b + w5poyv(d9rjek,w3gohz)))
+23180 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk
+23172 continue
+23170 continue
+ qe3jcd = -qe3jcd / 2.0d0
+23159 continue
+23156 continue
+ if(.not.(zvxw1l .eq. 8))goto 23182
+ do 23184 d9rjek=1,lku8xq
+ do 23186 w3gohz=1,nfiumb4
+ mu4ygk = jmwo0z(w3gohz,d9rjek) - w5poyv(d9rjek,w3gohz)
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk**2
+23186 continue
+23184 continue
+23182 continue
+ goto 23123
+23122 continue
+ if(.not.((zvxw1l .eq. 1) .or. (zvxw1l .eq. 4)))goto 23188
+ do 23190 w3gohz=1,nfiumb4
+ if(.not.(jmwo0z(w3gohz,lir0o1) .gt. 0.0d0))goto 23192
+ mdk7tp = jmwo0z(w3gohz,lir0o1) * dlog(jmwo0z(w3gohz,lir0o1))
+ goto 23193
+23192 continue
+ mdk7tp = 0.0d0
+23193 continue
+ if(.not.(jmwo0z(w3gohz,lir0o1) .lt. 1.0d0))goto 23194
+ mdk7tp = mdk7tp + (1.0d0 - jmwo0z(w3gohz,lir0o1)) * dlog(1.0d0 -
+ &jmwo0z(w3gohz,lir0o1))
+23194 continue
+ mu4ygk = w5poyv(lir0o1,w3gohz) * (1.0d0 - w5poyv(lir0o1,w3gohz))
+ if(.not.(mu4ygk .lt. fiumb4))goto 23196
+ smu = w5poyv(lir0o1,w3gohz)
+ if(.not.(smu .lt. fiumb4))goto 23198
+ oesul2 = jmwo0z(w3gohz,lir0o1) * wlkaa3
+ goto 23199
+23198 continue
+ oesul2 = jmwo0z(w3gohz,lir0o1) * dlog(smu)
+23199 continue
+ mqs3rp = 1.0d0 - smu
+ if(.not.(mqs3rp .lt. fiumb4))goto 23200
+ oesul2 = oesul2 + (1.0d0-jmwo0z(w3gohz,lir0o1))*wlkaa3
+ goto 23201
+23200 continue
+ oesul2 = oesul2 + (1.0d0-jmwo0z(w3gohz,lir0o1))*dlog(mqs3rp)
+23201 continue
+ goto 23197
+23196 continue
+ oesul2 = (jmwo0z(w3gohz,lir0o1) * dlog(w5poyv(lir0o1,w3gohz)) + (
+ &1.0d0 - jmwo0z(w3gohz,lir0o1)) * dlog(1.0d0 - w5poyv(lir0o1,
+ &w3gohz)))
+23197 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * (mdk7tp - oesul2)
+23190 continue
+23188 continue
+ if(.not.(zvxw1l .eq. 2))goto 23202
+ do 23204 w3gohz=1,nfiumb4
+ if(.not.(jmwo0z(w3gohz,lir0o1) .gt. 0.0d0))goto 23206
+ mu4ygk = w5poyv(lir0o1,w3gohz) - jmwo0z(w3gohz,lir0o1) + jmwo0z(
+ &w3gohz,lir0o1) * dlog(jmwo0z(w3gohz,lir0o1) / w5poyv(lir0o1,
+ &w3gohz))
+ goto 23207
+23206 continue
+ mu4ygk = w5poyv(lir0o1,w3gohz) - jmwo0z(w3gohz,lir0o1)
+23207 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk
+23204 continue
+23202 continue
+ if(.not.(zvxw1l .eq. 5))goto 23208
+ do 23210 w3gohz=1,nfiumb4
+ dh3rio = dexp(go0l1q(2*lir0o1,w3gohz))
+ call atez9d(dh3rio, ig5cma)
+ if(.not.(jmwo0z(w3gohz,lir0o1) .gt. 0.0d0))goto 23212
+ mu4ygk = (dh3rio - 1.0d0) * dlog(jmwo0z(w3gohz,lir0o1)) + dh3rio *
+ & (dlog(dh3rio) - jmwo0z(w3gohz,lir0o1) / w5poyv(lir0o1,w3gohz) -
+ &dlog(w5poyv(lir0o1,w3gohz))) - ig5cma
+ goto 23213
+23212 continue
+ mu4ygk = -1000.0d0
+23213 continue
+ mu4ygk = -mu4ygk
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk
+23210 continue
+23208 continue
+ if(.not.(zvxw1l .eq. 3))goto 23214
+ if(.not.(cll .eq. 0))goto 23216
+ aho01l = 34.0d0
+ do 23218 w3gohz=1,nfiumb4
+ if(.not.(go0l1q(2*lir0o1,w3gohz) .gt. aho01l))goto 23220
+ gq815b = dexp(aho01l)
+ xyiu19 = .true.
+ goto 23221
+23220 continue
+ if(.not.(go0l1q(2*lir0o1,w3gohz) .lt. -aho01l))goto 23222
+ gq815b = dexp(-aho01l)
+ xyiu19 = .true.
+ goto 23223
+23222 continue
+ gq815b = dexp(go0l1q(2*lir0o1,w3gohz))
+ xyiu19 = .false.
+23223 continue
+23221 continue
+ if(.not.(jmwo0z(w3gohz,lir0o1) .lt. 1.0d0))goto 23224
+ mu4ygk = 1.0d0
+ goto 23225
+23224 continue
+ mu4ygk = jmwo0z(w3gohz,lir0o1)
+23225 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * (jmwo0z(w3gohz,lir0o1) *
+ &dlog(mu4ygk/w5poyv(lir0o1,w3gohz)) + (jmwo0z(w3gohz,lir0o1)+
+ &gq815b) * dlog((w5poyv(lir0o1,w3gohz) + gq815b) / ( gq815b+jmwo0z(
+ &w3gohz,lir0o1))))
+23218 continue
+ goto 23217
+23216 continue
+ do 23226 w3gohz=1,nfiumb4
+ gq815b = dexp(go0l1q(2*lir0o1,w3gohz))
+ call atez9d(gq815b + jmwo0z(w3gohz,lir0o1), ig5cma)
+ call atez9d(gq815b, j0izmn)
+ call atez9d(1.0d0 + jmwo0z(w3gohz,lir0o1), ue1phr)
+ mu4ygk = gq815b * dlog(gq815b / (gq815b + w5poyv(lir0o1,w3gohz)))
+ &+ ig5cma - j0izmn - ue1phr
+ if(.not.(jmwo0z(w3gohz,lir0o1) .gt. 0.0d0))goto 23228
+ mu4ygk = mu4ygk + jmwo0z(w3gohz,lir0o1) * dlog(w5poyv(lir0o1,
+ &w3gohz) / (gq815b + w5poyv(lir0o1,w3gohz)))
+23228 continue
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk
+23226 continue
+ qe3jcd = -qe3jcd / 2.0d0
+23217 continue
+23214 continue
+ if(.not.(zvxw1l .eq. 8))goto 23230
+ do 23232 w3gohz=1,nfiumb4
+ mu4ygk = jmwo0z(w3gohz,lir0o1) - w5poyv(lir0o1,w3gohz)
+ qe3jcd = qe3jcd + w8xfic(w3gohz,1) * mu4ygk**2
+23232 continue
+23230 continue
+23123 continue
+ dev = 2.0d0 * qe3jcd
+ return
+ end
+ subroutine sptoq8(hft28, ur73jo, nfiumb4, vi231l, cqui1v, zvxw1l)
+ implicit logical (a-z)
+ integer nfiumb4, vi231l, cqui1v, zvxw1l
+ double precision hft28(nfiumb4,cqui1v), ur73jo(vi231l,1)
+ integer w3gohz, c3qxjo, pvnfr4
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq.5 )))goto 23234
+ c3qxjo = 1
+ do 23236 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = 1.0d0
+ ur73jo(2*w3gohz, c3qxjo) = 0.0d0
+23236 continue
+ c3qxjo = c3qxjo + 1
+ do 23238 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = 0.0d0
+ ur73jo(2*w3gohz, c3qxjo) = 1.0d0
+23238 continue
+ c3qxjo = c3qxjo + 1
+ do 23240 pvnfr4=1,cqui1v
+ do 23242 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = hft28(w3gohz,pvnfr4)
+ ur73jo(2*w3gohz, c3qxjo) = 0.0d0
+23242 continue
+ c3qxjo = c3qxjo + 1
+23240 continue
+ goto 23235
+23234 continue
+ c3qxjo = 1
+ do 23244 w3gohz=1,nfiumb4
+ ur73jo(w3gohz,c3qxjo) = 1.0d0
+23244 continue
+ c3qxjo = c3qxjo + 1
+ do 23246 pvnfr4=1,cqui1v
+ do 23248 w3gohz=1,nfiumb4
+ ur73jo(w3gohz,c3qxjo)=hft28(w3gohz,pvnfr4)
+23248 continue
+ c3qxjo = c3qxjo + 1
+23246 continue
+23235 continue
+ return
+ end
+ subroutine u16zxj(hft28, ur73jo, nfiumb4, cqui1v, zvxw1l, q121lc,
+ &vi231l, zxiwf1, i5uvkm, zqve1l, vvl1li, oju3yh, p1, h3mrfq)
+ implicit logical (a-z)
+ integer nfiumb4, cqui1v, zvxw1l, vi231l, zxiwf1, i5uvkm, zqve1l(
+ &i5uvkm), vvl1li(i5uvkm), p1, h3mrfq
+ double precision hft28(nfiumb4,cqui1v), ur73jo(vi231l,zxiwf1),
+ &oju3yh(nfiumb4,p1)
+ double precision q121lc(nfiumb4)
+ integer hv3wja, w3gohz, c3qxjo, pvnfr4
+ double precision mw6reg, ig5cma
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23250
+ do 23252 pvnfr4=1,cqui1v
+ do 23254 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,pvnfr4) = hft28(w3gohz,pvnfr4)
+ ur73jo(2*w3gohz ,pvnfr4) = 0.0d0
+23254 continue
+23252 continue
+ c3qxjo = cqui1v + 1
+ if(.not.(h3mrfq .eq. 0))goto 23256
+ do 23258 hv3wja=1,i5uvkm
+ do 23260 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = hft28(w3gohz,zqve1l(hv3wja)) * hft28(
+ &w3gohz,vvl1li(hv3wja))
+ ur73jo(2*w3gohz ,c3qxjo) = 0.0d0
+23260 continue
+ c3qxjo = c3qxjo + 1
+23258 continue
+ goto 23257
+23256 continue
+ do 23262 w3gohz=1,nfiumb4
+ mw6reg = 0.0d0
+ do 23264 pvnfr4=1,cqui1v
+ ig5cma = hft28(w3gohz,pvnfr4)
+ mw6reg = mw6reg + ig5cma * ig5cma
+23264 continue
+ q121lc(w3gohz) = -0.50d0 * mw6reg
+23262 continue
+23257 continue
+ goto 23251
+23250 continue
+ do 23266 pvnfr4=1,cqui1v
+ do 23268 w3gohz=1,nfiumb4
+ ur73jo(w3gohz,pvnfr4) = hft28(w3gohz,pvnfr4)
+23268 continue
+23266 continue
+ c3qxjo = cqui1v + 1
+ if(.not.(h3mrfq .eq. 0))goto 23270
+ do 23272 hv3wja=1,i5uvkm
+ do 23274 w3gohz=1,nfiumb4
+ ur73jo(w3gohz,c3qxjo) = hft28(w3gohz,zqve1l(hv3wja)) * hft28(
+ &w3gohz,vvl1li(hv3wja))
+23274 continue
+ c3qxjo = c3qxjo + 1
+23272 continue
+ goto 23271
+23270 continue
+ do 23276 w3gohz=1,nfiumb4
+ mw6reg = 0.0d0
+ do 23278 pvnfr4=1,cqui1v
+ ig5cma = hft28(w3gohz,pvnfr4)
+ mw6reg = mw6reg + ig5cma * ig5cma
+23278 continue
+ q121lc(w3gohz) = -0.50d0 * mw6reg
+23276 continue
+23271 continue
+23251 continue
+ if(.not.(p1 .gt. 0))goto 23280
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23282
+ do 23284 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = 1.0d0
+ ur73jo(2*w3gohz, c3qxjo) = 0.0d0
+23284 continue
+ c3qxjo = c3qxjo + 1
+ do 23286 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = 0.0d0
+ ur73jo(2*w3gohz, c3qxjo) = 1.0d0
+23286 continue
+ c3qxjo = c3qxjo + 1
+ if(.not.(p1 .gt. 1))goto 23288
+ do 23290 hv3wja=2,p1
+ do 23292 w3gohz=1,nfiumb4
+ ur73jo(2*w3gohz-1,c3qxjo) = oju3yh(w3gohz,hv3wja)
+ ur73jo(2*w3gohz, c3qxjo) = 0.0d0
+23292 continue
+ c3qxjo = c3qxjo + 1
+23290 continue
+23288 continue
+ goto 23283
+23282 continue
+ do 23294 hv3wja=1,p1
+ do 23296 w3gohz=1,nfiumb4
+ ur73jo(w3gohz,c3qxjo) = oju3yh(w3gohz,hv3wja)
+23296 continue
+ c3qxjo = c3qxjo + 1
+23294 continue
+23283 continue
+23280 continue
+ return
+ end
+ subroutine p0lk40(hft28, ur73jo, nfiumb4, lku8xq, vi231l, cqui1v,
+ &zvxw1l, aqk377, w5tcfp, cr8hav, i5uvkm, zqve1l, vvl1li, h3mrfq,
+ &q121lc)
+ implicit logical (a-z)
+ integer nfiumb4, lku8xq, vi231l, cqui1v, zvxw1l, aqk377, w5tcfp,
+ &cr8hav, i5uvkm, zqve1l(i5uvkm), vvl1li(i5uvkm), h3mrfq
+ double precision hft28(nfiumb4,cqui1v), ur73jo(vi231l,cr8hav),
+ &q121lc(nfiumb4)
+ integer hv3wja, w3gohz, d9rjek, nd6mep, ptr, c3qxjo, pvnfr4
+ double precision ig5cma, mw6reg
+ do 23298 nd6mep=1,cr8hav
+ do 23300 w3gohz=1,vi231l
+ ur73jo(w3gohz,nd6mep) = 0.0d0
+23300 continue
+23298 continue
+ c3qxjo = 0
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23302
+ do 23304 pvnfr4=1,cqui1v
+ ptr = 1
+ do 23306 w3gohz=1,nfiumb4
+ do 23308 d9rjek=1,aqk377
+ ur73jo(ptr,c3qxjo+d9rjek) = hft28(w3gohz,pvnfr4)
+ ptr = ptr + 2
+23308 continue
+23306 continue
+ c3qxjo = c3qxjo + aqk377
+23304 continue
+ goto 23303
+23302 continue
+ do 23310 pvnfr4=1,cqui1v
+ ptr = 0
+ do 23312 w3gohz=1,nfiumb4
+ do 23314 d9rjek=1,lku8xq
+ ptr = ptr + 1
+ ur73jo(ptr,c3qxjo+d9rjek) = hft28(w3gohz,pvnfr4)
+23314 continue
+23312 continue
+ c3qxjo = c3qxjo + lku8xq
+23310 continue
+23303 continue
+ if(.not.(w5tcfp .eq. 0))goto 23316
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23318
+ do 23320 hv3wja=1,i5uvkm
+ ptr = 1
+ do 23322 w3gohz=1,nfiumb4
+ ig5cma = hft28(w3gohz,zqve1l(hv3wja)) * hft28(w3gohz,vvl1li(
+ &hv3wja))
+ do 23324 d9rjek=1,aqk377
+ ur73jo(ptr,c3qxjo+d9rjek) = ig5cma
+ ptr = ptr + 2
+23324 continue
+23322 continue
+ c3qxjo = c3qxjo + aqk377
+23320 continue
+ goto 23319
+23318 continue
+ do 23326 hv3wja=1,i5uvkm
+ ptr = 0
+ do 23328 w3gohz=1,nfiumb4
+ ig5cma = hft28(w3gohz,zqve1l(hv3wja)) * hft28(w3gohz,vvl1li(
+ &hv3wja))
+ do 23330 d9rjek=1,lku8xq
+ ptr = ptr + 1
+ ur73jo(ptr,c3qxjo+d9rjek) = ig5cma
+23330 continue
+23328 continue
+ c3qxjo = c3qxjo + lku8xq
+23326 continue
+23319 continue
+ goto 23317
+23316 continue
+ if(.not.(h3mrfq .eq. 1))goto 23332
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23334
+ do 23336 w3gohz=1,nfiumb4
+ mw6reg = 0.0d0
+ do 23338 pvnfr4=1,cqui1v
+ ig5cma = hft28(w3gohz,pvnfr4)
+ mw6reg = mw6reg + ig5cma * ig5cma
+23338 continue
+ q121lc(w3gohz) = -0.50d0 * mw6reg
+23336 continue
+ goto 23335
+23334 continue
+ do 23340 w3gohz=1,nfiumb4
+ mw6reg = 0.0d0
+ do 23342 pvnfr4=1,cqui1v
+ ig5cma = hft28(w3gohz,pvnfr4)
+ mw6reg = mw6reg + ig5cma * ig5cma
+23342 continue
+ q121lc(w3gohz) = -0.50d0 * mw6reg
+23340 continue
+23335 continue
+ goto 23333
+23332 continue
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23344
+ do 23346 hv3wja=1,i5uvkm
+ ptr = 1
+ do 23348 w3gohz=1,nfiumb4
+ ig5cma = hft28(w3gohz,zqve1l(hv3wja)) * hft28(w3gohz,vvl1li(
+ &hv3wja))
+ do 23350 d9rjek=1,aqk377
+ ur73jo(ptr,c3qxjo+hv3wja) = ig5cma
+ ptr = ptr + 2
+23350 continue
+23348 continue
+23346 continue
+ c3qxjo = c3qxjo + i5uvkm
+ goto 23345
+23344 continue
+ do 23352 hv3wja=1,i5uvkm
+ ptr = 0
+ do 23354 w3gohz=1,nfiumb4
+ ig5cma = hft28(w3gohz,zqve1l(hv3wja)) * hft28(w3gohz,vvl1li(
+ &hv3wja))
+ do 23356 d9rjek=1,lku8xq
+ ptr = ptr + 1
+ ur73jo(ptr,c3qxjo+hv3wja) = ig5cma
+23356 continue
+23354 continue
+23352 continue
+ c3qxjo = c3qxjo + i5uvkm
+23345 continue
+23333 continue
+23317 continue
+ return
+ end
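+c
+c  nbq4ua appears to supply family-specific starting values for
+c  response column lir0o1: the weighted mean ozpqa0 of jmwo0z is
+c  pushed through a link (u10e3o, wgf0al, or dlog with a small
+c  positive shift) into the linear predictors go0l1q, and for the
+c  two-parameter families (zvxw1l .eq. 3 or 5) the second member
+c  of each predictor pair is initialised from l1zvxx according to
+c  the switch foej1u.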
+ subroutine nbq4ua(jmwo0z, go0l1q, l1zvxx, nfiumb4, lku8xq, aqk377,
+ & zvxw1l, lir0o1, w8xfic, foej1u)
+ implicit logical (a-z)
+ integer nfiumb4, lku8xq, aqk377, zvxw1l, lir0o1, foej1u
+ double precision jmwo0z(nfiumb4,aqk377), go0l1q(lku8xq,nfiumb4),
+ &l1zvxx(15)
+ double precision w8xfic(nfiumb4,1)
+ double precision nqvu3e, pg2aqx, ozpqa0, ghys4c, qg8fdc, bdgzx3,
+ &cy0nqs, reg6st, wo8cqk
+ integer w3gohz
+ if(.not.((zvxw1l .eq. 1) .or. (zvxw1l .eq. 4) .or.(zvxw1l .eq. 3)
+ &.or. (zvxw1l .eq. 5)))goto 23358
+ nqvu3e = 0.0d0
+ pg2aqx = 0.0d0
+ do 23360 w3gohz=1,nfiumb4
+ nqvu3e = nqvu3e + jmwo0z(w3gohz,lir0o1) * w8xfic(w3gohz,1)
+ pg2aqx = pg2aqx + w8xfic(w3gohz,1)
+23360 continue
+ ozpqa0 = nqvu3e / pg2aqx
+23358 continue
+ if(.not.(zvxw1l .eq. 1))goto 23362
+ call u10e3o(ozpqa0, ghys4c)
+ do 23364 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = ghys4c
+23364 continue
+23362 continue
+ if(.not.(zvxw1l .eq. 2))goto 23366
+ do 23368 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = dlog(jmwo0z(w3gohz,lir0o1) + 0.125d0)
+23368 continue
+23366 continue
+ if(.not.(zvxw1l .eq. 4))goto 23370
+ call wgf0al(ozpqa0, qg8fdc)
+ do 23372 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = qg8fdc
+23372 continue
+23370 continue
+ if(.not.(zvxw1l .eq. 5))goto 23374
+ if(.not.(foej1u .eq. 1))goto 23376
+ bdgzx3 = dlog(ozpqa0 + 0.03125d0)
+ cy0nqs = dlog(l1zvxx(3+aqk377+lir0o1)+0.01d0)
+ do 23378 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = bdgzx3
+ go0l1q(2*lir0o1, w3gohz) = cy0nqs
+23378 continue
+ goto 23377
+23376 continue
+ if(.not.(foej1u .eq. 2))goto 23380
+ bdgzx3 = dlog((6.0/8.0)*ozpqa0+0.000d0)
+ cy0nqs = dlog(l1zvxx(3+aqk377+lir0o1)+0.01d0)
+ do 23382 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = bdgzx3
+ go0l1q(2*lir0o1 ,w3gohz) = cy0nqs
+23382 continue
+ goto 23381
+23380 continue
+ cy0nqs = dlog(l1zvxx(3+aqk377+lir0o1)+0.01d0)
+ do 23384 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = dlog(jmwo0z(w3gohz,lir0o1) + 0.
+ &03125d0)
+ go0l1q(2*lir0o1, w3gohz) = cy0nqs
+23384 continue
+23381 continue
+23377 continue
+23374 continue
+ if(.not.(zvxw1l .eq. 3))goto 23386
+ if(.not.(foej1u .eq. 1))goto 23388
+ bdgzx3 = dlog(ozpqa0 + 0.03125d0)
+ cy0nqs = dlog(l1zvxx(3+lir0o1)+0.03125d0)
+ do 23390 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = bdgzx3
+ go0l1q(2*lir0o1,w3gohz) = cy0nqs
+23390 continue
+ goto 23389
+23388 continue
+ if(.not.(foej1u .eq. 2))goto 23392
+ bdgzx3 = dlog(ozpqa0 + 0.03125d0)
+ wo8cqk = l1zvxx(3+lir0o1)
+ cy0nqs = dlog(wo8cqk)
+ do 23394 w3gohz=1,nfiumb4
+ reg6st = jmwo0z(w3gohz,lir0o1) - ozpqa0
+ if(.not.(reg6st .gt. 3.0 * ozpqa0))goto 23396
+ go0l1q(2*lir0o1-1,w3gohz) = dlog(dsqrt(jmwo0z(w3gohz,lir0o1)))
+ go0l1q(2*lir0o1 ,w3gohz) = cy0nqs
+ goto 23397
+23396 continue
+ go0l1q(2*lir0o1-1,w3gohz) = bdgzx3
+ go0l1q(2*lir0o1 ,w3gohz) = cy0nqs
+23397 continue
+23394 continue
+ goto 23393
+23392 continue
+ if(.not.(foej1u .eq. 3))goto 23398
+ bdgzx3 = dlog(ozpqa0 + 0.03125d0)
+ wo8cqk = l1zvxx(3+lir0o1)
+ cy0nqs = dlog(wo8cqk)
+ do 23400 w3gohz=1,nfiumb4
+ reg6st = jmwo0z(w3gohz,lir0o1) - ozpqa0
+ if(.not.(reg6st .gt. ozpqa0))goto 23402
+ go0l1q(2*lir0o1-1,w3gohz) = dlog(0.5*(jmwo0z(w3gohz,lir0o1)+
+ &ozpqa0))
+ go0l1q(2*lir0o1 ,w3gohz) = dlog(wo8cqk / (reg6st / ozpqa0))
+ goto 23403
+23402 continue
+ if(.not.(jmwo0z(w3gohz,lir0o1) .lt. (ozpqa0 / 4.0)))goto 23404
+ go0l1q(2*lir0o1-1,w3gohz) = dlog(ozpqa0 / 4.0)
+ go0l1q(2*lir0o1 ,w3gohz) = cy0nqs
+ goto 23405
+23404 continue
+ go0l1q(2*lir0o1-1,w3gohz) = bdgzx3
+ go0l1q(2*lir0o1 ,w3gohz) = cy0nqs
+23405 continue
+23403 continue
+23400 continue
+ goto 23399
+23398 continue
+ cy0nqs = dlog(l1zvxx(3+lir0o1))
+ do 23406 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = dlog(jmwo0z(w3gohz,lir0o1) + 0.
+ &03125d0)
+ go0l1q(2*lir0o1, w3gohz) = cy0nqs
+23406 continue
+23399 continue
+23393 continue
+23389 continue
+23386 continue
+ if(.not.(zvxw1l .eq. 8))goto 23408
+ do 23410 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = jmwo0z(w3gohz,lir0o1)
+23410 continue
+23408 continue
+ return
+ end
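+c
+c  kqsxz1 appears to perform the irls bookkeeping for response
+c  lir0o1: for each family code zvxw1l it fills in the working
+c  weights lj4dph, their square roots jrxg6l (floored at fiumb4
+c  and zl11l0 to keep them positive), and the adjusted dependent
+c  variable hr83e = eta + (y - mu) / (d mu / d eta).  the
+c  two-parameter families use vdgam1/vtgam1 (digamma, trigamma)
+c  and enbin9 for the expected-information pieces, and when
+c  h3mrfq .eq. 1 the offset q121lc is subtracted from hr83e.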
+ subroutine kqsxz1(jmwo0z, w8xfic, go0l1q, w5poyv, hr83e, lj4dph,
+ &jrxg6l, jftq1, fiumb4, zl11l0, nfiumb4, lku8xq, aqk377, vi231l,
+ &zkjqhi, lir0o1, zvxw1l, gqxvz8, h3mrfq, q121lc)
+ implicit logical (a-z)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, lir0o1, gqxvz8,
+ &h3mrfq
+ double precision jmwo0z(nfiumb4,aqk377), w8xfic(nfiumb4,1),
+ &go0l1q(lku8xq,nfiumb4), w5poyv(aqk377,nfiumb4), q121lc(nfiumb4),
+ &hr83e(nfiumb4,lku8xq), lj4dph(nfiumb4,lku8xq), jrxg6l(zkjqhi,
+ &nfiumb4), jftq1, fiumb4, zl11l0
+ integer w3gohz, zvxw1l
+ double precision mu4ygka, zixm0o, mu4ygkc, aho01l
+ logical xyiu19
+ double precision gq815b, p4je8, da51l0o, hmr3dx, yqsco4, ogq67o,
+ &qpzx6l(1,1), qxvi5(1,1), xkcm3b(1,1)
+ integer uxzze7, c4uxow, nh2qxl
+ double precision dh3rio, ig5cmad, ig5v8gzsp, dldshape
+ double precision l0zqm, q1znur
+ integer lqhm2g
+ uxzze7 = 1
+ ogq67o = 0.990d0
+ ogq67o = 0.995d0
+ if(.not.(zvxw1l .eq. 1))goto 23412
+ do 23414 w3gohz=1,nfiumb4
+ mu4ygka = w5poyv(lir0o1,w3gohz) * (1.0d0 - w5poyv(lir0o1,w3gohz))
+ zixm0o = mu4ygka * w8xfic(w3gohz,1)
+ if(.not.(mu4ygka .lt. fiumb4))goto 23416
+ mu4ygka = fiumb4
+23416 continue
+ if(.not.(zixm0o .lt. fiumb4))goto 23418
+ zixm0o = fiumb4
+ jrxg6l(lir0o1,w3gohz) = zl11l0
+ goto 23419
+23418 continue
+ jrxg6l(lir0o1,w3gohz) = dsqrt(zixm0o)
+23419 continue
+ lj4dph(w3gohz,lir0o1) = zixm0o
+ hr83e(w3gohz,lir0o1) = go0l1q(lir0o1,w3gohz) + (jmwo0z(w3gohz,
+ &lir0o1)-w5poyv(lir0o1,w3gohz)) / mu4ygka
+23414 continue
+23412 continue
+ if(.not.(zvxw1l .eq. 2))goto 23420
+ do 23422 w3gohz=1,nfiumb4
+ mu4ygka = w5poyv(lir0o1,w3gohz)
+ zixm0o = mu4ygka * w8xfic(w3gohz,1)
+ if(.not.(zixm0o .lt. fiumb4))goto 23424
+ zixm0o = fiumb4
+ jrxg6l(lir0o1,w3gohz) = zl11l0
+ goto 23425
+23424 continue
+ jrxg6l(lir0o1,w3gohz) = dsqrt(zixm0o)
+23425 continue
+ lj4dph(w3gohz,lir0o1) = zixm0o
+ if(.not.(jmwo0z(w3gohz,lir0o1) .gt. 0.0d0))goto 23426
+ mu4ygkc = mu4ygka
+ if(.not.(mu4ygkc .lt. fiumb4))goto 23428
+ mu4ygkc = fiumb4
+23428 continue
+ hr83e(w3gohz,lir0o1) = go0l1q(lir0o1,w3gohz) + (jmwo0z(w3gohz,
+ &lir0o1)-mu4ygkc)/mu4ygkc
+ goto 23427
+23426 continue
+ hr83e(w3gohz,lir0o1) = go0l1q(lir0o1,w3gohz) - 1.0d0
+23427 continue
+23422 continue
+23420 continue
+ if(.not.(zvxw1l .eq. 4))goto 23430
+ do 23432 w3gohz=1,nfiumb4
+ if(.not.((w5poyv(lir0o1,w3gohz) .lt. fiumb4) .or.(w5poyv(lir0o1,
+ &w3gohz) .gt. 1.0d0 - fiumb4)))goto 23434
+ mu4ygka = fiumb4
+ zixm0o = mu4ygka * w8xfic(w3gohz,1)
+ if(.not.(zixm0o .lt. fiumb4))goto 23436
+ zixm0o = fiumb4
+ jrxg6l(lir0o1,w3gohz) = zl11l0
+ goto 23437
+23436 continue
+ jrxg6l(lir0o1,w3gohz) = dsqrt(zixm0o)
+23437 continue
+ lj4dph(w3gohz,lir0o1) = zixm0o
+ hr83e(w3gohz,lir0o1) = go0l1q(lir0o1,w3gohz) + (jmwo0z(w3gohz,
+ &lir0o1)-w5poyv(lir0o1,w3gohz)) / mu4ygka
+ goto 23435
+23434 continue
+ mu4ygka = -(1.0d0 - w5poyv(lir0o1,w3gohz)) * dlog(1.0d0 - w5poyv(
+ &lir0o1,w3gohz))
+ if(.not.(mu4ygka .lt. fiumb4))goto 23438
+ mu4ygka = fiumb4
+23438 continue
+ zixm0o = -mu4ygka * w8xfic(w3gohz,1) * dlog(1.0d0 - w5poyv(lir0o1,
+ &w3gohz)) / w5poyv(lir0o1,w3gohz)
+ if(.not.(zixm0o .lt. fiumb4))goto 23440
+ zixm0o = fiumb4
+23440 continue
+ lj4dph(w3gohz,lir0o1) = zixm0o
+ jrxg6l(lir0o1,w3gohz) = dsqrt(zixm0o)
+ hr83e(w3gohz,lir0o1) = go0l1q(lir0o1,w3gohz) + (jmwo0z(w3gohz,
+ &lir0o1)-w5poyv(lir0o1,w3gohz)) / mu4ygka
+23435 continue
+23432 continue
+23430 continue
+ if(.not.(zvxw1l .eq. 5))goto 23442
+ l0zqm = 1.0d-20
+ aho01l = 34.0d0
+ do 23444 w3gohz=1,nfiumb4
+ if(.not.(go0l1q(2*lir0o1,w3gohz) .gt. aho01l))goto 23446
+ dh3rio = dexp(aho01l)
+ xyiu19 = .true.
+ goto 23447
+23446 continue
+ if(.not.(go0l1q(2*lir0o1,w3gohz) .lt. -aho01l))goto 23448
+ dh3rio = dexp(-aho01l)
+ xyiu19 = .true.
+ goto 23449
+23448 continue
+ dh3rio = dexp(go0l1q(2*lir0o1,w3gohz))
+ xyiu19 = .false.
+23449 continue
+23447 continue
+ call vdgam1(dh3rio, ig5cmad, lqhm2g)
+ if(.not.(lqhm2g .ne. 1))goto 23450
+ call intpr("error in kqsxz1 lqhm2g 1: ",-1,lqhm2g,1)
+23450 continue
+ q1znur = w5poyv(lir0o1,w3gohz)
+ if(.not.(q1znur .lt. l0zqm))goto 23452
+ q1znur = l0zqm
+23452 continue
+ dldshape = dlog(jmwo0z(w3gohz,lir0o1)) + dlog(dh3rio) - dlog(
+ &q1znur) + 1.0d0 - ig5cmad - jmwo0z(w3gohz,lir0o1) / q1znur
+ call vtgam1(dh3rio, ig5v8gzsp, lqhm2g)
+ if(.not.(lqhm2g .ne. 1))goto 23454
+ call intpr("error in kqsxz1 lqhm2g 2: ",-1,lqhm2g,1)
+23454 continue
+ lj4dph(w3gohz,2*lir0o1-1) = w8xfic(w3gohz,1) * dh3rio
+ mu4ygka = dh3rio * ig5v8gzsp - 1.0d0
+ lj4dph(w3gohz,2*lir0o1 ) = w8xfic(w3gohz,1) * dh3rio * mu4ygka
+ if(.not.(lj4dph(w3gohz,2*lir0o1-1) .lt. fiumb4))goto 23456
+ lj4dph(w3gohz,2*lir0o1-1) = fiumb4
+ jrxg6l(2*lir0o1-1,w3gohz) = zl11l0
+ goto 23457
+23456 continue
+ jrxg6l(2*lir0o1-1,w3gohz) = dsqrt(lj4dph(w3gohz,2*lir0o1-1))
+23457 continue
+ if(.not.(lj4dph(w3gohz,2*lir0o1) .lt. fiumb4))goto 23458
+ lj4dph(w3gohz,2*lir0o1) = fiumb4
+ jrxg6l(2*lir0o1,w3gohz) = zl11l0
+ goto 23459
+23458 continue
+ jrxg6l(2*lir0o1,w3gohz) = dsqrt(lj4dph(w3gohz,2*lir0o1))
+23459 continue
+ if(.not.(mu4ygka .lt. l0zqm))goto 23460
+ mu4ygka = l0zqm
+23460 continue
+ hr83e(w3gohz,2*lir0o1-1) = go0l1q(2*lir0o1-1,w3gohz) + jmwo0z(
+ &w3gohz,lir0o1) / q1znur - 1.0d0
+ hr83e(w3gohz,2*lir0o1 ) = go0l1q(2*lir0o1 ,w3gohz) + dldshape /
+ &mu4ygka
+23444 continue
+23442 continue
+ if(.not.(zvxw1l .eq. 3))goto 23462
+ aho01l = 34.0d0
+ l0zqm = 1.0d-20
+ do 23464 w3gohz=1,nfiumb4
+ if(.not.(go0l1q(2*lir0o1,w3gohz) .gt. aho01l))goto 23466
+ gq815b = dexp(aho01l)
+ xyiu19 = .true.
+ goto 23467
+23466 continue
+ if(.not.(go0l1q(2*lir0o1,w3gohz) .lt. -aho01l))goto 23468
+ gq815b = dexp(-aho01l)
+ xyiu19 = .true.
+ goto 23469
+23468 continue
+ gq815b = dexp(go0l1q(2*lir0o1,w3gohz))
+ xyiu19 = .false.
+23469 continue
+23467 continue
+ q1znur = w5poyv(lir0o1,w3gohz)
+ if(.not.(q1znur .lt. l0zqm))goto 23470
+ q1znur = l0zqm
+23470 continue
+ call vdgam1(jmwo0z(w3gohz,lir0o1) + gq815b, mu4ygka, lqhm2g)
+ if(.not.(lqhm2g .ne. 1))goto 23472
+23472 continue
+ call vdgam1(gq815b, zixm0o, lqhm2g)
+ if(.not.(lqhm2g .ne. 1))goto 23474
+23474 continue
+ da51l0o = mu4ygka - zixm0o - (jmwo0z(w3gohz,lir0o1) + gq815b) / (
+ &q1znur + gq815b) + 1.0d0 + dlog(gq815b / (q1znur + gq815b))
+ p4je8 = gq815b
+ qxvi5(1,1) = gq815b
+ xkcm3b(1,1) = q1znur
+ nh2qxl = 5000
+ call enbin9(qpzx6l, qxvi5, xkcm3b, ogq67o, uxzze7, c4uxow, uxzze7,
+ & hmr3dx, jftq1, nh2qxl)
+ if(.not.(c4uxow .ne. 1))goto 23476
+ gqxvz8 = 5
+ return
+23476 continue
+ yqsco4 = -qpzx6l(1,1) - 1.0d0 / gq815b + 1.0d0 / (gq815b + q1znur)
+ lj4dph(w3gohz,2*lir0o1-1) = w8xfic(w3gohz,1) * q1znur * gq815b / (
+ &q1znur + gq815b)
+ lj4dph(w3gohz,2*lir0o1 ) = w8xfic(w3gohz,1) * gq815b * (-qpzx6l(1,
+ &1)*gq815b - 1.0d0 + gq815b / (gq815b + q1znur))
+ if(.not.(lj4dph(w3gohz,2*lir0o1-1) .lt. fiumb4))goto 23478
+ lj4dph(w3gohz,2*lir0o1-1) = fiumb4
+ jrxg6l(2*lir0o1-1,w3gohz) = zl11l0
+ goto 23479
+23478 continue
+ jrxg6l(2*lir0o1-1,w3gohz) = dsqrt(lj4dph(w3gohz,2*lir0o1-1))
+23479 continue
+ if(.not.(lj4dph(w3gohz,2*lir0o1) .lt. fiumb4))goto 23480
+ lj4dph(w3gohz,2*lir0o1) = fiumb4
+ jrxg6l(2*lir0o1,w3gohz) = zl11l0
+ goto 23481
+23480 continue
+ jrxg6l(2*lir0o1,w3gohz) = dsqrt(lj4dph(w3gohz,2*lir0o1))
+23481 continue
+ hr83e(w3gohz,2*lir0o1-1) = go0l1q(2*lir0o1-1,w3gohz) + jmwo0z(
+ &w3gohz,lir0o1) / q1znur - 1.0d0
+ hr83e(w3gohz,2*lir0o1 ) = go0l1q(2*lir0o1 ,w3gohz) + da51l0o / (
+ &p4je8 * yqsco4)
+23464 continue
+23462 continue
+ if(.not.(zvxw1l .eq. 8))goto 23482
+ do 23484 w3gohz=1,nfiumb4
+ lj4dph(w3gohz,lir0o1) = w8xfic(w3gohz,1)
+ jrxg6l(lir0o1,w3gohz) = dsqrt(lj4dph(w3gohz,lir0o1))
+ hr83e(w3gohz,lir0o1) = jmwo0z(w3gohz,lir0o1)
+23484 continue
+23482 continue
+ if(.not.(h3mrfq .eq. 1))goto 23486
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23488
+ do 23490 w3gohz=1,nfiumb4
+ hr83e(w3gohz,2*lir0o1-1) = hr83e(w3gohz,2*lir0o1-1) - q121lc(
+ &w3gohz)
+23490 continue
+ goto 23489
+23488 continue
+ do 23492 w3gohz=1,nfiumb4
+ hr83e(w3gohz,lir0o1) = hr83e(w3gohz,lir0o1) - q121lc(w3gohz)
+23492 continue
+23489 continue
+23486 continue
+ return
+ end
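+c
+c  cqo2f appears to fit all aqk377 responses simultaneously: each
+c  pass copies ur73jo into ioqzvb, scales its rows by jrxg6l,
+c  factors it with dhkt9w (a pivoted qr decomposition) and solves
+c  for the coefficients lq8reh with vdqrsl, iterating until the
+c  relative change in the criterion nx1bat drops below qik6ym.
+c  gqxvz8 returns 0 on convergence, 2 if the qr factorisation
+c  detects rank deficiency, and 3 if the pga6nul iterations are
+c  exhausted.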
+ subroutine cqo2f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc,
+ &w5poyv, hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1,
+ &nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l,
+ &vvl1li, nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx)
+ implicit logical (a-z)
+ integer p1i8xz(18), zqve1l(1), vvl1li(1)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, i83h1(1)
+ double precision hft28(nfiumb4,1), jmwo0z(nfiumb4,aqk377), oju3yh(
+ &nfiumb4,9), w8xfic(nfiumb4,1), go0l1q(lku8xq,nfiumb4), q121lc(
+ &nfiumb4), w5poyv(aqk377,nfiumb4)
+ double precision hr83e(nfiumb4,lku8xq), lj4dph(nfiumb4,lku8xq),
+ &jrxg6l(zkjqhi,nfiumb4), ur73jo(vi231l,1)
+ double precision ioqzvb(vi231l,1), i0qvzl(1), nx1bat, lq8reh(1),
+ &l1zvxx(4)
+ double precision t5vlzq(lku8xq,nfiumb4,2), zxao0o(lku8xq*(lku8xq+
+ &1))
+ integer w3gohz, d9rjek, nd6mep, i5uvkm, ptr, opf6cv, i2, oht3ga,
+ &ucgi1r, w5tcfp, cqui1v, xhe4cg, ugsma5, zvxw1l, pga6nul
+ integer tvyd2b, fjg0qv, zx1610, zxiwf1, cr8hav, dyt0pg, h3mrfq,
+ &uvnk0i
+ integer uxzze7, foej1u
+ double precision hq710, fiumb4, t7sbea, xmr7cj, elq2cs, qik6ym,
+ &zl11l0, wlkaa3, jftq1
+ uxzze7 = 1
+ cqui1v = p1i8xz(1)
+ w5tcfp = p1i8xz(2)
+ zxiwf1 = p1i8xz(3)
+ xhe4cg = p1i8xz(4)
+ ugsma5 = p1i8xz(5)
+ zvxw1l = p1i8xz(6)
+ pga6nul = p1i8xz(7)
+ p1i8xz(9) = 0
+ cr8hav = p1i8xz(11)
+ dyt0pg = p1i8xz(12)
+ h3mrfq = p1i8xz(14)
+ uvnk0i = p1i8xz(15)
+ foej1u = p1i8xz(18)
+ fiumb4 = l1zvxx(1)
+ zl11l0 = dsqrt(fiumb4)
+ if(.not.((zvxw1l .eq. 1) .or. (zvxw1l .eq. 4)))goto 23494
+ wlkaa3 = dlog(fiumb4)
+23494 continue
+ qik6ym = l1zvxx(2)
+ jftq1 = l1zvxx(3)
+ elq2cs = 0.0d0
+ oht3ga = 0
+ gqxvz8 = 1
+ call qh4ulb(zqve1l, vvl1li, cqui1v)
+ i5uvkm = cqui1v * (cqui1v+1) / 2
+ call p0lk40(hft28, ur73jo, nfiumb4, lku8xq, vi231l, cqui1v,
+ &zvxw1l, aqk377, w5tcfp, cr8hav, i5uvkm, zqve1l, vvl1li, h3mrfq,
+ &q121lc)
+653 if(.not.(ugsma5 .eq. 0))goto 23496
+ do 23498 d9rjek=1,aqk377
+ call nbq4ua(jmwo0z, go0l1q, l1zvxx, nfiumb4, lku8xq, aqk377,
+ &zvxw1l, d9rjek, w8xfic, foej1u)
+23498 continue
+ goto 23497
+23496 continue
+ if(.not.(ugsma5 .eq. 2))goto 23500
+ call pjw1l(ur73jo, lq8reh, go0l1q, nfiumb4, lku8xq, vi231l,
+ &zxiwf1, dyt0pg, oht3ga, zvxw1l, h3mrfq, q121lc)
+23500 continue
+23497 continue
+ call o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ &oht3ga)
+ if(.not.(ugsma5 .eq. 2))goto 23502
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, hq710, oht3ga, fiumb4, wlkaa3, uxzze7)
+ goto 23503
+23502 continue
+ hq710 = -1.0d0
+23503 continue
+ do 23504 ucgi1r=1,pga6nul
+ do 23506 d9rjek=1,aqk377
+ call kqsxz1(jmwo0z, w8xfic, go0l1q, w5poyv, hr83e, lj4dph, jrxg6l,
+ & jftq1, fiumb4, zl11l0, nfiumb4, lku8xq, aqk377, vi231l, zkjqhi,
+ &d9rjek, zvxw1l, gqxvz8, h3mrfq, q121lc)
+23506 continue
+ do 23508 d9rjek=1,zxiwf1
+ do 23510 w3gohz=1,vi231l
+ ioqzvb(w3gohz,d9rjek) = ur73jo(w3gohz,d9rjek)
+23510 continue
+23508 continue
+ do 23512 d9rjek=1,zxiwf1
+ ptr = 1
+ do 23514 opf6cv=1,nfiumb4
+ do 23516 i2=1,lku8xq
+ ioqzvb(ptr,d9rjek) = jrxg6l(i2,opf6cv) * ioqzvb(ptr,d9rjek)
+ ptr = ptr + 1
+23516 continue
+23514 continue
+23512 continue
+ do 23518 nd6mep=1,zxiwf1
+ i83h1(nd6mep) = nd6mep
+23518 continue
+ t7sbea = 1.0d-7
+ call dhkt9w(ioqzvb,vi231l,vi231l,zxiwf1,i0qvzl,i83h1,t5vlzq,
+ &zx1610,t7sbea)
+ if(.not.(zx1610 .ne. zxiwf1))goto 23520
+ gqxvz8 = 2
+ return
+23520 continue
+ do 23522 w3gohz=1,nfiumb4
+ do 23524 d9rjek=1,lku8xq
+ t5vlzq(d9rjek,w3gohz,1) = jrxg6l(d9rjek,w3gohz) * hr83e(w3gohz,
+ &d9rjek)
+23524 continue
+23522 continue
+ tvyd2b = 101
+ call vdqrsl(ioqzvb,vi231l,vi231l,zx1610,i0qvzl, t5vlzq, elq2cs,
+ &t5vlzq(1,1,2), lq8reh, elq2cs,go0l1q,tvyd2b,fjg0qv)
+ do 23526 w3gohz=1,nfiumb4
+ do 23528 d9rjek=1,lku8xq
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) / jrxg6l(d9rjek,
+ &w3gohz)
+23528 continue
+23526 continue
+ if(.not.(h3mrfq .eq. 1))goto 23530
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23532
+ do 23534 w3gohz=1,nfiumb4
+ do 23536 d9rjek=1,aqk377
+ go0l1q(2*d9rjek-1,w3gohz) = go0l1q(2*d9rjek-1,w3gohz) + q121lc(
+ &w3gohz)
+23536 continue
+23534 continue
+ goto 23533
+23532 continue
+ do 23538 w3gohz=1,nfiumb4
+ do 23540 d9rjek=1,lku8xq
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) + q121lc(w3gohz)
+23540 continue
+23538 continue
+23533 continue
+23530 continue
+ call o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ &oht3ga)
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, nx1bat,oht3ga,fiumb4,wlkaa3, uxzze7)
+ xmr7cj = dabs(nx1bat - hq710) / (1.0d0 + dabs(nx1bat))
+ if(.not.(xmr7cj .lt. qik6ym))goto 23542
+ gqxvz8 = 0
+ p1i8xz(8) = ucgi1r
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23544
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, nx1bat,oht3ga,fiumb4,wlkaa3, oht3ga)
+23544 continue
+ return
+23542 continue
+ hq710 = nx1bat
+23543 continue
+23504 continue
+ if(.not.(ugsma5 .eq. 1 .or. ugsma5 .eq. 2))goto 23546
+ ugsma5 = 0
+ p1i8xz(9) = 1
+ goto 653
+23546 continue
+ gqxvz8 = 3
+ return
+ end
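+c
+c  cqo1f appears to run the same irls/qr cycle as cqo2f but one
+c  response at a time (the loop over lir0o1), retrying once from
+c  fresh starting values (the goto 653) if the first attempt does
+c  not converge; the per-response criterion values are accumulated
+c  in ni1qfp and returned through nx1bat.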
+ subroutine cqo1f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc,
+ &w5poyv, hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1,
+ &nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l,
+ &vvl1li, nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx)
+ implicit logical (a-z)
+ integer p1i8xz(18), zqve1l(1), vvl1li(1)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, i83h1(1)
+ double precision hft28(nfiumb4,1), jmwo0z(nfiumb4,aqk377), w8xfic(
+ &nfiumb4,1), go0l1q(lku8xq,nfiumb4), q121lc(nfiumb4), w5poyv(
+ &aqk377,nfiumb4), oju3yh(nfiumb4,9), hr83e(nfiumb4,lku8xq), lj4dph(
+ &nfiumb4,lku8xq), jrxg6l(zkjqhi,nfiumb4), ur73jo(vi231l,1)
+ double precision ioqzvb(vi231l,1), i0qvzl(1), nx1bat, lq8reh(1),
+ &l1zvxx(4)
+ double precision t5vlzq(vi231l,3), zxao0o(lku8xq*(lku8xq+1))
+ integer w3gohz, lir0o1, i5uvkm, oht3ga, ucgi1r, w5tcfp, h3mrfq,
+ &cqui1v, xhe4cg, ugsma5, zvxw1l, pga6nul
+ integer tvyd2b, fjg0qv, zx1610, zxiwf1, dyt0pg, uvnk0i
+ integer uxzze7, p1, foej1u
+ double precision hq710, fiumb4, t7sbea, xmr7cj, elq2cs, qik6ym,
+ &zl11l0, wlkaa3, jftq1
+ integer nd6mep
+ double precision ni1qfp, epx9jf
+ ni1qfp = 0.0d0
+ uxzze7 = 1
+ cqui1v = p1i8xz(1)
+ w5tcfp = p1i8xz(2)
+ zxiwf1 = p1i8xz(3)
+ xhe4cg = p1i8xz(4)
+ ugsma5 = p1i8xz(5)
+ zvxw1l = p1i8xz(6)
+ pga6nul = p1i8xz(7)
+ p1i8xz(9) = 0
+ dyt0pg = p1i8xz(12)
+ if(.not.(dyt0pg .ne. 1))goto 23548
+ gqxvz8 = 4
+ return
+23548 continue
+ h3mrfq = p1i8xz(14)
+ uvnk0i = p1i8xz(15)
+ p1 = p1i8xz(16)
+ foej1u = p1i8xz(18)
+ fiumb4 = l1zvxx(1)
+ zl11l0 = dsqrt(fiumb4)
+ if(.not.((zvxw1l .eq. 1) .or. (zvxw1l .eq. 4)))goto 23550
+ wlkaa3 = dlog(fiumb4)
+23550 continue
+ qik6ym = l1zvxx(2)
+ jftq1 = l1zvxx(3)
+ elq2cs = 0.0d0
+ oht3ga = 0
+ gqxvz8 = 1
+ call qh4ulb(zqve1l, vvl1li, cqui1v)
+ i5uvkm = cqui1v * (cqui1v+1) / 2
+ call u16zxj(hft28, ur73jo, nfiumb4, cqui1v, zvxw1l, q121lc,
+ &vi231l, zxiwf1, i5uvkm, zqve1l, vvl1li, oju3yh, p1, h3mrfq)
+ do 23552 lir0o1=1,aqk377
+653 epx9jf = 1.0d0
+ if(.not.(ugsma5 .eq. 0))goto 23554
+ call nbq4ua(jmwo0z, go0l1q, l1zvxx, nfiumb4, lku8xq, aqk377,
+ &zvxw1l, lir0o1, w8xfic, foej1u)
+ goto 23555
+23554 continue
+ if(.not.(ugsma5 .eq. 2))goto 23556
+ call pjw1l(ur73jo, lq8reh(1+(lir0o1-1)*zxiwf1), go0l1q, nfiumb4,
+ &lku8xq, vi231l, zxiwf1, dyt0pg, lir0o1, zvxw1l, h3mrfq, q121lc)
+23556 continue
+23555 continue
+ call o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ &lir0o1)
+ if(.not.(ugsma5 .eq. 2))goto 23558
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, hq710, lir0o1, fiumb4, wlkaa3, uxzze7)
+ goto 23559
+23558 continue
+ hq710 = -1.0d0
+23559 continue
+ do 23560 ucgi1r=1,pga6nul
+ call kqsxz1(jmwo0z, w8xfic, go0l1q, w5poyv, hr83e, lj4dph, jrxg6l,
+ & jftq1, fiumb4, zl11l0, nfiumb4, lku8xq, aqk377, vi231l, zkjqhi,
+ &lir0o1, zvxw1l, gqxvz8, h3mrfq, q121lc)
+ do 23562 nd6mep=1,zxiwf1
+ do 23564 w3gohz=1,vi231l
+ ioqzvb(w3gohz,nd6mep) = ur73jo(w3gohz,nd6mep)
+23564 continue
+23562 continue
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23566
+ do 23568 nd6mep=1,zxiwf1
+ do 23570 w3gohz=1,nfiumb4
+ ioqzvb(2*w3gohz-1,nd6mep) = jrxg6l(2*lir0o1-1,w3gohz) * ioqzvb(2*
+ &w3gohz-1,nd6mep)
+ ioqzvb(2*w3gohz ,nd6mep) = jrxg6l(2*lir0o1 ,w3gohz) * ioqzvb(2*
+ &w3gohz ,nd6mep)
+23570 continue
+23568 continue
+ goto 23567
+23566 continue
+ do 23572 nd6mep=1,zxiwf1
+ do 23574 w3gohz=1,nfiumb4
+ ioqzvb(w3gohz,nd6mep) = jrxg6l(lir0o1,w3gohz) * ioqzvb(w3gohz,
+ &nd6mep)
+23574 continue
+23572 continue
+23567 continue
+ do 23576 nd6mep=1,zxiwf1
+ i83h1(nd6mep) = nd6mep
+23576 continue
+ t7sbea = 1.0d-7
+ call dhkt9w(ioqzvb,vi231l,vi231l,zxiwf1,i0qvzl,i83h1,t5vlzq,
+ &zx1610,t7sbea)
+ if(.not.(zx1610 .ne. zxiwf1))goto 23578
+ gqxvz8 = 2
+ return
+23578 continue
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23580
+ do 23582 w3gohz=1,nfiumb4
+ t5vlzq(2*w3gohz-1,1)=jrxg6l(2*lir0o1-1,w3gohz)*hr83e(w3gohz,2*
+ &lir0o1-1)
+ t5vlzq(2*w3gohz ,1)=jrxg6l(2*lir0o1 ,w3gohz)*hr83e(w3gohz,2*
+ &lir0o1 )
+23582 continue
+ goto 23581
+23580 continue
+ do 23584 w3gohz=1,nfiumb4
+ t5vlzq(w3gohz,1) = jrxg6l(lir0o1,w3gohz) * hr83e(w3gohz,lir0o1)
+23584 continue
+23581 continue
+ tvyd2b = 101
+ call vdqrsl(ioqzvb,vi231l,vi231l,zx1610,i0qvzl, t5vlzq(1,1),
+ &elq2cs, t5vlzq(1,2), lq8reh(1+(lir0o1-1)*zxiwf1), elq2cs,t5vlzq(1,
+ &3),tvyd2b,fjg0qv)
+ if(.not.(uvnk0i .gt. 1))goto 23586
+23586 continue
+ do 23588 nd6mep=1,zxiwf1
+ t5vlzq(nd6mep,1) = lq8reh((lir0o1-1)*zxiwf1 + nd6mep)
+23588 continue
+ do 23590 nd6mep=1,zxiwf1
+ lq8reh((lir0o1-1)*zxiwf1 + i83h1(nd6mep)) = t5vlzq(nd6mep,1)
+23590 continue
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23592
+ do 23594 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz)=t5vlzq(2*w3gohz-1,3)/jrxg6l(2*lir0o1-1,
+ &w3gohz)
+ go0l1q(2*lir0o1 ,w3gohz) = t5vlzq(2*w3gohz ,3) / jrxg6l(2*lir0o1 ,
+ &w3gohz)
+23594 continue
+ if(.not.(h3mrfq .eq. 1))goto 23596
+ do 23598 w3gohz=1,nfiumb4
+ go0l1q(2*lir0o1-1,w3gohz) = go0l1q(2*lir0o1-1,w3gohz) + q121lc(
+ &w3gohz)
+23598 continue
+23596 continue
+ goto 23593
+23592 continue
+ do 23600 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = t5vlzq(w3gohz,3) / jrxg6l(lir0o1,w3gohz)
+23600 continue
+ if(.not.(h3mrfq .eq. 1))goto 23602
+ do 23604 w3gohz=1,nfiumb4
+ go0l1q(lir0o1,w3gohz) = go0l1q(lir0o1,w3gohz) + q121lc(w3gohz)
+23604 continue
+23602 continue
+23593 continue
+ call o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ &lir0o1)
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, nx1bat,lir0o1,fiumb4,wlkaa3,uxzze7)
+ xmr7cj = dabs(nx1bat - hq710) / (1.0d0 + dabs(nx1bat))
+ if(.not.(xmr7cj .lt. qik6ym))goto 23606
+ gqxvz8 = 0
+ p1i8xz(8)=ucgi1r
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23608
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, nx1bat,lir0o1,fiumb4,wlkaa3, oht3ga)
+23608 continue
+ ni1qfp = ni1qfp + nx1bat
+ goto 1011
+ goto 23607
+23606 continue
+ hq710 = nx1bat
+23607 continue
+23560 continue
+ if(.not.(ugsma5 .eq. 1))goto 23610
+ ugsma5 = 0
+ p1i8xz(9) = 1
+ goto 653
+23610 continue
+ gqxvz8 = 3
+1011 epx9jf = 1.0d0
+23552 continue
+ nx1bat = ni1qfp
+ return
+ end
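+c
+c  vcao6f appears to be the additive-model (cao) analogue of
+c  cqo1f: instead of a single weighted least squares solve, each
+c  inner iteration calls the vector backfitting routine vbfa to
+c  smooth the adjusted dependent variable, with the smoothing
+c  parameters saved in sq5cvf across iterations; convergence is
+c  again judged by the relative change in nx1bat.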
+ subroutine vcao6f(hft28, jmwo0z, w8xfic, go0l1q, w5poyv, hr83e,
+ &lj4dph, jrxg6l, ioqzvb, i0qvzl, i83h1, nfiumb4, lku8xq, aqk377,
+ &vi231l, zkjqhi, gqxvz8, p1i8xz, nx1bat, lq8reh, t5vlzq, zxao0o,
+ &l1zvxx, gqai81,h2mzlo, sq5cvf, ynk9ah, uxs1iq, vliac4, vfd2pw,
+ &sazp9g,s0, zrcbl2, nyg3mt, e6tljz, ifo4ew, ozuw3p, hwi2tb, nbd5rl,
+ & wj5shg, ykdc2t, wk2, wzxao0o, phqco4, vb81l0, bmb, rjcq9o, mwk,
+ &n1zwoi, j1l0o1, qc7zyb, vlni8d, jko0o1, mnh3up, fg3pxq)
+ implicit logical (a-z)
+ integer p1i8xz(19)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, i83h1(1)
+ double precision hft28(nfiumb4,1), jmwo0z(nfiumb4,aqk377), w8xfic(
+ &nfiumb4,1), go0l1q(lku8xq,nfiumb4), w5poyv(aqk377,nfiumb4)
+ double precision hr83e(nfiumb4,lku8xq), lj4dph(nfiumb4,lku8xq),
+ &jrxg6l(zkjqhi,nfiumb4)
+ double precision ioqzvb(vi231l,2), i0qvzl(1), nx1bat, lq8reh(1),
+ &l1zvxx(6)
+ double precision t5vlzq(vi231l,3), zxao0o(lku8xq*(lku8xq+1))
+ integer lir0o1, sglfr1, oht3ga, ucgi1r, cqui1v, xhe4cg, ugsma5,
+ &zvxw1l, pga6nul
+ integer dyt0pg, uvnk0i, zxiwf1
+ integer uxzze7, c3qxjo
+ double precision hq710, fiumb4, xmr7cj, elq2cs, qik6ym, zl11l0,
+ &wlkaa3, jftq1
+ double precision ni1qfp, epx9jf
+ integer gqai81(15), h2mzlo, ynk9ah(1),uxs1iq(1),vliac4(1), ozuw3p(
+ &1), hwi2tb(3), nbd5rl(1), wj5shg(1)
+ integer foej1u, jko0o1(1), mnh3up(1), fg3pxq(2), vlni8d(2)
+ double precision sq5cvf(aqk377)
+ double precision vfd2pw(h2mzlo,nfiumb4), sazp9g(nfiumb4,1),s0(
+ &lku8xq), zrcbl2(h2mzlo,nfiumb4), nyg3mt(h2mzlo,nfiumb4), e6tljz(
+ &nfiumb4,2), ifo4ew(h2mzlo,1), ykdc2t(1), wk2(nfiumb4,h2mzlo),
+ &phqco4(1), vb81l0(1), bmb(1), rjcq9o(1), mwk(1), j1l0o1(1),
+ &qc7zyb(1)
+ integer ymetu2
+ integer w3gohz, myx3od, ibd3vc
+ integer d8gwha, tiav4e
+ double precision purf2k(2), x1boaf, ad3xzo, j6gbnx, rk3jet
+ double precision h4fgoy, das4bx
+ double precision q121lc(2)
+ x1boaf=0.0d0
+ ad3xzo=0.0d0
+ j6gbnx=0.0d0
+ rk3jet=0.0d0
+ d8gwha = p1i8xz(19)
+ ni1qfp = 0.0d0
+ uxzze7 = 1
+ cqui1v = p1i8xz(1)
+ zxiwf1 = p1i8xz(3)
+ xhe4cg = p1i8xz(4)
+ ugsma5 = p1i8xz(5)
+ zvxw1l = p1i8xz(6)
+ pga6nul = p1i8xz(7)
+ p1i8xz(9) = 0
+ tiav4e = p1i8xz(11)
+ dyt0pg = p1i8xz(12)
+ if(.not.((dyt0pg .ne. 1) .or. (tiav4e .ne. cqui1v)))goto 23612
+ gqxvz8 = 4
+ return
+23612 continue
+ uvnk0i = p1i8xz(15)
+ foej1u = p1i8xz(18)
+ h4fgoy = l1zvxx(3+aqk377+aqk377+2)
+ fiumb4 = l1zvxx(1)
+ zl11l0 = dsqrt(fiumb4)
+ if(.not.((zvxw1l .eq. 1) .or. (zvxw1l .eq. 4)))goto 23614
+ wlkaa3 = dlog(fiumb4)
+23614 continue
+ qik6ym = l1zvxx(2)
+ jftq1 = l1zvxx(3)
+ elq2cs = 0.0d0
+ oht3ga = 0
+ gqxvz8 = 1
+ do 23616 lir0o1=1,aqk377
+653 epx9jf = 1.0d0
+ if(.not.(ugsma5 .eq. 0))goto 23618
+ call nbq4ua(jmwo0z, go0l1q, l1zvxx, nfiumb4, lku8xq, aqk377,
+ &zvxw1l, lir0o1, w8xfic, foej1u)
+ goto 23619
+23618 continue
+ if(.not.(ugsma5 .ne. 1))goto 23620
+ gqxvz8 = 6
+ return
+23620 continue
+23619 continue
+ call o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ &lir0o1)
+ if(.not.(ugsma5 .eq. 2))goto 23622
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, hq710, lir0o1, fiumb4, wlkaa3, uxzze7)
+ goto 23623
+23622 continue
+ hq710 = -1.0d0
+23623 continue
+ do 23624 ucgi1r=1,pga6nul
+ call sptoq8(hft28, ioqzvb, nfiumb4, vi231l, cqui1v, zvxw1l)
+ gqai81(7) = 0
+ call kqsxz1(jmwo0z, w8xfic, go0l1q, w5poyv, hr83e, lj4dph, jrxg6l,
+ & jftq1, fiumb4, zl11l0, nfiumb4, lku8xq, aqk377, vi231l, zkjqhi,
+ &lir0o1, zvxw1l, gqxvz8, oht3ga, q121lc)
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23626
+ ymetu2 = 2*lir0o1-1
+ goto 23627
+23626 continue
+ ymetu2 = lir0o1
+23627 continue
+ do 23628 myx3od=1,h2mzlo
+ do 23630 w3gohz=1,nfiumb4
+ zrcbl2(myx3od,w3gohz) = jrxg6l(ymetu2-1+myx3od,w3gohz)
+ nyg3mt(myx3od,w3gohz) = go0l1q(ymetu2-1+myx3od,w3gohz)
+23630 continue
+23628 continue
+ c3qxjo = tiav4e * aqk377
+ sglfr1 = cqui1v * (lir0o1-1)
+ if(.not.(ucgi1r .eq. 1))goto 23632
+ x1boaf = sq5cvf( sglfr1 + hwi2tb(1))
+ ad3xzo = sq5cvf(c3qxjo + sglfr1 + hwi2tb(1))
+ if(.not.(cqui1v .eq. 2))goto 23634
+ j6gbnx = sq5cvf( sglfr1 + hwi2tb(2))
+ rk3jet = sq5cvf(c3qxjo + sglfr1 + hwi2tb(2))
+23634 continue
+ do 23636 myx3od=1,tiav4e
+ do 23638 w3gohz=1,nfiumb4
+ sazp9g(w3gohz,sglfr1 + hwi2tb(myx3od)) = 0.0d0
+23638 continue
+23636 continue
+ goto 23633
+23632 continue
+ sq5cvf( sglfr1 + hwi2tb(1)) = x1boaf
+ sq5cvf(c3qxjo + sglfr1 + hwi2tb(1)) = ad3xzo
+ if(.not.(cqui1v .eq. 2))goto 23640
+ sq5cvf( sglfr1 + hwi2tb(2)) = j6gbnx
+ sq5cvf(c3qxjo + sglfr1 + hwi2tb(2)) = rk3jet
+23640 continue
+23633 continue
+ call vbfa(d8gwha,nfiumb4,h2mzlo,gqai81, e6tljz, hr83e(1,ymetu2),
+ &lj4dph(1,ymetu2), sq5cvf( sglfr1 + hwi2tb(1)), sq5cvf(c3qxjo +
+ &sglfr1 + hwi2tb(1)), ynk9ah,uxs1iq,vliac4, vfd2pw,sazp9g(1,sglfr1
+ &+ hwi2tb(1)), nyg3mt,s0, lq8reh(1+(lir0o1-1)*zxiwf1), zo5jyl,
+ &h4fgoy, ioqzvb,i0qvzl, i83h1, purf2k, zrcbl2, ifo4ew, ozuw3p,
+ &hwi2tb, nbd5rl, wj5shg, ykdc2t, wk2, wzxao0o, phqco4, vb81l0, bmb,
+ & rjcq9o, mwk, n1zwoi, j1l0o1(1+(lir0o1-1)*(vlni8d(1+cqui1v)-1)),
+ &qc7zyb, das4bx, vlni8d, jko0o1, mnh3up, fg3pxq)
+ l1zvxx(3+aqk377+aqk377+1) = das4bx
+ ibd3vc = gqai81(14)
+ if(.not.(ibd3vc .ne. 0))goto 23642
+ call intpr("vcao6f: exiting because of an error",-1,ibd3vc,1)
+ gqxvz8 = 8
+ return
+23642 continue
+ do 23644 myx3od=1,h2mzlo
+ do 23646 w3gohz=1,nfiumb4
+ go0l1q(ymetu2-1+myx3od,w3gohz) = nyg3mt(myx3od,w3gohz)
+23646 continue
+23644 continue
+ call o47zxq(go0l1q, w5poyv, nfiumb4, lku8xq, aqk377, zvxw1l,
+ &lir0o1)
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, nx1bat, lir0o1, fiumb4, wlkaa3, uxzze7)
+ xmr7cj = dabs(nx1bat - hq710) / (1.0d0 + dabs(nx1bat))
+ if(.not.(xmr7cj .lt. qik6ym))goto 23648
+ gqxvz8 = 0
+ p1i8xz(8) = ucgi1r
+ if(.not.((zvxw1l .eq. 3) .or. (zvxw1l .eq. 5)))goto 23650
+ call kqx20o(zvxw1l, jmwo0z, w8xfic, w5poyv, nfiumb4, lku8xq,
+ &aqk377, xhe4cg, go0l1q, nx1bat,lir0o1,fiumb4,wlkaa3, oht3ga)
+23650 continue
+ ni1qfp = ni1qfp + nx1bat
+ goto 1011
+ goto 23649
+23648 continue
+ hq710 = nx1bat
+23649 continue
+23624 continue
+ if(.not.(ugsma5 .eq. 1))goto 23652
+ ugsma5 = 0
+ p1i8xz(9) = 1
+ goto 653
+23652 continue
+ gqxvz8 = 3
+1011 epx9jf = 1.0d0
+23616 continue
+ nx1bat = ni1qfp
+ return
+ end
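+c
+c  dcqof appears to build numerical derivatives for the cqo fit:
+c  after a baseline fit (criterion gibj6t), each column of ize5km,
+c  scaled by the step o4xmfj, is added in turn to the latent
+c  variables hft28 and the model is refitted, so that
+c      xt3fko(s9otpy,pvnfr4) = (nx1bat - gibj6t) / o4xmfj
+c  is a forward-difference quotient of the fit criterion.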
+ subroutine dcqof(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc,
+ &w5poyv, hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1,
+ &nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l,
+ &vvl1li, nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx, ize5km, ip0ox8,
+ &v8gzsp, p2, xt3fko, o4xmfj, zclbn2)
+ implicit logical (a-z)
+ integer p1i8xz(19), zqve1l(1), vvl1li(1)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, i83h1(1)
+ integer dyt0pg
+ double precision hft28(nfiumb4,1), jmwo0z(nfiumb4,aqk377), oju3yh(
+ &nfiumb4,9), w8xfic(nfiumb4,1), go0l1q(lku8xq,nfiumb4), q121lc(
+ &nfiumb4), w5poyv(aqk377,nfiumb4), hr83e(nfiumb4,lku8xq), lj4dph(
+ &nfiumb4,lku8xq), jrxg6l(zkjqhi,nfiumb4), ur73jo(vi231l,1)
+ double precision ioqzvb(vi231l,1), i0qvzl(1), nx1bat, lq8reh(1),
+ &l1zvxx(4)
+ double precision t5vlzq(lku8xq,nfiumb4,2), zxao0o(lku8xq*(lku8xq+
+ &1))
+ integer p2
+ double precision ize5km(nfiumb4,p2), ip0ox8(nfiumb4,1), v8gzsp(p2,
+ &1), xt3fko(p2,1), o4xmfj, zclbn2(1)
+ integer w3gohz, s9otpy, pvnfr4, cqui1v, fiy4lc, nd6mep, z2q1li,
+ &foej1u
+ double precision wrbc3q, gibj6t
+ cqui1v = p1i8xz(1)
+ fiy4lc = p1i8xz(5)
+ dyt0pg = p1i8xz(12)
+ z2q1li = p1i8xz(13)
+ foej1u = p1i8xz(18)
+ do 23654 pvnfr4=1,cqui1v
+ do 23656 w3gohz=1,nfiumb4
+ wrbc3q = 0.0d0
+ do 23658 s9otpy=1,p2
+ wrbc3q = wrbc3q + ize5km(w3gohz,s9otpy) * v8gzsp(s9otpy,pvnfr4)
+23658 continue
+ ip0ox8(w3gohz,pvnfr4) = wrbc3q
+ hft28(w3gohz,pvnfr4) = wrbc3q
+23656 continue
+23654 continue
+ if(.not.(dyt0pg.eq.1))goto 23660
+ call cqo1f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &gibj6t, zclbn2, t5vlzq, zxao0o, l1zvxx)
+ goto 23661
+23660 continue
+ call cqo2f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &gibj6t, zclbn2, t5vlzq, zxao0o, l1zvxx)
+23661 continue
+ do 23662 s9otpy=1,p2
+ do 23664 w3gohz=1,nfiumb4
+ ize5km(w3gohz,s9otpy) = o4xmfj * ize5km(w3gohz,s9otpy)
+23664 continue
+23662 continue
+ do 23666 pvnfr4=1,cqui1v
+ do 23668 s9otpy=1,p2
+ do 23670 w3gohz=1,nfiumb4
+ hft28(w3gohz,pvnfr4)=ip0ox8(w3gohz,pvnfr4)+ize5km(w3gohz,s9otpy)
+23670 continue
+ p1i8xz(5) = 2
+ do 23672 nd6mep=1,z2q1li
+ lq8reh(nd6mep) = zclbn2(nd6mep)
+23672 continue
+ if(.not.(dyt0pg.eq.1))goto 23674
+ call cqo1f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx)
+ goto 23675
+23674 continue
+ call cqo2f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx)
+23675 continue
+ if(.not.(gqxvz8 .ne. 0))goto 23676
+ return
+23676 continue
+ xt3fko(s9otpy,pvnfr4) = (nx1bat - gibj6t) / o4xmfj
+23668 continue
+ if(.not.(cqui1v .gt. 1))goto 23678
+ do 23680 w3gohz=1,nfiumb4
+ hft28(w3gohz,pvnfr4) = ip0ox8(w3gohz,pvnfr4)
+23680 continue
+23678 continue
+23666 continue
+ p1i8xz(5) = fiy4lc
+ return
+ end
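+c
+c  vdcaof appears to be the cao counterpart of dcqof, computing
+c  the same forward-difference quotients but refitting with
+c  vcao6f (backfitting) rather than cqo1f/cqo2f at each
+c  perturbation.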
+ subroutine vdcaof(hft28, jmwo0z, w8xfic, go0l1q, w5poyv, hr83e,
+ &lj4dph, jrxg6l, ioqzvb, i0qvzl, i83h1, nfiumb4, lku8xq, aqk377,
+ &vi231l, zkjqhi, gqxvz8, p1i8xz, nx1bat, lq8reh, t5vlzq, zxao0o,
+ &l1zvxx, ize5km, ip0ox8, v8gzsp, p2, xt3fko, zclbn2, gqai81,h2mzlo,
+ & sq5cvf, ynk9ah, uxs1iq, vliac4, vfd2pw,sazp9g,s0, zrcbl2, nyg3mt,
+ & e6tljz, ifo4ew, ozuw3p, hwi2tb, nbd5rl, wj5shg, ykdc2t, wk2,
+ &wzxao0o, phqco4, vb81l0, bmb, rjcq9o, mwk, n1zwoi, j1l0o1, qc7zyb,
+ & vlni8d, jko0o1, mnh3up, fg3pxq)
+ implicit logical (a-z)
+ integer p1i8xz(19)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, i83h1(1)
+ integer dyt0pg
+ double precision hft28(nfiumb4,1), jmwo0z(nfiumb4,aqk377), w8xfic(
+ &nfiumb4,1), go0l1q(lku8xq,nfiumb4), w5poyv(aqk377,nfiumb4), hr83e(
+ &nfiumb4,lku8xq), lj4dph(nfiumb4,lku8xq), jrxg6l(zkjqhi,nfiumb4)
+ double precision ioqzvb(vi231l,1), i0qvzl(1), nx1bat, lq8reh(1),
+ &l1zvxx(6)
+ double precision t5vlzq(lku8xq,nfiumb4,2)
+ double precision zxao0o(lku8xq*(lku8xq+1))
+ integer p2
+ double precision ize5km(nfiumb4,p2), ip0ox8(nfiumb4,1), v8gzsp(p2,
+ &1), xt3fko(p2,1), o4xmfj, zclbn2(1)
+ integer w3gohz, pp, pvnfr4, cqui1v, fiy4lc, z2q1li, foej1u
+ double precision wrbc3q, gibj6t
+ integer gqai81(15), h2mzlo, ynk9ah(1),uxs1iq(1),vliac4(1), ozuw3p(
+ &1), hwi2tb(1), nbd5rl(1), wj5shg(1), vlni8d(2), jko0o1(1), mnh3up(
+ &1), fg3pxq(2)
+ double precision sq5cvf(aqk377)
+ double precision vfd2pw(h2mzlo,nfiumb4), sazp9g(nfiumb4,1),s0(
+ &lku8xq), zrcbl2(h2mzlo,nfiumb4)
+ double precision nyg3mt(h2mzlo,nfiumb4), e6tljz(nfiumb4,1),
+ &ifo4ew(h2mzlo,1), ykdc2t(1), wk2(nfiumb4,h2mzlo), phqco4(1),
+ &vb81l0(1), bmb(1), rjcq9o(1), mwk(1), j1l0o1(1), qc7zyb(1),
+ &das4bx
+ integer d8gwha
+ double precision h4fgoy
+ das4bx = 0.0d0
+ d8gwha = 0
+ cqui1v = p1i8xz(1)
+ fiy4lc = p1i8xz(5)
+ dyt0pg = p1i8xz(12)
+ z2q1li = p1i8xz(13)
+ foej1u = p1i8xz(18)
+ h4fgoy = l1zvxx(3+aqk377+aqk377+2)
+ o4xmfj = l1zvxx(3+aqk377+aqk377+3)
+ do 23682 pvnfr4=1,cqui1v
+ do 23684 w3gohz=1,nfiumb4
+ wrbc3q = 0.0d0
+ do 23686 pp=1,p2
+ wrbc3q = wrbc3q + ize5km(w3gohz,pp) * v8gzsp(pp,pvnfr4)
+23686 continue
+ ip0ox8(w3gohz,pvnfr4) = wrbc3q
+ hft28(w3gohz,pvnfr4) = wrbc3q
+23684 continue
+23682 continue
+ if(.not.(dyt0pg.eq.1))goto 23688
+ call vcao6f(hft28, jmwo0z, w8xfic, go0l1q, w5poyv, hr83e, lj4dph,
+ &jrxg6l, ioqzvb, i0qvzl, i83h1, nfiumb4, lku8xq, aqk377, vi231l,
+ &zkjqhi, gqxvz8, p1i8xz, gibj6t, zclbn2, t5vlzq, zxao0o, l1zvxx,
+ &gqai81,h2mzlo, sq5cvf, ynk9ah, uxs1iq, vliac4, vfd2pw,sazp9g,s0,
+ &zrcbl2, nyg3mt, e6tljz, ifo4ew, ozuw3p, hwi2tb, nbd5rl, wj5shg,
+ &ykdc2t, wk2, wzxao0o, phqco4, vb81l0, bmb, rjcq9o, mwk, n1zwoi,
+ &j1l0o1, qc7zyb, vlni8d, jko0o1, mnh3up, fg3pxq)
+ l1zvxx(3+aqk377+aqk377+1) = das4bx
+ goto 23689
+23688 continue
+23689 continue
+ do 23690 pp=1,p2
+ do 23692 w3gohz=1,nfiumb4
+ ize5km(w3gohz,pp) = o4xmfj * ize5km(w3gohz,pp)
+23692 continue
+23690 continue
+ do 23694 pvnfr4=1,cqui1v
+ do 23696 pp=1,p2
+ do 23698 w3gohz=1,nfiumb4
+ hft28(w3gohz,pvnfr4) = ip0ox8(w3gohz,pvnfr4) + ize5km(w3gohz,pp)
+23698 continue
+ p1i8xz(5) = 0
+ if(.not.(dyt0pg.eq.1))goto 23700
+ call vcao6f(hft28, jmwo0z, w8xfic, go0l1q, w5poyv, hr83e, lj4dph,
+ &jrxg6l, ioqzvb, i0qvzl, i83h1, nfiumb4, lku8xq, aqk377, vi231l,
+ &zkjqhi, gqxvz8, p1i8xz, nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx,
+ &gqai81,h2mzlo, sq5cvf, ynk9ah, uxs1iq, vliac4, vfd2pw,sazp9g,s0,
+ &zrcbl2, nyg3mt, e6tljz, ifo4ew, ozuw3p, hwi2tb, nbd5rl, wj5shg,
+ &ykdc2t, wk2, wzxao0o, phqco4, vb81l0, bmb, rjcq9o, mwk, n1zwoi,
+ &j1l0o1, qc7zyb, vlni8d, jko0o1, mnh3up, fg3pxq)
+ l1zvxx(3+aqk377+aqk377+1) = das4bx
+ goto 23701
+23700 continue
+23701 continue
+ if(.not.(gqxvz8 .ne. 0))goto 23702
+ return
+23702 continue
+ xt3fko(pp,pvnfr4) = (nx1bat - gibj6t) / o4xmfj
+23696 continue
+ if(.not.(cqui1v .gt. 1))goto 23704
+ do 23706 w3gohz=1,nfiumb4
+ hft28(w3gohz,pvnfr4) = ip0ox8(w3gohz,pvnfr4)
+23706 continue
+23704 continue
+23694 continue
+ p1i8xz(5) = fiy4lc
+ return
+ end
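+c
+c  duqof appears to apply the same forward-difference idea to the
+c  uqo fit, but the site scores themselves are perturbed: each
+c  entry hft28(w3gohz,pvnfr4) is shifted by o4xmfj, the model is
+c  refitted from the stored coefficients zclbn2, and the quotient
+c  is stored in xt3fko(w3gohz,pvnfr4).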
+ subroutine duqof(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc,
+ &w5poyv, hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1,
+ &nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l,
+ &vvl1li, nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx, ip0ox8, xt3fko,
+ &o4xmfj, zclbn2)
+ implicit logical (a-z)
+ integer p1i8xz(19), zqve1l(1), vvl1li(1)
+ integer nfiumb4, lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, i83h1(1)
+ integer dyt0pg
+ double precision hft28(nfiumb4,1), jmwo0z(nfiumb4,aqk377), oju3yh(
+ &nfiumb4,9), w8xfic(nfiumb4,1), go0l1q(lku8xq,nfiumb4), q121lc(
+ &nfiumb4), w5poyv(aqk377,nfiumb4), hr83e(nfiumb4,lku8xq), lj4dph(
+ &nfiumb4,lku8xq), jrxg6l(zkjqhi,nfiumb4), ur73jo(vi231l,1)
+ double precision ioqzvb(vi231l,1), i0qvzl(1), nx1bat, lq8reh(1),
+ &l1zvxx(4)
+ double precision t5vlzq(lku8xq,nfiumb4,2), zxao0o(lku8xq*(lku8xq+
+ &1))
+ double precision ip0ox8(nfiumb4,1), xt3fko(nfiumb4,1), o4xmfj,
+ &zclbn2(1)
+ integer w3gohz, pvnfr4, cqui1v, fiy4lc, nd6mep, z2q1li
+ double precision gibj6t
+ cqui1v = p1i8xz(1)
+ fiy4lc = p1i8xz(5)
+ dyt0pg = p1i8xz(12)
+ z2q1li = p1i8xz(13)
+ if(.not.(dyt0pg.eq.1))goto 23708
+ call cqo1f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &gibj6t, zclbn2, t5vlzq, zxao0o, l1zvxx)
+ goto 23709
+23708 continue
+ call cqo2f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &gibj6t, zclbn2, t5vlzq, zxao0o, l1zvxx)
+23709 continue
+ do 23710 pvnfr4=1,cqui1v
+ do 23712 w3gohz=1,nfiumb4
+ hft28(w3gohz,pvnfr4) = ip0ox8(w3gohz,pvnfr4) + o4xmfj
+ p1i8xz(5) = 2
+ do 23714 nd6mep=1,z2q1li
+ lq8reh(nd6mep) = zclbn2(nd6mep)
+23714 continue
+ if(.not.(dyt0pg.eq.1))goto 23716
+ call cqo1f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx)
+ goto 23717
+23716 continue
+ call cqo2f(hft28, jmwo0z, oju3yh, w8xfic, go0l1q, q121lc, w5poyv,
+ &hr83e, lj4dph, jrxg6l, ur73jo, ioqzvb, i0qvzl, i83h1, nfiumb4,
+ &lku8xq, aqk377, vi231l, zkjqhi, gqxvz8, p1i8xz, zqve1l, vvl1li,
+ &nx1bat, lq8reh, t5vlzq, zxao0o, l1zvxx)
+23717 continue
+ if(.not.(gqxvz8 .ne. 0))goto 23718
+ return
+23718 continue
+ xt3fko(w3gohz,pvnfr4) = (nx1bat - gibj6t) / o4xmfj
+ hft28(w3gohz,pvnfr4) = ip0ox8(w3gohz,pvnfr4)
+23712 continue
+23710 continue
+ p1i8xz(5) = fiy4lc
+ return
+ end
diff --git a/src/fgam.f b/src/fgam.f
new file mode 100644
index 0000000..5ab50a7
--- /dev/null
+++ b/src/fgam.f
@@ -0,0 +1,642 @@
+c 24/8/99
+c This is the original fgam.f file
+c It needs to be compiled and loaded into R in order to smooth.
+
+c All of this is automatically in Splus
+
+
+ subroutine vbsplvd ( t, k, x, left, a, dbiatx, nderiv )
+ implicit double precision(a-h,o-z)
+calls bsplvb
+calculates value and deriv.s of all b-splines which do not vanish at x
+c
+c****** i n p u t ******
+c t the knot array, of length left+k (at least)
+c k the order of the b-splines to be evaluated
+c x the point at which these values are sought
+c left an integer indicating the left endpoint of the interval of
+c interest. the k b-splines whose support contains the interval
+c (t(left), t(left+1))
+c are to be considered.
+c a s s u m p t i o n - - - it is assumed that
+c t(left) .lt. t(left+1)
+c division by zero will result otherwise (in b s p l v b ).
+c also, the output is as advertised only if
+c t(left) .le. x .le. t(left+1) .
+c nderiv an integer indicating that values of b-splines and their
+c derivatives up to but not including the nderiv-th are asked
+c for. ( nderiv is replaced internally by the integer in (1,k)
+c closest to it.)
+c
+c****** w o r k a r e a ******
+c a an array of order (k,k), to contain b-coeff.s of the derivat-
+c ives of a certain order of the k b-splines of interest.
+c
+c****** o u t p u t ******
+c dbiatx an array of order (k,nderiv). its entry (i,m) contains
+c value of (m-1)st derivative of (left-k+i)-th b-spline of
+c order k for knot sequence t , i=m,...,k; m=1,...,nderiv.
+c
+c****** m e t h o d ******
+c values at x of all the relevant b-splines of order k,k-1,...,
+c k+1-nderiv are generated via bsplvb and stored temporarily
+c in dbiatx . then, the b-coeffs of the required derivatives of the
+c b-splines of interest are generated by differencing, each from the
+c preceding one of lower order, and combined with the values of b-
+c splines of corresponding order in dbiatx to produce the desired
+c values.
+c
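+c  an illustrative call (not part of the original source): the
+c  values and first derivatives of the k = 4 cubic b-splines that
+c  are nonzero on (t(left), t(left+1)) may be obtained with
+c      call vbsplvd ( t, 4, x, left, a, dbiatx, 2 )
+c  after which dbiatx(i,1) holds the b-spline values and
+c  dbiatx(i,2) the first derivatives, i=1,...,4, with a a 4 by 4
+c  work array.
+c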
+ integer k,left,nderiv, i,ideriv,il,j,jlow,jp1mid,kp1,kp1mm
+ * ,ldummy,m,mhigh
+ double precision a(k,k),dbiatx(k,nderiv),t(1),x
+ double precision factor,fkp1mm,sum
+ mhigh = max0(min0(nderiv,k),1)
+c mhigh is usually equal to nderiv.
+ kp1 = k+1
+ call bsplvb(t,kp1-mhigh,1,x,left,dbiatx)
+ if (mhigh .eq. 1) go to 99
+c the first column of dbiatx always contains the b-spline values
+c for the current order. these are stored in column k+1-current
+c order before bsplvb is called to put values for the next
+c higher order on top of it.
+ ideriv = mhigh
+ do 15 m=2,mhigh
+ jp1mid = 1
+ do 11 j=ideriv,k
+ dbiatx(j,ideriv) = dbiatx(jp1mid,1)
+ 11 jp1mid = jp1mid + 1
+ ideriv = ideriv - 1
+ call bsplvb(t,kp1-ideriv,2,x,left,dbiatx)
+ 15 continue
+c
+c at this point, b(left-k+i, k+1-j)(x) is in dbiatx(i,j) for
+c i=j,...,k and j=1,...,mhigh ('=' nderiv). in particular, the
+c first column of dbiatx is already in final form. to obtain cor-
+c responding derivatives of b-splines in subsequent columns, gene-
+c rate their b-repr. by differencing, then evaluate at x.
+c
+ jlow = 1
+ do 20 i=1,k
+ do 19 j=jlow,k
+ 19 a(j,i) = 0d0
+ jlow = i
+ 20 a(i,i) = 1d0
+c at this point, a(.,j) contains the b-coeffs for the j-th of the
+c k b-splines of interest here.
+c
+ do 40 m=2,mhigh
+ kp1mm = kp1 - m
+ fkp1mm = dble(kp1mm)
+ il = left
+ i = k
+c
+c for j=1,...,k, construct b-coeffs of (m-1)st derivative of
+c b-splines from those for preceding derivative by differencing
+c and store again in a(.,j) . the fact that a(i,j) = 0 for
+c     i .lt. j is used.
+ do 25 ldummy=1,kp1mm
+ factor = fkp1mm/(t(il+kp1mm) - t(il))
+c the assumption that t(left).lt.t(left+1) makes denominator
+c in factor nonzero.
+ do 24 j=1,i
+ 24 a(i,j) = (a(i,j) - a(i-1,j))*factor
+ il = il - 1
+ 25 i = i - 1
+c
+c for i=1,...,k, combine b-coeffs a(.,i) with b-spline values
+c stored in dbiatx(.,m) to get value of (m-1)st derivative of
+c i-th b-spline (of interest here) at x , and store in
+c dbiatx(i,m). storage of this value over the value of a b-spline
+c of order m there is safe since the remaining b-spline derivat-
+c     ives of the same order do not use this value due to the fact
+c that a(j,i) = 0 for j .lt. i .
+ 30 do 40 i=1,k
+ sum = 0.
+ jlow = max0(i,m)
+ do 35 j=jlow,k
+ 35 sum = a(j,i)*dbiatx(j,m) + sum
+ 40 dbiatx(i,m) = sum
+ 99 return
+ end
+ subroutine bsplvb ( t, jhigh, index, x, left, biatx )
+ implicit double precision(a-h,o-z)
+calculates the value of all possibly nonzero b-splines at x of order
+c
+c jout = dmax( jhigh , (j+1)*(index-1) )
+c
+c with knot sequence t .
+c
+c****** i n p u t ******
+c t.....knot sequence, of length left + jout , assumed to be nonde-
+c creasing. a s s u m p t i o n . . . .
+c t(left) .lt. t(left + 1) .
+c d i v i s i o n b y z e r o will result if t(left) = t(left+1)
+c jhigh,
+c index.....integers which determine the order jout = max(jhigh,
+c (j+1)*(index-1)) of the b-splines whose values at x are to
+c be returned. index is used to avoid recalculations when seve-
+c ral columns of the triangular array of b-spline values are nee-
+c ded (e.g., in bvalue or in vbsplvd ). precisely,
+c if index = 1 ,
+c the calculation starts from scratch and the entire triangular
+c array of b-spline values of orders 1,2,...,jhigh is generated
+c order by order , i.e., column by column .
+c if index = 2 ,
+c only the b-spline values of order j+1, j+2, ..., jout are ge-
+c nerated, the assumption being that biatx , j , deltal , deltar
+c are, on entry, as they were on exit at the previous call.
+c in particular, if jhigh = 0, then jout = j+1, i.e., just
+c the next column of b-spline values is generated.
+c
+c w a r n i n g . . . the restriction jout .le. jmax (= 20) is im-
+c posed arbitrarily by the dimension statement for deltal and
+c deltar below, but is n o w h e r e c h e c k e d for .
+c
+c x.....the point at which the b-splines are to be evaluated.
+c left.....an integer chosen (usually) so that
+c t(left) .le. x .le. t(left+1) .
+c
+c****** o u t p u t ******
+c biatx.....array of length jout , with biatx(i) containing the val-
+c ue at x of the polynomial of order jout which agrees with
+c the b-spline b(left-jout+i,jout,t) on the interval (t(left),
+c t(left+1)) .
+c
+c****** m e t h o d ******
+c the recurrence relation
+c
+c x - t(i) t(i+j+1) - x
+c b(i,j+1)(x) = -----------b(i,j)(x) + ---------------b(i+1,j)(x)
+c t(i+j)-t(i) t(i+j+1)-t(i+1)
+c
+c is used (repeatedly) to generate the (j+1)-vector b(left-j,j+1)(x),
+c ...,b(left,j+1)(x) from the j-vector b(left-j+1,j)(x),...,
+c b(left,j)(x), storing the new values in biatx over the old. the
+c facts that
+c b(i,1) = 1 if t(i) .le. x .lt. t(i+1)
+c and that
+c b(i,j)(x) = 0 unless t(i) .le. x .lt. t(i+j)
+c are used. the particular organization of the calculations follows al-
+c gorithm (8) in chapter x of the text.
+c
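+c  a small worked case, added for illustration: with jhigh = 2,
+c  index = 1, and knots satisfying t(left) = 0, t(left+1) = 1,
+c  a call at x = 0.5 returns
+c      biatx(1) = 0.5 ,  biatx(2) = 0.5 ,
+c  the two linear b-splines straddling x, which, as a partition
+c  of unity, sum to one.
+c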
+ parameter(jmax = 20)
+ integer index,jhigh,left, i,j,jp1
+ double precision biatx(jhigh),t(1),x, deltal(jmax)
+ double precision deltar(jmax),saved,term
+c dimension biatx(jout), t(left+jout)
+current fortran standard makes it impossible to specify the length of
+c t and of biatx precisely without the introduction of otherwise
+c superfluous additional arguments.
+ data j/1/
+c save j,deltal,deltar (valid in fortran 77)
+c
+ go to (10,20), index
+ 10 j = 1
+ biatx(1) = 1d0
+ if (j .ge. jhigh) go to 99
+c
+ 20 jp1 = j + 1
+ deltar(j) = t(left+j) - x
+ deltal(j) = x - t(left+1-j)
+ saved = 0d0
+ do 26 i=1,j
+ term = biatx(i)/(deltar(i) + deltal(jp1-i))
+ biatx(i) = saved + deltar(i)*term
+ 26 saved = deltal(jp1-i)*term
+ biatx(jp1) = saved
+ j = jp1
+ if (j .lt. jhigh) go to 20
+c
+ 99 return
+ end
+
+ double precision function bvalue ( t, bcoef, n, k, x, jderiv )
+ implicit double precision(a-h,o-z)
+calls vinterv
+c
+calculates value at x of jderiv-th derivative of spline from b-repr.
+c the spline is taken to be continuous from the right.
+c
+c****** i n p u t ******
+c t, bcoef, n, k......forms the b-representation of the spline f to
+c be evaluated. specifically,
+c t.....knot sequence, of length n+k, assumed nondecreasing.
+c bcoef.....b-coefficient sequence, of length n .
+c n.....length of bcoef and dimension of s(k,t),
+c a s s u m e d positive .
+c k.....order of the spline .
+c
+c w a r n i n g . . . the restriction k .le. kmax (=20) is imposed
+c     arbitrarily by the dimension statement for aj, dm, dp below,
+c but is n o w h e r e c h e c k e d for.
+c
+c x.....the point at which to evaluate .
+c jderiv.....integer giving the order of the derivative to be evaluated
+c a s s u m e d to be zero or positive.
+c
+c****** o u t p u t ******
+c bvalue.....the value of the (jderiv)-th derivative of f at x .
+c
+c****** m e t h o d ******
+c the nontrivial knot interval (t(i),t(i+1)) containing x is lo-
+c cated with the aid of vinterv . the k b-coeffs of f relevant for
+c this interval are then obtained from bcoef (or taken to be zero if
+c not explicitly available) and are then differenced jderiv times to
+c obtain the b-coeffs of (d**jderiv)f relevant for that interval.
+c precisely, with j = jderiv, we have from x.(12) of the text that
+c
+c (d**j)f = sum ( bcoef(.,j)*b(.,k-j,t) )
+c
+c where
+c / bcoef(.), , j .eq. 0
+c /
+c bcoef(.,j) = / bcoef(.,j-1) - bcoef(.-1,j-1)
+c / ----------------------------- , j .gt. 0
+c / (t(.+k-j) - t(.))/(k-j)
+c
+c then, we use repeatedly the fact that
+c
+c sum ( a(.)*b(.,m,t)(x) ) = sum ( a(.,x)*b(.,m-1,t)(x) )
+c with
+c (x - t(.))*a(.) + (t(.+m-1) - x)*a(.-1)
+c a(.,x) = ---------------------------------------
+c (x - t(.)) + (t(.+m-1) - x)
+c
+c to write (d**j)f(x) eventually as a linear combination of b-splines
+c of order 1 , and the coefficient for b(i,1,t)(x) must then
+c be the desired number (d**j)f(x). (see x.(17)-(19) of text).
+c
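+c  illustrative calls (not part of the original source): the
+c  spline itself is evaluated as
+c      s  = bvalue ( t, bcoef, n, k, x, 0 )
+c  and its first derivative as
+c      s1 = bvalue ( t, bcoef, n, k, x, 1 ) ,
+c  with bvalue, s and s1 all declared double precision in the
+c  caller.
+c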
+ parameter(kmax = 20)
+ integer jderiv,k,n, i,ilo,imk,j,jc,jcmin,jcmax,jj,km1,mflag,nmi
+ double precision bcoef(n),t(1),x
+ double precision aj(kmax),dm(kmax),dp(kmax),fkmj
+c dimension t(n+k)
+current fortran standard makes it impossible to specify the length of
+c t precisely without the introduction of otherwise superfluous
+c additional arguments.
+ bvalue = 0.
+ if (jderiv .ge. k) go to 99
+c
+c *** find i s.t. 1 .le. i .lt. n+k and t(i) .lt. t(i+1) and
+c t(i) .le. x .lt. t(i+1) . if no such i can be found, x lies
+c outside the support of the spline f and bvalue = 0.
+c (the asymmetry in this choice of i makes f rightcontinuous)
+ if( (x.ne.t(n+1)) .or. (t(n+1).ne.t(n+k)) ) go to 700
+ i = n
+ go to 701
+ 700 call vinterv ( t, n+k, x, i, mflag )
+ if (mflag .ne. 0) go to 99
+ 701 continue
+c *** if k = 1 (and jderiv = 0), bvalue = bcoef(i).
+ km1 = k - 1
+ if (km1 .gt. 0) go to 1
+ bvalue = bcoef(i)
+ go to 99
+c
+c *** store the k b-spline coefficients relevant for the knot interval
+c (t(i),t(i+1)) in aj(1),...,aj(k) and compute dm(j) = x - t(i+1-j),
+c dp(j) = t(i+j) - x, j=1,...,k-1 . set any of the aj not obtainable
+c from input to zero. set any t.s not obtainable equal to t(1) or
+c to t(n+k) appropriately.
+ 1 jcmin = 1
+ imk = i - k
+ if (imk .ge. 0) go to 8
+ jcmin = 1 - imk
+ do 5 j=1,i
+ 5 dm(j) = x - t(i+1-j)
+ do 6 j=i,km1
+ aj(k-j) = 0.
+ 6 dm(j) = dm(i)
+ go to 10
+ 8 do 9 j=1,km1
+ 9 dm(j) = x - t(i+1-j)
+c
+ 10 jcmax = k
+ nmi = n - i
+ if (nmi .ge. 0) go to 18
+ jcmax = k + nmi
+ do 15 j=1,jcmax
+ 15 dp(j) = t(i+j) - x
+ do 16 j=jcmax,km1
+ aj(j+1) = 0.
+ 16 dp(j) = dp(jcmax)
+ go to 20
+ 18 do 19 j=1,km1
+ 19 dp(j) = t(i+j) - x
+c
+ 20 do 21 jc=jcmin,jcmax
+ 21 aj(jc) = bcoef(imk + jc)
+c
+c *** difference the coefficients jderiv times.
+ if (jderiv .eq. 0) go to 30
+ do 23 j=1,jderiv
+ kmj = k-j
+ fkmj = dble(kmj)
+ ilo = kmj
+ do 23 jj=1,kmj
+ aj(jj) = ((aj(jj+1) - aj(jj))/(dm(ilo) + dp(jj)))*fkmj
+ 23 ilo = ilo - 1
+c
+c *** compute value at x in (t(i),t(i+1)) of jderiv-th derivative,
+c given its relevant b-spline coeffs in aj(1),...,aj(k-jderiv).
+ 30 if (jderiv .eq. km1) go to 39
+ jdrvp1 = jderiv + 1
+ do 33 j=jdrvp1,km1
+ kmj = k-j
+ ilo = kmj
+ do 33 jj=1,kmj
+ aj(jj) = (aj(jj+1)*dm(ilo) + aj(jj)*dp(jj))/(dm(ilo)+dp(jj))
+ 33 ilo = ilo - 1
+ 39 bvalue = aj(1)
+c
+ 99 return
+ end
+
+
+ subroutine vinterv ( xt, lxt, x, left, mflag )
+ implicit double precision(a-h,o-z)
+computes left = max( i ; 1 .le. i .le. lxt .and. xt(i) .le. x ) .
+c
+c****** i n p u t ******
+c xt.....a double precision sequence, of length lxt , assumed to be nondecreasing
+c lxt.....number of terms in the sequence xt .
+c x.....the point whose location with respect to the sequence xt is
+c to be determined.
+c
+c****** o u t p u t ******
+c left, mflag.....both integers, whose value is
+c
+c 1 -1 if x .lt. xt(1)
+c i 0 if xt(i) .le. x .lt. xt(i+1)
+c lxt 1 if xt(lxt) .le. x
+c
+c in particular, mflag = 0 is the 'usual' case. mflag .ne. 0
+c indicates that x lies outside the halfopen interval
+c xt(1) .le. y .lt. xt(lxt) . the asymmetric treatment of the
+c interval is due to the decision to make all pp functions cont-
+c inuous from the right.
+c
+c****** m e t h o d ******
+c the program is designed to be efficient in the common situation that
+c it is called repeatedly, with x taken from an increasing or decrea-
+c sing sequence. this will happen, e.g., when a pp function is to be
+c graphed. the first guess for left is therefore taken to be the val-
+c ue returned at the previous call and stored in the l o c a l varia-
+c ble ilo . a first check ascertains that ilo .lt. lxt (this is nec-
+c essary since the present call may have nothing to do with the previ-
+c ous call). then, if xt(ilo) .le. x .lt. xt(ilo+1), we set left =
+c ilo and are done after just three comparisons.
+c otherwise, we repeatedly double the difference istep = ihi - ilo
+c while also moving ilo and ihi in the direction of x , until
+c xt(ilo) .le. x .lt. xt(ihi) ,
+c after which we use bisection to get, in addition, ilo+1 = ihi .
+c left = ilo is then returned.
+c
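+c  a small worked case, added for illustration: with lxt = 3 and
+c  xt = (1d0, 2d0, 3d0),
+c      x = 2.5d0  gives  left = 2, mflag =  0 ,
+c      x = 0.5d0  gives  left = 1, mflag = -1 ,
+c      x = 3.0d0  gives  left = 3, mflag =  1 .
+c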
+ integer left,lxt,mflag, ihi,ilo,istep,middle
+ double precision x,xt(lxt)
+ data ilo /1/
+c save ilo (a valid fortran statement in the new 1977 standard)
+ ihi = ilo + 1
+ if (ihi .lt. lxt) go to 20
+ if (x .ge. xt(lxt)) go to 110
+ if (lxt .le. 1) go to 90
+ ilo = lxt - 1
+ ihi = lxt
+c
+ 20 if (x .ge. xt(ihi)) go to 40
+ if (x .ge. xt(ilo)) go to 100
+c
+c **** now x .lt. xt(ilo) . decrease ilo to capture x .
+ 30 istep = 1
+ 31 ihi = ilo
+ ilo = ihi - istep
+ if (ilo .le. 1) go to 35
+ if (x .ge. xt(ilo)) go to 50
+ istep = istep*2
+ go to 31
+ 35 ilo = 1
+ if (x .lt. xt(1)) go to 90
+ go to 50
+c **** now x .ge. xt(ihi) . increase ihi to capture x .
+ 40 istep = 1
+ 41 ilo = ihi
+ ihi = ilo + istep
+ if (ihi .ge. lxt) go to 45
+ if (x .lt. xt(ihi)) go to 50
+ istep = istep*2
+ go to 41
+ 45 if (x .ge. xt(lxt)) go to 110
+ ihi = lxt
+c
+c **** now xt(ilo) .le. x .lt. xt(ihi) . narrow the interval.
+ 50 middle = (ilo + ihi)/2
+ if (middle .eq. ilo) go to 100
+c note. it is assumed that middle = ilo in case ihi = ilo+1 .
+ if (x .lt. xt(middle)) go to 53
+ ilo = middle
+ go to 50
+ 53 ihi = middle
+ go to 50
+c**** set output and return.
+ 90 mflag = -1
+ left = 1
+ return
+ 100 mflag = 0
+ left = ilo
+ return
+ 110 mflag = 1
+ left = lxt
+ return
+ end
+
+
+c =====================================================================
+c These two subroutines, dpbfa8 and dpbsl8, are called by sslvrg.
+c Note: rational cholesky versions of these functions are available,
+c called vdpbfa7 and vdpbsl7.
+c T.Yee 7/10/99
+
+c 1/7/02
+c T.Yee has renamed dpbfa to dpbfa8 and dpbsl to dpbsl8, to ensure uniqueness
+
+ subroutine dpbfa8(abd,lda,n,m,info)
+ integer lda,n,m,info
+ double precision abd(lda,1)
+c
+c dpbfa8 factors a double precision symmetric positive definite
+c matrix stored in band form.
+c
+c dpbfa8 is usually called by dpbco, but it can be called
+c directly with a saving in time if rcond is not needed.
+c
+c on entry
+c
+c abd double precision(lda, n)
+c the matrix to be factored. the columns of the upper
+c triangle are stored in the columns of abd and the
+c diagonals of the upper triangle are stored in the
+c rows of abd . see the comments below for details.
+c
+c lda integer
+c the leading dimension of the array abd .
+c lda must be .ge. m + 1 .
+c
+c n integer
+c the order of the matrix a .
+c
+c m integer
+c the number of diagonals above the main diagonal.
+c 0 .le. m .lt. n .
+c
+c on return
+c
+c abd an upper triangular matrix r , stored in band
+c form, so that a = trans(r)*r .
+c
+c info integer
+c = 0 for normal return.
+c = k if the leading minor of order k is not
+c positive definite.
+c
+c band storage
+c
+c if a is a symmetric positive definite band matrix,
+c the following program segment will set up the input.
+c
+c m = (band width above diagonal)
+c do 20 j = 1, n
+c i1 = max0(1, j-m)
+c do 10 i = i1, j
+c k = i-j+m+1
+c abd(k,j) = a(i,j)
+c 10 continue
+c 20 continue
+c
+c linpack. this version dated 08/14/78 .
+c cleve moler, university of new mexico, argonne national lab.
+c
+c subroutines and functions
+c
+c blas ddot
+c fortran max0,dsqrt
+c
+c internal variables
+c
+ double precision ddot8,t
+ double precision s
+ integer ik,j,jk,k,mu
+c begin block with ...exits to 40
+c
+c
+ do 30 j = 1, n
+ info = j
+ s = 0.0d0
+ ik = m + 1
+ jk = max0(j-m,1)
+ mu = max0(m+2-j,1)
+ if (m .lt. mu) go to 20
+ do 10 k = mu, m
+ t = abd(k,j) - ddot8(k-mu,abd(ik,jk),1,abd(mu,j),1)
+ t = t/abd(m+1,jk)
+ abd(k,j) = t
+ s = s + t*t
+ ik = ik - 1
+ jk = jk + 1
+ 10 continue
+ 20 continue
+ s = abd(m+1,j) - s
+c ......exit
+ if (s .le. 0.0d0) go to 40
+ abd(m+1,j) = dsqrt(s)
+ 30 continue
+ info = 0
+ 40 continue
+ return
+ end
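+
+c As a concrete illustration of the band storage described above (an
+c editorial sketch, not part of LINPACK): with n = 4 and m = 1, a
+c symmetric tridiagonal matrix with diagonal d1..d4 and superdiagonal
+c e1..e3 is packed columnwise as
+c
+c row 1 of abd: * e1 e2 e3
+c row 2 of abd: d1 d2 d3 d4
+c
+c where row m+1 = 2 holds the main diagonal and the entry marked *
+c is never referenced (k = i-j+m+1 maps a(i,j) to abd(k,j)).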
+
+ subroutine dpbsl8(abd,lda,n,m,b)
+ integer lda,n,m
+ double precision abd(lda,1),b(1)
+c
+c dpbsl8 solves the double precision symmetric positive definite
+c band system a*x = b
+c using the factors computed by dpbco or dpbfa8.
+c
+c on entry
+c
+c abd double precision(lda, n)
+c the output from dpbco or dpbfa8.
+c
+c lda integer
+c the leading dimension of the array abd .
+c
+c n integer
+c the order of the matrix a .
+c
+c m integer
+c the number of diagonals above the main diagonal.
+c
+c b double precision(n)
+c the right hand side vector.
+c
+c on return
+c
+c b the solution vector x .
+c
+c error condition
+c
+c a division by zero will occur if the input factor contains
+c a zero on the diagonal. technically this indicates
+c singularity but it is usually caused by improper subroutine
+c arguments. it will not occur if the subroutines are called
+c correctly and info .eq. 0 .
+c
+c to compute inverse(a) * c where c is a matrix
+c with p columns
+c call dpbco(abd,lda,n,rcond,z,info)
+c if (rcond is too small .or. info .ne. 0) go to ...
+c do 10 j = 1, p
+c call dpbsl8(abd,lda,n,m,c(1,j))
+c 10 continue
+c
+c linpack. this version dated 08/14/78 .
+c cleve moler, university of new mexico, argonne national lab.
+c
+c subroutines and functions
+c
+c blas daxpy,ddot
+c fortran min0
+c
+c internal variables
+c
+ double precision ddot8,t
+ integer k,kb,la,lb,lm
+c
+c solve trans(r)*y = b
+c
+ do 10 k = 1, n
+ lm = min0(k-1,m)
+ la = m + 1 - lm
+ lb = k - lm
+ t = ddot8(lm,abd(la,k),1,b(lb),1)
+ b(k) = (b(k) - t)/abd(m+1,k)
+ 10 continue
+c
+c solve r*x = y
+c
+ do 20 kb = 1, n
+ k = n + 1 - kb
+ lm = min0(k-1,m)
+ la = m + 1 - lm
+ lb = k - lm
+ b(k) = b(k)/abd(m+1,k)
+ t = -b(k)
+ call daxpy8(lm,t,abd(la,k),1,b(lb),1)
+ 20 continue
+ return
+ end
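+
+c A minimal factor-and-solve sketch using the two routines above
+c (a hypothetical driver, not part of the package): to solve
+c a*x = b for a symmetric positive definite band matrix a that has
+c already been packed into abd in band form,
+c
+c call dpbfa8(abd, lda, n, m, info)
+c if (info .ne. 0) stop
+c call dpbsl8(abd, lda, n, m, b)
+c
+c after which b holds the solution x.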
+
+
+
+
+
diff --git a/src/gautr.c b/src/gautr.c
new file mode 100644
index 0000000..c4fae28
--- /dev/null
+++ b/src/gautr.c
@@ -0,0 +1,334 @@
+#include <math.h>
+
+/* Frequently used numerical constants: */
+#define OneUponSqrt2Pi .39894228040143267794
+#define twopi 6.283185307179586
+#define LnSqrt2Pi -0.9189385332046727417803296
+#define SQRT2 1.414213562373095049
+#define SQRTPI 1.772453850905516027
+
+/* ---------------------------------------------------------------------------
+
+ UNIVARIATE NORMAL PROBABILITY
+
+ ---------------------------------------------------------------------------*/
+
+#define UPPERLIMIT 20.0
+/* Neither the univariate normal density nor the probability is
+   evaluated when x < -UPPERLIMIT or x > UPPERLIMIT; the limiting
+   values are returned instead. */
+
+#define P10 242.66795523053175
+#define P11 21.979261618294152
+#define P12 6.9963834886191355
+#define P13 -.035609843701815385
+#define Q10 215.05887586986120
+#define Q11 91.164905404514901
+#define Q12 15.082797630407787
+#define Q13 1.0
+
+#define P20 300.4592610201616005
+#define P21 451.9189537118729422
+#define P22 339.3208167343436870
+#define P23 152.9892850469404039
+#define P24 43.16222722205673530
+#define P25 7.211758250883093659
+#define P26 .5641955174789739711
+#define P27 -.0000001368648573827167067
+#define Q20 300.4592609569832933
+#define Q21 790.9509253278980272
+#define Q22 931.3540948506096211
+#define Q23 638.9802644656311665
+#define Q24 277.5854447439876434
+#define Q25 77.00015293522947295
+#define Q26 12.78272731962942351
+#define Q27 1.0
+
+#define P30 -.00299610707703542174
+#define P31 -.0494730910623250734
+#define P32 -.226956593539686930
+#define P33 -.278661308609647788
+#define P34 -.0223192459734184686
+#define Q30 .0106209230528467918
+#define Q31 .191308926107829841
+#define Q32 1.05167510706793207
+#define Q33 1.98733201817135256
+#define Q34 1.0
+
+double pnorm1(double x)
+{
+ int sn;
+ double R1, R2, y, y2, y3, y4, y5, y6, y7;
+ double erf, erfc, z, z2, z3, z4;
+ double phi;
+
+ if (x < -UPPERLIMIT) return 0.0;
+ if (x > UPPERLIMIT) return 1.0;
+
+ y = x / SQRT2;
+ if (y < 0)
+ {
+ y = -y;
+ sn = -1;
+ }
+ else
+ sn = 1;
+
+ y2 = y * y;
+ y4 = y2 * y2;
+ y6 = y4 * y2;
+
+ if(y < 0.46875)
+ {
+ R1 = P10 + P11 * y2 + P12 * y4 + P13 * y6;
+ R2 = Q10 + Q11 * y2 + Q12 * y4 + Q13 * y6;
+ erf = y * R1 / R2;
+ if (sn == 1)
+ phi = 0.5 + 0.5*erf;
+ else
+ phi = 0.5 - 0.5*erf;
+ }
+ else
+ if (y < 4.0)
+ {
+ y3 = y2 * y;
+ y5 = y4 * y;
+ y7 = y6 * y;
+ R1 = P20 + P21 * y + P22 * y2 + P23 * y3 +
+ P24 * y4 + P25 * y5 + P26 * y6 + P27 * y7;
+ R2 = Q20 + Q21 * y + Q22 * y2 + Q23 * y3 +
+ Q24 * y4 + Q25 * y5 + Q26 * y6 + Q27 * y7;
+ erfc = exp(-y2) * R1 / R2;
+ if (sn == 1)
+ phi = 1.0 - 0.5*erfc;
+ else
+ phi = 0.5*erfc;
+ }
+ else
+ {
+      z = 1.0 / y2;   /* Cody's approximation uses 1/y^2 in this region */
+ z2 = z * z;
+ z3 = z2 * z;
+ z4 = z2 * z2;
+ R1 = P30 + P31 * z + P32 * z2 + P33 * z3 + P34 * z4;
+ R2 = Q30 + Q31 * z + Q32 * z2 + Q33 * z3 + Q34 * z4;
+ erfc = (exp(-y2)/y) * (1.0 / SQRTPI + R1 / (R2 * y2));
+ if (sn == 1)
+ phi = 1.0 - 0.5*erfc;
+ else
+ phi = 0.5*erfc;
+ }
+
+ return phi;
+}
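+
+/* A quick sanity check of pnorm1 (an editorial sketch, not part of
+   the package): pnorm1(0.0) should return exactly 0.5, and
+   pnorm1(1.959964) should be close to 0.975. */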
+
+
+
+/* ---------------------------------------------------------------------------
+
+ UNIVARIATE NORMAL DENSITY
+
+ ---------------------------------------------------------------------------*/
+
+double dnorm1(double x)
+{
+ if (x < -UPPERLIMIT) return 0.0;
+ if (x > UPPERLIMIT) return 0.0;
+ return OneUponSqrt2Pi * exp(-0.5 * x * x);
+}
+
+
+
+/* ---------------------------------------------------------------------------
+
+ LN OF UNIVARIATE NORMAL DENSITY
+
+ ---------------------------------------------------------------------------*/
+
+double lndnorm1(double x)
+{
+ return LnSqrt2Pi - (0.5*x*x);
+}
+
+
+
+/*---------------------------------------------------------------------------
+
+ BIVARIATE NORMAL PROBABILITY
+
+ ---------------------------------------------------------------------------*/
+
+#define con ((twopi / 2.0) * 10.0e-10)
+
+double bivnor(double ah, double ak, double r)
+{
+/*
+ based on alg 462 comm. acm oct 73
+ gives the probability that a bivariate normal exceeds (ah,ak).
+ gh and gk are .5 times the right tail areas of ah, ak under a n(0,1)
+
+  Translated from FORTRAN to ratfor using struct; from ratfor to C by hand.
+*/
+ double a2, ap, b, cn, conex, ex, g2, gh, gk, gw, h2, h4, rr, s1, s2,
+ sgn, sn, sp, sqr, t, temp, w2, wh, wk;
+ int is;
+
+ temp = -ah;
+ gh = pnorm1(temp);
+ gh = gh / 2.0;
+ temp = -ak;
+ gk = pnorm1(temp);
+ gk = gk / 2.0;
+
+ b = 0;
+
+ if (r==0)
+ b = 4*gh*gk;
+ else {
+ rr = 1-r*r;
+ if (rr<0)
+ return 0; /* zz; 29/6/02; was originally return; not sure */
+ if (rr!=0) {
+ sqr = sqrt(rr);
+ if (ah!=0) {
+ b = gh;
+ if (ah*ak<0)
+ b = b-.5;
+ else if (ah*ak==0)
+ goto label10;
+ }
+ else if (ak==0) {
+ b = atan(r/sqr)/twopi+.25;
+ goto label50;
+ }
+ b = b+gk;
+ if (ah==0)
+ goto label20;
+label10:
+ wh = -ah;
+ wk = (ak/ah-r)/sqr;
+ gw = 2*gh;
+ is = -1;
+ goto label30;
+label20:
+ do {
+ wh = -ak;
+ wk = (ah/ak-r)/sqr;
+ gw = 2*gk;
+ is = 1;
+label30:
+ sgn = -1;
+ t = 0;
+ if (wk!=0) {
+ if (fabs(wk)>=1) {
+ /* this brace added 28/6/02 by tyee */
+ if (fabs(wk)==1) {
+ t = wk*gw*(1-gw)/2;
+ goto label40;
+ }
+ else {
+ sgn = -sgn;
+ wh = wh*wk;
+ g2 = pnorm1(wh);
+ wk = 1/wk;
+ if (wk<0)
+ b = b+.5;
+ b = b-(gw+g2)/2+gw*g2;
+ }
+ }
+ h2 = wh*wh;
+ a2 = wk*wk;
+ h4 = h2*.5;
+ ex = 0;
+ if (h4<150.0)
+ ex = exp(-h4);
+ w2 = h4*ex;
+ ap = 1;
+ s2 = ap-ex;
+ sp = ap;
+ s1 = 0;
+ sn = s1;
+ conex = fabs(con/wk);
+ do {
+ cn = ap*s2/(sn+sp);
+ s1 = s1+cn;
+ if (fabs(cn)<=conex)
+ break;
+ sn = sp;
+ sp = sp+1;
+ s2 = s2-w2;
+ w2 = w2*h4/sp;
+ ap = -ap*a2;
+ } while (1);
+ t = (atan(wk)-wk*s1)/twopi;
+label40:
+ b = b+sgn*t;
+ }
+ if (is>=0)
+ break;
+ } while(ak!=0);
+ }
+ else if (r>=0)
+ if (ah>=ak)
+ b = 2*gh;
+ else
+ b = 2*gk;
+ else if (ah+ak<0)
+ b = 2*(gh+gk)-1;
+ }
+label50:
+ if (b<0)
+ b = 0;
+ if (b>1)
+ b = 1;
+
+ return(b);
+}
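+
+/* A small check of bivnor (an editorial sketch, not part of the
+   package): for ah = ak = 0 the upper orthant probability has the
+   closed form 1/4 + asin(r)/twopi, so bivnor(0.0, 0.0, 0.5) should
+   return approximately 1/4 + 1/12 = 0.333333. */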
+
+/* in the following function
+size measures the dimension of x
+singler == 1 if r is a scalar; otherwise r is same size as x & y
+*/
+
+/* This is called by S */
+
+void pnorm2(double *x, double *y, double *r,
+ int *size, int *singler, double *ans)
+{
+ int i;
+
+ if(*singler == 1)
+ {
+ for(i = 0; i < *size; i++)
+ ans[i] = bivnor(x[i], y[i], *r);
+ }
+ else
+ {
+ for(i = 0; i < *size; i++)
+ ans[i] = bivnor(x[i], y[i], r[i]);
+ }
+}
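+
+/* Since pnorm2 is the entry point called from S, an invocation from
+   R/S might look like the following sketch (the argument names are
+   hypothetical):
+
+   .C("pnorm2", as.double(x), as.double(y), as.double(rho),
+      as.integer(length(x)), as.integer(1), ans = double(length(x)))
+
+   with singler = 1 because rho is a scalar here. */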
+
+
+
+/*
+main()
+{
+ int i;
+ double x,y,r;
+
+ x = 0.0;
+ y = 0.0;
+
+ for(i = -9; i<=9; i++)
+ {
+ r = i / 10.0;
+ printf("%10.2f %10.6f \n",r,bivnor(x,y,r));
+
+ }
+
+}
+*/
+
+
+
+
diff --git a/src/lerchphi.c b/src/lerchphi.c
new file mode 100644
index 0000000..6aabbb2
--- /dev/null
+++ b/src/lerchphi.c
@@ -0,0 +1,457 @@
+/*
+
+-------------------------------
+Lerch's transcendent Phi(z,s,v)
+-------------------------------
+
+This program is copyright by
+
+Sergej V. Aksenov (http://www.geocities.com/saksenov) and
+Ulrich D. Jentschura (jentschura at physik.tu-dresden.de), 2002.
+
+Version 1.00 (May 1, 2002)
+
+Calling sequence:
+
+int lerchphi(double *z, double *s, double *v, double *acc,
+ double *result, int *iter)
+
+calculates Lerch's transcendent Phi(z,s,v), returning it in *result, to a
+specified accuracy *acc; *iter receives the number of iterations used.
+Double precision is used throughout the calculation.
+The program uses direct summation of the defining series for |z| <= 0.5 and
+the combined nonlinear-condensation transformation (CNCT) for 0.5 < |z| < 1.0.
+The integer return code has to be interpreted as follows.
+
+-------------
+Return codes:
+-------------
+
+0 - Normal termination.
+1 - Lerch Phi diverges for 1 <= |z|.
+2 - Lerch Phi is not defined for integer v <= 0.
+3 - pow() is not defined for v < 0 and s not integer.
+4 - Long integer overflow in aj1234().
+5 - Underflow in remainder estimate omega in lerchphi().
+6 - No convergence within the maximum number of iterations.
+
+
+Implementation note:
+
+In subroutine aj1234(), defining variables ind and two2k as type double
+instead of long int might eliminate overflow error which occurs for
+high indices (error code 4).
+
+*/
+
+#include <stdlib.h>
+#include <math.h>
+#include <float.h>
+
+#define macheps DBL_EPSILON
+#define machmin DBL_MIN
+
+/* If preprocessor macro ADD_UNDERSCORE was defined, add underscore to
+the function name --- needed for linking to Fortran programs on a Sun. */
+
+#if (ADD_UNDERSCORE)
+#define lerchphi lerchphi_
+#endif
+
+/* Function that computes van Wijngaarden's A_j for a given j. */
+
+static int aj1234(double *z, double *s, double *v, int j, double *acc, double *res)
+ {
+
+ double sum, bjk, z2ind;
+ int k, flag;
+ unsigned long int ind, two2k;
+
+ sum = bjk = 0.0;
+ k = -1;
+ two2k = 1;
+ flag = 0;
+
+ /* Sum b^j_k's over k. */
+
+ for (;;)
+ {
+
+ k++;
+
+ /* Index for the term of the original series. */
+
+ if (k > 0) two2k *= 2;
+ ind = two2k * (j + 1) - 1;
+
+      /* If long integer overflow occurs, these variables wrap around and
+         eventually become zero; since two2k and ind are unsigned long
+         ints here (not doubles), the check below is needed. */
+
+ if (k > 0 && (two2k == 0 || ind == 0))
+ {
+ flag = 4;
+ break;
+ }
+
+ /* Increment the sum. */
+
+ z2ind = pow(*z, ind);
+ bjk = two2k * z2ind / pow(*v + ind, *s);
+ sum += bjk;
+
+ /* Stop summation if either sum is zero or
+ |term/sum| is below requested accuracy. */
+
+ if (fabs(sum) <= machmin || fabs(bjk/sum) < 1.0e-2 * (*acc)) break;
+
+ }
+
+ *res = sum;
+ return flag;
+
+ }
+
+/* Function that computes approximation to Lerch Phi as a
+ converging sequence of CNC transforms S^n_k. */
+
+int lerchphi(double *z, double *s, double *v, double *acc,
+ double *result, int *iter)
+ {
+
+ const unsigned short int beta = 1, n = 0, imax = 100;
+ unsigned short int j, m;
+ int i, sign, flag;
+ double v1, sn, eps0, eps, skn, skn0, omega, *num, *den, *StoreAj,
+ factor, factor1, x, est, iom, sum1, cacc;
+
+ /* Local copy of v. */
+
+ v1 = *v;
+
+ /* Special cases. */
+
+ /* 1 <= |z|.
+ (Return error, Lerch Phi diverges.) */
+
+ if (1.0 <= fabs(*z))
+ {
+ *result = 1.0;
+ *iter = 0;
+ return 1;
+ }
+
+ /* v <= 0 is integer.
+ (Return error, Lerch Phi is not defined.) */
+
+ if (fabs(floor(*v) - *v) <= macheps*fabs(*v) && *v <= 0.0)
+ {
+ *result = 1.0;
+ *iter = 0;
+ return 2;
+ }
+
+ /* v < 0 is not integer or zero and z != 0 (z == 0 considered below) ... */
+
+ if (*v < 0.0 && fabs(*z) > machmin)
+ {
+
+ /* s is not an integer.
+ (Return error because pow() is not defined.) */
+
+ if (fabs(floor(*s) - *s) > macheps*fabs(*s))
+ {
+ *result = 1.0;
+ *iter = 0;
+ return 3;
+ }
+
+ /* s is an integer.
+ (Transform v to positive). */
+
+ else
+ {
+ m = - (int) floor(*v);
+ v1 += m;
+ sum1 = 0.0;
+ if ((int) *s % 2 == 0) sign = 1;
+ else sign = -1;
+ for (i = 0; i <= m-1; i++)
+ {
+ if ((i > 0) && (*z < 0)) sign = -sign;
+ sum1 += sign*pow(fabs(*z),i)/pow(fabs(*v+i),*s);
+ }
+ }
+ }
+
+ /* z = 0 and ... */
+
+ if (fabs(*z) <= machmin)
+ {
+
+ /* ... v < 0 is not integer or zero and ... */
+
+ if (*v < 0)
+ {
+
+ /* s is not an integer.
+ (Return error because pow() is not defined.) */
+
+ if (fabs(floor(*s) - *s) > macheps*fabs(*s))
+ {
+ *result = 1.0;
+ *iter = 0;
+ return 3;
+ }
+
+ /* s is an integer.
+ (Return first term of series.)*/
+
+ else
+            {
+                if ((int) *s % 2 == 0) sign = 1;
+                else sign = -1;
+                *result = sign * 1.0 / pow(fabs(*v), *s);
+                /* Return here, as the comment above indicates and as the
+                   v > 0 branch below does, so that the general-case code
+                   is not entered with z = 0 and m, sum1 unset. */
+                *iter = 1;
+                return 0;
+            }
+ }
+
+ /* ... v > 0.
+ (Return first term of series.) */
+
+ else
+ {
+ *result = 1.0 / pow(*v, *s);
+ *iter = 1;
+ return 0;
+ }
+ }
+
+ /* General case. */
+
+ /* Some initializations. */
+
+ /* sn denotes current partial sum of defining series:
+ z > 0.5: sn is partial sum S_n of the van
+ Wijngaarden transformed series.
+ z <= 0.5: sn is the partial sum of the
+ power series defining LerchPhi.
+ skn0 and skn denote successive partial sums S^k_n
+ that are same as sn in case of direct summation and
+ delta-transformed in case of CNCT.
+ eps0 and eps denote successive differences between
+ partial sums S^k_n. */
+
+ eps0 = skn = skn0 = sn = 0.0;
+
+ /* omega is next term of a partial sum (of defining power series
+ for direct summation, of van Wijngaarden transformed series
+ for CNCT) and also becomes a remainder estimate in the delta
+       transformation in CNCT. */
+
+ /* For z <= 0.5 van Wijngaarden transformation is not used
+ [hence no calls to aj1234()]. */
+
+ /* Direct summation and CNCT (z < -0.5) case. */
+
+ if (*z <= 0.5)
+ omega = 1.0 / pow(v1, *s);
+
+ /* CNCT (z > 0.5) case. */
+
+ else
+ {
+ flag = aj1234(z, s, &v1, 0, acc, &omega);
+ if (flag)
+ {
+ *result = 1.0;
+ *iter = 0;
+ return flag;
+ }
+ }
+
+ /* Allocate memory for working arrays. */
+
+ num = (double *) malloc(imax * sizeof(double));
+ den = (double *) malloc(imax * sizeof(double));
+ /* StoreAj is used only in CNCT */
+ if (*z > 0.5) StoreAj = (double *) malloc(imax * sizeof(double));
+
+ flag = 0;
+ i = -1;
+ sign = -1;
+
+ /* Main loop: iterations for S^k_n. */
+
+ for (;;)
+ {
+
+ /* i points to current iterate. */
+
+ i++;
+
+ /* Increment the sum. */
+
+ sign = -sign;
+ sn += omega;
+
+ /* Next term: omega. */
+
+ if (*z < 0.0) /* Direct summation and CNCT (z < -0.5) case. */
+ /* Recurrence for power series. */
+ omega = (*z) * pow((v1+i)/(v1+i+1), *s) * omega;
+ else /* z > 0 */
+ {
+ if (*z <= 0.5) /* "Direct summation". */
+ omega = (*z) * pow((v1+i)/(v1+i+1), *s) * omega;
+ else /* CNCT (z > 0.5) case. */
+ {
+ *(StoreAj+i) = sign * omega;
+ if (i % 2 == 0)
+ /* Recurrence for odd pointer i. */
+ {omega = -sign * 0.5 * (*(StoreAj+i/2) - pow(*z, i/2) /
+ pow(v1+i/2, *s));}
+ else
+ {
+ flag = aj1234(z, s, &v1, i+1, acc, &omega);
+ if (flag) break;
+ else omega = -sign * omega;
+ }
+ }
+ }
+
+ /* Direct summation case: store current sum and remainder estimate. */
+
+ if (fabs(*z) <= 0.5)
+ {
+ skn = sn;
+ est = 2.0 * pow(fabs(*z), (i+1)) / pow(v1+i+1, *s);
+ }
+
+ /* CNCT case. */
+
+ else
+ {
+
+ /* Make sure omega is representable machine number. */
+
+ if (fabs(omega) <= machmin)
+ {
+ flag = 5;
+ break;
+ }
+ else iom = 1.0 / omega;
+
+ /* Last terms in sums of numerator and denominator of
+ i-th partial sum. */
+
+ *(num+i) = sn * iom;
+ *(den+i) = iom;
+
+ /* Recurrence computation of numerator and
+ denominator of a S_k^n. */
+
+ if (i > 0)
+ {
+ factor = 1.0;
+ *(num+i-1) = *(num+i) - factor * (*(num+i-1));
+ *(den+i-1) = *(den+i) - factor * (*(den+i-1));
+ }
+
+ factor1 = (double) (beta+n+i-1) * (beta+n+i-2);
+ for(j = 2; j <= i; j++)
+ {
+ factor = factor1 / (beta+n+i+j-2) / (beta+n+i+j-3);
+ *(num+i-j) = *(num+i-j+1) - factor * (*(num+i-j));
+ *(den+i-j) = *(den+i-j+1) - factor * (*(den+i-j));
+ }
+
+ /* Current approximation of the sum S_k^n. */
+
+ skn = *num / *den;
+
+ } /* else CNCT case. */
+
+ eps = fabs(skn - skn0);
+
+ /* Check the three termination criteria. */
+
+ /* |est/skn| is less than the requested accuracy
+ (est is a remainder estimate). */
+
+ if (i > 0 && eps < eps0)
+ {
+ if (fabs(*z) > 0.5)
+ {
+ x = eps/eps0;
+ est = 2.0/x/(1.0-x)*eps;
+ }
+ cacc = fabs(est/skn);
+ if (cacc < (*acc)) break;
+ }
+
+ /* Successive iterates skn are the same. */
+
+ if (eps <= 0.0) break;
+
+ /* Maximum number of iterations is exceeded. */
+
+ if (i > imax-2)
+ {
+ flag = 6;
+ break;
+ }
+
+ /* Go on to the next iteration. */
+
+ skn0 = skn;
+ eps0 = eps;
+
+ } /* for */
+
+ /* Store the resulting sum. */
+
+ if (*v < 0)
+ {
+ sign = 1;
+ if ((*z < 0) && (m % 2 != 0)) sign = -1;
+ *result = sum1 + skn * sign * pow(fabs(*z),m);
+ }
+ else *result = skn;
+
+ /* Store the number of iterations. */
+
+ *iter = i + 1;
+
+ /* Clean up. */
+
+ free(num);
+ free(den);
+ if (*z > 0.5) free(StoreAj);
+
+ return flag;
+
+ }
+
+#undef macheps
+#undef machmin
+
+
+
+
+
+/* Code below written by T. Yee 14/6/06; it is a wrapper function. */
+
+void lerchphi123(int *err, int *L,
+ double *z, double *s, double *v, double *acc,
+ double *result, int *iter)
+{
+ int ell;
+
+ for(ell = 0; ell < *L; ell++) {
+ err[ell] = lerchphi(z+ell, s+ell, v+ell, acc,
+ result+ell, iter);
+ }
+}
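+
+/* A minimal driver sketch (not part of the package). Since
+   Phi(z,s,1) = Li_s(z)/z, the value Phi(0.5, 2, 1) = Li_2(0.5)/0.5
+   is approximately 1.1644810529, which the call below should
+   reproduce:
+
+#include <stdio.h>
+int main(void)
+{
+    double z = 0.5, s = 2.0, v = 1.0, acc = 1.0e-12, result;
+    int iter;
+    int err = lerchphi(&z, &s, &v, &acc, &result, &iter);
+    printf("err=%d iter=%d Phi=%.10f\n", err, iter, result);
+    return 0;
+}
+*/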
+
+
+
+
diff --git a/src/lms.f b/src/lms.f
new file mode 100644
index 0000000..e2e600b
--- /dev/null
+++ b/src/lms.f
@@ -0,0 +1,194 @@
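+c Machine-generated (ratfor) output with obfuscated identifiers.
+c These routines appear to evaluate, by composite Gauss-Legendre
+c quadrature, the expected-information integrals of the Yeo-Johnson
+c transformation used by the lms.yjn family: rnvz5t accumulates one
+c panel of a chosen integrand (gleg11, zuqx1p or gleg13), and
+c yjngintf keeps doubling the number of panels until the relative
+c change falls below the tolerance kqoy6w.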
+ subroutine dpdlyjn(psi, dwgkz6, sfnva0, fpqrt7, g8jieq, ghry8z)
+ implicit logical (a-z)
+ integer g8jieq
+ double precision psi, dwgkz6, sfnva0, fpqrt7, ghry8z(3)
+ integer one, zl9udh
+ double precision aa, bb, ig5cma, fiumb4
+ logical cc, pos
+ one = 1
+ zl9udh = 0
+ fiumb4 = 1.0d-04
+ cc = (psi .ge. 0.0d0)
+ if(.not.(cc))goto 23000
+ bb = dwgkz6
+ pos = (dabs(dwgkz6) .le. fiumb4)
+ goto 23001
+23000 continue
+ bb = -2.0d0 + dwgkz6
+ pos = (dabs(dwgkz6-2.0d0) .le. fiumb4)
+23001 continue
+ aa = 1.0d0 + psi * bb
+ if(.not.(g8jieq .ge. 0))goto 23002
+ if(.not.(pos))goto 23004
+ ghry8z(1) = psi
+ goto 23005
+23004 continue
+ ghry8z(1) = aa / bb
+23005 continue
+23002 continue
+ if(.not.(g8jieq .ge. 1))goto 23006
+ if(.not.(pos))goto 23008
+ ghry8z(2) = (ghry8z(1)**2) / 2
+ goto 23009
+23008 continue
+ ig5cma = ghry8z(1)
+ ghry8z(2) = (aa * (dlog(aa)/bb) - ig5cma) / bb
+23009 continue
+23006 continue
+ if(.not.(g8jieq .ge. 2))goto 23010
+ if(.not.(pos))goto 23012
+ ghry8z(3) = (ghry8z(1)**3) / 3
+ goto 23013
+23012 continue
+ ig5cma = ghry8z(2) * 2.0d0
+ ghry8z(3) = (aa * (dlog(aa)/bb) ** 2 - ig5cma) / bb
+23013 continue
+23010 continue
+ return
+ end
+ subroutine gleg11(hr83e, dwgkz6, sfnva0, fpqrt7, uvf4mp, ws5jcg,
+ &ghry8z)
+ implicit logical (a-z)
+ integer ws5jcg
+ double precision hr83e, dwgkz6, sfnva0, fpqrt7, uvf4mp(4), ghry8z
+ integer one, two, ynmzp6
+ double precision psi, hc0tub, xkwp2m(3), dq3rxy
+ ynmzp6 = 3
+ two = 2
+ one = 1
+ dq3rxy = 1.41421356237309515d0
+ if(.not.(ws5jcg .gt. 0))goto 23014
+ ghry8z = uvf4mp(4) * (uvf4mp(2)**2 + dq3rxy * fpqrt7 * hr83e *
+ &uvf4mp(3))
+ goto 23015
+23014 continue
+ hc0tub = 0.564189583547756279d0
+ psi = sfnva0 + dq3rxy * fpqrt7 * hr83e
+ call dpdlyjn(psi, dwgkz6, sfnva0, fpqrt7, two, xkwp2m)
+ ghry8z = (dexp(-hr83e*hr83e) * hc0tub) * (xkwp2m(2)**2 + (psi -
+ &sfnva0) * xkwp2m(3)) / fpqrt7**2
+23015 continue
+ return
+ end
+ subroutine zuqx1p(hr83e, dwgkz6, sfnva0, fpqrt7, uvf4mp, ws5jcg,
+ &ghry8z)
+ implicit logical (a-z)
+ integer ws5jcg
+ double precision hr83e, dwgkz6, sfnva0, fpqrt7, uvf4mp(4), ghry8z
+ integer one, two
+ double precision psi, hc0tub, mw6reg(2), dq3rxy
+ two = 2
+ one = 1
+ if(.not.(ws5jcg .gt. 0))goto 23016
+ ghry8z = uvf4mp(4) * (-uvf4mp(2))
+ goto 23017
+23016 continue
+ hc0tub = 0.564189583547756279d0
+ dq3rxy = 1.41421356237309515d0
+ psi = sfnva0 + dq3rxy * fpqrt7 * hr83e
+ call dpdlyjn(psi, dwgkz6, sfnva0, fpqrt7, one, mw6reg)
+ ghry8z = (dexp(-hr83e*hr83e) * hc0tub) * (-mw6reg(2)) / fpqrt7**2
+23017 continue
+ return
+ end
+ subroutine gleg13(hr83e, dwgkz6, sfnva0, fpqrt7, uvf4mp, ws5jcg,
+ &ghry8z)
+ implicit logical (a-z)
+ integer ws5jcg
+ double precision hr83e, dwgkz6, sfnva0, fpqrt7, uvf4mp(4), ghry8z
+ integer one, two
+ double precision psi, oaqng6, mw6reg(2), dq3rxy
+ two = 2
+ one = 1
+ if(.not.(ws5jcg .gt. 0))goto 23018
+ ghry8z = uvf4mp(4) * (-uvf4mp(2)) * dsqrt(8.0d0) * hr83e
+ goto 23019
+23018 continue
+ oaqng6 = -1.12837916709551256d0
+ dq3rxy = 1.41421356237309515d0
+ psi = sfnva0 + dq3rxy * fpqrt7 * hr83e
+ call dpdlyjn(psi, dwgkz6, sfnva0, fpqrt7, one, mw6reg)
+ ghry8z = dexp(-hr83e*hr83e) * oaqng6 * mw6reg(2) * (psi - sfnva0)
+ &/ fpqrt7**3
+23019 continue
+ return
+ end
+ subroutine rnvz5t(r7zvis, bd8olv, wts, oqie8v, dwgkz6, sfnva0,
+ &fpqrt7, kk, ghry8z, nepms8)
+ implicit logical (a-z)
+ integer kk, nepms8
+ double precision r7zvis, bd8olv, wts(kk), oqie8v(kk), ghry8z,
+ &dwgkz6, sfnva0, fpqrt7
+ integer nd6mep, ws5jcg
+ double precision atx, tns0gf, dy3ljx, uvf4mp(4), byn1gh, k8ousd
+ ws5jcg = 0
+ byn1gh = 0.50d0 * (r7zvis + bd8olv)
+ k8ousd = 0.50d0 * (bd8olv - r7zvis)
+ tns0gf = 0.0d0
+ if(.not.(nepms8 .eq. 1))goto 23020
+ do 23022 nd6mep=1,kk
+ atx = byn1gh + k8ousd * oqie8v(nd6mep)
+ call gleg11(atx, dwgkz6, sfnva0, fpqrt7, uvf4mp, ws5jcg, dy3ljx)
+ tns0gf = tns0gf + dy3ljx * wts(nd6mep)
+23022 continue
+ goto 23021
+23020 continue
+ if(.not.(nepms8 .eq. 2))goto 23024
+ do 23026 nd6mep=1,kk
+ atx = byn1gh + k8ousd * oqie8v(nd6mep)
+ call zuqx1p(atx, dwgkz6, sfnva0, fpqrt7, uvf4mp, ws5jcg, dy3ljx)
+ tns0gf = tns0gf + dy3ljx * wts(nd6mep)
+23026 continue
+ goto 23025
+23024 continue
+ if(.not.(nepms8 .eq. 3))goto 23028
+ do 23030 nd6mep=1,kk
+ atx = byn1gh + k8ousd * oqie8v(nd6mep)
+ call gleg13(atx, dwgkz6, sfnva0, fpqrt7, uvf4mp, ws5jcg, dy3ljx)
+ tns0gf = tns0gf + dy3ljx * wts(nd6mep)
+23030 continue
+23028 continue
+23025 continue
+23021 continue
+ ghry8z = ghry8z + k8ousd * tns0gf
+ return
+ end
+ subroutine yjngintf(r7zvis, bd8olv, oqie8v, wts, nfiumb4, kk,
+ &dwgkz6, sfnva0, fpqrt7, ghry8z, kqoy6w)
+ implicit logical (a-z)
+ integer nfiumb4, kk
+ double precision r7zvis(nfiumb4), bd8olv(nfiumb4), wts(kk),
+ &oqie8v(kk), dwgkz6(nfiumb4), sfnva0(nfiumb4), fpqrt7(nfiumb4),
+ &ghry8z(3,nfiumb4), kqoy6w
+ integer w3gohz, p1rifj, nd6mep, o2yadh, btip7u, epx9jf, nepms8,
+ &one, two
+ double precision mu4ygk, azgts7, xmr7cj
+ one = 1
+ two = 2
+ o2yadh = 12
+ do 23032 w3gohz = 1,nfiumb4
+ do 23034 nepms8=1,3
+ azgts7 = -10.0d0
+ do 23036 p1rifj=2,o2yadh
+ btip7u = 2 ** p1rifj
+ mu4ygk = (bd8olv(w3gohz) - r7zvis(w3gohz)) / btip7u
+ ghry8z(nepms8,w3gohz) = 0.0d0
+ do 23038 nd6mep=1,btip7u
+ call rnvz5t(r7zvis(w3gohz)+(nd6mep-1)*mu4ygk, r7zvis(w3gohz)+
+ &nd6mep*mu4ygk, wts, oqie8v, dwgkz6(w3gohz), sfnva0(w3gohz),
+ &fpqrt7(w3gohz), kk, ghry8z(nepms8,w3gohz), nepms8)
+23038 continue
+ xmr7cj = dabs(ghry8z(nepms8,w3gohz) - azgts7) / (1.0d0 + dabs(
+ &ghry8z(nepms8,w3gohz)))
+ if(.not.(xmr7cj .lt. kqoy6w))goto 23040
+ goto 234
+ goto 23041
+23040 continue
+ azgts7 = ghry8z(nepms8,w3gohz)
+23041 continue
+23036 continue
+234 epx9jf = 0
+23034 continue
+23032 continue
+ return
+ end
diff --git a/src/muxr.c b/src/muxr.c
new file mode 100644
index 0000000..57eec56
--- /dev/null
+++ b/src/muxr.c
@@ -0,0 +1,453 @@
+/*
+This code is
+Copyright (C) 1998-2005 T. W. Yee, University of Auckland. All rights reserved.
+*/
+
+
+#include <stdio.h>
+#include <math.h>
+
+
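+/* Convert the 1-based row/column indices passed in from R/S into
+   0-based C indices. */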
+void vdec(int *row_index, int *col_index, int *dimm)
+{
+ int i;
+
+ for(i = 0; i < *dimm; i++) {
+ row_index[i] -= 1;
+ col_index[i] -= 1;
+ }
+}
+
+
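+/* Unpack n symmetric M x M matrices: m holds dimm entries per matrix,
+   addressed by (row_index, col_index); a receives the corresponding
+   full M x M arrays (upper triangle only when *upper != 0). */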
+void m2a(double *m, double *a, int *dimm, int *row_index,
+ int *col_index, int *n, int *M, int *upper)
+{
+ int i, k, MM = *M * *M, MMn = *M * *M * *n;
+
+ if(*upper == 1 || *dimm != *M * (*M + 1) / 2)
+ for(k = 0; k < MMn; k++)
+ a[k] = 0.0;
+
+ for(k = 0; k < *n; k++)
+ {
+ for(i = 0; i < *dimm; i++)
+ {
+ a[row_index[i] + col_index[i] * *M] = m[i];
+ if(*upper == 0)
+ a[col_index[i] + row_index[i] * *M] = m[i];
+ }
+ a += MM;
+ m += *dimm;
+ }
+}
+
+
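+/* The inverse of m2a: copy the dimm addressed entries of each of the
+   n full M x M matrices in a back into compact storage m. */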
+void a2m(double *a, double *m, int *dimm, int *row_index,
+ int *col_index, int *n, int *M)
+{
+ int i, k, MM= *M * *M;
+
+ for(k = 0; k < *n; k++)
+ {
+ for(i = 0; i < *dimm; i++)
+ m[i] = a[row_index[i] + col_index[i] * *M];
+ a += MM;
+ m += *dimm;
+ }
+}
+
+
+
+
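+/* For each of the n observations, multiply the M x p matrix in cc by
+   the p-vector in ymat, writing the M-vector product to ans. */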
+void mux2(double *cc, double *ymat,
+ double *ans, int *p, int *n, int *M)
+{
+ double s;
+ int i, j, t, Mp = *M * *p;
+
+ for(i = 0; i < *n; i++)
+ {
+ for(j = 0; j < *M; j++)
+ {
+ s = 0.0;
+ for(t = 0; t < *p; t++)
+ s += cc[j + t * *M] * ymat[t];
+ *ans++ = s;
+ }
+ ymat += *p;
+ cc += Mp;
+ }
+}
+
+
+
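+/* Like mux2, but each M x M matrix is symmetric and stored in compact
+   form: unpack it into wk, then multiply by the M-vector in ymat. If
+   *upper != 0 the matrix is treated as upper triangular. */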
+void mux22(double *cc, double *ymat, double *ans, int *dimm,
+ int *row_index, int *col_index,
+ int *n, int *M, double *wk, int *upper)
+{
+ double s;
+ int j, t, k, one = 1, lower;
+
+ vdec(row_index, col_index, dimm);
+ for(k = 0; k < *n; k++)
+ {
+ m2a(cc, wk, dimm, row_index, col_index, &one, M, upper);
+
+ for(j = 0; j < *M; j++)
+ {
+ s = 0.0;
+ lower = *upper == 0 ? 0 : j;
+ for(t = lower; t < *M; t++)
+ s += wk[j + t * *M] * ymat[t];
+ *ans++ = s;
+ }
+ ymat += *M;
+ cc += *dimm;
+ }
+}
+
+
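+/* For each observation compute the r x r quadratic form
+   t(x) %*% A %*% x, where A is the symmetric M x M matrix in cc and
+   x is M x r; with *matrix == 1 both A and the result are kept in
+   compact form (dimm and dimr entries). Only *matrix == 1 is
+   currently supported (see the commented-out error branch below). */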
+void mux5(double *cc, double *x,
+ double *ans, int *M, int *n, int *r,
+ int *dimm,
+ int *dimr,
+ int *matrix,
+ double *wk, double *wk2,
+ int *row_index_M, int *col_index_M,
+ int *row_index_r, int *col_index_r)
+{
+ double s, *pd, *pd2;
+ int i, j, k, t, Mr = *M * *r, rr = *r * *r, MM = *M * *M, u,
+ jM, jr, kM, kr, one=1, upper=0;
+
+ if(*matrix == 1)
+ {
+ vdec(row_index_M, col_index_M, dimm);
+ vdec(row_index_r, col_index_r, dimr);
+ pd = wk;
+ pd2 = wk2;
+ } else {
+/* Commented out on 2/5/06. Need to fix this up more cleanly.
+ printf("Error: can only handle matrix.arg == 1\n");
+ exit(-1);
+*/
+ }
+
+ for(i = 0; i < *n; i++)
+ {
+ if(*matrix == 1)
+ m2a(cc, pd, dimm, row_index_M, col_index_M, &one, M, &upper);
+ else
+ {
+ pd = cc;
+ pd2 = ans;
+ }
+
+ for(j = 0; j < *r; j++)
+ {
+ jM = j * *M;
+ jr = j * *r;
+ for(k = j; k < *r; k++)
+ {
+ kM = k * *M;
+ kr = k * *r;
+ s = 0.0;
+ for(t = 0; t < *M; t++)
+ for(u = 0; u < *M; u++)
+ s += x[t + jM] * pd[t + u * *M] * x[u + kM];
+ pd2[j + kr] =
+ pd2[k + jr] = s;
+ }
+
+ }
+
+ if(*matrix == 1)
+ a2m(pd2, ans, dimr, row_index_r, col_index_r, &one, r);
+
+ cc += (*matrix == 1 ? *dimm : MM);
+ x += Mr;
+ ans += (*matrix == 1 ? *dimr : rr);
+ }
+}
+
+
+
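+/* Reconstruct, for each observation, the symmetric matrix
+   U %*% diag(evals) %*% t(U) from its eigenvectors (evects) and
+   eigenvalues, storing the result in compact form in ans. */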
+void mux55(double *evects, double *evals, double *ans, double *wk,
+ double *wk2, int *row_index, int *col_index,
+ int *M, int *n)
+{
+ double *pd, *pd2, t;
+ int i, j, k, s, MM = *M * *M, one=1,
+ MM12 = *M * (*M + 1)/2;
+
+ vdec(row_index, col_index, &MM12);
+
+ for(i = 0; i < *n; i++)
+ {
+ pd = evects;
+ pd2 = wk2;
+ for(j = 0; j < *M; j++)
+ for(k = 0; k < *M; k++)
+ *pd2++ = *pd++ * evals[j];
+
+ for(j = 0; j < *M; j++)
+ for(k = j; k < *M; k++)
+ {
+ t = 0.0;
+ for(s = 0; s < *M; s++)
+ t += wk2[j + s * *M] * evects[k + s * *M];
+ wk[j + k * *M] =
+ wk[k + j * *M] = t;
+ }
+
+ a2m(wk, ans, &MM12, row_index, col_index, &one, M);
+
+ ans += MM12;
+ evals += *M;
+ evects += MM;
+ }
+}
+
+
+
+
+
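+/* For each observation, the ordinary matrix product of the M x q
+   matrix in cc and the q x r matrix in x, written to ans. */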
+void mux7(double *cc, double *x,
+ double *ans, int *M, int *q, int *n, int *r)
+{
+ double s;
+ int i, j, k, t, Mq = *M * *q, qr = *q * *r, Mr = *M * *r,
+ kq, kM;
+
+ for(i = 0; i < *n; i++)
+ {
+ for(j = 0; j < *M; j++)
+ {
+ for(k = 0; k < *r; k++)
+ {
+ kq = k * *q;
+ kM = k * *M;
+ s = 0.0;
+ for(t = 0; t < *q; t++)
+ s += cc[j + t * *M] * x[t + kq];
+ ans[j + kM] = s;
+ }
+ }
+ cc += Mq;
+ ans += Mr;
+ x += qr;
+ }
+}
+
+
+
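+/* For each observation, overwrite the R x M block txmat (which holds
+   the transpose of an M x R matrix X) with the transpose of A %*% X,
+   where A is the symmetric (or, when *upper != 0, upper-triangular)
+   M x M matrix unpacked from cc. */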
+void mux111(double *cc, double *txmat, int *M, int *R, int *n,
+ double *wk, double *wk2, int *row_index, int *col_index,
+ int *dimm, int *upper)
+{
+ double s, *pd2;
+ int i, j, k, t, MM = *M * *M, MR = *M * *R, lower;
+
+ vdec(row_index, col_index, dimm);
+
+ for(i = 0; i < MM; i++)
+ wk[i] = 0.0;
+
+ for(t = 0; t < *n; t++)
+ {
+ for(i = 0; i < *dimm; i++)
+ {
+ if(*upper == 0)
+ wk[row_index[i] + col_index[i] * *M] =
+ wk[col_index[i] + row_index[i] * *M] = *cc++;
+ else
+ wk[row_index[i] + col_index[i] * *M] = *cc++;
+ }
+
+ pd2 = txmat;
+ for(i = 0; i < *M; i++)
+ for(j = 0; j < *R; j++)
+ wk2[i + j * *M] = *pd2++;
+
+ for(i = 0; i < *M; i++)
+ for(j = 0; j < *R; j++)
+ {
+ s = 0.0;
+ lower = *upper == 0 ? 0 : i;
+ for(k = lower; k < *M; k++)
+ s += wk[i + k * *M] * wk2[k + j * *M];
+ txmat[j + i * *R] = s;
+ }
+ txmat += MR;
+ }
+}
+
+
+
+
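+/* For each observation form diag(x) %*% cc %*% diag(x), writing the
+   full M x M result to ans. */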
+void mux15(double *cc, double *x,
+ double *ans, int *M, int *n)
+{
+ double *pd, *pd2;
+ int i, j, k, MM = *M * *M;
+
+ for(i = 0; i < *n; i++)
+ {
+ pd = cc;
+ pd2 = ans;
+ for(j = 0; j < *M; j++)
+ for(k = 0; k < *M; k++)
+ *pd2++ = *pd++ * x[j];
+
+ pd2 = ans;
+ for(j = 0; j < *M; j++)
+ for(k = 0; k < *M; k++)
+ {
+ *pd2 *= x[k];
+ pd2++;
+ }
+
+ ans += MM;
+ x += *M;
+ }
+}
+
+
+
+
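+/* In-place upper-triangular Cholesky factorization of each of the n
+   compactly stored symmetric M x M matrices in cc; ok[t] is set to 0
+   if the t-th matrix is not positive definite. */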
+void vchol(double *cc, int *M, int *n, int *ok, double *wk,
+ int *row_index, int *col_index, int *dimm)
+
+{
+ double s, *pd;
+ int t, i, j, k, iM, iiM, upper = 0, one = 1;
+
+ vdec(row_index, col_index, dimm);
+ pd = wk;
+
+ for(t = 0; t < *n; t++)
+ {
+ *ok = 1;
+
+ m2a(cc, wk, dimm, row_index, col_index, &one, M, &upper);
+
+ for(i = 0; i < *M; i++)
+ {
+ s = 0.0;
+ iM = i * *M;
+ iiM = i + iM;
+ for(k = 0; k < i; k++)
+ s += pd[k + iM] * pd[k + iM];
+
+ pd[iiM] -= s;
+ if(pd[iiM] < 0.0)
+ {
+ *ok = 0;
+ break;
+ }
+ pd[iiM] = sqrt(pd[iiM]);
+
+ for(j = i+1; j < *M; j++)
+ {
+ s = 0.0;
+ for(k = 0; k < i; k++)
+ s += pd[k + iM] * pd[k + j * *M];
+ pd[i + j * *M] = (pd[i + j * *M] - s) / pd[iiM];
+ }
+
+ }
+
+ a2m(wk, cc, dimm, row_index, col_index, &one, M);
+
+ cc += *dimm;
+ ok++;
+ }
+}
+
+
+
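+/* Forward substitution: for each observation solve trans(U) y = b,
+   where U is the compactly stored upper-triangular Cholesky factor
+   produced by vchol; b is overwritten with the solution. */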
+void vforsub(double *cc, double *b, int *M, int *n,
+ double *wk, int *row_index,
+ int *col_index, int *dimm)
+{
+ double s, *pd;
+ int j, k, t, upper = 1, one = 1;
+
+ pd = wk;
+ vdec(row_index, col_index, dimm);
+
+ for(t = 0; t < *n; t++)
+ {
+ m2a(cc, wk, dimm, row_index, col_index, &one, M, &upper);
+
+ for(j = 0; j < *M; j++)
+ {
+ s = b[j];
+ for(k = 0; k < j; k++)
+ s -= pd[k + j * *M] * b[k];
+ b[j] = s / pd[j + j * *M];
+ }
+ cc += *dimm;
+ b += *M;
+ }
+}
+
+
+
+
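+/* Back substitution: for each observation solve U x = y, overwriting
+   b; a vforsub followed by a vbacksub solves trans(U) U x = b. */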
+void vbacksub(double *cc, double *b, int *M, int *n,
+ double *wk, int *row_index,
+ int *col_index, int *dimm)
+{
+ double s, *pd;
+ int j, k, t, upper = 1, one = 1;
+
+ pd = wk;
+ vdec(row_index, col_index, dimm);
+
+ for(t = 0; t < *n; t++)
+ {
+ m2a(cc, wk, dimm, row_index, col_index, &one, M, &upper);
+
+ for(j = *M - 1; j >= 0; j--)
+ {
+ s = b[j];
+ for(k = j + 1; k < *M; k++)
+ s -= pd[j + k * *M] * b[k];
+ b[j] = s / pd[j + j * *M];
+ }
+ cc += *dimm;
+ b += *M;
+ }
+}
+
+
+
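+/* Cumulative operations across the columns of an nr x nc matrix:
+   *type == 1 accumulates running sums, 2 undoes them (differences),
+   and 3 accumulates running products. */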
+void tapplymat1(double *mat, int *nr, int *nc, int *type)
+{
+ double *pd = mat, *pd2 = mat + *nr;
+ int i, j;
+
+ if(*type==1)
+ for(j = 2; j <= *nc; j++)
+ for(i = 0; i < *nr; i++, pd2++)
+ *pd2 += *pd++;
+
+ if(*type==2)
+ {
+ pd2 = mat + *nr * *nc - 1;
+ pd = pd2 - *nr;
+ for(j = *nc; j >= 2; j--)
+ for(i = 0; i < *nr; i++, pd2--)
+ *pd2 -= *pd--;
+ }
+
+ if(*type==3)
+ for(j = 2; j <= *nc; j++)
+ for(i = 0; i < *nr; i++, pd2++)
+ *pd2 *= *pd++;
+
+ if(*type < 1 || *type > 3)
+ printf("Error: *type not matched\n");
+}
+
diff --git a/src/rgam.f b/src/rgam.f
new file mode 100644
index 0000000..31d3153
--- /dev/null
+++ b/src/rgam.f
@@ -0,0 +1,554 @@
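+c Machine-generated (ratfor) output with obfuscated identifiers: this
+c appears to be the cubic smoothing-spline fitter underlying the
+c package's vector smoothers. gzmfi3 chooses the smoothing parameter
+c either directly (ispar = 1) or by a golden-section/parabolic search
+c on a criterion computed in oipu6h (GCV, leave-one-out CV, or a
+c requested degrees of freedom); the banded penalized least-squares
+c system is factored with dpbfa8/dpbsl8, and the leverages come from
+c the partial inverse computed by gayot2.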
+ subroutine nvhb7f(egoxa3,atqh0o,xs,ys,ws, nfiumb4,nk, knot,coef,
+ &sz,rjcq9o, n9peut,l6xrjt,fpcb2n, sz6ohy, yc1ezl,hts1gp,mk2vyr,
+ &thfyl1,la5dcf)
+ implicit logical (a-z)
+ integer nfiumb4, nk, yc1ezl, hts1gp(3), mk2vyr, thfyl1, la5dcf
+ double precision egoxa3, atqh0o, xs(nfiumb4), ys(nfiumb4), ws(
+ &nfiumb4), knot(nk+4), coef(nk), sz(nfiumb4), rjcq9o(nfiumb4),
+ &n9peut, l6xrjt, fpcb2n(3), sz6ohy(1)
+ call gzmfi3(egoxa3,atqh0o,xs,ys,ws, nfiumb4,nk, knot,coef,sz,
+ &rjcq9o, n9peut,hts1gp(1),l6xrjt,hts1gp(2), hts1gp(3), fpcb2n(1),
+ &fpcb2n(2),fpcb2n(3), yc1ezl, sz6ohy(1), sz6ohy(nk+1),sz6ohy(2*nk+
+ &1),sz6ohy(3*nk+1),sz6ohy(4*nk+1), sz6ohy(5*nk+1),sz6ohy(6*nk+1),
+ &sz6ohy(7*nk+1),sz6ohy(8*nk+1), sz6ohy(9*nk+1),sz6ohy(9*nk+mk2vyr*
+ &nk+1),sz6ohy(9*nk+2*mk2vyr*nk+1), mk2vyr,thfyl1,la5dcf)
+ return
+ end
+ subroutine gzmfi3(egoxa3,atqh0o,xs,ys,ws, nfiumb4,nk, knot,coef,
+ &sz,rjcq9o, n9peut,rlhz2a,dwgkz6,ispar, pga6nu, lspar,uspar,fjo2dy,
+ & yc1ezl, mheq6i, n7cuql,dvpc8x,hdv8br,cbg5ys, vf1jtn,eh6nly,
+ &mvx9at,vbxpg4, rlep7v,lunah2,p2ip, mk2vyr,thfyl1,la5dcf)
+ implicit logical (a-z)
+ integer nfiumb4,nk, rlhz2a,ispar, yc1ezl, mk2vyr,thfyl1,la5dcf
+ integer pga6nu
+ double precision egoxa3,atqh0o,xs(nfiumb4),ys(nfiumb4),ws(nfiumb4)
+ &, knot(nk+4), coef(nk),sz(nfiumb4),rjcq9o(nfiumb4), n9peut,dwgkz6,
+ &lspar,uspar,fjo2dy, mheq6i(nk), n7cuql(nk),dvpc8x(nk),hdv8br(nk),
+ &cbg5ys(nk), vf1jtn(nk),eh6nly(nk),mvx9at(nk),vbxpg4(nk), rlep7v(
+ &mk2vyr,nk),lunah2(mk2vyr,nk),p2ip(thfyl1,nk)
+ double precision t1,t2,dyb3po, a,b,c,d,e,kqoy6w,xm,p,q,r,fjo2dy1,
+ &fjo2dy2,u,v,w, fu,fv,fw,fx,x, ax,bx
+ integer w3gohz, vucgi1r
+ double precision hz0fmy, epx9jf
+ hz0fmy = 8.0d88
+ epx9jf = 0.0d0
+ d = 0.5d0
+ u = 0.5d0
+ dyb3po = 0.5d0
+ w3gohz = 1
+23000 if(.not.(w3gohz.le.nfiumb4))goto 23002
+ if(.not.(ws(w3gohz).gt.0.0d0))goto 23003
+ ws(w3gohz) = dsqrt(ws(w3gohz))
+23003 continue
+ w3gohz = w3gohz+1
+ goto 23000
+23002 continue
+ if(.not.(yc1ezl .eq. 0))goto 23005
+ call poqy8c(vf1jtn,eh6nly,mvx9at,vbxpg4,knot,nk)
+ call ak9vxi(xs,ys,ws,knot, nfiumb4,nk, mheq6i,n7cuql,dvpc8x,
+ &hdv8br,cbg5ys)
+ t1 = 0.0d0
+ t2 = 0.0d0
+ do 23007 w3gohz = 3,nk-3
+ t1 = t1 + n7cuql(w3gohz)
+23007 continue
+ do 23009 w3gohz = 3,nk-3
+ t2 = t2 + vf1jtn(w3gohz)
+23009 continue
+ dyb3po = t1/t2
+ yc1ezl = 1
+23005 continue
+ if(.not.(ispar .eq. 1))goto 23011
+ call oipu6h(egoxa3,atqh0o,xs,ys,ws, nfiumb4,nk,rlhz2a, knot,coef,
+ &sz,rjcq9o,n9peut, dwgkz6, mheq6i, n7cuql,dvpc8x,hdv8br,cbg5ys,
+ &vf1jtn,eh6nly,mvx9at,vbxpg4, rlep7v,lunah2,p2ip,mk2vyr,thfyl1,
+ &la5dcf)
+ return
+23011 continue
+ ax = lspar
+ bx = uspar
+ c = 0.381966011250105097d0
+ kqoy6w = 2.0d-7
+ vucgi1r = 0
+ a = ax
+ b = bx
+ v = a + c*(b - a)
+ w = v
+ x = v
+ e = 0.0d0
+ dwgkz6 = dyb3po * dexp((-2.0d0 + x*6.0d0) * dlog(16.0d0))
+ call oipu6h(egoxa3,atqh0o,xs,ys,ws, nfiumb4,nk,rlhz2a, knot,coef,
+ &sz,rjcq9o,n9peut, dwgkz6, mheq6i, n7cuql,dvpc8x,hdv8br,cbg5ys,
+ &vf1jtn,eh6nly,mvx9at,vbxpg4, rlep7v,lunah2,p2ip,mk2vyr,thfyl1,
+ &la5dcf)
+ fx = n9peut
+ fv = fx
+ fw = fx
+23013 if(.not.(la5dcf .eq. 0))goto 23014
+ vucgi1r = vucgi1r + 1
+ xm = 0.5d0*(a + b)
+ fjo2dy1 = kqoy6w*dabs(x) + fjo2dy/3.0d0
+ fjo2dy2 = 2.0d0*fjo2dy1
+ if(.not.((dabs(x - xm) .le. (fjo2dy2 - 0.5d0*(b - a))) .or.(
+ &vucgi1r .gt. pga6nu)))goto 23015
+ go to 90
+23015 continue
+ if(.not.((dabs(e) .le. fjo2dy1) .or.(fx .ge. hz0fmy) .or.(fv .ge.
+ &hz0fmy) .or.(fw .ge. hz0fmy)))goto 23017
+ go to 40
+23017 continue
+ r = (x - w)*(fx - fv)
+ q = (x - v)*(fx - fw)
+ p = (x - v)*q - (x - w)*r
+ q = 2.0d0 * (q - r)
+ if(.not.(q .gt. 0.0d0))goto 23019
+ p = -p
+23019 continue
+ q = dabs(q)
+ r = e
+ e = d
+30 if(.not.((dabs(p) .ge. dabs(0.5d0*q*r)) .or.(q .eq. 0.0d0)))goto 2
+ &3021
+ go to 40
+23021 continue
+ if(.not.((p .le. q*(a - x)) .or. (p .ge. q*(b - x))))goto 23023
+ go to 40
+23023 continue
+ d = p/q
+ u = x + d
+ if(.not.((u - a) .lt. fjo2dy2))goto 23025
+ d = dsign(fjo2dy1, xm - x)
+23025 continue
+ if(.not.((b - u) .lt. fjo2dy2))goto 23027
+ d = dsign(fjo2dy1, xm - x)
+23027 continue
+ go to 50
+40 if(.not.(x .ge. xm))goto 23029
+ e = a - x
+ goto 23030
+23029 continue
+ e = b - x
+23030 continue
+ d = c*e
+50 if(.not.(dabs(d) .ge. fjo2dy1))goto 23031
+ u = x + d
+ goto 23032
+23031 continue
+ u = x + dsign(fjo2dy1, d)
+23032 continue
+ dwgkz6 = dyb3po * dexp((-2.0d0 + u*6.0) * dlog(16.0d0))
+ call oipu6h(egoxa3,atqh0o,xs,ys,ws, nfiumb4,nk,rlhz2a, knot,coef,
+ &sz,rjcq9o,n9peut, dwgkz6, mheq6i, n7cuql,dvpc8x,hdv8br,cbg5ys,
+ &vf1jtn,eh6nly,mvx9at,vbxpg4, rlep7v,lunah2,p2ip,mk2vyr,thfyl1,
+ &la5dcf)
+ fu = n9peut
+ if(.not.(fu .gt. hz0fmy))goto 23033
+ fu = 2.0d0 * hz0fmy
+23033 continue
+ if(.not.(fu .le. fx))goto 23035
+ if(.not.(u .ge. x))goto 23037
+ a = x
+ goto 23038
+23037 continue
+ b = x
+23038 continue
+ v = w
+ fv = fw
+ w = x
+ fw = fx
+ x = u
+ fx = fu
+ goto 23036
+23035 continue
+ if(.not.(u .lt. x))goto 23039
+ a = u
+ goto 23040
+23039 continue
+ b = u
+23040 continue
+ if(.not.((fu .le. fw) .or. (w .eq. x)))goto 23041
+ v = w
+ fv = fw
+ w = u
+ fw = fu
+ goto 23042
+23041 continue
+ if(.not.((fu .le. fv) .or. (v .eq. x) .or. (v .eq. w)))goto 23043
+ v = u
+ fv = fu
+23043 continue
+23042 continue
+23036 continue
+ goto 23013
+23014 continue
+90 epx9jf = 0.0d0
+ dwgkz6 = dyb3po * dexp((-2.0d0 + x*6.0d0) * dlog(16.0d0))
+ n9peut = fx
+ return
+23012 continue
+ return
+ end
+ subroutine poqy8c(vf1jtn,eh6nly,mvx9at,vbxpg4,tb,nb)
+ implicit logical (a-z)
+ integer nb
+ double precision vf1jtn(nb),eh6nly(nb),mvx9at(nb),vbxpg4(nb),tb(
+ &nb+4)
+ integer m5xudf,ilo,i6ndbu, ynmzp6, def4wn
+ integer w3gohz,p1rifj,d9rjek
+ double precision uq9jtc(4,3),bgu6fw(16),avoe4y(4),yw2(4), wpt
+ double precision uoqx2m
+ uoqx2m = 1.0d0 / 3.0d0
+ ynmzp6 = 3
+ def4wn = 4
+ do 23045 w3gohz = 1,nb
+ vf1jtn(w3gohz) = 0.0d0
+ eh6nly(w3gohz) = 0.0d0
+ mvx9at(w3gohz) = 0.0d0
+ vbxpg4(w3gohz) = 0.0d0
+23045 continue
+ ilo = 1
+ do 23047 w3gohz = 1,nb
+ call vinterv(tb(1),(nb+1),tb(w3gohz),m5xudf,i6ndbu)
+ call vbsplvd(tb,def4wn,tb(w3gohz),m5xudf,bgu6fw,uq9jtc,ynmzp6)
+ do 23049 p1rifj = 1,4
+ avoe4y(p1rifj) = uq9jtc(p1rifj,3)
+23049 continue
+ call vbsplvd(tb,def4wn,tb(w3gohz+1),m5xudf,bgu6fw,uq9jtc,ynmzp6)
+ do 23051 p1rifj = 1,4
+ yw2(p1rifj) = uq9jtc(p1rifj,3) - avoe4y(p1rifj)
+23051 continue
+ wpt = tb(w3gohz+1) - tb(w3gohz)
+ if(.not.(m5xudf .ge. 4))goto 23053
+ do 23055 p1rifj = 1,4
+ d9rjek = p1rifj
+ vf1jtn(m5xudf-4+p1rifj) = vf1jtn(m5xudf-4+p1rifj) + wpt * (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+ d9rjek = p1rifj+1
+ if(.not.(d9rjek .le. 4))goto 23057
+ eh6nly(m5xudf+p1rifj-4) = eh6nly(m5xudf+p1rifj-4) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23057 continue
+ d9rjek = p1rifj+2
+ if(.not.(d9rjek .le. 4))goto 23059
+ mvx9at(m5xudf+p1rifj-4) = mvx9at(m5xudf+p1rifj-4) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23059 continue
+ d9rjek = p1rifj+3
+ if(.not.(d9rjek .le. 4))goto 23061
+ vbxpg4(m5xudf+p1rifj-4) = vbxpg4(m5xudf+p1rifj-4) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23061 continue
+23055 continue
+ goto 23054
+23053 continue
+ if(.not.(m5xudf .eq. 3))goto 23063
+ do 23065 p1rifj = 1,3
+ d9rjek = p1rifj
+ vf1jtn(m5xudf-3+p1rifj) = vf1jtn(m5xudf-3+p1rifj) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+ d9rjek = p1rifj+1
+ if(.not.(d9rjek .le. 3))goto 23067
+ eh6nly(m5xudf+p1rifj-3) = eh6nly(m5xudf+p1rifj-3) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23067 continue
+ d9rjek = p1rifj+2
+ if(.not.(d9rjek .le. 3))goto 23069
+ mvx9at(m5xudf+p1rifj-3) = mvx9at(m5xudf+p1rifj-3) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23069 continue
+23065 continue
+ goto 23064
+23063 continue
+ if(.not.(m5xudf .eq. 2))goto 23071
+ do 23073 p1rifj = 1,2
+ d9rjek = p1rifj
+ vf1jtn(m5xudf-2+p1rifj) = vf1jtn(m5xudf-2+p1rifj) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+ d9rjek = p1rifj+1
+ if(.not.(d9rjek .le. 2))goto 23075
+ eh6nly(m5xudf+p1rifj-2) = eh6nly(m5xudf+p1rifj-2) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23075 continue
+23073 continue
+ goto 23072
+23071 continue
+ if(.not.(m5xudf .eq. 1))goto 23077
+ do 23079 p1rifj = 1,1
+ d9rjek = p1rifj
+ vf1jtn(m5xudf-1+p1rifj) = vf1jtn(m5xudf-1+p1rifj) + wpt* (avoe4y(
+ &p1rifj)*avoe4y(d9rjek) + (yw2(p1rifj)*avoe4y(d9rjek) + yw2(d9rjek)
+ &*avoe4y(p1rifj))*0.50 + yw2(p1rifj)*yw2(d9rjek)*uoqx2m)
+23079 continue
+23077 continue
+23072 continue
+23064 continue
+23054 continue
+23047 continue
+ return
+ end
+ subroutine gayot2(rlep7v,lunah2,p2ip, mk2vyr,nk,thfyl1,sbkvx6)
+ implicit logical (a-z)
+ integer mk2vyr,nk,thfyl1,sbkvx6
+ double precision rlep7v(mk2vyr,nk), lunah2(mk2vyr,nk), p2ip(
+ &thfyl1,nk)
+ integer w3gohz, d9rjek, nd6mep
+ double precision yrbij3(3),vef2gk(2),cfko0l(1),c0,c1,c2,c3
+ c1 = 0.0d0
+ c2 = 0.0d0
+ c3 = 0.0d0
+ yrbij3(1) = 0.0d0
+ yrbij3(2) = 0.0d0
+ yrbij3(1) = 0.0d0
+ vef2gk(1) = 0.0d0
+ vef2gk(2) = 0.0d0
+ cfko0l(1) = 0.0d0
+ do 23081 w3gohz = 1,nk
+ d9rjek = nk-w3gohz+1
+ c0 = 1.0d0 / rlep7v(4,d9rjek)
+ if(.not.(d9rjek .le. nk-3))goto 23083
+ c1 = rlep7v(1,d9rjek+3)*c0
+ c2 = rlep7v(2,d9rjek+2)*c0
+ c3 = rlep7v(3,d9rjek+1)*c0
+ goto 23084
+23083 continue
+ if(.not.(d9rjek .eq. nk-2))goto 23085
+ c1 = 0.0d0
+ c2 = rlep7v(2,d9rjek+2)*c0
+ c3 = rlep7v(3,d9rjek+1)*c0
+ goto 23086
+23085 continue
+ if(.not.(d9rjek .eq. nk-1))goto 23087
+ c1 = 0.0d0
+ c2 = 0.0d0
+ c3 = rlep7v(3,d9rjek+1)*c0
+ goto 23088
+23087 continue
+ if(.not.(d9rjek .eq. nk))goto 23089
+ c1 = 0.0d0
+ c2 = 0.0d0
+ c3 = 0.0d0
+23089 continue
+23088 continue
+23086 continue
+23084 continue
+ lunah2(1,d9rjek) = 0.0d0 - (c1*yrbij3(1)+c2*yrbij3(2)+c3*yrbij3(3)
+ &)
+ lunah2(2,d9rjek) = 0.0d0 - (c1*yrbij3(2)+c2*vef2gk(1)+c3*vef2gk(2)
+ &)
+ lunah2(3,d9rjek) = 0.0d0 - (c1*yrbij3(3)+c2*vef2gk(2)+c3*cfko0l(1)
+ &)
+ lunah2(4,d9rjek) = c0**2 + c1**2 * yrbij3(1) + 2.0d0*c1*c2*yrbij3(
+ &2)+2.0d0*c1*c3*yrbij3(3) + c2**2 * vef2gk(1) + 2.0d0*c2*c3*vef2gk(
+ &2) + c3**2 * cfko0l(1)
+ yrbij3(1) = vef2gk(1)
+ yrbij3(2) = vef2gk(2)
+ yrbij3(3) = lunah2(2,d9rjek)
+ vef2gk(1) = cfko0l(1)
+ vef2gk(2) = lunah2(3,d9rjek)
+ cfko0l(1) = lunah2(4,d9rjek)
+23081 continue
+ if(.not.(sbkvx6 .eq. 0))goto 23091
+ return
+23091 continue
+ do 23093 w3gohz = 1,nk
+ d9rjek = nk-w3gohz+1
+ nd6mep = 1
+23095 if(.not.(nd6mep.le.4.and.d9rjek+nd6mep-1.le.nk))goto 23097
+ p2ip(d9rjek,d9rjek+nd6mep-1) = lunah2(5-nd6mep,d9rjek)
+ nd6mep = nd6mep+1
+ goto 23095
+23097 continue
+23093 continue
+ do 23098 w3gohz = 1,nk
+ d9rjek = nk-w3gohz+1
+ nd6mep = d9rjek-4
+23100 if(.not.(nd6mep.ge.1))goto 23102
+ c0 = 1.0 / rlep7v(4,nd6mep)
+ c1 = rlep7v(1,nd6mep+3)*c0
+ c2 = rlep7v(2,nd6mep+2)*c0
+ c3 = rlep7v(3,nd6mep+1)*c0
+ p2ip(nd6mep,d9rjek) = 0.0d0- ( c1*p2ip(nd6mep+3,d9rjek) + c2*p2ip(
+ &nd6mep+2,d9rjek) + c3*p2ip(nd6mep+1,d9rjek) )
+ nd6mep = nd6mep-1
+ goto 23100
+23102 continue
+23098 continue
+ return
+23092 continue
+ end
+ subroutine oipu6h(egoxa3,atqh0o,x,y,w, nfiumb4,nk,rlhz2a, knot,
+ &coef,sz,rjcq9o, n9peut, dwgkz6, mheq6i, n7cuql,dvpc8x,hdv8br,
+ &cbg5ys, vf1jtn,eh6nly,mvx9at,vbxpg4, rlep7v,lunah2,p2ip, mk2vyr,
+ &thfyl1,fjg0qv)
+ implicit logical (a-z)
+ integer nfiumb4,nk,rlhz2a, mk2vyr,thfyl1,fjg0qv
+ double precision egoxa3,atqh0o,x(nfiumb4),y(nfiumb4),w(nfiumb4),
+ &knot(nk+4), coef(nk),sz(nfiumb4),rjcq9o(nfiumb4), n9peut, dwgkz6,
+ &mheq6i(nk), n7cuql(nk),dvpc8x(nk),hdv8br(nk),cbg5ys(nk), vf1jtn(
+ &nk),eh6nly(nk),mvx9at(nk),vbxpg4(nk), rlep7v(mk2vyr,nk),lunah2(
+ &mk2vyr,nk),p2ip(thfyl1,nk)
+ double precision das4bx, bgu6fw(16), b0,b1,b2,b3,kqoy6w, uq9jtc(4,
+ &1), xv,bvalue,df
+ double precision risyv0
+ integer oht3ga, ynmzp6, ilo, i6ndbu, d9rjek, w3gohz, px1yhr,
+ &m5xudf, def4wn
+ ilo = 1
+ kqoy6w = 0.1d-10
+ oht3ga = 0
+ ynmzp6 = 3
+ def4wn = 4
+ do 23103 w3gohz = 1,nk
+ coef(w3gohz) = mheq6i(w3gohz)
+23103 continue
+ do 23105 w3gohz = 1,nk
+ rlep7v(4,w3gohz) = n7cuql(w3gohz)+dwgkz6*vf1jtn(w3gohz)
+23105 continue
+ do 23107 w3gohz = 1,(nk-1)
+ rlep7v(3,w3gohz+1) = dvpc8x(w3gohz)+dwgkz6*eh6nly(w3gohz)
+23107 continue
+ do 23109 w3gohz = 1,(nk-2)
+ rlep7v(2,w3gohz+2) = hdv8br(w3gohz)+dwgkz6*mvx9at(w3gohz)
+23109 continue
+ do 23111 w3gohz = 1,(nk-3)
+ rlep7v(1,w3gohz+3) = cbg5ys(w3gohz)+dwgkz6*vbxpg4(w3gohz)
+23111 continue
+ call dpbfa8(rlep7v,mk2vyr,nk,ynmzp6,fjg0qv)
+ if(.not.(fjg0qv .ne. 0))goto 23113
+ return
+23113 continue
+ call dpbsl8(rlep7v,mk2vyr,nk,ynmzp6,coef)
+ px1yhr = 1
+ do 23115 w3gohz = 1,nfiumb4
+ xv = x(w3gohz)
+ sz(w3gohz) = bvalue(knot,coef, nk,def4wn,xv,oht3ga)
+23115 continue
+ if(.not.(rlhz2a .eq. 0))goto 23117
+ return
+23117 continue
+ call gayot2(rlep7v,lunah2,p2ip, mk2vyr,nk,thfyl1,oht3ga)
+ do 23119 w3gohz = 1,nfiumb4
+ xv = x(w3gohz)
+ call vinterv(knot(1),(nk+1),xv,m5xudf,i6ndbu)
+ if(.not.(i6ndbu .eq. -1))goto 23121
+ m5xudf = 4
+ xv = knot(4) + kqoy6w
+23121 continue
+ if(.not.(i6ndbu .eq. 1))goto 23123
+ m5xudf = nk
+ xv = knot(nk+1) - kqoy6w
+23123 continue
+ d9rjek = m5xudf-3
+ call vbsplvd(knot,4,xv,m5xudf,bgu6fw,uq9jtc,1)
+ b0 = uq9jtc(1,1)
+ b1 = uq9jtc(2,1)
+ b2 = uq9jtc(3,1)
+ b3 = uq9jtc(4,1)
+ rjcq9o(w3gohz) = (lunah2(4,d9rjek)*b0**2 + 2.0d0*lunah2(3,d9rjek)*
+ &b0*b1 + 2.0d0*lunah2(2,d9rjek)*b0*b2 + 2.0d0*lunah2(1,d9rjek)*b0*
+ &b3 + lunah2(4,d9rjek+1)*b1**2 + 2.0d0*lunah2(3,d9rjek+1)*b1*b2 +
+ &2.0d0*lunah2(2,d9rjek+1)*b1*b3 + lunah2(4,d9rjek+2)*b2**2 + 2.0d0*
+ &lunah2(3,d9rjek+2)*b2*b3 + lunah2(4,d9rjek+3)*b3**2 ) * w(w3gohz)*
+ &*2
+23119 continue
+ if(.not.(rlhz2a .eq. 1))goto 23125
+ das4bx = 0.0d0
+ df = 0.0d0
+ risyv0 = 0.0d0
+ do 23127 w3gohz = 1,nfiumb4
+ das4bx = das4bx + ((y(w3gohz)-sz(w3gohz))*w(w3gohz))**2
+ df = df + rjcq9o(w3gohz)
+ risyv0 = risyv0 + w(w3gohz)*w(w3gohz)
+23127 continue
+ n9peut = (das4bx/risyv0)/((1.0d0-(atqh0o+egoxa3*df)/risyv0)**2)
+ goto 23126
+23125 continue
+ if(.not.(rlhz2a .eq. 2))goto 23129
+ n9peut = 0.0d0
+ risyv0 = 0.0d0
+ do 23131 w3gohz = 1,nfiumb4
+ n9peut = n9peut + (((y(w3gohz)-sz(w3gohz))*w(w3gohz))/(1.0d0-
+ &rjcq9o(w3gohz)))**2
+ risyv0 = risyv0 + w(w3gohz)*w(w3gohz)
+23131 continue
+ n9peut = n9peut / risyv0
+ goto 23130
+23129 continue
+ n9peut = 0.0d0
+ do 23133 w3gohz = 1,nfiumb4
+ n9peut = n9peut+rjcq9o(w3gohz)
+23133 continue
+ n9peut = 3.0d0 + (atqh0o-n9peut)**2
+23130 continue
+23126 continue
+ return
+23118 continue
+ end
+ subroutine ak9vxi(p3vlea,hr83e,w,onyz6j, xl6qgm,nfiumb4, wevr5o,
+ &n7cuql,dvpc8x,hdv8br,cbg5ys)
+ implicit logical (a-z)
+ integer xl6qgm,nfiumb4
+ double precision p3vlea(xl6qgm),hr83e(xl6qgm),w(xl6qgm),onyz6j(
+ &nfiumb4+4), wevr5o(nfiumb4), n7cuql(nfiumb4),dvpc8x(nfiumb4),
+ &hdv8br(nfiumb4),cbg5ys(nfiumb4)
+ double precision kqoy6w,uq9jtc(4,1),bgu6fw(16)
+ integer d9rjek,w3gohz,ilo,m5xudf,i6ndbu
+ do 23135 w3gohz = 1,nfiumb4
+ wevr5o(w3gohz) = 0.0d0
+ n7cuql(w3gohz) = 0.0d0
+ dvpc8x(w3gohz) = 0.0d0
+ hdv8br(w3gohz) = 0.0d0
+ cbg5ys(w3gohz) = 0.0d0
+23135 continue
+ ilo = 1
+ kqoy6w = 0.1d-9
+ do 23137 w3gohz = 1,xl6qgm
+ call vinterv(onyz6j(1),(nfiumb4+1),p3vlea(w3gohz),m5xudf,i6ndbu)
+ if(.not.(i6ndbu .eq. 1))goto 23139
+ if(.not.(p3vlea(w3gohz) .le. (onyz6j(m5xudf)+kqoy6w)))goto 23141
+ m5xudf = m5xudf-1
+ goto 23142
+23141 continue
+ return
+23142 continue
+23139 continue
+ call vbsplvd(onyz6j,4,p3vlea(w3gohz),m5xudf,bgu6fw,uq9jtc,1)
+ d9rjek = m5xudf-4+1
+ wevr5o(d9rjek) = wevr5o(d9rjek)+w(w3gohz)**2*hr83e(w3gohz)*uq9jtc(
+ &1,1)
+ n7cuql(d9rjek) = n7cuql(d9rjek)+w(w3gohz)**2*uq9jtc(1,1)**2
+ dvpc8x(d9rjek) = dvpc8x(d9rjek)+w(w3gohz)**2*uq9jtc(1,1)*uq9jtc(2,
+ &1)
+ hdv8br(d9rjek) = hdv8br(d9rjek)+w(w3gohz)**2*uq9jtc(1,1)*uq9jtc(3,
+ &1)
+ cbg5ys(d9rjek) = cbg5ys(d9rjek)+w(w3gohz)**2*uq9jtc(1,1)*uq9jtc(4,
+ &1)
+ d9rjek = m5xudf-4+2
+ wevr5o(d9rjek) = wevr5o(d9rjek)+w(w3gohz)**2*hr83e(w3gohz)*uq9jtc(
+ &2,1)
+ n7cuql(d9rjek) = n7cuql(d9rjek)+w(w3gohz)**2*uq9jtc(2,1)**2
+ dvpc8x(d9rjek) = dvpc8x(d9rjek)+w(w3gohz)**2*uq9jtc(2,1)*uq9jtc(3,
+ &1)
+ hdv8br(d9rjek) = hdv8br(d9rjek)+w(w3gohz)**2*uq9jtc(2,1)*uq9jtc(4,
+ &1)
+ d9rjek = m5xudf-4+3
+ wevr5o(d9rjek) = wevr5o(d9rjek)+w(w3gohz)**2*hr83e(w3gohz)*uq9jtc(
+ &3,1)
+ n7cuql(d9rjek) = n7cuql(d9rjek)+w(w3gohz)**2*uq9jtc(3,1)**2
+ dvpc8x(d9rjek) = dvpc8x(d9rjek)+w(w3gohz)**2*uq9jtc(3,1)*uq9jtc(4,
+ &1)
+ d9rjek = m5xudf-4+4
+ wevr5o(d9rjek) = wevr5o(d9rjek)+w(w3gohz)**2*hr83e(w3gohz)*uq9jtc(
+ &4,1)
+ n7cuql(d9rjek) = n7cuql(d9rjek)+w(w3gohz)**2*uq9jtc(4,1)**2
+23137 continue
+ return
+ end
diff --git a/src/tyeepolygamma.f b/src/tyeepolygamma.f
new file mode 100644
index 0000000..4ec7432
--- /dev/null
+++ b/src/tyeepolygamma.f
@@ -0,0 +1,130 @@
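+c Machine-generated (ratfor) output with obfuscated identifiers.
+c These routines appear to evaluate the digamma (vdgam1, vdgam2) and
+c trigamma (vtgam1, vtgam2) functions: arguments below 6 are shifted
+c upwards via psi(x) = psi(x+6) - sum 1/(x+i) and
+c psi'(x) = psi'(x+6) + sum 1/(x+i)**2, after which the asymptotic
+c (Bernoulli-number) series is applied. c4uxow is an OK flag, set to
+c 0 for the invalid case x .le. 0; dgam1w and tgam1w are elementwise
+c wrappers.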
+ subroutine vdgam1(x, ghry8z, c4uxow)
+ implicit logical (a-z)
+ double precision x, ghry8z
+ integer c4uxow
+ double precision w, sqap4b, temp
+ if(.not.(x .le. 0.0d0))goto 23000
+ c4uxow = 0
+ return
+23000 continue
+ c4uxow = 1
+23001 continue
+ if(.not.(x .lt. 6.0d0))goto 23002
+ call vdgam2(x + 6.0d0, temp, c4uxow)
+ ghry8z = temp - 1.0d0/x - 1.0d0/(x + 1.0d0) - 1.0d0/(x + 2.0d0) -
+ &1.0d0/(x + 3.0d0) - 1.0d0/(x + 4.0d0) - 1.0d0/(x + 5.0d0)
+ return
+23002 continue
+ w = 1.0d0 / (x * x)
+ sqap4b = ((w * (-1.0d0/12.0d0 + ((w * (1.0d0/120.0d0 + ((w * (-1.
+ &0d0/252.0d0 + ((w * (1.0d0/240.0d0 + ((w * (-1.0d0/132.0d0 + ((w *
+ & (691.0d0/32760.0d0 + ((w * (-1.0d0/12.0d0 + (3617.0d0 * w)/8160.
+ &0d0)))))))))))))))))))))
+ ghry8z = ( dlog(x) - 0.5d0/x + sqap4b )
+ return
+ end
+ subroutine vdgam2(x, ghry8z, c4uxow)
+ implicit logical (a-z)
+ double precision x, ghry8z
+ integer c4uxow
+ double precision w, sqap4b, temp
+ if(.not.(x .le. 0.0d0))goto 23004
+ c4uxow = 0
+ return
+23004 continue
+ c4uxow = 1
+23005 continue
+ if(.not.(x .lt. 6.0d0))goto 23006
+ call vdgam1(x + 6.0d0, temp, c4uxow)
+ ghry8z = temp - 1.0d0/x - 1.0d0/(x + 1.0d0) - 1.0d0/(x + 2.0d0) -
+ &1.0d0/(x + 3.0d0) - 1.0d0/(x + 4.0d0) - 1.0d0/(x + 5.0d0)
+ return
+23006 continue
+ w = 1.0d0 / (x * x)
+ sqap4b = ((w * (-1.0d0/12.0d0 + ((w * (1.0d0/120.0d0 + ((w * (-1.
+ &0d0/252.0d0 + ((w * (1.0d0/240.0d0 + ((w * (-1.0d0/132.0d0 + ((w *
+ & (691.0d0/32760.0d0 + ((w * (-1.0d0/12.0d0 + (3617.0d0 * w)/8160.
+ &0d0)))))))))))))))))))))
+ ghry8z = ( dlog(x) - 0.5d0/x + sqap4b )
+ return
+ end
+ subroutine vtgam1(x, ghry8z, c4uxow)
+ implicit logical (a-z)
+ double precision x, ghry8z
+ integer c4uxow
+ double precision w, sqap4b, temp
+ if(.not.(x .le. 0.0d0))goto 23008
+ c4uxow = 0
+ return
+23008 continue
+ c4uxow = 1
+23009 continue
+ if(.not.(x .lt. 6.0d0))goto 23010
+ call vtgam2(x + 6.0d0, temp, c4uxow)
+ ghry8z = temp + 1.0d0/x**2 + 1.0d0/(x + 1.0d0)**2 + 1.0d0/(x + 2.
+ &0d0)**2 + 1.0d0/(x + 3.0d0)**2 + 1.0d0/(x + 4.0d0)**2 + 1.0d0/(x +
+ & 5.0d0)**2
+ return
+23010 continue
+ w = 1.0d0 / (x * x)
+ sqap4b = 1.0d0 + (w * (1.0d0/6.0d0 + (w * (-1.0d0/30.0d0 + (w * (
+ &1.0d0/42.0d0 + (w * (-1.0d0/30.0d0 + (w * (5.0d0/66.0d0 + (w * (-
+     &691.0d0/2730.0d0 + (w * (7.0d0/6.0d0 - (3617.0d0 * w)/510.0d0)))))
+ &)))))))))
+ ghry8z = 0.5d0 * w + sqap4b / x
+ return
+ end
+ subroutine vtgam2(x, ghry8z, c4uxow)
+ implicit logical (a-z)
+ double precision x, ghry8z
+ integer c4uxow
+ double precision w, sqap4b, temp
+ if(.not.(x .le. 0.0d0))goto 23012
+ c4uxow = 0
+ return
+23012 continue
+ c4uxow = 1
+23013 continue
+ if(.not.(x .lt. 6.0d0))goto 23014
+ call vtgam1(x + 6.0d0, temp, c4uxow)
+ ghry8z = temp + 1.0d0/x**2 + 1.0d0/(x + 1.0d0)**2 + 1.0d0/(x + 2.
+ &0d0)**2 + 1.0d0/(x + 3.0d0)**2 + 1.0d0/(x + 4.0d0)**2 + 1.0d0/(x +
+ & 5.0d0)**2
+ return
+23014 continue
+ w = 1.0d0 / (x * x)
+ sqap4b = 1.0d0 + (w * (1.0d0/6.0d0 + (w * (-1.0d0/30.0d0 + (w * (
+ &1.0d0/42.0d0 + (w * (-1.0d0/30.0d0 + (w * (5.0d0/66.0d0 + (w * (-
+     &691.0d0/2730.0d0 + (w * (7.0d0/6.0d0 - (3617.0d0 * w)/510.0d0)))))
+ &)))))))))
+ ghry8z = 0.5d0 * w + sqap4b / x
+ return
+ end
+ subroutine dgam1w(x, ghry8z, n, c4uxow)
+ implicit logical (a-z)
+ integer n, c4uxow
+ double precision x(n), ghry8z(n)
+ integer i, lqhm2g
+ c4uxow = 1
+ do 23016 i=1,n
+ call vdgam1(x(i), ghry8z(i), lqhm2g)
+ if(.not.(lqhm2g .ne. 1))goto 23018
+ c4uxow = lqhm2g
+23018 continue
+23016 continue
+ return
+ end
+ subroutine tgam1w(x, ghry8z, n, c4uxow)
+ implicit logical (a-z)
+ integer n, c4uxow
+ double precision x(n), ghry8z(n)
+ integer i, lqhm2g
+ c4uxow = 1
+ do 23020 i=1,n
+ call vtgam1(x(i), ghry8z(i), lqhm2g)
+ if(.not.(lqhm2g .ne. 1))goto 23022
+ c4uxow = lqhm2g
+23022 continue
+23020 continue
+ return
+ end
diff --git a/src/vcall2.f b/src/vcall2.f
new file mode 100644
index 0000000..22e52cc
--- /dev/null
+++ b/src/vcall2.f
@@ -0,0 +1,10 @@
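+c Dummy routines: the bodies intentionally do nothing, apparently so
+c that these symbols exist for .Fortran() calls.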
+ subroutine vcall2(onemor,w,y,eta,beta,u)
+ logical onemor
+ double precision w(1), y(1), eta(1), beta(1), u(1)
+ return
+ end
+ subroutine vcall1(onemor,y,eta,beta,u,xbig,cpxbig)
+ logical onemor, cpxbig
+ double precision y(1), eta(1), beta(1), u(1), xbig(1)
+ return
+ end
diff --git a/src/veigen.f b/src/veigen.f
new file mode 100644
index 0000000..99666b3
--- /dev/null
+++ b/src/veigen.f
@@ -0,0 +1,779 @@
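+c veigen computes, for each of the n symmetric M x M matrices stored
+c compactly in x (dimmv entries addressed by rowi and coli), the
+c eigenvalues vals and, if ov is nonzero, the eigenvectors vec, by
+c unpacking into wk and calling the EISPACK-based driver vrs818
+c below; a nonzero error code ec aborts the loop.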
+ subroutine veigen(M, n, x, vals, ov, vec, junk1, junk2,
+ * wk, rowi, coli, dimmv, ec)
+ implicit logical (a-z)
+ integer M, n, ov, ec, i, k, dimmv, MM2,
+ * rowi(M*(M+1)/2), coli(M*(M+1)/2), full
+ double precision x(dimmv, n), vals(M, n), vec(M,M,n), junk1(M),
+ * junk2(M), wk(M,M)
+
+ MM2 = M*(M+1)/2
+ if(dimmv.eq.MM2) then
+ full = 1
+ else
+ full = 0
+ end if
+ do 300 i=1,n
+ do 600 k=1,dimmv
+ wk(rowi(k), coli(k)) = x(k,i)
+ wk(coli(k), rowi(k)) = wk(rowi(k), coli(k))
+600 continue
+
+ if(full.eq.0) then
+ do 500 k=dimmv+1,MM2
+ wk(rowi(k), coli(k)) = 0.0d0
+ wk(coli(k), rowi(k)) = 0.0d0
+500 continue
+ end if
+c
+ call vrs818(M, M, wk, vals(1,i), ov, vec(1,1,i), junk1,
+ * junk2, ec)
+
+
+ if(ec.ne.0) goto 200
+300 continue
+c
+200 return
+ end
+
+
+ SUBROUTINE VRS818(NM,N,A,W,MATZ,Z,FV1,FV2,IERR)
+C
+ INTEGER N,NM,IERR,MATZ
+ DOUBLE PRECISION A(NM,N),W(N),Z(NM,N),FV1(N),FV2(N)
+C
+C THIS SUBROUTINE CALLS THE RECOMMENDED SEQUENCE OF
+C SUBROUTINES FROM THE EIGENSYSTEM SUBROUTINE PACKAGE (EISPACK)
+C TO FIND THE EIGENVALUES AND EIGENVECTORS (IF DESIRED)
+C OF A REAL SYMMETRIC MATRIX.
+C
+C ON INPUT
+C
+C NM MUST BE SET TO THE ROW DIMENSION OF THE TWO-DIMENSIONAL
+C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM
+C DIMENSION STATEMENT.
+C
+C N IS THE ORDER OF THE MATRIX A.
+C
+C A CONTAINS THE REAL SYMMETRIC MATRIX.
+C
+C MATZ IS AN INTEGER VARIABLE SET EQUAL TO ZERO IF
+C ONLY EIGENVALUES ARE DESIRED. OTHERWISE IT IS SET TO
+C ANY NON-ZERO INTEGER FOR BOTH EIGENVALUES AND EIGENVECTORS.
+C
+C ON OUTPUT
+C
+C W CONTAINS THE EIGENVALUES IN ASCENDING ORDER.
+C
+C Z CONTAINS THE EIGENVECTORS IF MATZ IS NOT ZERO.
+C
+C IERR IS AN INTEGER OUTPUT VARIABLE SET EQUAL TO AN ERROR
+C COMPLETION CODE DESCRIBED IN THE DOCUMENTATION FOR TQLRAT
+C AND TQL2. THE NORMAL COMPLETION CODE IS ZERO.
+C
+C FV1 AND FV2 ARE TEMPORARY STORAGE ARRAYS.
+C
+C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW,
+C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
+C
+C THIS VERSION DATED AUGUST 1983.
+C
+C ------------------------------------------------------------------
+C
+ IF (N .LE. NM) GO TO 10
+ IERR = 10 * N
+ GO TO 50
+C
+ 10 IF (MATZ .NE. 0) GO TO 20
+C .......... FIND EIGENVALUES ONLY ..........
+ CALL VTRED1(NM,N,A,W,FV1,FV2)
+ CALL TQLRA9(N,W,FV2,IERR)
+ GO TO 50
+C .......... FIND BOTH EIGENVALUES AND EIGENVECTORS ..........
+ 20 CALL VTRED2(NM,N,A,W,FV1,Z)
+ CALL VTQL21(NM,N,W,FV1,Z,IERR)
+ 50 RETURN
+ END
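
Editor's note: VRS818 is the standard EISPACK driver RS under another name, so the calling sequence documented in the header above can be exercised directly. The sketch below is illustrative only -- the program name, the test matrix, and the assumption that it is compiled and linked against this file (e.g. gfortran test.f veigen.f) are editorial, not part of the upstream sources:

c     sketch: drive vrs818 on a 3x3 symmetric matrix whose
c     eigenvalues are known: 2-sqrt(2), 2, 2+sqrt(2)
      program tryrs
      integer nm, n, matz, ierr, i
      parameter (nm = 3, n = 3)
      double precision a(nm,n), w(n), z(nm,n), fv1(n), fv2(n)
      data a / 2.0d0, 1.0d0, 0.0d0,
     *         1.0d0, 2.0d0, 1.0d0,
     *         0.0d0, 1.0d0, 2.0d0 /
      matz = 1
      call vrs818(nm, n, a, w, matz, z, fv1, fv2, ierr)
c     expect ierr = 0 and w in ascending order
      write (*,*) 'ierr =', ierr
      do 10 i = 1, n
         write (*,*) w(i)
   10 continue
      end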
+
+
+
+ SUBROUTINE VTQL21(NM,N,D,E,Z,IERR)
+C
+ INTEGER I,J,K,L,M,N,II,L1,L2,NM,MML,IERR
+ DOUBLE PRECISION D(N),E(N),Z(NM,N)
+ DOUBLE PRECISION C,C2,C3,DL1,EL1,F,G,H,P,R,S,S2,TST1,TST2,PYTHA9
+C
+C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL2,
+C NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND
+C WILKINSON.
+C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971).
+C
+C THIS SUBROUTINE FINDS THE EIGENVALUES AND EIGENVECTORS
+C OF A SYMMETRIC TRIDIAGONAL MATRIX BY THE QL METHOD.
+C THE EIGENVECTORS OF A FULL SYMMETRIC MATRIX CAN ALSO
+C BE FOUND IF TRED2 HAS BEEN USED TO REDUCE THIS
+C FULL MATRIX TO TRIDIAGONAL FORM.
+C
+C ON INPUT
+C
+C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL
+C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM
+C DIMENSION STATEMENT.
+C
+C N IS THE ORDER OF THE MATRIX.
+C
+C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX.
+C
+C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX
+C IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY.
+C
+C Z CONTAINS THE TRANSFORMATION MATRIX PRODUCED IN THE
+C REDUCTION BY TRED2, IF PERFORMED. IF THE EIGENVECTORS
+C OF THE TRIDIAGONAL MATRIX ARE DESIRED, Z MUST CONTAIN
+C THE IDENTITY MATRIX.
+C
+C ON OUTPUT
+C
+C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN
+C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT BUT
+C UNORDERED FOR INDICES 1,2,...,IERR-1.
+C
+C E HAS BEEN DESTROYED.
+C
+C Z CONTAINS ORTHONORMAL EIGENVECTORS OF THE SYMMETRIC
+C TRIDIAGONAL (OR FULL) MATRIX. IF AN ERROR EXIT IS MADE,
+C Z CONTAINS THE EIGENVECTORS ASSOCIATED WITH THE STORED
+C EIGENVALUES.
+C
+C IERR IS SET TO
+C ZERO FOR NORMAL RETURN,
+C J IF THE J-TH EIGENVALUE HAS NOT BEEN
+C DETERMINED AFTER 30 ITERATIONS.
+C
+C CALLS PYTHA9 FOR DSQRT(A*A + B*B) .
+C
+C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW,
+C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
+C
+C THIS VERSION DATED AUGUST 1983.
+C
+C ------------------------------------------------------------------
+c
+c unnecessary initialization of C3 and S2 to keep g77 -Wall happy
+c
+ C3 = 0.0D0
+ S2 = 0.0D0
+C
+ IERR = 0
+ IF (N .EQ. 1) GO TO 1001
+C
+ DO 100 I = 2, N
+ 100 E(I-1) = E(I)
+C
+ F = 0.0D0
+ TST1 = 0.0D0
+ E(N) = 0.0D0
+C
+ DO 240 L = 1, N
+ J = 0
+ H = DABS(D(L)) + DABS(E(L))
+ IF (TST1 .LT. H) TST1 = H
+C .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT ..........
+ DO 110 M = L, N
+ TST2 = TST1 + DABS(E(M))
+ IF (TST2 .EQ. TST1) GO TO 120
+C .......... E(N) IS ALWAYS ZERO, SO THERE IS NO EXIT
+C THROUGH THE BOTTOM OF THE LOOP ..........
+ 110 CONTINUE
+C
+ 120 IF (M .EQ. L) GO TO 220
+ 130 IF (J .EQ. 30) GO TO 1000
+ J = J + 1
+C .......... FORM SHIFT ..........
+ L1 = L + 1
+ L2 = L1 + 1
+ G = D(L)
+ P = (D(L1) - G) / (2.0D0 * E(L))
+ R = PYTHA9(P,1.0D0)
+ D(L) = E(L) / (P + DSIGN(R,P))
+ D(L1) = E(L) * (P + DSIGN(R,P))
+ DL1 = D(L1)
+ H = G - D(L)
+ IF (L2 .GT. N) GO TO 145
+C
+ DO 140 I = L2, N
+ 140 D(I) = D(I) - H
+C
+ 145 F = F + H
+C .......... QL TRANSFORMATION ..........
+ P = D(M)
+ C = 1.0D0
+ C2 = C
+ EL1 = E(L1)
+ S = 0.0D0
+ MML = M - L
+C .......... FOR I=M-1 STEP -1 UNTIL L DO -- ..........
+ DO 200 II = 1, MML
+ C3 = C2
+ C2 = C
+ S2 = S
+ I = M - II
+ G = C * E(I)
+ H = C * P
+ R = PYTHA9(P,E(I))
+ E(I+1) = S * R
+ S = E(I) / R
+ C = P / R
+ P = C * D(I) - S * G
+ D(I+1) = H + S * (C * G + S * D(I))
+C .......... FORM VECTOR ..........
+ DO 180 K = 1, N
+ H = Z(K,I+1)
+ Z(K,I+1) = S * Z(K,I) + C * H
+ Z(K,I) = C * Z(K,I) - S * H
+ 180 CONTINUE
+C
+ 200 CONTINUE
+C
+ P = -S * S2 * C3 * EL1 * E(L) / DL1
+ E(L) = S * P
+ D(L) = C * P
+ TST2 = TST1 + DABS(E(L))
+ IF (TST2 .GT. TST1) GO TO 130
+ 220 D(L) = D(L) + F
+ 240 CONTINUE
+C .......... ORDER EIGENVALUES AND EIGENVECTORS ..........
+ DO 300 II = 2, N
+ I = II - 1
+ K = I
+ P = D(I)
+C
+ DO 260 J = II, N
+ IF (D(J) .GE. P) GO TO 260
+ K = J
+ P = D(J)
+ 260 CONTINUE
+C
+ IF (K .EQ. I) GO TO 300
+ D(K) = D(I)
+ D(I) = P
+C
+ DO 280 J = 1, N
+ P = Z(J,I)
+ Z(J,I) = Z(J,K)
+ Z(J,K) = P
+ 280 CONTINUE
+C
+ 300 CONTINUE
+C
+ GO TO 1001
+C .......... SET ERROR -- NO CONVERGENCE TO AN
+C EIGENVALUE AFTER 30 ITERATIONS ..........
+ 1000 IERR = L
+ 1001 RETURN
+ END
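
Editor's note: as the header says, Z must hold the identity matrix on entry when the eigenvectors of a bare tridiagonal matrix are wanted. A minimal hypothetical check on the 2x2 matrix [2 1; 1 2], whose eigenvalues are 1 and 3 (names and data are illustrative; assumes linking against this file):

c     sketch: eigenpairs of a 2x2 tridiagonal matrix via vtql21;
c     d is the diagonal, e the subdiagonal with e(1) arbitrary
      program trytql
      integer nm, n, ierr
      parameter (nm = 2, n = 2)
      double precision d(n), e(n), z(nm,n)
      data d / 2.0d0, 2.0d0 /, e / 0.0d0, 1.0d0 /
      data z / 1.0d0, 0.0d0, 0.0d0, 1.0d0 /
      call vtql21(nm, n, d, e, z, ierr)
c     expect d = 1, 3 with orthonormal eigenvectors in z
      write (*,*) 'ierr =', ierr, ' d =', d(1), d(2)
      end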
+
+
+ SUBROUTINE TQLRA9(N,D,E2,IERR)
+C
+ INTEGER I,J,L,M,N,II,L1,MML,IERR
+ DOUBLE PRECISION D(N),E2(N)
+ DOUBLE PRECISION B,C,F,G,H,P,R,S,T,EPSLO9,PYTHA9
+C
+C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQLRAT,
+C ALGORITHM 464, COMM. ACM 16, 689(1973) BY REINSCH.
+C
+C THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC
+C TRIDIAGONAL MATRIX BY THE RATIONAL QL METHOD.
+C
+C ON INPUT
+C
+C N IS THE ORDER OF THE MATRIX.
+C
+C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX.
+C
+C E2 CONTAINS THE SQUARES OF THE SUBDIAGONAL ELEMENTS OF THE
+C INPUT MATRIX IN ITS LAST N-1 POSITIONS. E2(1) IS ARBITRARY.
+C
+C ON OUTPUT
+C
+C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN
+C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND
+C ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE
+C THE SMALLEST EIGENVALUES.
+C
+C E2 HAS BEEN DESTROYED.
+C
+C IERR IS SET TO
+C ZERO FOR NORMAL RETURN,
+C J IF THE J-TH EIGENVALUE HAS NOT BEEN
+C DETERMINED AFTER 30 ITERATIONS.
+C
+C CALLS PYTHA9 FOR DSQRT(A*A + B*B) .
+C
+C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW,
+C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
+C
+C THIS VERSION DATED AUGUST 1983.
+C
+C ------------------------------------------------------------------
+c
+c unnecessary initialization of B and C to keep g77 -Wall happy
+c
+ B = 0.0D0
+ C = 0.0D0
+C
+ IERR = 0
+ IF (N .EQ. 1) GO TO 1001
+C
+ DO 100 I = 2, N
+ 100 E2(I-1) = E2(I)
+C
+ F = 0.0D0
+ T = 0.0D0
+ E2(N) = 0.0D0
+C
+ DO 290 L = 1, N
+ J = 0
+ H = DABS(D(L)) + DSQRT(E2(L))
+ IF (T .GT. H) GO TO 105
+ T = H
+ B = EPSLO9(T)
+ C = B * B
+C .......... LOOK FOR SMALL SQUARED SUB-DIAGONAL ELEMENT ..........
+ 105 DO 110 M = L, N
+ IF (E2(M) .LE. C) GO TO 120
+C .......... E2(N) IS ALWAYS ZERO, SO THERE IS NO EXIT
+C THROUGH THE BOTTOM OF THE LOOP ..........
+ 110 CONTINUE
+C
+ 120 IF (M .EQ. L) GO TO 210
+ 130 IF (J .EQ. 30) GO TO 1000
+ J = J + 1
+C .......... FORM SHIFT ..........
+ L1 = L + 1
+ S = DSQRT(E2(L))
+ G = D(L)
+ P = (D(L1) - G) / (2.0D0 * S)
+ R = PYTHA9(P,1.0D0)
+ D(L) = S / (P + DSIGN(R,P))
+ H = G - D(L)
+C
+ DO 140 I = L1, N
+ 140 D(I) = D(I) - H
+C
+ F = F + H
+C .......... RATIONAL QL TRANSFORMATION ..........
+ G = D(M)
+ IF (G .EQ. 0.0D0) G = B
+ H = G
+ S = 0.0D0
+ MML = M - L
+C .......... FOR I=M-1 STEP -1 UNTIL L DO -- ..........
+ DO 200 II = 1, MML
+ I = M - II
+ P = G * H
+ R = P + E2(I)
+ E2(I+1) = S * R
+ S = E2(I) / R
+ D(I+1) = H + S * (H + D(I))
+ G = D(I) - E2(I) / G
+ IF (G .EQ. 0.0D0) G = B
+ H = G * P / R
+ 200 CONTINUE
+C
+ E2(L) = S * G
+ D(L) = H
+C .......... GUARD AGAINST UNDERFLOW IN CONVERGENCE TEST ..........
+ IF (H .EQ. 0.0D0) GO TO 210
+ IF (DABS(E2(L)) .LE. DABS(C/H)) GO TO 210
+ E2(L) = H * E2(L)
+ IF (E2(L) .NE. 0.0D0) GO TO 130
+ 210 P = D(L) + F
+C .......... ORDER EIGENVALUES ..........
+ IF (L .EQ. 1) GO TO 250
+C .......... FOR I=L STEP -1 UNTIL 2 DO -- ..........
+ DO 230 II = 2, L
+ I = L + 2 - II
+ IF (P .GE. D(I-1)) GO TO 270
+ D(I) = D(I-1)
+ 230 CONTINUE
+C
+ 250 I = 1
+ 270 D(I) = P
+ 290 CONTINUE
+C
+ GO TO 1001
+C .......... SET ERROR -- NO CONVERGENCE TO AN
+C EIGENVALUE AFTER 30 ITERATIONS ..........
+ 1000 IERR = L
+ 1001 RETURN
+ END
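
Editor's note: the one trap in this routine's interface is that E2 carries the squares of the subdiagonal elements, not the elements themselves. A hedged sketch of the same 2x2 example as above, eigenvalues only (illustrative; assumes linking against this file):

c     sketch: eigenvalues of [2 1; 1 2] via the rational ql
c     routine tqlra9; e2(2) = 1 is the *square* of the subdiagonal
      program trytqlr
      integer n, ierr
      parameter (n = 2)
      double precision d(n), e2(n)
      data d / 2.0d0, 2.0d0 /, e2 / 0.0d0, 1.0d0 /
      call tqlra9(n, d, e2, ierr)
c     expect d = 1, 3
      write (*,*) 'ierr =', ierr, ' d =', d(1), d(2)
      end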
+
+
+
+ SUBROUTINE VTRED1(NM,N,A,D,E,E2)
+C
+ INTEGER I,J,K,L,N,II,NM,JP1
+ DOUBLE PRECISION A(NM,N),D(N),E(N),E2(N)
+ DOUBLE PRECISION F,G,H,SCALE
+C
+C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED1,
+C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON.
+C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971).
+C
+C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX
+C TO A SYMMETRIC TRIDIAGONAL MATRIX USING
+C ORTHOGONAL SIMILARITY TRANSFORMATIONS.
+C
+C ON INPUT
+C
+C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL
+C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM
+C DIMENSION STATEMENT.
+C
+C N IS THE ORDER OF THE MATRIX.
+C
+C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE
+C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED.
+C
+C ON OUTPUT
+C
+C A CONTAINS INFORMATION ABOUT THE ORTHOGONAL TRANS-
+C FORMATIONS USED IN THE REDUCTION IN ITS STRICT LOWER
+C TRIANGLE. THE FULL UPPER TRIANGLE OF A IS UNALTERED.
+C
+C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX.
+C
+C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL
+C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO.
+C
+C E2 CONTAINS THE SQUARES OF THE CORRESPONDING ELEMENTS OF E.
+C E2 MAY COINCIDE WITH E IF THE SQUARES ARE NOT NEEDED.
+C
+C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW,
+C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
+C
+C THIS VERSION DATED AUGUST 1983.
+C
+C ------------------------------------------------------------------
+C
+ DO 100 I = 1, N
+ D(I) = A(N,I)
+ A(N,I) = A(I,I)
+ 100 CONTINUE
+C .......... FOR I=N STEP -1 UNTIL 1 DO -- ..........
+ DO 300 II = 1, N
+ I = N + 1 - II
+ L = I - 1
+ H = 0.0D0
+ SCALE = 0.0D0
+ IF (L .LT. 1) GO TO 130
+C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) ..........
+ DO 120 K = 1, L
+ 120 SCALE = SCALE + DABS(D(K))
+C
+ IF (SCALE .NE. 0.0D0) GO TO 140
+C
+ DO 125 J = 1, L
+ D(J) = A(L,J)
+ A(L,J) = A(I,J)
+ A(I,J) = 0.0D0
+ 125 CONTINUE
+C
+ 130 E(I) = 0.0D0
+ E2(I) = 0.0D0
+ GO TO 300
+C
+ 140 DO 150 K = 1, L
+ D(K) = D(K) / SCALE
+ H = H + D(K) * D(K)
+ 150 CONTINUE
+C
+ E2(I) = SCALE * SCALE * H
+ F = D(L)
+ G = -DSIGN(DSQRT(H),F)
+ E(I) = SCALE * G
+ H = H - F * G
+ D(L) = F - G
+ IF (L .EQ. 1) GO TO 285
+C .......... FORM A*U ..........
+ DO 170 J = 1, L
+ 170 E(J) = 0.0D0
+C
+ DO 240 J = 1, L
+ F = D(J)
+ G = E(J) + A(J,J) * F
+ JP1 = J + 1
+ IF (L .LT. JP1) GO TO 220
+C
+ DO 200 K = JP1, L
+ G = G + A(K,J) * D(K)
+ E(K) = E(K) + A(K,J) * F
+ 200 CONTINUE
+C
+ 220 E(J) = G
+ 240 CONTINUE
+C .......... FORM P ..........
+ F = 0.0D0
+C
+ DO 245 J = 1, L
+ E(J) = E(J) / H
+ F = F + E(J) * D(J)
+ 245 CONTINUE
+C
+ H = F / (H + H)
+C .......... FORM Q ..........
+ DO 250 J = 1, L
+ 250 E(J) = E(J) - H * D(J)
+C .......... FORM REDUCED A ..........
+ DO 280 J = 1, L
+ F = D(J)
+ G = E(J)
+C
+ DO 260 K = J, L
+ 260 A(K,J) = A(K,J) - F * E(K) - G * D(K)
+C
+ 280 CONTINUE
+C
+ 285 DO 290 J = 1, L
+ F = D(J)
+ D(J) = A(L,J)
+ A(L,J) = A(I,J)
+ A(I,J) = F * SCALE
+ 290 CONTINUE
+C
+ 300 CONTINUE
+C
+ RETURN
+ END
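
Editor's note: because the reduction is an orthogonal similarity transformation, the trace is preserved, which gives a cheap sanity check on the diagonal D it returns. A hypothetical sketch (matrix and names are illustrative; A is overwritten as documented above):

c     sketch: householder reduction of a 3x3 symmetric matrix;
c     the sum of d( ) should equal the trace of a, here 6
      program trytrd1
      integer nm, n, i
      parameter (nm = 3, n = 3)
      double precision a(nm,n), d(n), e(n), e2(n), tr
      data a / 2.0d0, 1.0d0, 1.0d0,
     *         1.0d0, 2.0d0, 1.0d0,
     *         1.0d0, 1.0d0, 2.0d0 /
      call vtred1(nm, n, a, d, e, e2)
      tr = 0.0d0
      do 10 i = 1, n
         tr = tr + d(i)
   10 continue
      write (*,*) 'trace =', tr
      end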
+
+
+
+ SUBROUTINE VTRED2(NM,N,A,D,E,Z)
+C
+ INTEGER I,J,K,L,N,II,NM,JP1
+ DOUBLE PRECISION A(NM,N),D(N),E(N),Z(NM,N)
+ DOUBLE PRECISION F,G,H,HH,SCALE
+C
+C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED2,
+C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON.
+C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971).
+C
+C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX TO A
+C SYMMETRIC TRIDIAGONAL MATRIX USING AND ACCUMULATING
+C ORTHOGONAL SIMILARITY TRANSFORMATIONS.
+C
+C ON INPUT
+C
+C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL
+C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM
+C DIMENSION STATEMENT.
+C
+C N IS THE ORDER OF THE MATRIX.
+C
+C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE
+C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED.
+C
+C ON OUTPUT
+C
+C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX.
+C
+C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL
+C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO.
+C
+C Z CONTAINS THE ORTHOGONAL TRANSFORMATION MATRIX
+C PRODUCED IN THE REDUCTION.
+C
+C A AND Z MAY COINCIDE. IF DISTINCT, A IS UNALTERED.
+C
+C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW,
+C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
+C
+C THIS VERSION DATED AUGUST 1983.
+C
+C ------------------------------------------------------------------
+C
+ DO 100 I = 1, N
+C
+ DO 80 J = I, N
+ 80 Z(J,I) = A(J,I)
+C
+ D(I) = A(N,I)
+ 100 CONTINUE
+C
+ IF (N .EQ. 1) GO TO 510
+C .......... FOR I=N STEP -1 UNTIL 2 DO -- ..........
+ DO 300 II = 2, N
+ I = N + 2 - II
+ L = I - 1
+ H = 0.0D0
+ SCALE = 0.0D0
+ IF (L .LT. 2) GO TO 130
+C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) ..........
+ DO 120 K = 1, L
+ 120 SCALE = SCALE + DABS(D(K))
+C
+ IF (SCALE .NE. 0.0D0) GO TO 140
+ 130 E(I) = D(L)
+C
+ DO 135 J = 1, L
+ D(J) = Z(L,J)
+ Z(I,J) = 0.0D0
+ Z(J,I) = 0.0D0
+ 135 CONTINUE
+C
+ GO TO 290
+C
+ 140 DO 150 K = 1, L
+ D(K) = D(K) / SCALE
+ H = H + D(K) * D(K)
+ 150 CONTINUE
+C
+ F = D(L)
+ G = -DSIGN(DSQRT(H),F)
+ E(I) = SCALE * G
+ H = H - F * G
+ D(L) = F - G
+C .......... FORM A*U ..........
+ DO 170 J = 1, L
+ 170 E(J) = 0.0D0
+C
+ DO 240 J = 1, L
+ F = D(J)
+ Z(J,I) = F
+ G = E(J) + Z(J,J) * F
+ JP1 = J + 1
+ IF (L .LT. JP1) GO TO 220
+C
+ DO 200 K = JP1, L
+ G = G + Z(K,J) * D(K)
+ E(K) = E(K) + Z(K,J) * F
+ 200 CONTINUE
+C
+ 220 E(J) = G
+ 240 CONTINUE
+C .......... FORM P ..........
+ F = 0.0D0
+C
+ DO 245 J = 1, L
+ E(J) = E(J) / H
+ F = F + E(J) * D(J)
+ 245 CONTINUE
+C
+ HH = F / (H + H)
+C .......... FORM Q ..........
+ DO 250 J = 1, L
+ 250 E(J) = E(J) - HH * D(J)
+C .......... FORM REDUCED A ..........
+ DO 280 J = 1, L
+ F = D(J)
+ G = E(J)
+C
+ DO 260 K = J, L
+ 260 Z(K,J) = Z(K,J) - F * E(K) - G * D(K)
+C
+ D(J) = Z(L,J)
+ Z(I,J) = 0.0D0
+ 280 CONTINUE
+C
+ 290 D(I) = H
+ 300 CONTINUE
+C .......... ACCUMULATION OF TRANSFORMATION MATRICES ..........
+ DO 500 I = 2, N
+ L = I - 1
+ Z(N,L) = Z(L,L)
+ Z(L,L) = 1.0D0
+ H = D(I)
+ IF (H .EQ. 0.0D0) GO TO 380
+C
+ DO 330 K = 1, L
+ 330 D(K) = Z(K,I) / H
+C
+ DO 360 J = 1, L
+ G = 0.0D0
+C
+ DO 340 K = 1, L
+ 340 G = G + Z(K,I) * Z(K,J)
+C
+ DO 360 K = 1, L
+ Z(K,J) = Z(K,J) - G * D(K)
+ 360 CONTINUE
+C
+ 380 DO 400 K = 1, L
+ 400 Z(K,I) = 0.0D0
+C
+ 500 CONTINUE
+C
+ 510 DO 520 I = 1, N
+ D(I) = Z(N,I)
+ Z(N,I) = 0.0D0
+ 520 CONTINUE
+C
+ Z(N,N) = 1.0D0
+ E(1) = 0.0D0
+ RETURN
+ END
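
Editor's note: chaining VTRED2 and VTQL21 by hand reproduces what VRS818 does when MATZ is nonzero; the accumulated Z converts eigenvectors of the tridiagonal matrix back into eigenvectors of the original A. A hypothetical sketch (assumes linking against this file; data illustrative):

c     sketch: full eigendecomposition via vtred2 + vtql21,
c     mirroring the matz .ne. 0 branch of the vrs818 driver
      program trytrd2
      integer nm, n, ierr, i
      parameter (nm = 3, n = 3)
      double precision a(nm,n), d(n), e(n), z(nm,n)
      data a / 2.0d0, 1.0d0, 0.0d0,
     *         1.0d0, 2.0d0, 1.0d0,
     *         0.0d0, 1.0d0, 2.0d0 /
      call vtred2(nm, n, a, d, e, z)
      call vtql21(nm, n, d, e, z, ierr)
c     expect the ascending eigenvalues 2-sqrt(2), 2, 2+sqrt(2)
      write (*,*) 'ierr =', ierr
      do 10 i = 1, n
         write (*,*) d(i)
   10 continue
      end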
+
+
+
+ DOUBLE PRECISION FUNCTION EPSLO9(X)
+ DOUBLE PRECISION X
+C
+C ESTIMATE UNIT ROUNDOFF IN QUANTITIES OF SIZE X.
+C
+ DOUBLE PRECISION A,B,C,EPS
+C
+C THIS PROGRAM SHOULD FUNCTION PROPERLY ON ALL SYSTEMS
+C SATISFYING THE FOLLOWING TWO ASSUMPTIONS,
+C 1. THE BASE USED IN REPRESENTING FLOATING POINT
+C NUMBERS IS NOT A POWER OF THREE.
+C 2. THE QUANTITY A IN STATEMENT 10 IS REPRESENTED TO
+C THE ACCURACY USED IN FLOATING POINT VARIABLES
+C THAT ARE STORED IN MEMORY.
+C THE STATEMENT NUMBER 10 AND THE GO TO 10 ARE INTENDED TO
+C FORCE OPTIMIZING COMPILERS TO GENERATE CODE SATISFYING
+C ASSUMPTION 2.
+C UNDER THESE ASSUMPTIONS, IT SHOULD BE TRUE THAT,
+C A IS NOT EXACTLY EQUAL TO FOUR-THIRDS,
+C B HAS A ZERO FOR ITS LAST BIT OR DIGIT,
+C C IS NOT EXACTLY EQUAL TO ONE,
+C EPS MEASURES THE SEPARATION OF 1.0 FROM
+C THE NEXT LARGER FLOATING POINT NUMBER.
+C THE DEVELOPERS OF EISPACK WOULD APPRECIATE BEING INFORMED
+C ABOUT ANY SYSTEMS WHERE THESE ASSUMPTIONS DO NOT HOLD.
+C
+C THIS VERSION DATED 4/6/83.
+C
+ A = 4.0D0/3.0D0
+ 10 B = A - 1.0D0
+ C = B + B + B
+ EPS = DABS(C-1.0D0)
+ IF (EPS .EQ. 0.0D0) GO TO 10
+ EPSLO9 = EPS*DABS(X)
+ RETURN
+ END
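
Editor's note: on IEEE double-precision hardware the value returned for X = 1 should be roughly 2.22e-16, the unit roundoff the header describes; note the comments' own caveat that the GO TO 10 loop exists to defeat over-eager optimizers. A minimal hypothetical check (assumes linking against this file):

c     sketch: estimate unit roundoff at 1.0; expect about 2.22d-16
      program tryeps
      double precision epslo9
      external epslo9
      write (*,*) epslo9(1.0d0)
      end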
+
+
+ DOUBLE PRECISION FUNCTION PYTHA9(A,B)
+ DOUBLE PRECISION A,B
+C
+C FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW
+C
+ DOUBLE PRECISION P,R,S,T,U
+ P = DMAX1(DABS(A),DABS(B))
+ IF (P .EQ. 0.0D0) GO TO 20
+ R = (DMIN1(DABS(A),DABS(B))/P)**2
+ 10 CONTINUE
+ T = 4.0D0 + R
+ IF (T .EQ. 4.0D0) GO TO 20
+ S = R/T
+ U = 1.0D0 + 2.0D0*S
+ P = U*P
+ R = (S/U)**2 * R
+ GO TO 10
+ 20 PYTHA9 = P
+ RETURN
+ END
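
Editor's note: this is the classic EISPACK PYTHAG; its point is that the arguments are never squared directly, so magnitudes near the overflow threshold are handled where a naive DSQRT(A*A + B*B) would overflow. A hypothetical check (values illustrative; assumes linking against this file):

c     sketch: overflow-safe hypotenuse; the second call would
c     overflow if 3d200 were squared, yet should print 5d200
      program trypyt
      double precision pytha9
      external pytha9
      write (*,*) pytha9(3.0d0, 4.0d0)
      write (*,*) pytha9(3.0d200, 4.0d200)
      end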
+
+
+
diff --git a/src/vgam.f b/src/vgam.f
new file mode 100644
index 0000000..58ef965
--- /dev/null
+++ b/src/vgam.f
@@ -0,0 +1,1341 @@
+ subroutine vbvs(nfiumb4,knot,j1l0o1,nk,p3vlea,ocaxi1,ikscn4,
+ &lku8xq)
+ integer nfiumb4, nk, ikscn4, lku8xq
+ double precision knot(nk+4), j1l0o1(nk,lku8xq), p3vlea(nfiumb4),
+ &ocaxi1(nfiumb4,lku8xq)
+ double precision xvalue, bvalue
+ integer w3gohz, d9rjek, def4wn
+ def4wn = 4
+ do 23000 d9rjek=1,lku8xq
+ do 23002 w3gohz=1,nfiumb4
+ xvalue = p3vlea(w3gohz)
+ ocaxi1(w3gohz,d9rjek) = bvalue(knot, j1l0o1(1,d9rjek), nk, def4wn,
+ & xvalue, ikscn4)
+23002 continue
+23000 continue
+ return
+ end
+ subroutine j3navf(nkzg2p, nk, lku8xq, a51l0o, l6xrjt, nf8brk)
+ implicit logical (a-z)
+ integer nk, lku8xq, a51l0o
+ double precision nkzg2p(a51l0o,nk*lku8xq), l6xrjt(lku8xq), nf8brk(
+ &nk,4)
+ integer w3gohz, d9rjek
+ do 23004 w3gohz=1,nk
+ do 23006 d9rjek=1,lku8xq
+ nkzg2p(a51l0o,(w3gohz-1)*lku8xq+d9rjek) = nkzg2p(a51l0o,(w3gohz-1)
+ &*lku8xq+d9rjek) + l6xrjt(d9rjek) * nf8brk(w3gohz,1)
+23006 continue
+23004 continue
+ do 23008 w3gohz=1,(nk-1)
+ do 23010 d9rjek=1,lku8xq
+ nkzg2p(a51l0o-lku8xq,(w3gohz-0)*lku8xq+d9rjek) = nkzg2p(a51l0o-
+ &lku8xq,(w3gohz-0)*lku8xq+d9rjek) + l6xrjt(d9rjek) * nf8brk(w3gohz,
+ &2)
+23010 continue
+23008 continue
+ do 23012 w3gohz=1,(nk-2)
+ do 23014 d9rjek=1,lku8xq
+ nkzg2p(a51l0o-2*lku8xq,(w3gohz+1)*lku8xq+d9rjek) = nkzg2p(a51l0o-
+ &2*lku8xq,(w3gohz+1)*lku8xq+d9rjek) + l6xrjt(d9rjek) * nf8brk(
+ &w3gohz,3)
+23014 continue
+23012 continue
+ do 23016 w3gohz=1,(nk-3)
+ do 23018 d9rjek=1,lku8xq
+ nkzg2p(a51l0o-3*lku8xq,(w3gohz+2)*lku8xq+d9rjek) = nkzg2p(a51l0o-
+ &3*lku8xq,(w3gohz+2)*lku8xq+d9rjek) + l6xrjt(d9rjek) * nf8brk(
+ &w3gohz,4)
+23018 continue
+23016 continue
+ return
+ end
+ subroutine wgy5ta(p1rifj, s17te9, nbbad, uq9jtc, nkzg2p, w8xfic,
+ &evgfu3, anke8p, lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l,
+ &vvl1li)
+ implicit logical (a-z)
+ integer p1rifj, s17te9, nbbad, evgfu3, anke8p, lku8xq, a51l0o,
+ &xhe4cg, nfiumb4, nk, zqve1l(1), vvl1li(1)
+ double precision uq9jtc(4,1), nkzg2p(a51l0o, nk*lku8xq), w8xfic(
+ &nfiumb4,xhe4cg)
+ double precision temp
+ integer xi1mqb, j3ymns, bcol, brow, y9eilo, pazyk8
+ bcol = s17te9 + nbbad
+ brow = s17te9
+ do 23020 xi1mqb=1,xhe4cg
+ temp = w8xfic(p1rifj,xi1mqb) * uq9jtc(evgfu3,1) * uq9jtc(anke8p,1)
+ y9eilo = (brow-1)*lku8xq + zqve1l(xi1mqb)
+ pazyk8 = (bcol-1)*lku8xq + vvl1li(xi1mqb)
+ j3ymns = pazyk8 - y9eilo
+ nkzg2p(a51l0o-j3ymns, pazyk8) = nkzg2p(a51l0o-j3ymns, pazyk8) +
+ &temp
+ if(.not.(nbbad .gt. 0 .and. vvl1li(xi1mqb) .ne. zqve1l(xi1mqb)))
+ &goto 23022
+ y9eilo = (brow-1)*lku8xq + vvl1li(xi1mqb)
+ pazyk8 = (bcol-1)*lku8xq + zqve1l(xi1mqb)
+ j3ymns = pazyk8 - y9eilo
+ nkzg2p(a51l0o-j3ymns, pazyk8) = nkzg2p(a51l0o-j3ymns, pazyk8) +
+ &temp
+23022 continue
+23020 continue
+ return
+ end
+ subroutine vsplin(p3vlea,lj4dph,w8xfic,nfiumb4,onyz6j, nk,a51l0o,
+ &lku8xq,xhe4cg, zqve1l,vvl1li, zxao0o, l6xrjt, fjg0qv, w5poyv,
+ &j1l0o1, nkzg2p, cy3dhl, vb81l0, l8dgox, y6jcvk, bmb, rjcq9o, dof,
+ &sz6ohy, la5dcf, e5jrsg)
+ implicit logical (a-z)
+ integer nfiumb4, nk, a51l0o, lku8xq, xhe4cg, zqve1l(1), vvl1li(1),
+ & fjg0qv, la5dcf, e5jrsg
+ integer y6jcvk
+ double precision p3vlea(nfiumb4), lj4dph(nfiumb4,lku8xq), w8xfic(
+ &nfiumb4,xhe4cg), onyz6j(nk+4), zxao0o(lku8xq,lku8xq,16), l6xrjt(
+ &lku8xq), w5poyv(nfiumb4,lku8xq), j1l0o1(nk,lku8xq), nkzg2p(a51l0o,
+ &nk*lku8xq), cy3dhl(lku8xq,nk)
+ double precision vb81l0(nk,lku8xq), l8dgox(e5jrsg,lku8xq), bmb(
+ &lku8xq,lku8xq), rjcq9o(nfiumb4,lku8xq), dof(lku8xq), sz6ohy(1)
+ integer d9rjek, w3gohz, m5xudf, i6ndbu, xi1mqb, rlhz2a
+ integer yc1ezl, mk2vyr, thfyl1, hts1gp(3), ispar, opf6cv
+ double precision kqoy6w, uq9jtc(4,1), z2djpt(16), egoxa3, n9peut,
+ &bt9lgm, v2isnf, fjo2dy, fpcb2n(3)
+ do 23024 d9rjek=1,lku8xq
+ if(.not.(l6xrjt(d9rjek) .eq. 0.0d0))goto 23026
+ ispar=0
+ rlhz2a=3
+ goto 23027
+23026 continue
+ ispar=1
+ rlhz2a=1
+23027 continue
+ if(.not.((lku8xq .eq. 1) .or. (xhe4cg.eq.lku8xq) .or. (ispar .eq.
+ &0)))goto 23028
+ mk2vyr = 4
+ bt9lgm = 1.50d0
+ v2isnf = -0.25d0
+ thfyl1 = 1
+ fjo2dy=0.001d0
+ hts1gp(1) = rlhz2a
+ hts1gp(2) = ispar
+ hts1gp(3) = 300
+ fpcb2n(1) = v2isnf
+ fpcb2n(2) = bt9lgm
+ fpcb2n(3) = fjo2dy
+ yc1ezl=0
+ if(.not.((lku8xq .eq. 1) .or. (xhe4cg.eq.lku8xq)))goto 23030
+ do 23032 w3gohz=1,nfiumb4
+ lj4dph(w3gohz,d9rjek) = lj4dph(w3gohz,d9rjek) / w8xfic(w3gohz,
+ &d9rjek)
+23032 continue
+ call nvhb7f(egoxa3, dof(d9rjek), p3vlea, lj4dph(1,d9rjek), w8xfic(
+ &1,d9rjek), nfiumb4,nk, onyz6j,j1l0o1(1,d9rjek), w5poyv(1,d9rjek),
+ &rjcq9o(1,d9rjek), n9peut,l6xrjt(d9rjek),fpcb2n, sz6ohy, yc1ezl,
+ &hts1gp, mk2vyr,thfyl1,la5dcf)
+ if(.not.(la5dcf .ne. 0))goto 23034
+ return
+23034 continue
+ do 23036 w3gohz=1,nfiumb4
+ w8xfic(w3gohz,d9rjek) = w8xfic(w3gohz,d9rjek) * w8xfic(w3gohz,
+ &d9rjek)
+23036 continue
+ if(.not.(y6jcvk .ne. 0))goto 23038
+ do 23040 w3gohz=1,nfiumb4
+ l8dgox(w3gohz,d9rjek) = rjcq9o(w3gohz,d9rjek) / w8xfic(w3gohz,
+ &d9rjek)
+23040 continue
+23038 continue
+ goto 23031
+23030 continue
+ call nvhb7f(egoxa3, dof(d9rjek), p3vlea, cy3dhl(1,d9rjek), w8xfic(
+ &1,d9rjek), nfiumb4,nk, onyz6j,j1l0o1(1,d9rjek),w5poyv(1,d9rjek),
+ &rjcq9o(1,d9rjek), n9peut,l6xrjt(d9rjek),fpcb2n, sz6ohy, yc1ezl,
+ &hts1gp, mk2vyr,thfyl1,la5dcf)
+ if(.not.(la5dcf .ne. 0))goto 23042
+ return
+23042 continue
+ do 23044 w3gohz=1,nfiumb4
+ w8xfic(w3gohz,d9rjek) = w8xfic(w3gohz,d9rjek) * w8xfic(w3gohz,
+ &d9rjek)
+23044 continue
+23031 continue
+ if(.not.(la5dcf .ne. 0))goto 23046
+ return
+23046 continue
+23028 continue
+23024 continue
+ if(.not.((lku8xq .eq. 1) .or. (xhe4cg .eq. lku8xq)))goto 23048
+ return
+23048 continue
+ do 23050 w3gohz=1,nk
+ do 23052 d9rjek=1,lku8xq
+ cy3dhl(d9rjek,w3gohz)=0.0d0
+23052 continue
+23050 continue
+ do 23054 w3gohz=1,(nk*lku8xq)
+ do 23056 d9rjek=1,a51l0o
+ nkzg2p(d9rjek,w3gohz) = 0.0d0
+23056 continue
+23054 continue
+ kqoy6w = 0.1d-9
+ do 23058 w3gohz=1,nfiumb4
+ call vinterv(onyz6j(1),(nk+1),p3vlea(w3gohz),m5xudf,i6ndbu)
+ if(.not.(i6ndbu .eq. 1))goto 23060
+ if(.not.(p3vlea(w3gohz) .le. (onyz6j(m5xudf)+kqoy6w)))goto 23062
+ m5xudf=m5xudf-1
+ goto 23063
+23062 continue
+ return
+23063 continue
+23060 continue
+ call vbsplvd(onyz6j,4,p3vlea(w3gohz),m5xudf,z2djpt,uq9jtc,1)
+ d9rjek= m5xudf-4+1
+ do 23064 xi1mqb=1,lku8xq
+ cy3dhl(xi1mqb,d9rjek)=cy3dhl(xi1mqb,d9rjek) + lj4dph(w3gohz,
+ &xi1mqb) * uq9jtc(1,1)
+23064 continue
+ call wgy5ta(w3gohz, d9rjek, 0, uq9jtc, nkzg2p, w8xfic, 1, 1,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ call wgy5ta(w3gohz, d9rjek, 1, uq9jtc, nkzg2p, w8xfic, 1, 2,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ call wgy5ta(w3gohz, d9rjek, 2, uq9jtc, nkzg2p, w8xfic, 1, 3,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ call wgy5ta(w3gohz, d9rjek, 3, uq9jtc, nkzg2p, w8xfic, 1, 4,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ d9rjek= m5xudf-4+2
+ do 23066 xi1mqb=1,lku8xq
+ cy3dhl(xi1mqb,d9rjek)=cy3dhl(xi1mqb,d9rjek) + lj4dph(w3gohz,
+ &xi1mqb) * uq9jtc(2,1)
+23066 continue
+ call wgy5ta(w3gohz, d9rjek, 0, uq9jtc, nkzg2p, w8xfic, 2, 2,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ call wgy5ta(w3gohz, d9rjek, 1, uq9jtc, nkzg2p, w8xfic, 2, 3,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ call wgy5ta(w3gohz, d9rjek, 2, uq9jtc, nkzg2p, w8xfic, 2, 4,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ d9rjek= m5xudf-4+3
+ do 23068 xi1mqb=1,lku8xq
+ cy3dhl(xi1mqb,d9rjek)=cy3dhl(xi1mqb,d9rjek) + lj4dph(w3gohz,
+ &xi1mqb) * uq9jtc(3,1)
+23068 continue
+ call wgy5ta(w3gohz, d9rjek, 0, uq9jtc, nkzg2p, w8xfic, 3, 3,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ call wgy5ta(w3gohz, d9rjek, 1, uq9jtc, nkzg2p, w8xfic, 3, 4,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+ d9rjek= m5xudf-4+4
+ do 23070 xi1mqb=1,lku8xq
+ cy3dhl(xi1mqb,d9rjek)=cy3dhl(xi1mqb,d9rjek) + lj4dph(w3gohz,
+ &xi1mqb) * uq9jtc(4,1)
+23070 continue
+ call wgy5ta(w3gohz, d9rjek, 0, uq9jtc, nkzg2p, w8xfic, 4, 4,
+ &lku8xq, a51l0o, xhe4cg, nfiumb4, nk, zqve1l, vvl1li)
+23058 continue
+ call poqy8c(vb81l0(1,1), vb81l0(1,2), vb81l0(1,3), vb81l0(1,4),
+ &onyz6j, nk)
+ call j3navf(nkzg2p, nk, lku8xq, a51l0o, l6xrjt, vb81l0)
+ call vdpbfa7(nkzg2p, a51l0o, nk*lku8xq, a51l0o-1, fjg0qv, vb81l0)
+ if(.not.(fjg0qv .ne. 0))goto 23072
+ return
+23072 continue
+ call vdpbsl7(nkzg2p, a51l0o, nk*lku8xq, a51l0o-1, cy3dhl, vb81l0)
+ opf6cv = 0
+ do 23074 w3gohz=1,nk
+ do 23076 d9rjek=1,lku8xq
+ opf6cv = opf6cv + 1
+ j1l0o1(w3gohz,d9rjek) = cy3dhl(d9rjek,w3gohz)
+23076 continue
+23074 continue
+ call ye3zvn(onyz6j, p3vlea, j1l0o1, nfiumb4, nk, lku8xq, w5poyv)
+ call gyzcj5(nkzg2p, nkzg2p, vb81l0, zxao0o, a51l0o-1, nk*lku8xq)
+ call jiyw4z(nkzg2p, p3vlea, onyz6j, l8dgox, a51l0o, nfiumb4, nk,
+ &lku8xq, y6jcvk, bmb, zxao0o, w8xfic, rjcq9o, xhe4cg, zqve1l,
+ &vvl1li, e5jrsg)
+ return
+ end
+ subroutine ye3zvn(knot, p3vlea, j1l0o1, nfiumb4, nk, lku8xq,
+ &w5poyv)
+ implicit logical (a-z)
+ integer nfiumb4, nk, lku8xq
+ double precision knot(nk+4), p3vlea(nfiumb4), j1l0o1(nk,lku8xq),
+ &w5poyv(nfiumb4,lku8xq)
+ double precision xvalue, bvalue
+ integer w3gohz, d9rjek
+ do 23078 w3gohz=1,nfiumb4
+ xvalue = p3vlea(w3gohz)
+ do 23080 d9rjek=1,lku8xq
+ w5poyv(w3gohz,d9rjek) = bvalue(knot, j1l0o1(1,d9rjek), nk, 4,
+ &xvalue, 0)
+23080 continue
+23078 continue
+ return
+ end
+ subroutine vsuff9(nfiumb4,uxs1iq,ynk9ah, p3vlea,jmwo0z,w8xfic,
+ &qxy6aj,bz3pyo,ax1cdp,f0pzmy,lg3zhr, lku8xq, xhe4cg, zkjqhi,
+ &zqve1l, vvl1li, bgu6fw, ve2mqu, ifo4ew, du8jbv, wj5shg, x6rito,
+ &c4uxow)
+ implicit logical (a-z)
+ integer nfiumb4, uxs1iq, ynk9ah(nfiumb4), lku8xq, xhe4cg, zkjqhi,
+ &zqve1l(1),vvl1li(1), du8jbv, wj5shg, x6rito, c4uxow
+ double precision p3vlea(nfiumb4), jmwo0z(nfiumb4,lku8xq), w8xfic(
+ &nfiumb4,xhe4cg), qxy6aj(uxs1iq), bz3pyo(uxs1iq,lku8xq), ax1cdp(
+ &uxs1iq,1), f0pzmy(zkjqhi,uxs1iq), lg3zhr(uxs1iq,lku8xq), bgu6fw(
+ &lku8xq,lku8xq+1), ve2mqu(du8jbv,du8jbv+1), ifo4ew(lku8xq,du8jbv)
+ integer w3gohz, d9rjek, nd6mep, xi1mqb, i1nkrb, j0qwtz
+ integer uxzze7
+ uxzze7 = 1
+ if(.not.(wj5shg .eq. 1))goto 23082
+ if(.not.((zkjqhi .ne. xhe4cg) .or. (du8jbv .ne. lku8xq)))goto 2308
+ &4
+ c4uxow = 0
+ return
+23084 continue
+23082 continue
+ j0qwtz = lku8xq * (lku8xq+1) / 2
+ if(.not.(xhe4cg .gt. j0qwtz))goto 23086
+23086 continue
+ call qh4ulb(zqve1l, vvl1li, lku8xq)
+ do 23088 w3gohz=1,nfiumb4
+ qxy6aj(ynk9ah(w3gohz))=p3vlea(w3gohz)
+23088 continue
+ do 23090 d9rjek=1,lku8xq
+ do 23092 w3gohz=1,uxs1iq
+ lg3zhr(w3gohz,d9rjek) = 0.0d0
+23092 continue
+23090 continue
+ do 23094 d9rjek=1,xhe4cg
+ do 23096 w3gohz=1,uxs1iq
+ ax1cdp(w3gohz,d9rjek) = 0.0d0
+23096 continue
+23094 continue
+ if(.not.(xhe4cg .ne. j0qwtz))goto 23098
+ do 23100 nd6mep=1,lku8xq
+ do 23102 d9rjek=1,lku8xq
+ bgu6fw(d9rjek,nd6mep) = 0.0d0
+23102 continue
+23100 continue
+23098 continue
+ do 23104 w3gohz=1,nfiumb4
+ do 23106 d9rjek=1,xhe4cg
+ bgu6fw(zqve1l(d9rjek),vvl1li(d9rjek)) = w8xfic(w3gohz,d9rjek)
+ bgu6fw(vvl1li(d9rjek),zqve1l(d9rjek)) = bgu6fw(zqve1l(d9rjek),
+ &vvl1li(d9rjek))
+23106 continue
+ do 23108 d9rjek=1,lku8xq
+ do 23110 nd6mep=1,lku8xq
+ lg3zhr(ynk9ah(w3gohz),d9rjek) = lg3zhr(ynk9ah(w3gohz),d9rjek) +
+ &bgu6fw(d9rjek,nd6mep)*jmwo0z(w3gohz,nd6mep)
+23110 continue
+23108 continue
+ do 23112 d9rjek=1,xhe4cg
+ ax1cdp(ynk9ah(w3gohz),d9rjek) = ax1cdp(ynk9ah(w3gohz),d9rjek) +
+ &w8xfic(w3gohz,d9rjek)
+23112 continue
+23104 continue
+ c4uxow = 1
+ if(.not.(wj5shg .eq. 1))goto 23114
+ do 23116 w3gohz=1,uxs1iq
+ do 23118 d9rjek=1,xhe4cg
+ bgu6fw(zqve1l(d9rjek),vvl1li(d9rjek)) = ax1cdp(w3gohz,d9rjek)
+ bgu6fw(vvl1li(d9rjek),zqve1l(d9rjek)) = bgu6fw(zqve1l(d9rjek),
+ &vvl1li(d9rjek))
+23118 continue
+ do 23120 d9rjek=1,lku8xq
+ bgu6fw(d9rjek,lku8xq+1)=lg3zhr(w3gohz,d9rjek)
+23120 continue
+ call vcholf(bgu6fw, bgu6fw(1,lku8xq+1), lku8xq, c4uxow, uxzze7)
+ if(.not.(c4uxow .ne. 1))goto 23122
+ return
+23122 continue
+ if(.not.(x6rito .ne. 0))goto 23124
+ do 23126 d9rjek=1,xhe4cg
+ f0pzmy(d9rjek,w3gohz) = bgu6fw(zqve1l(d9rjek),vvl1li(d9rjek))
+23126 continue
+23124 continue
+ do 23128 d9rjek=1,lku8xq
+ bz3pyo(w3gohz,d9rjek)=bgu6fw(d9rjek,lku8xq+1)
+23128 continue
+23116 continue
+ goto 23115
+23114 continue
+ if(.not.(xhe4cg .ne. j0qwtz))goto 23130
+ do 23132 d9rjek=1,lku8xq
+ do 23134 nd6mep=1,lku8xq
+ bgu6fw(d9rjek,nd6mep) = 0.0d0
+23134 continue
+23132 continue
+23130 continue
+ do 23136 w3gohz=1,uxs1iq
+ call qh4ulb(zqve1l, vvl1li, lku8xq)
+ do 23138 d9rjek=1,xhe4cg
+ bgu6fw(zqve1l(d9rjek),vvl1li(d9rjek)) = ax1cdp(w3gohz,d9rjek)
+ bgu6fw(vvl1li(d9rjek),zqve1l(d9rjek)) = bgu6fw(zqve1l(d9rjek),
+ &vvl1li(d9rjek))
+23138 continue
+ do 23140 d9rjek=1,lku8xq
+ bgu6fw(d9rjek,lku8xq+1)=lg3zhr(w3gohz,d9rjek)
+23140 continue
+ do 23142 d9rjek=1,du8jbv
+ do 23144 nd6mep=d9rjek,du8jbv
+ ve2mqu(d9rjek,nd6mep) = 0.0d0
+ do 23146 xi1mqb=1,lku8xq
+ do 23148 i1nkrb=1,lku8xq
+ ve2mqu(d9rjek,nd6mep) = ve2mqu(d9rjek,nd6mep) + ifo4ew(xi1mqb,
+ &d9rjek) * bgu6fw(xi1mqb,i1nkrb) * ifo4ew(i1nkrb,nd6mep)
+23148 continue
+23146 continue
+23144 continue
+23142 continue
+ call qh4ulb(zqve1l, vvl1li, du8jbv)
+ do 23150 d9rjek=1,zkjqhi
+ ax1cdp(w3gohz,d9rjek) = ve2mqu(zqve1l(d9rjek),vvl1li(d9rjek))
+23150 continue
+ do 23152 d9rjek=1,du8jbv
+ ve2mqu(d9rjek,du8jbv+1) = 0.0d0
+ do 23154 xi1mqb=1,lku8xq
+ ve2mqu(d9rjek,du8jbv+1) = ve2mqu(d9rjek,du8jbv+1) + ifo4ew(xi1mqb,
+ &d9rjek) * bgu6fw(xi1mqb,lku8xq+1)
+23154 continue
+23152 continue
+ do 23156 d9rjek=1,du8jbv
+ lg3zhr(w3gohz,d9rjek) = ve2mqu(d9rjek,du8jbv+1)
+23156 continue
+ call vcholf(ve2mqu, ve2mqu(1,du8jbv+1), du8jbv, c4uxow, uxzze7)
+ if(.not.(c4uxow .ne. 1))goto 23158
+ return
+23158 continue
+ if(.not.(x6rito .ne. 0))goto 23160
+ do 23162 d9rjek=1,zkjqhi
+ f0pzmy(d9rjek,w3gohz) = ve2mqu(zqve1l(d9rjek),vvl1li(d9rjek))
+23162 continue
+23160 continue
+ do 23164 d9rjek=1,du8jbv
+ bz3pyo(w3gohz,d9rjek) = ve2mqu(d9rjek,du8jbv+1)
+23164 continue
+23136 continue
+23115 continue
+ return
+ end
+ subroutine jiyw4z(n5fkml, p3vlea, onyz6j, svpr1i, a51l0o, nfiumb4,
+ & nk, lku8xq, ifvar, bmb, bgu6fw, w8xfic, rjcq9o, xhe4cg, zqve1l,
+ &vvl1li, e5jrsg)
+ implicit logical (a-z)
+ integer a51l0o, nfiumb4, nk, lku8xq, ifvar, xhe4cg, zqve1l(1),
+ &vvl1li(1), e5jrsg
+ double precision n5fkml(a51l0o,nk*lku8xq), p3vlea(nfiumb4),
+ &onyz6j(nk+4), svpr1i(e5jrsg,lku8xq), bmb(lku8xq,lku8xq), bgu6fw(
+ &lku8xq,lku8xq), w8xfic(nfiumb4,xhe4cg), rjcq9o(nfiumb4,lku8xq)
+ integer w3gohz, d9rjek, nd6mep, m5xudf, i6ndbu, xi1mqb, i1nkrb
+ double precision kqoy6w, z2djpt(16), uq9jtc(4,1)
+ if(.not.(ifvar .ne. 0))goto 23166
+ do 23168 nd6mep=1,lku8xq
+ do 23170 w3gohz=1,nfiumb4
+ svpr1i(w3gohz,nd6mep) = 0.0d0
+23170 continue
+23168 continue
+23166 continue
+ kqoy6w = 0.10d-9
+ call qh4ulb(zqve1l, vvl1li, lku8xq)
+ do 23172 w3gohz=1,nfiumb4
+ do 23174 d9rjek=1,lku8xq
+ do 23176 nd6mep=1,lku8xq
+ bmb(d9rjek,nd6mep)=0.0d0
+23176 continue
+23174 continue
+ call vinterv(onyz6j(1), (nk+1), p3vlea(w3gohz), m5xudf, i6ndbu)
+ if(.not.(i6ndbu.eq. 1))goto 23178
+ if(.not.(p3vlea(w3gohz) .le. (onyz6j(m5xudf)+kqoy6w)))goto 23180
+ m5xudf=m5xudf-1
+ goto 23181
+23180 continue
+ return
+23181 continue
+23178 continue
+ call vbsplvd(onyz6j, 4, p3vlea(w3gohz), m5xudf, z2djpt, uq9jtc, 1)
+ d9rjek= m5xudf-4+1
+ do 23182 xi1mqb=d9rjek,d9rjek+3
+ call vsel(xi1mqb, xi1mqb, lku8xq, nk, a51l0o, n5fkml, bgu6fw)
+ call bf7qci(lku8xq, uq9jtc(xi1mqb-d9rjek+1,1) * uq9jtc(xi1mqb-
+ &d9rjek+1,1), bgu6fw, bmb)
+23182 continue
+ do 23184 xi1mqb=d9rjek,d9rjek+3
+ do 23186 i1nkrb=xi1mqb+1,d9rjek+3
+ call vsel(xi1mqb, i1nkrb, lku8xq, nk, a51l0o, n5fkml, bgu6fw)
+ call bf7qci(lku8xq, 2.0d0 * uq9jtc(xi1mqb-d9rjek+1,1) * uq9jtc(
+ &i1nkrb-d9rjek+1,1), bgu6fw, bmb)
+23186 continue
+23184 continue
+ if(.not.(ifvar .ne. 0))goto 23188
+ do 23190 d9rjek=1,lku8xq
+ svpr1i(w3gohz,d9rjek) = bmb(d9rjek,d9rjek)
+23190 continue
+23188 continue
+ call dp2zwv(bmb, w8xfic, bgu6fw, rjcq9o, lku8xq, nfiumb4, xhe4cg,
+ &zqve1l, vvl1li, w3gohz)
+23172 continue
+ return
+ end
+ subroutine bf7qci(lku8xq, uq9jtc, bgu6fw, bmb)
+ implicit logical (a-z)
+ integer lku8xq
+ double precision uq9jtc, bgu6fw(lku8xq,lku8xq), bmb(lku8xq,lku8xq)
+ integer d9rjek, nd6mep
+ do 23192 d9rjek=1,lku8xq
+ do 23194 nd6mep=1,lku8xq
+ bgu6fw(d9rjek,nd6mep) = bgu6fw(d9rjek,nd6mep) * uq9jtc
+23194 continue
+23192 continue
+ do 23196 d9rjek=1,lku8xq
+ do 23198 nd6mep=1,lku8xq
+ bmb(nd6mep,d9rjek) = bmb(nd6mep,d9rjek) + bgu6fw(nd6mep,d9rjek)
+23198 continue
+23196 continue
+ return
+ end
+ subroutine vsel(s, t, lku8xq, nk, a51l0o, minv, bgu6fw)
+ implicit logical (a-z)
+ integer s, t, lku8xq, nk, a51l0o
+ double precision minv(a51l0o,nk*lku8xq), bgu6fw(lku8xq,lku8xq)
+ integer w3gohz, d9rjek, y9eilo, pazyk8
+ do 23200 w3gohz=1,lku8xq
+ do 23202 d9rjek=1,lku8xq
+ bgu6fw(w3gohz,d9rjek) = 0.0d0
+23202 continue
+23200 continue
+ if(.not.(s .ne. t))goto 23204
+ do 23206 w3gohz=1,lku8xq
+ y9eilo = (s-1)*lku8xq + w3gohz
+ do 23208 d9rjek=1,lku8xq
+ pazyk8 = (t-1)*lku8xq + d9rjek
+ bgu6fw(w3gohz,d9rjek) = minv(a51l0o-(pazyk8-y9eilo), pazyk8)
+23208 continue
+23206 continue
+ goto 23205
+23204 continue
+ do 23210 w3gohz=1,lku8xq
+ y9eilo = (s-1)*lku8xq + w3gohz
+ do 23212 d9rjek=w3gohz,lku8xq
+ pazyk8 = (t-1)*lku8xq + d9rjek
+ bgu6fw(w3gohz,d9rjek) = minv(a51l0o-(pazyk8-y9eilo), pazyk8)
+23212 continue
+23210 continue
+ do 23214 w3gohz=1,lku8xq
+ do 23216 d9rjek=w3gohz+1,lku8xq
+ bgu6fw(d9rjek,w3gohz) = bgu6fw(w3gohz,d9rjek)
+23216 continue
+23214 continue
+23205 continue
+ return
+ end
+ subroutine dp2zwv(bmb, w8xfic, bgu6fw, rjcq9o, lku8xq, nfiumb4,
+ &xhe4cg, zqve1l, vvl1li, p1rifj)
+ implicit logical (a-z)
+ integer lku8xq, nfiumb4, xhe4cg, zqve1l(1), vvl1li(1), p1rifj
+ double precision bmb(lku8xq,lku8xq), w8xfic(nfiumb4,xhe4cg),
+ &bgu6fw(lku8xq,lku8xq), rjcq9o(nfiumb4,lku8xq)
+ double precision qnk4zf, temp
+ integer d9rjek, nd6mep, xi1mqb, i1nkrb
+ do 23218 i1nkrb=1,lku8xq
+ do 23220 d9rjek=1,lku8xq
+ do 23222 nd6mep=1,lku8xq
+ bgu6fw(nd6mep,d9rjek) = 0.0d0
+23222 continue
+23220 continue
+ do 23224 xi1mqb=1,xhe4cg
+ temp = w8xfic(p1rifj,xi1mqb)
+ bgu6fw(zqve1l(xi1mqb),vvl1li(xi1mqb)) = temp
+ bgu6fw(vvl1li(xi1mqb),zqve1l(xi1mqb)) = temp
+23224 continue
+ qnk4zf = 0.0d0
+ do 23226 d9rjek=1,lku8xq
+ qnk4zf = qnk4zf + bmb(i1nkrb,d9rjek) * bgu6fw(d9rjek,i1nkrb)
+23226 continue
+ rjcq9o(p1rifj,i1nkrb) = qnk4zf
+23218 continue
+ return
+ end
+ subroutine gyzcj5(n5fkml, jrxg6l, d, uu, lku8xq, nfiumb4)
+ implicit logical (a-z)
+ integer lku8xq, nfiumb4
+ double precision n5fkml(lku8xq+1,nfiumb4), jrxg6l(lku8xq+1,
+ &nfiumb4), d(nfiumb4), uu(lku8xq+1,lku8xq+1)
+ integer w3gohz, nd6mep, dibm1x, p4gdax, c3qxjo, j0izmn, myx3od
+ n5fkml(lku8xq+1,nfiumb4) = 1.0d0 / d(nfiumb4)
+ j0izmn = lku8xq+1
+ c3qxjo = nfiumb4+1 - j0izmn
+ do 23228 myx3od=c3qxjo,nfiumb4
+ do 23230 w3gohz=1,j0izmn
+ uu(w3gohz, myx3od-c3qxjo+1) = jrxg6l(w3gohz, myx3od)
+23230 continue
+23228 continue
+ w3gohz = nfiumb4-1
+23232 if(.not.(w3gohz.ge.1))goto 23234
+ if(.not.(lku8xq .lt. nfiumb4-w3gohz))goto 23235
+ p4gdax = lku8xq
+ goto 23236
+23235 continue
+ p4gdax = nfiumb4-w3gohz
+23236 continue
+ dibm1x=1
+23237 if(.not.(dibm1x.le.p4gdax))goto 23239
+ n5fkml(-dibm1x+lku8xq+1,w3gohz+dibm1x) = 0.0d0
+ nd6mep=1
+23240 if(.not.(nd6mep.le.dibm1x))goto 23242
+ n5fkml(-dibm1x+lku8xq+1,w3gohz+dibm1x) = n5fkml(-dibm1x+lku8xq+1,
+ &w3gohz+dibm1x) - uu(-nd6mep+lku8xq+1,w3gohz+nd6mep -c3qxjo+1) *
+ &n5fkml(nd6mep-dibm1x+lku8xq+1,w3gohz+dibm1x)
+ nd6mep=nd6mep+1
+ goto 23240
+23242 continue
+23243 if(.not.(nd6mep.le.p4gdax))goto 23245
+ n5fkml(-dibm1x+lku8xq+1,w3gohz+dibm1x) = n5fkml(-dibm1x+lku8xq+1,
+ &w3gohz+dibm1x) - uu(-nd6mep+lku8xq+1,w3gohz+nd6mep -c3qxjo+1) *
+ &n5fkml(dibm1x-nd6mep+lku8xq+1,w3gohz+nd6mep)
+ nd6mep=nd6mep+1
+ goto 23243
+23245 continue
+ dibm1x=dibm1x+1
+ goto 23237
+23239 continue
+ n5fkml(lku8xq+1,w3gohz) = 1.0d0 / d(w3gohz)
+ dibm1x = 1
+23246 if(.not.(dibm1x.le.p4gdax))goto 23248
+ n5fkml(lku8xq+1,w3gohz) = n5fkml(lku8xq+1,w3gohz) - uu(-dibm1x+
+ &lku8xq+1,w3gohz+dibm1x -c3qxjo+1) * n5fkml(-dibm1x+lku8xq+1,
+ &w3gohz+dibm1x)
+ dibm1x=dibm1x+1
+ goto 23246
+23248 continue
+ if(.not.(w3gohz .eq. c3qxjo))goto 23249
+ c3qxjo = c3qxjo-1
+ if(.not.(c3qxjo .lt. 1))goto 23251
+ c3qxjo = 1
+ goto 23252
+23251 continue
+ myx3od=j0izmn-1
+23253 if(.not.(myx3od.ge.1))goto 23255
+ nd6mep=1
+23256 if(.not.(nd6mep.le.j0izmn))goto 23258
+ uu(nd6mep,myx3od+1) = uu(nd6mep,myx3od)
+ nd6mep=nd6mep+1
+ goto 23256
+23258 continue
+ myx3od=myx3od-1
+ goto 23253
+23255 continue
+ nd6mep=1
+23259 if(.not.(nd6mep.le.j0izmn))goto 23261
+ uu(nd6mep,1) = jrxg6l(nd6mep,c3qxjo)
+ nd6mep=nd6mep+1
+ goto 23259
+23261 continue
+23252 continue
+23249 continue
+ w3gohz = w3gohz-1
+ goto 23232
+23234 continue
+ return
+ end
+ subroutine ntju9b(bz4guf,jmwo0z,w8xfic, nfiumb4,lku8xq,ynk9ah,
+ &uxs1iq, l6xrjt,dof,smo,zo5jyl, s0, vy5hmo,yin,lj4dph,win, ykdc2t,
+ &phqco4, xhe4cg, la5dcf, a51l0o, fjg0qv, y6jcvk, vb81l0, j1l0o1,
+ &qc7zyb, jko0o1,zqve1l,vvl1li, bmb, rjcq9o, zxao0o, wj5shg,du8jbv,
+ &i83h1, ifo4ew, lq8reh, i0qvzl, jq8lra, kn7qya, vfd2pw, blq5vu,
+ &dfsom3)
+ implicit logical (a-z)
+ integer nfiumb4,lku8xq,ynk9ah(nfiumb4),uxs1iq, xhe4cg, la5dcf,
+ &a51l0o, fjg0qv, y6jcvk, jko0o1,zqve1l(1),vvl1li(1), wj5shg,
+ &du8jbv, i83h1(du8jbv*2)
+ double precision bz4guf(nfiumb4), jmwo0z(nfiumb4,lku8xq), w8xfic(
+ &nfiumb4,xhe4cg), l6xrjt(du8jbv), dof(du8jbv), smo(nfiumb4,du8jbv),
+ & zo5jyl(nfiumb4,du8jbv)
+ double precision s0(2*du8jbv, 2*du8jbv,2)
+ double precision ykdc2t(1), phqco4(1), vb81l0(1), j1l0o1(1),
+ &qc7zyb(jko0o1+4)
+ double precision vy5hmo(uxs1iq), yin(uxs1iq,lku8xq), lj4dph(
+ &uxs1iq,lku8xq), win(uxs1iq,1), bmb(1), rjcq9o(uxs1iq,du8jbv),
+ &zxao0o(lku8xq,lku8xq,16), ifo4ew(lku8xq,du8jbv)
+ double precision lq8reh(2*du8jbv), i0qvzl(2*du8jbv), jq8lra(
+ &uxs1iq,du8jbv), kn7qya(du8jbv,uxs1iq), vfd2pw(du8jbv,uxs1iq),
+ &blq5vu(uxs1iq*du8jbv), dfsom3(1)
+ integer ybfr6z
+ integer w3gohz, d9rjek, nd6mep, c4bdmu, o9ljyn, tvyd2b, zx1610,
+ &c4uxow
+ integer uxzze7
+ double precision kogeb2, tap0km, t7sbea
+ uxzze7 = 1
+ if(.not.(wj5shg .eq. 1))goto 23262
+ ybfr6z = xhe4cg
+ goto 23263
+23262 continue
+ ybfr6z = du8jbv*(du8jbv+1)/2
+23263 continue
+ call qh4ulb(zqve1l, vvl1li, lku8xq)
+ call vsuff9(nfiumb4,uxs1iq,ynk9ah, bz4guf,jmwo0z,w8xfic, vy5hmo,
+ &yin,win,dfsom3,lj4dph, lku8xq, xhe4cg, ybfr6z, zqve1l, vvl1li,
+ &zxao0o, zxao0o(1,1,3), ifo4ew, du8jbv, wj5shg, uxzze7, c4uxow)
+ if(.not.(c4uxow .ne. 1))goto 23264
+ return
+23264 continue
+ kogeb2 = vy5hmo(1)
+ tap0km = vy5hmo(uxs1iq)-vy5hmo(1)
+ do 23266 w3gohz=1,uxs1iq
+ vy5hmo(w3gohz) = (vy5hmo(w3gohz)-kogeb2)/tap0km
+23266 continue
+ a51l0o = 4*du8jbv
+ la5dcf = 0
+ do 23268 d9rjek=1,du8jbv
+ if(.not.(l6xrjt(d9rjek) .eq. 0.0d0))goto 23270
+ dof(d9rjek) = dof(d9rjek) + 1.0d0
+23270 continue
+23268 continue
+ call qh4ulb(zqve1l, vvl1li, du8jbv)
+ call vsplin(vy5hmo,lj4dph,win,uxs1iq,qc7zyb, jko0o1,a51l0o,du8jbv,
+ &ybfr6z, zqve1l,vvl1li, zxao0o, l6xrjt, fjg0qv, jq8lra, j1l0o1,
+ &phqco4(1), phqco4(1+jko0o1*du8jbv*a51l0o), vb81l0, zo5jyl, y6jcvk,
+ & bmb, rjcq9o, dof, ykdc2t, la5dcf, nfiumb4)
+ do 23272 d9rjek=1,du8jbv
+ dof(d9rjek) = -1.0d0
+ do 23274 w3gohz=1,uxs1iq
+ dof(d9rjek)=dof(d9rjek)+rjcq9o(w3gohz,d9rjek)
+23274 continue
+23272 continue
+ if(.not.(du8jbv .ge. 1))goto 23276
+ t7sbea = 1.0d-7
+ c4bdmu = uxs1iq*du8jbv
+ o9ljyn = 2*du8jbv
+ tvyd2b = 101
+ fjg0qv = 1
+ call kgevo5(vy5hmo, phqco4, uxs1iq, du8jbv)
+ call qh4ulb(zqve1l, vvl1li, du8jbv)
+ call mux17f(dfsom3, phqco4, du8jbv, o9ljyn, uxs1iq, zxao0o(1,1,1),
+ & zxao0o(1,1,2), zqve1l, vvl1li, ybfr6z, c4bdmu)
+ do 23278 nd6mep=1,o9ljyn
+ i83h1(nd6mep) = nd6mep
+23278 continue
+ call dhkt9w(phqco4,c4bdmu,c4bdmu,o9ljyn,i0qvzl,i83h1,ykdc2t,
+ &zx1610,t7sbea)
+ call qh4ulb(zqve1l, vvl1li, du8jbv)
+ call mux22f(dfsom3,jq8lra,kn7qya,ybfr6z,zqve1l,vvl1li,uxs1iq,
+ &du8jbv,zxao0o)
+ call vdqrsl(phqco4,c4bdmu,c4bdmu,zx1610,i0qvzl,kn7qya,ykdc2t(1),
+ &blq5vu,lq8reh, ykdc2t(1),vfd2pw,tvyd2b,fjg0qv)
+ call vbksf(dfsom3,vfd2pw,du8jbv,uxs1iq,zxao0o,zqve1l,vvl1li,
+ &ybfr6z)
+ if(.not.(y6jcvk .ne. 0))goto 23280
+ call vrinvf9(phqco4, c4bdmu, o9ljyn, c4uxow, s0(1,1,1), s0(1,1,2))
+ if(.not.(c4uxow .ne. 1))goto 23282
+ return
+23282 continue
+ do 23284 d9rjek=1,du8jbv
+ do 23286 w3gohz=1,uxs1iq
+ zo5jyl(w3gohz,d9rjek) = zo5jyl(w3gohz,d9rjek) - s0(d9rjek,d9rjek,
+ &1) - vy5hmo(w3gohz) * (2.0d0 * s0(d9rjek,d9rjek+du8jbv,1) +
+ &vy5hmo(w3gohz) * s0(d9rjek+du8jbv,d9rjek+du8jbv,1))
+23286 continue
+23284 continue
+23280 continue
+ goto 23277
+23276 continue
+ call rpfnk6(uxs1iq, vy5hmo, win, jq8lra, vfd2pw, zo5jyl, y6jcvk)
+23277 continue
+ do 23288 w3gohz=1,uxs1iq
+ do 23290 d9rjek=1,du8jbv
+ jq8lra(w3gohz,d9rjek) = jq8lra(w3gohz,d9rjek) - vfd2pw(d9rjek,
+ &w3gohz)
+23290 continue
+23288 continue
+ do 23292 d9rjek=1,du8jbv
+ call uwye7d(nfiumb4, uxs1iq, ynk9ah, jq8lra(1,d9rjek), smo(1,
+ &d9rjek))
+23292 continue
+ return
+ end
+ subroutine vbfa(d8gwha,n,lku8xq,gqai81, p3vlea,jmwo0z,w8xfic,
+ &l6xrjt,dof, ynk9ah,uxs1iq,vliac4, vfd2pw,sazp9g,go0l1q,s0, lq8reh,
+ &zo5jyl,h4fgoy, ioqzvb,i0qvzl, i83h1, xbig, jrxg6l, ifo4ew, ozuw3p,
+ & hwi2tb, nbd5rl, wj5shg, ykdc2t, wk2, zxao0o, phqco4, vb81l0, bmb,
+ & rjcq9o, mwk, t5vlzq, j1l0o1, qc7zyb, das4bx, vlni8d, jko0o1,
+ &mnh3up, fg3pxq)
+ implicit logical (a-z)
+ integer d8gwha, n, lku8xq, gqai81(15), ynk9ah(1),uxs1iq(1),vliac4(
+ &1), i83h1(1)
+ integer ozuw3p(1), hwi2tb(1), nbd5rl(1), wj5shg(1), vlni8d(1),
+ &jko0o1(1), mnh3up(1), fg3pxq(1)
+ double precision p3vlea(1),jmwo0z(1),w8xfic(1),l6xrjt(1),dof(1),
+ &vfd2pw(1),sazp9g(1), go0l1q(1), s0(lku8xq), lq8reh(1),zo5jyl(1),
+ &h4fgoy, ioqzvb(1),i0qvzl(1)
+ double precision xbig(1), jrxg6l(1), ifo4ew(1), ykdc2t(1), wk2(n,
+ &lku8xq,3), zxao0o(lku8xq,lku8xq,16), phqco4(1), vb81l0(1), bmb(1),
+ & rjcq9o(1), mwk(1), t5vlzq(1), j1l0o1(1), qc7zyb(1), das4bx
+ integer p,q,y6jcvk,nucgi1r,no2fik, c4bdmu, o9ljyn, tiav4e, xhe4cg,
+ & zkjqhi, la5dcf,a51l0o
+ integer ucgi1r
+ integer sehz7y
+ integer w3gohz, j0qwtz, zx1610
+ j0qwtz = lku8xq*(lku8xq+1)/2
+ p=gqai81(2)
+ q=gqai81(3)
+ y6jcvk= 0
+ if(.not.(gqai81(4) .eq. 1))goto 23294
+ y6jcvk = 1
+23294 continue
+ no2fik=gqai81(6)
+ zx1610=gqai81(7)
+ c4bdmu=gqai81(9)
+ o9ljyn=gqai81(10)
+ tiav4e=gqai81(11)
+ xhe4cg=gqai81(12)
+ zkjqhi=gqai81(13)
+ la5dcf = 0
+ a51l0o=gqai81(15)
+ sehz7y = 1
+ if(.not.(tiav4e .gt. 0))goto 23296
+ do 23298 w3gohz=1,tiav4e
+ ykdc2t(w3gohz) = dof(w3gohz)
+ ykdc2t(w3gohz+tiav4e) = l6xrjt(w3gohz)
+ ykdc2t(w3gohz+2*tiav4e) = dof(w3gohz)
+23298 continue
+23296 continue
+ ucgi1r = 0
+23300 if(.not.(sehz7y .ne. 0))goto 23301
+ ucgi1r = ucgi1r+1
+ if(.not.(ucgi1r .gt. 1))goto 23302
+ if(.not.(tiav4e .gt. 0))goto 23304
+ do 23306 w3gohz=1,tiav4e
+ if(.not.(ykdc2t(w3gohz+tiav4e).eq.0.0d0 .and.(dabs(ykdc2t(w3gohz+
+ &2*tiav4e)-dof(w3gohz))/dof(w3gohz).gt.0.05d0)))goto 23308
+ ykdc2t(w3gohz+2*tiav4e) = dof(w3gohz)
+ dof(w3gohz)=ykdc2t(w3gohz)
+ l6xrjt(w3gohz)=0.0d0
+ goto 23309
+23308 continue
+ ykdc2t(w3gohz+2*tiav4e) = dof(w3gohz)
+23309 continue
+23306 continue
+23304 continue
+23302 continue
+ call xqasw0(d8gwha,n,lku8xq, p3vlea,jmwo0z,w8xfic,l6xrjt,dof,
+ &ynk9ah,uxs1iq,vliac4, vfd2pw,sazp9g,go0l1q,s0, lq8reh,zo5jyl,
+ &h4fgoy, ioqzvb,i0qvzl, zx1610,i83h1, xbig, jrxg6l, ifo4ew, ozuw3p,
+ & hwi2tb, nbd5rl(1), nbd5rl(1 + j0qwtz), wj5shg, ykdc2t(1+3*tiav4e)
+ &, zxao0o, phqco4, vb81l0, bmb, rjcq9o, mwk, t5vlzq, j1l0o1,
+ &qc7zyb, das4bx, vlni8d, jko0o1, mnh3up, fg3pxq, p,q,y6jcvk,
+ &nucgi1r,no2fik, wk2(1,1,1), wk2(1,1,2), wk2(1,1,3), c4bdmu,
+ &o9ljyn, tiav4e, xhe4cg, zkjqhi, la5dcf, a51l0o)
+ if(.not.(d8gwha .ne. 0))goto 23310
+ call vcall2(sehz7y,w,y,go0l1q,lq8reh,jrxg6l)
+ goto 23311
+23310 continue
+ sehz7y = 0
+23311 continue
+ if(.not.(sehz7y .ne. 0))goto 23312
+ zx1610=0
+23312 continue
+ goto 23300
+23301 continue
+ gqai81(7) = zx1610
+ gqai81(5) = nucgi1r
+ gqai81(14) = la5dcf
+ return
+ end
+ subroutine xqasw0(d8gwha,nfiumb4,lku8xq, p3vlea,jmwo0z,w8xfic,
+ &l6xrjt,dof, ynk9ah,uxs1iq,vliac4, vfd2pw,sazp9g,go0l1q,s0, lq8reh,
+ &zo5jyl,h4fgoy, ioqzvb,i0qvzl, zx1610,i83h1, xbig, jrxg6l, ifo4ew,
+ &ozuw3p, hwi2tb, zqve1l, vvl1li, wj5shg, ykdc2t, zxao0o, phqco4,
+ &vb81l0, bmb, rjcq9o, mwk, t5vlzq, j1l0o1, qc7zyb, das4bx, vlni8d,
+ &jko0o1, mnh3up, fg3pxq, p, q, y6jcvk, nucgi1r, no2fik, hr83e,
+ &x7aort, wk2, c4bdmu, o9ljyn, tiav4e, xhe4cg, zkjqhi, la5dcf,
+ &a51l0o)
+ implicit logical (a-z)
+ integer zx1610
+ integer vvl1li(1), zqve1l(1)
+ integer p, q, y6jcvk, nucgi1r, no2fik, c4bdmu, o9ljyn, tiav4e,
+ &xhe4cg, zkjqhi, la5dcf, a51l0o
+ integer d8gwha, nfiumb4, lku8xq, ynk9ah(nfiumb4,q),uxs1iq(q),
+ &vliac4(q), i83h1(o9ljyn)
+ integer ozuw3p(q), hwi2tb(q), wj5shg(q), vlni8d(q+1), jko0o1(q),
+ &mnh3up(1), fg3pxq(q+1)
+ double precision p3vlea(nfiumb4,p), jmwo0z(nfiumb4,lku8xq),
+ &w8xfic(nfiumb4,xhe4cg), l6xrjt(tiav4e), dof(tiav4e)
+ double precision vfd2pw(lku8xq,nfiumb4), sazp9g(nfiumb4,tiav4e),
+ &go0l1q(lku8xq,nfiumb4), s0(lku8xq), lq8reh(o9ljyn), zo5jyl(
+ &nfiumb4,tiav4e), h4fgoy, ioqzvb(c4bdmu,o9ljyn), i0qvzl(o9ljyn)
+ double precision xbig(c4bdmu,o9ljyn), jrxg6l(zkjqhi,nfiumb4),
+ &ifo4ew(lku8xq,tiav4e), ykdc2t(1), wk2(nfiumb4,lku8xq), zxao0o(
+ &lku8xq,lku8xq,16), phqco4(1), vb81l0(1), bmb(1), rjcq9o(1), mwk(1)
+ &, t5vlzq(1), j1l0o1(1), qc7zyb(1), das4bx
+ double precision hr83e(nfiumb4,lku8xq), x7aort(nfiumb4,lku8xq)
+ integer tvyd2b,fjg0qv,rbwx6v
+ integer w3gohz, d9rjek, nd6mep, jbyv3q
+ double precision gwu72m, jcp1ti,dyb3po, njdgw8, gcjn3k,t7sbea
+ t7sbea = 1.0d-7
+ tvyd2b = 101
+ fjg0qv = 1
+ if(.not.(q .eq. 0))goto 23314
+ no2fik = 1
+23314 continue
+ if(.not.(d8gwha .ne. 0))goto 23316
+ do 23318 d9rjek=1,o9ljyn
+ do 23320 w3gohz=1,c4bdmu
+ ioqzvb(w3gohz,d9rjek)=xbig(w3gohz,d9rjek)
+23320 continue
+23318 continue
+23316 continue
+ if(.not.(zx1610.eq.0))goto 23322
+ call qh4ulb(zqve1l,vvl1li,lku8xq)
+ call mux17f(jrxg6l, ioqzvb, lku8xq, o9ljyn, nfiumb4, zxao0o(1,1,1)
+ &, zxao0o(1,1,2), zqve1l, vvl1li, zkjqhi, c4bdmu)
+ do 23324 nd6mep=1,o9ljyn
+ i83h1(nd6mep) = nd6mep
+23324 continue
+ call dhkt9w(ioqzvb,c4bdmu,c4bdmu,o9ljyn,i0qvzl,i83h1,t5vlzq,
+ &zx1610,t7sbea)
+23322 continue
+ do 23326 d9rjek=1,lku8xq
+ do 23328 w3gohz=1,nfiumb4
+ go0l1q(d9rjek,w3gohz)=0.0d0
+23328 continue
+ if(.not.(q .gt. 0))goto 23330
+ do 23332 nd6mep=1,q
+ if(.not.(wj5shg(nd6mep).eq.1))goto 23334
+ do 23336 w3gohz=1,nfiumb4
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) + sazp9g(w3gohz,
+ &hwi2tb(nd6mep)+d9rjek-1)
+23336 continue
+ goto 23335
+23334 continue
+ do 23338 jbyv3q=1,ozuw3p(nd6mep)
+ do 23340 w3gohz=1,nfiumb4
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) + ifo4ew(d9rjek,
+ &hwi2tb(nd6mep)+jbyv3q-1) * sazp9g(w3gohz,hwi2tb(nd6mep)+jbyv3q-1)
+23340 continue
+23338 continue
+23335 continue
+23332 continue
+23330 continue
+23326 continue
+ nucgi1r = 0
+ dyb3po = 1.0d0
+23342 if(.not.((dyb3po .gt. h4fgoy ) .and. (nucgi1r .lt. no2fik)))goto 2
+ &3343
+ nucgi1r = nucgi1r + 1
+ njdgw8 = 0.0d0
+ do 23344 d9rjek=1,lku8xq
+ do 23346 w3gohz=1,nfiumb4
+ hr83e(w3gohz,d9rjek)=jmwo0z(w3gohz,d9rjek)-go0l1q(d9rjek,w3gohz)
+23346 continue
+23344 continue
+ call qh4ulb(zqve1l,vvl1li,lku8xq)
+ call mux22f(jrxg6l,hr83e, t5vlzq, zkjqhi,zqve1l,vvl1li,nfiumb4,
+ &lku8xq,zxao0o)
+ call vdqrsl(ioqzvb,c4bdmu,c4bdmu,zx1610,i0qvzl, t5vlzq, wk2,wk2,
+ &lq8reh, wk2,vfd2pw,tvyd2b,fjg0qv)
+ das4bx=0.0d0
+ do 23348 w3gohz=1,nfiumb4
+ do 23350 d9rjek=1,lku8xq
+ gwu72m = t5vlzq((w3gohz-1)*lku8xq+d9rjek) - vfd2pw(d9rjek,w3gohz)
+ das4bx = das4bx + gwu72m * gwu72m
+23350 continue
+23348 continue
+ call vbksf(jrxg6l,vfd2pw,lku8xq,nfiumb4,zxao0o,zqve1l,vvl1li,
+ &zkjqhi)
+ if(.not.(q .gt. 0))goto 23352
+ do 23354 nd6mep=1,q
+ do 23356 d9rjek=1,lku8xq
+ if(.not.(wj5shg(nd6mep).eq.1))goto 23358
+ do 23360 w3gohz=1,nfiumb4
+ x7aort(w3gohz,d9rjek)=sazp9g(w3gohz,hwi2tb(nd6mep)+d9rjek-1)
+ hr83e(w3gohz,d9rjek) = jmwo0z(w3gohz,d9rjek) - vfd2pw(d9rjek,
+ &w3gohz) - go0l1q(d9rjek,w3gohz) + x7aort(w3gohz,d9rjek)
+23360 continue
+ goto 23359
+23358 continue
+ do 23362 w3gohz=1,nfiumb4
+ x7aort(w3gohz,d9rjek)=0.0d0
+ do 23364 jbyv3q=1,ozuw3p(nd6mep)
+ x7aort(w3gohz,d9rjek)=x7aort(w3gohz,d9rjek) + ifo4ew(d9rjek,
+ &hwi2tb(nd6mep)+jbyv3q-1) * sazp9g(w3gohz,hwi2tb(nd6mep)+jbyv3q-1)
+23364 continue
+ hr83e(w3gohz,d9rjek) = jmwo0z(w3gohz,d9rjek) - vfd2pw(d9rjek,
+ &w3gohz) - go0l1q(d9rjek,w3gohz) + x7aort(w3gohz,d9rjek)
+23362 continue
+23359 continue
+23356 continue
+ rbwx6v = uxs1iq(nd6mep)
+ call ntju9b(p3vlea(1,vliac4(nd6mep)),hr83e,w8xfic, nfiumb4,lku8xq,
+ &ynk9ah(1,nd6mep),rbwx6v, l6xrjt(hwi2tb(nd6mep)), dof(hwi2tb(
+ &nd6mep)), sazp9g(1,hwi2tb(nd6mep)), zo5jyl(1,hwi2tb(nd6mep)), s0,
+ &mwk(1), mwk(1+rbwx6v), mwk(1+rbwx6v*(lku8xq+1)), mwk(1+rbwx6v*(2*
+ &lku8xq+1)), ykdc2t, phqco4, xhe4cg, la5dcf, a51l0o, fjg0qv,
+ &y6jcvk, vb81l0, j1l0o1(vlni8d(nd6mep)), qc7zyb(fg3pxq(nd6mep)),
+ &jko0o1(nd6mep),zqve1l, vvl1li, bmb, rjcq9o, zxao0o, wj5shg(nd6mep)
+ &,ozuw3p(nd6mep),mnh3up, ifo4ew(1,hwi2tb(nd6mep)), t5vlzq(1),
+ &t5vlzq(1+2*ozuw3p(nd6mep)), t5vlzq(1+4*ozuw3p(nd6mep)), t5vlzq(1+(
+ &4+rbwx6v)*ozuw3p(nd6mep)), t5vlzq(1+(4+2*rbwx6v)*ozuw3p(nd6mep)),
+ &t5vlzq(1+(4+3*rbwx6v)*ozuw3p(nd6mep)), t5vlzq(1+(4+4*rbwx6v)*
+ &ozuw3p(nd6mep)))
+ do 23366 d9rjek=1,lku8xq
+ if(.not.(wj5shg(nd6mep).eq.1))goto 23368
+ do 23370 w3gohz=1,nfiumb4
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) + sazp9g(w3gohz,
+ &hwi2tb(nd6mep)+d9rjek-1)
+23370 continue
+ goto 23369
+23368 continue
+ do 23372 jbyv3q=1,ozuw3p(nd6mep)
+ do 23374 w3gohz=1,nfiumb4
+ go0l1q(d9rjek,w3gohz)=go0l1q(d9rjek,w3gohz) + ifo4ew(d9rjek,
+ &hwi2tb(nd6mep)+jbyv3q-1) * sazp9g(w3gohz,hwi2tb(nd6mep)+jbyv3q-1)
+23374 continue
+23372 continue
+23369 continue
+ do 23376 w3gohz=1,nfiumb4
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) - x7aort(w3gohz,
+ &d9rjek)
+23376 continue
+23366 continue
+ do 23378 d9rjek=1,lku8xq
+ if(.not.(wj5shg(nd6mep) .eq. 1))goto 23380
+ njdgw8 = njdgw8 + jcp1ti(nfiumb4,x7aort(1,d9rjek),sazp9g(1,hwi2tb(
+ &nd6mep)+d9rjek-1), w8xfic(1,d9rjek))
+ goto 23381
+23380 continue
+ do 23382 w3gohz=1,nfiumb4
+ t5vlzq(w3gohz) = 0.0d0
+ do 23384 jbyv3q=1,ozuw3p(nd6mep)
+ t5vlzq(w3gohz) = t5vlzq(w3gohz) + ifo4ew(d9rjek,hwi2tb(nd6mep)+
+ &jbyv3q-1) * sazp9g(w3gohz,hwi2tb(nd6mep)+jbyv3q-1)
+23384 continue
+23382 continue
+ njdgw8 = njdgw8 + jcp1ti(nfiumb4, x7aort(1,d9rjek), t5vlzq,
+ &w8xfic(1,d9rjek))
+23381 continue
+23378 continue
+ do 23386 d9rjek=1,lku8xq
+ do 23388 w3gohz=1,nfiumb4
+ hr83e(w3gohz,d9rjek)=jmwo0z(w3gohz,d9rjek)-go0l1q(d9rjek,w3gohz)
+23388 continue
+23386 continue
+ call qh4ulb(zqve1l,vvl1li,lku8xq)
+ call mux22f(jrxg6l,hr83e, t5vlzq, zkjqhi,zqve1l,vvl1li,nfiumb4,
+ &lku8xq,zxao0o)
+ call vdqrsl(ioqzvb,c4bdmu,c4bdmu,zx1610,i0qvzl, t5vlzq, wk2,wk2,
+ &lq8reh, wk2,vfd2pw,tvyd2b,fjg0qv)
+ call vbksf(jrxg6l,vfd2pw,lku8xq,nfiumb4,zxao0o,zqve1l,vvl1li,
+ &zkjqhi)
+23354 continue
+23352 continue
+ if(.not.(q .gt. 0))goto 23390
+ gcjn3k=0.0d0
+ do 23392 d9rjek=1,lku8xq
+ do 23394 w3gohz=1,nfiumb4
+ gcjn3k = gcjn3k + w8xfic(w3gohz,d9rjek) * go0l1q(d9rjek,w3gohz)**
+ &2
+23394 continue
+23392 continue
+ if(.not.(gcjn3k .gt. 0.0d0))goto 23396
+ dyb3po = dsqrt(njdgw8/gcjn3k)
+ goto 23397
+23396 continue
+ dyb3po = 0.0d0
+23397 continue
+23390 continue
+ if(.not.(nucgi1r .eq. 1))goto 23398
+ dyb3po = 1.0d0
+23398 continue
+ goto 23342
+23343 continue
+ do 23400 d9rjek=1,o9ljyn
+ t5vlzq(d9rjek)=lq8reh(d9rjek)
+23400 continue
+ do 23402 d9rjek=1,o9ljyn
+ lq8reh(i83h1(d9rjek))=t5vlzq(d9rjek)
+23402 continue
+ do 23404 w3gohz=1,nfiumb4
+ do 23406 d9rjek=1,lku8xq
+ go0l1q(d9rjek,w3gohz) = go0l1q(d9rjek,w3gohz) + vfd2pw(d9rjek,
+ &w3gohz)
+23406 continue
+23404 continue
+ if(.not.((y6jcvk .ne. 0) .and. (q .gt. 0)))goto 23408
+ do 23410 nd6mep=1,q
+ do 23412 jbyv3q=1,ozuw3p(nd6mep)
+ call uwye7d(nfiumb4,uxs1iq(nd6mep),ynk9ah(1,nd6mep), zo5jyl(1,
+ &hwi2tb(nd6mep)+jbyv3q-1),x7aort)
+ do 23414 w3gohz=1,nfiumb4
+ zo5jyl(w3gohz,hwi2tb(nd6mep)+jbyv3q-1) = x7aort(w3gohz,1)
+23414 continue
+23412 continue
+23410 continue
+23408 continue
+ return
+ end
+ subroutine kgevo5(p3vlea, xout, nfiumb4, lku8xq)
+ implicit logical (a-z)
+ integer nfiumb4, lku8xq
+ double precision p3vlea(nfiumb4), xout(1)
+ integer w3gohz, d9rjek, nd6mep, xtiel4
+ xtiel4=1
+ do 23416 d9rjek=1,lku8xq
+ do 23418 w3gohz=1,nfiumb4
+ do 23420 nd6mep=1,lku8xq
+ if(.not.(d9rjek .eq. nd6mep))goto 23422
+ xout(xtiel4) = 1.0d0
+ goto 23423
+23422 continue
+ xout(xtiel4) = 0.0d0
+23423 continue
+ xtiel4=xtiel4+1
+23420 continue
+23418 continue
+23416 continue
+ do 23424 d9rjek=1,lku8xq
+ do 23426 w3gohz=1,nfiumb4
+ do 23428 nd6mep=1,lku8xq
+ if(.not.(d9rjek .eq. nd6mep))goto 23430
+ xout(xtiel4) = p3vlea(w3gohz)
+ goto 23431
+23430 continue
+ xout(xtiel4) = 0.0d0
+23431 continue
+ xtiel4=xtiel4+1
+23428 continue
+23426 continue
+23424 continue
+ return
+ end
+ double precision function jcp1ti(nfiumb4, yvec, go0l1q, wvec)
+ integer nfiumb4
+ double precision yvec(nfiumb4), go0l1q(nfiumb4), wvec(nfiumb4)
+ integer w3gohz
+ double precision wtot, risyv0, bgu6fw
+ risyv0 = 0.0d0
+ wtot = 0.0d0
+ do 23432 w3gohz=1,nfiumb4
+ bgu6fw = yvec(w3gohz) - go0l1q(w3gohz)
+ risyv0 = risyv0 + wvec(w3gohz)*bgu6fw*bgu6fw
+ wtot = wtot + wvec(w3gohz)
+23432 continue
+ if(.not.(wtot .gt. 0.0d0))goto 23434
+ jcp1ti=risyv0/wtot
+ goto 23435
+23434 continue
+ jcp1ti=0.0d0
+23435 continue
+ return
+ end
+ subroutine usytl1(nfiumb4, yvec, wvec, ghry8z, wtot)
+ implicit logical (a-z)
+ integer nfiumb4
+ double precision yvec(nfiumb4), wvec(nfiumb4), ghry8z, wtot
+ double precision risyv0
+ integer w3gohz
+ wtot = 0.0d0
+ risyv0 = 0.0d0
+ do 23436 w3gohz=1,nfiumb4
+ risyv0 = risyv0 + yvec(w3gohz) * wvec(w3gohz)
+ wtot = wtot + wvec(w3gohz)
+23436 continue
+ if(.not.(wtot .gt. 0.0d0))goto 23438
+ ghry8z = risyv0 / wtot
+ goto 23439
+23438 continue
+ ghry8z = 0.0d0
+23439 continue
+ return
+ end
+ subroutine rpfnk6(nfiumb4, x, w, yvec, vfd2pw, zo5jyl, y6jcvk)
+ implicit logical (a-z)
+ integer nfiumb4
+ integer y6jcvk
+ double precision x(nfiumb4), w(nfiumb4), yvec(nfiumb4), vfd2pw(
+ &nfiumb4)
+ double precision zo5jyl(nfiumb4,1)
+ integer w3gohz
+ double precision bz3pyo, qxy6aj, qnk4zf, vgh4cp, u7hbqo, agfy3b,
+ &qe3jcd, j0izmn, wtot
+ call usytl1(nfiumb4,yvec,w,bz3pyo, wtot)
+ call usytl1(nfiumb4,x,w,qxy6aj, wtot)
+ vgh4cp = 0.0d0
+ qnk4zf = 0.0d0
+ do 23440 w3gohz=1,nfiumb4
+ j0izmn = x(w3gohz)-qxy6aj
+ vgh4cp = vgh4cp + j0izmn * (yvec(w3gohz)-bz3pyo) * w(w3gohz)
+ j0izmn = j0izmn * j0izmn
+ qnk4zf = qnk4zf + j0izmn * w(w3gohz)
+23440 continue
+ u7hbqo = vgh4cp/qnk4zf
+ agfy3b = bz3pyo - u7hbqo * qxy6aj
+ do 23442 w3gohz=1,nfiumb4
+ vfd2pw(w3gohz) = agfy3b + u7hbqo * x(w3gohz)
+23442 continue
+ qe3jcd = agfy3b + u7hbqo * x(1)
+ if(.not.(y6jcvk .ne. 0))goto 23444
+ do 23446 w3gohz=1,nfiumb4
+ j0izmn = x(w3gohz)-qxy6aj
+ if(.not.(w(w3gohz) .gt. 0.0d0))goto 23448
+ zo5jyl(w3gohz,1) = zo5jyl(w3gohz,1) - 1.0d0/wtot - j0izmn *
+ &j0izmn / qnk4zf
+ goto 23449
+23448 continue
+ zo5jyl(w3gohz,1) = 0.0d0
+23449 continue
+23446 continue
+23444 continue
+ return
+ end
+ subroutine uwye7d(nfiumb4, p, ynk9ah, qxy6aj, x)
+ implicit logical (a-z)
+ integer nfiumb4, p, ynk9ah(nfiumb4)
+ double precision qxy6aj(p), x(nfiumb4)
+ integer w3gohz
+ do 23450 w3gohz=1,nfiumb4
+ x(w3gohz) = qxy6aj(ynk9ah(w3gohz))
+23450 continue
+ return
+ end
+ subroutine vknotl2(x, nfiumb4, knot, xl6qgm, q9wyop)
+ implicit logical (a-z)
+ integer nfiumb4, xl6qgm, q9wyop
+ double precision x(nfiumb4), knot(nfiumb4)
+ integer ndk, d9rjek
+ if(.not.(q9wyop .eq. 0))goto 23452
+ if(.not.(nfiumb4 .le. 40))goto 23454
+ ndk = nfiumb4
+ goto 23455
+23454 continue
+ ndk = 40 + dexp(0.25d0 * dlog(nfiumb4-40.0d0))
+23455 continue
+ goto 23453
+23452 continue
+ ndk = xl6qgm - 6
+23453 continue
+ xl6qgm = ndk + 6
+ do 23456 d9rjek = 1,3
+ knot(d9rjek) = x(1)
+23456 continue
+ do 23458 d9rjek = 1,ndk
+ knot(d9rjek+3) = x( 1 + (d9rjek-1)*(nfiumb4-1)/(ndk-1) )
+23458 continue
+ do 23460 d9rjek = 1,3
+ knot(ndk+3+d9rjek) = x(nfiumb4)
+23460 continue
+ return
+ end
+ subroutine pknotl2(knot, nfiumb4, keep, fjo2dy)
+ implicit logical (a-z)
+ integer nfiumb4, keep(nfiumb4)
+ double precision knot(nfiumb4), fjo2dy
+ integer w3gohz, ilower
+ do 23462 w3gohz=1,4
+ keep(w3gohz) = 1
+23462 continue
+ ilower = 4
+ do 23464 w3gohz=5,(nfiumb4-4)
+ if(.not.((knot(w3gohz) - knot(ilower) .ge. fjo2dy) .and.(knot(
+ &nfiumb4) - knot(w3gohz) .ge. fjo2dy)))goto 23466
+ keep(w3gohz) = 1
+ ilower = w3gohz
+ goto 23467
+23466 continue
+ keep(w3gohz) = 0
+23467 continue
+23464 continue
+ do 23468 w3gohz=(nfiumb4-3),nfiumb4
+ keep(w3gohz) = 1
+23468 continue
+ return
+ end
+ subroutine vglmf(xbig,c4bdmu,o9ljyn,d8gwha,nfiumb4, jmwo0z,lq8reh,
+ &go0l1q,blq5vu, ioqzvb,i0qvzl, zx1610,i83h1, bgu6fw,zxao0o,jrxg6l,
+ &lku8xq,zkjqhi,xhe4cg, zqve1l, vvl1li, cpxbig, das4bx)
+ implicit logical (a-z)
+ integer c4bdmu,o9ljyn,d8gwha,nfiumb4, zx1610,i83h1(o9ljyn),
+ &lku8xq,zkjqhi,xhe4cg, zqve1l(1), vvl1li(1), cpxbig
+ double precision xbig(c4bdmu,o9ljyn), jmwo0z(nfiumb4,lku8xq),
+ &lq8reh(o9ljyn),go0l1q(lku8xq,nfiumb4),blq5vu(c4bdmu), ioqzvb(
+ &c4bdmu,o9ljyn),i0qvzl(o9ljyn), bgu6fw(1), zxao0o(lku8xq,lku8xq,5),
+ & jrxg6l(1), das4bx
+ integer sehz7y
+ call qh4ulb(zqve1l,vvl1li,lku8xq)
+ sehz7y = 1
+23470 if(.not.(sehz7y .ne. 0))goto 23471
+ call vfit(lku8xq,c4bdmu,o9ljyn,nfiumb4, xbig,jmwo0z,lq8reh,go0l1q,
+ &blq5vu, jrxg6l,ioqzvb,i0qvzl,das4bx, zx1610,i83h1, bgu6fw,zxao0o,
+ &xhe4cg,zkjqhi,zqve1l,vvl1li)
+ if(.not.(d8gwha .ne. 0))goto 23472
+ call vcall1(sehz7y,jmwo0z,go0l1q,lq8reh,jrxg6l,xbig,cpxbig)
+ goto 23473
+23472 continue
+ sehz7y= 0
+23473 continue
+ if(.not.(sehz7y .ne. 0))goto 23474
+ zx1610=0
+23474 continue
+ goto 23470
+23471 continue
+ return
+ end
+ subroutine vfit(lku8xq,c4bdmu,o9ljyn,nfiumb4, xbig,jmwo0z,lq8reh,
+ &go0l1q,blq5vu, jrxg6l,ioqzvb,i0qvzl,das4bx, zx1610,i83h1, bgu6fw,
+ &zxao0o, xhe4cg, zkjqhi, zqve1l, vvl1li)
+ implicit logical (a-z)
+ integer lku8xq, c4bdmu, o9ljyn, nfiumb4, zx1610, i83h1(o9ljyn),
+ &xhe4cg, zkjqhi, zqve1l(1), vvl1li(1)
+ double precision xbig(c4bdmu,o9ljyn), jmwo0z(nfiumb4,lku8xq),
+ &lq8reh(o9ljyn), go0l1q(lku8xq,nfiumb4), blq5vu(c4bdmu), jrxg6l(
+ &zkjqhi,nfiumb4), ioqzvb(c4bdmu,o9ljyn), i0qvzl(o9ljyn), das4bx,
+ &bgu6fw(c4bdmu), zxao0o(lku8xq,lku8xq,5)
+ integer w3gohz, d9rjek, nd6mep, xi1mqb, hv3wja, tvyd2b, fjg0qv
+ double precision gwu72m, dyb3po, t7sbea
+ t7sbea=1.0d-7
+ tvyd2b=101
+ fjg0qv=1
+ dyb3po=1.0d0
+ if(.not.(zx1610 .eq. 0))goto 23476
+ do 23478 d9rjek=1,o9ljyn
+ do 23480 w3gohz=1,c4bdmu
+ ioqzvb(w3gohz,d9rjek) = xbig(w3gohz,d9rjek)
+23480 continue
+23478 continue
+ do 23482 nd6mep=1,o9ljyn
+ i83h1(nd6mep) = nd6mep
+23482 continue
+ call mux17f(jrxg6l, ioqzvb, lku8xq, o9ljyn, nfiumb4, zxao0o(1,1,1)
+ &, zxao0o(1,1,2), zqve1l, vvl1li, zkjqhi, c4bdmu)
+ call dhkt9w(ioqzvb,c4bdmu,c4bdmu,o9ljyn,i0qvzl,i83h1,bgu6fw,
+ &zx1610,t7sbea)
+23476 continue
+ call mux22f(jrxg6l,jmwo0z,bgu6fw,zkjqhi,zqve1l,vvl1li,nfiumb4,
+ &lku8xq,zxao0o)
+ nd6mep=1
+ do 23484 d9rjek=1,lku8xq
+ do 23486 w3gohz=1,nfiumb4
+ jmwo0z(w3gohz,d9rjek)=bgu6fw(nd6mep)
+ nd6mep=nd6mep+1
+23486 continue
+23484 continue
+ call vdqrsl(ioqzvb,c4bdmu,c4bdmu,zx1610,i0qvzl,jmwo0z,bgu6fw(1),
+ &blq5vu,lq8reh, bgu6fw(1),go0l1q,tvyd2b,fjg0qv)
+ das4bx=0.0d0
+ xi1mqb=0
+ hv3wja=1
+ do 23488 w3gohz=1,nfiumb4
+ do 23490 d9rjek=1,lku8xq
+ xi1mqb = xi1mqb + 1
+ if(.not.(xi1mqb .gt. nfiumb4))goto 23492
+ xi1mqb = 1
+ hv3wja = hv3wja + 1
+23492 continue
+ gwu72m = jmwo0z(xi1mqb,hv3wja) - go0l1q(d9rjek,w3gohz)
+ das4bx = das4bx + gwu72m * gwu72m
+23490 continue
+23488 continue
+ call vbksf(jrxg6l,go0l1q,lku8xq,nfiumb4,zxao0o,zqve1l,vvl1li,
+ &xhe4cg)
+ do 23494 d9rjek=1,o9ljyn
+ bgu6fw(d9rjek) = lq8reh(d9rjek)
+23494 continue
+ do 23496 d9rjek=1,o9ljyn
+ lq8reh(i83h1(d9rjek)) = bgu6fw(d9rjek)
+23496 continue
+ return
+ end
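+c
+c Nb. an outline of vglmf/vfit, inferred from the generated code.
+c vglmf loops over vfit calls; vcall1 updates sehz7y to signal
+c another pass (with d8gwha .eq. 0 exactly one pass is made), and
+c each extra pass sets zx1610 = 0 to force a fresh decomposition.
+c One vfit call is one weighted least squares step:
+c   1. if zx1610 .eq. 0, mux17f premultiplies each block of xbig by
+c      the Cholesky factor of that observation's weight matrix and
+c      dhkt9w computes a pivoted QR decomposition,
+c   2. mux22f applies the same factors to the response jmwo0z,
+c   3. vdqrsl solves for the coefficients lq8reh and the fit go0l1q,
+c   4. das4bx accumulates the residual sum of squares,
+c   5. vbksf back-solves go0l1q to the original scale, and
+c   6. the last two loops undo the pivoting in lq8reh via i83h1.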
diff --git a/src/vlinpack1.f b/src/vlinpack1.f
new file mode 100644
index 0000000..9d9d623
--- /dev/null
+++ b/src/vlinpack1.f
@@ -0,0 +1,81 @@
+ subroutine dhkt9w(x,ldx,n,p,i0qvzl,jpvt,bgu6fw,cqui1v,eps)
+ implicit double precision (a-h,o-z)
+ implicit integer (i-n)
+ double precision dsign, dabs, dmax1, dsqrt
+ integer min0
+ integer ldx,n,p,cqui1v
+ integer jpvt(1)
+ integer j,jj,jp,l,lup,qxy4wd
+ double precision x(ldx,p),i0qvzl(p),bgu6fw(1),eps
+ double precision vdnrm2,tt
+ double precision ddot8,nrmxl,t
+ do 23000 j=1,p
+ i0qvzl(j) = vdnrm2(n,x(1,j),ldx,1)
+ bgu6fw(j) = i0qvzl(j)
+23000 continue
+ l=1
+ lup = min0(n,p)
+ qxy4wd = p
+23002 if(.not.(l.le.lup))goto 23003
+ i0qvzl(l) = 0.0d0
+ nrmxl = vdnrm2(n-l+1, x(l,l), ldx, 1)
+ if(.not.(nrmxl .lt. eps))goto 23004
+ call dshift8(x,ldx,n,l,qxy4wd)
+ jp = jpvt(l)
+ t=i0qvzl(l)
+ tt=bgu6fw(l)
+ j=l+1
+23006 if(.not.(j.le.qxy4wd))goto 23008
+ jj=j-1
+ jpvt(jj)=jpvt(j)
+ i0qvzl(jj)=i0qvzl(j)
+ bgu6fw(jj)=bgu6fw(j)
+ j=j+1
+ goto 23006
+23008 continue
+ jpvt(qxy4wd)=jp
+ i0qvzl(qxy4wd)=t
+ bgu6fw(qxy4wd)=tt
+ qxy4wd=qxy4wd-1
+ if(.not.(lup.gt.qxy4wd))goto 23009
+ lup=qxy4wd
+23009 continue
+ goto 23005
+23004 continue
+ if(.not.(l.eq.n))goto 23011
+ goto 23003
+23011 continue
+ if(.not.(x(l,l).ne.0.0d0))goto 23013
+ nrmxl = dsign(nrmxl,x(l,l))
+23013 continue
+ call dscal8(n-l+1,1.0d0/nrmxl,x(l,l),1)
+ x(l,l) = 1.0d0+x(l,l)
+ j=l+1
+23015 if(.not.(j.le.qxy4wd))goto 23017
+ t = -ddot8(n-l+1,x(l,l),1,x(l,j),1)/x(l,l)
+ call daxpy8(n-l+1,t,x(l,l),1,x(l,j),1)
+ if(.not.(i0qvzl(j).ne.0.0d0))goto 23018
+ tt = 1.0d0-(dabs(x(l,j))/i0qvzl(j))**2
+ tt = dmax1(tt,0.0d0)
+ t = tt
+ tt = 1.0d0+0.05d0*tt*(i0qvzl(j)/bgu6fw(j))**2
+ if(.not.(tt.ne.1.0d0))goto 23020
+ i0qvzl(j) = i0qvzl(j)*dsqrt(t)
+ goto 23021
+23020 continue
+ i0qvzl(j) = vdnrm2(n-l,x(l+1,j),ldx,1)
+ bgu6fw(j) = i0qvzl(j)
+23021 continue
+23018 continue
+ j=j+1
+ goto 23015
+23017 continue
+ i0qvzl(l) = x(l,l)
+ x(l,l) = -nrmxl
+ l=l+1
+23005 continue
+ goto 23002
+23003 continue
+ cqui1v = lup
+ return
+ end
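+c
+c Nb. dhkt9w above is a Householder QR decomposition in the style of
+c linpack's dqrdc: any column whose remaining norm nrmxl falls below
+c eps is cycled to the end with dshift8 and lup is shrunk, so cqui1v
+c returns the number of columns accepted (a numerical rank).  The
+c 0.05d0 test is the usual dqrdc guard that recomputes a column norm
+c from scratch once downdating has lost too much accuracy.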
diff --git a/src/vlinpack2.f b/src/vlinpack2.f
new file mode 100644
index 0000000..ecd0ce5
--- /dev/null
+++ b/src/vlinpack2.f
@@ -0,0 +1,298 @@
+c This file contains modified code from Hastie and Tibshirani's
+c GAMFIT code, as well as a rational Cholesky function or two.
+c All code here derives from linpack.
+c T.Yee 7/10/99
+
+
+
+
+c This function was formerly real function dnrm2, but has now been
+c converted to double precision.
+c Nb. changed "float(n)" to "dfloat(n)"
+
+ double precision function vdnrm2 ( n, dx,ldx, incx)
+c
+c added by tyee 23/9/00:
+ implicit double precision (a-h,o-z)
+ implicit integer (i-n)
+c
+ integer next
+ double precision dx(ldx), cutlo, cuthi, hitest, sum
+ double precision xmax,zero,one
+ data zero, one /0.0d0, 1.0d0/
+c
+c euclidean norm of the n-vector stored in dx() with storage
+c increment incx .
+c if n .le. 0 return with result = 0.
+c if n .ge. 1 then incx must be .ge. 1
+c
+c c.l.lawson, 1978 jan 08
+c
+c four phase method using two built-in constants that are
+c hopefully applicable to all machines.
+c cutlo = maximum of dsqrt(u/eps) over all known machines.
+c cuthi = minimum of dsqrt(v) over all known machines.
+c where
+c eps = smallest no. such that eps + 1. .gt. 1.
+c u = smallest positive no. (underflow limit)
+c v = largest no. (overflow limit)
+c
+c brief outline of algorithm..
+c
+c phase 1 scans zero components.
+c move to phase 2 when a component is nonzero and .le. cutlo
+c move to phase 3 when a component is .gt. cutlo
+c move to phase 4 when a component is .ge. cuthi/m
+c where m = n for x() double precision and m = 2*n for complex.
+c
+c values for cutlo and cuthi..
+c from the environmental parameters listed in the imsl converter
+c document the limiting values are as follows..
+c cutlo, s.p. u/eps = 2**(-102) for honeywell. close seconds are
+c univac and dec at 2**(-103)
+c thus cutlo = 2**(-51) = 4.44089e-16
+c cuthi, s.p. v = 2**127 for univac, honeywell, and dec.
+c thus cuthi = 2**(63.5) = 1.30438e19
+c cutlo, d.p. u/eps = 2**(-67) for honeywell and dec.
+c thus cutlo = 2**(-33.5) = 8.23181e-11
+c cuthi, d.p. same as s.p. cuthi = 1.30438e19
+c data cutlo, cuthi / 8.232d-11, 1.304d19 /
+c data cutlo, cuthi / 4.441d-16, 1.304d19 /
+ data cutlo, cuthi / 8.232d-11, 1.304d19 /
+c
+
+ if(n .gt. 0) go to 10
+ vdnrm2 = zero
+ go to 300
+c
+ 10 next = 30
+ sum = zero
+ nn = n * incx
+c begin main loop
+ i = 1
+c 20 go to next,(30, 50, 70, 110)
+ 20 if(next .eq. 30) go to 30
+ if(next .eq. 50) go to 50
+ if(next .eq. 70) go to 70
+ if(next .eq. 110) go to 110
+c An error!!!
+ vdnrm2 = 0.0d0
+ return
+
+ 30 if( dabs(dx(i)) .gt. cutlo) go to 85
+ next = 50
+ xmax = zero
+c
+c phase 1. sum is zero
+c
+ 50 if( dx(i) .eq. zero) go to 200
+ if( dabs(dx(i)) .gt. cutlo) go to 85
+c
+c prepare for phase 2.
+ next = 70
+ go to 105
+c
+c prepare for phase 4.
+c
+ 100 i = j
+ next = 110
+ sum = (sum / dx(i)) / dx(i)
+ 105 xmax = dabs(dx(i))
+ go to 115
+c
+c phase 2. sum is small.
+c scale to avoid destructive underflow.
+c
+ 70 if( dabs(dx(i)) .gt. cutlo ) go to 75
+c
+c common code for phases 2 and 4.
+c in phase 4 sum is large. scale to avoid overflow.
+c
+ 110 if( dabs(dx(i)) .le. xmax ) go to 115
+c 11/4/01: replacing "**2.0d0" by "**2" (three times in this file)
+ sum = one + sum * (xmax / dx(i))**2
+ xmax = dabs(dx(i))
+ go to 200
+c
+ 115 sum = sum + (dx(i)/xmax)**2
+ go to 200
+c
+c
+c prepare for phase 3.
+c
+ 75 sum = (sum * xmax) * xmax
+c
+c
+c for real or d.p. set hitest = cuthi/n
+c for complex set hitest = cuthi/(2*n)
+c
+ 85 hitest = cuthi / dfloat( n )
+c
+c phase 3. sum is mid-range. no scaling.
+c
+ do 95 j =i,nn,incx
+ if(dabs(dx(j)) .ge. hitest) go to 100
+ 95 sum = sum + dx(j)**2
+ vdnrm2 = dsqrt( sum )
+ go to 300
+c
+ 200 continue
+ i = i + incx
+ if ( i .le. nn ) go to 20
+c
+c end of main loop.
+c
+c compute square root and adjust for scaling.
+c
+ vdnrm2 = xmax * dsqrt(sum)
+ 300 continue
+ return
+ end
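+c
+c The four phases above exist only to dodge overflow and underflow;
+c the identity being computed is just
+c     norm2(x) = xmax * dsqrt( sum_i (x(i)/xmax)**2 ).
+c A minimal sketch of that identity follows (for exposition only:
+c nothing in the package calls it, and nrmsk8 is a made-up name):
+
+ double precision function nrmsk8(n, dx)
+ implicit double precision (a-h,o-z)
+ implicit integer (i-n)
+ integer n, i
+ double precision dx(n), xmax, sum
+c first pass: find the largest magnitude, the scaling factor
+ xmax = 0.0d0
+ do 10 i = 1,n
+ if (dabs(dx(i)) .gt. xmax) xmax = dabs(dx(i))
+ 10 continue
+ nrmsk8 = 0.0d0
+ if (xmax .eq. 0.0d0) return
+c second pass: every ratio is .le. 1, so no square can overflow
+ sum = 0.0d0
+ do 20 i = 1,n
+ sum = sum + (dx(i)/xmax)**2
+ 20 continue
+ nrmsk8 = xmax * dsqrt(sum)
+ return
+ end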
+
+
+
+c ==============================================================
+c This is modified linpack Fortran code
+c Changes marked with yyy
+c 23/9/99
+c Works
+
+
+ subroutine vdpbfa7(abd,lda,n,m,info,d)
+ integer lda,n,m,info
+ double precision abd(lda,1), d(n)
+c
+c vdpbfa7 is dpbfa8 but uses Rational Cholesky instead of ordinary
+c Cholesky
+c
+c abd = t(u) d u where u is unit upper triangular and d is diagonal
+c the diagonal of d is stored where the 1's of the u would be stored
+c
+c See dpbfa8 for more information
+c d(1:n) is assigned the values of diag(d), and abd(m+1,) <- 1
+c
+c Improvement yet to do:
+c delete d and put its contents into abd(m+1,) (intrinsic 1's)
+c
+c internal variables
+c
+c double precision ddot8
+ double precision s,t
+ integer ik,j,jk,k,mu, i,row
+c begin block with ...exits to 40
+c
+c
+c yyy
+ d(1) = abd(m+1,1)
+c
+ do 30 j = 1, n
+c print *, "j = ", j
+ info = j
+ s = 0.0d0
+ ik = m + 1
+ jk = max0(j-m,1)
+ mu = max0(m+2-j,1)
+ if (m .lt. mu) go to 20
+ do 10 k = mu, m
+c print *, " k = ", k
+c t = abd(k,j) - ddot8(k-mu,abd(ik,jk),1,abd(mu,j),1)
+c
+ t = abd(k,j)
+ do 1 i = 1,k-mu
+ row = mu-2+i+j-m
+ t = t - d(row)*abd(ik-1+i,jk)*abd(mu-1+i,j)
+c print *, " row = ", row
+ 1 continue
+c
+c yyy
+c t = t/abd(m+1,jk)
+ row = mu-2+(k-mu+1)+j-m
+c print *, " row = ", row
+ t = t/d(row)
+c
+ abd(k,j) = t
+c
+c yyy
+c print *, " index = ", mu-1+i+j-m
+ s = s + t*t*d(row)
+c
+ ik = ik - 1
+ jk = jk + 1
+ 10 continue
+ 20 continue
+ s = abd(m+1,j) - s
+c
+c ......exit
+ if (s .le. 0.0d0) go to 40
+c
+c yyy
+c abd(m+1,j) = dsqrt(s)
+ abd(m+1,j) = 1d0
+ d(j) = s
+c
+ 30 continue
+ info = 0
+ 40 continue
+ return
+ end
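+c
+c Nb. "rational Cholesky" is the square-root-free LDL' variant: the
+c banded s.p.d. matrix A is factored as A = U' D U with U unit upper
+c triangular and D diagonal, via
+c     d(j) = a(j,j) - sum_{k.lt.j} d(k) * u(k,j)**2
+c     u(k,j) = ( a(k,j) - sum_{i.lt.k} d(i) u(i,k) u(i,j) ) / d(k)
+c (sums restricted to the band), so no dsqrt is needed, and a
+c nonpositive pivot s leaves info = j on exit.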
+
+
+
+ subroutine vdpbsl7(abd,lda,n,m,b,d)
+ integer lda,n,m
+ double precision abd(lda,1),b(1),d(1)
+c
+c vdpbsl7 is dpbsl8 but uses Rational Cholesky instead of ordinary
+c Cholesky
+c
+c See dpbsl8 for more information
+c
+c Improvement yet to do:
+c delete d and put its contents into abd(m+1,) (intrinsic 1's)
+c
+c internal variables
+c
+ double precision ddot8,t
+ integer k,kb,la,lb,lm
+c
+c solve trans(r)*y = b
+c
+ do 10 k = 1, n
+ lm = min0(k-1,m)
+ la = m + 1 - lm
+ lb = k - lm
+ t = ddot8(lm,abd(la,k),1,b(lb),1)
+c
+c yyy
+c b(k) = (b(k) - t)/abd(m+1,k)
+ b(k) = b(k) - t
+c
+ 10 continue
+c
+c
+c yyy
+ do 15 k = 1, n
+ b(k) = b(k)/d(k)
+ 15 continue
+c
+c
+c solve r*x = y
+c
+ do 20 kb = 1, n
+ k = n + 1 - kb
+ lm = min0(k-1,m)
+ la = m + 1 - lm
+ lb = k - lm
+c
+c yyy
+c b(k) = b(k)/abd(m+1,k)
+c
+ t = -b(k)
+ call daxpy8(lm,t,abd(la,k),1,b(lb),1)
+ 20 continue
+ return
+ end
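+c
+c Nb. given the U' D U factors from vdpbfa7, vdpbsl7 solves A x = b
+c in three sweeps:
+c     U' y = b    (forward substitution, loop 10),
+c     z = y / d   (diagonal scaling, loop 15),
+c     U x = z     (back substitution, loop 20),
+c the rational-Cholesky analogue of the two triangular solves in
+c dpbsl8.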
+
+
+
+
diff --git a/src/vlinpack3.f b/src/vlinpack3.f
new file mode 100644
index 0000000..77ce929
--- /dev/null
+++ b/src/vlinpack3.f
@@ -0,0 +1,662 @@
+c 1/4/00
+c The following code is linpack.f from GAMFIT
+c For R.1.0-0, subroutine dshift is needed
+
+c 12/7/02; T.Yee
+c I've modified the routines in this file so that reals become double
+c precision. The subroutines and functions may have an "8" appended
+c to (hopefully) make their names unique.
+c All this is for the VGAM package.
+c For example, "real function ddot" became "double precision function ddot8".
+c I might add an "implicit logical (a-z)" line to pick up errors.
+
+
+
+ subroutine daxpy8(n,da,dx,incx,dy,incy)
+ implicit logical (a-z)
+c
+c constant times a vector plus a vector.
+c uses unrolled loops for increments equal to one.
+c jack dongarra, linpack, 3/11/78.
+c
+ double precision dx(1),dy(1),da
+ integer i,incx,incy,m,mp1,n
+
+c Undeclared, so added by T.Yee
+ integer ix, iy
+
+c
+ if(n.le.0)return
+ if (da .eq. 0.0d0) return
+ if(incx.eq.1.and.incy.eq.1)go to 20
+c
+c code for unequal increments or equal increments
+c not equal to 1
+c
+ ix = 1
+ iy = 1
+ if(incx.lt.0)ix = (-n+1)*incx + 1
+ if(incy.lt.0)iy = (-n+1)*incy + 1
+ do 10 i = 1,n
+ dy(iy) = dy(iy) + da*dx(ix)
+ ix = ix + incx
+ iy = iy + incy
+ 10 continue
+ return
+c
+c code for both increments equal to 1
+c
+c
+c clean-up loop
+c
+ 20 m = mod(n,4)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ dy(i) = dy(i) + da*dx(i)
+ 30 continue
+ if( n .lt. 4 ) return
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,4
+ dy(i) = dy(i) + da*dx(i)
+ dy(i + 1) = dy(i + 1) + da*dx(i + 1)
+ dy(i + 2) = dy(i + 2) + da*dx(i + 2)
+ dy(i + 3) = dy(i + 3) + da*dx(i + 3)
+ 50 continue
+ return
+ end
+
+
+
+ subroutine dcopy8(n,dx,incx,dy,incy)
+ implicit logical (a-z)
+c
+c copies a vector, x, to a vector, y.
+c uses unrolled loops for increments equal to one.
+c jack dongarra, linpack, 3/11/78.
+c
+ double precision dx(1),dy(1)
+ integer i,incx,incy,ix,iy,m,mp1,n
+c
+ if(n.le.0)return
+ if(incx.eq.1.and.incy.eq.1)go to 20
+c
+c code for unequal increments or equal increments
+c not equal to 1
+c
+ ix = 1
+ iy = 1
+ if(incx.lt.0)ix = (-n+1)*incx + 1
+ if(incy.lt.0)iy = (-n+1)*incy + 1
+ do 10 i = 1,n
+ dy(iy) = dx(ix)
+ ix = ix + incx
+ iy = iy + incy
+ 10 continue
+ return
+c
+c code for both increments equal to 1
+c
+c
+c clean-up loop
+c
+ 20 m = mod(n,7)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ dy(i) = dx(i)
+ 30 continue
+ if( n .lt. 7 ) return
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,7
+ dy(i) = dx(i)
+ dy(i + 1) = dx(i + 1)
+ dy(i + 2) = dx(i + 2)
+ dy(i + 3) = dx(i + 3)
+ dy(i + 4) = dx(i + 4)
+ dy(i + 5) = dx(i + 5)
+ dy(i + 6) = dx(i + 6)
+ 50 continue
+ return
+ end
+
+
+
+ double precision function ddot8(n,dx,incx,dy,incy)
+c
+c 12/7/02; T.Yee
+c I've modified "real function ddot" to "double precision function ddot8"
+c for the VGAM package.
+c I've added the "implicit logical (a-z)" line.
+
+ implicit logical (a-z)
+
+c
+c forms the dot product of two vectors.
+c uses unrolled loops for increments equal to one.
+c jack dongarra, linpack, 3/11/78.
+c
+ double precision dx(1),dy(1),dtemp
+ integer i,incx,incy,ix,iy,m,mp1,n
+c
+ ddot8 = 0.0d0
+ dtemp = 0.0d0
+ if(n.le.0)return
+ if(incx.eq.1.and.incy.eq.1)go to 20
+c
+c code for unequal increments or equal increments
+c not equal to 1
+c
+ ix = 1
+ iy = 1
+ if(incx.lt.0)ix = (-n+1)*incx + 1
+ if(incy.lt.0)iy = (-n+1)*incy + 1
+ do 10 i = 1,n
+ dtemp = dtemp + dx(ix)*dy(iy)
+ ix = ix + incx
+ iy = iy + incy
+ 10 continue
+ ddot8 = dtemp
+ return
+c
+c code for both increments equal to 1
+c
+c
+c clean-up loop
+c
+ 20 m = mod(n,5)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ dtemp = dtemp + dx(i)*dy(i)
+ 30 continue
+ if( n .lt. 5 ) go to 60
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,5
+ dtemp = dtemp + dx(i)*dy(i) + dx(i + 1)*dy(i + 1) +
+ * dx(i + 2)*dy(i + 2) + dx(i + 3)*dy(i + 3)+dx(i + 4)*dy(i + 4)
+ 50 continue
+ 60 ddot8 = dtemp
+ return
+ end
+
+
+
+ double precision function dnrm28 ( n, dx,ldx, incx)
+ implicit logical (a-z)
+
+c Undeclared, so added by T.Yee
+ integer n, ldx, incx, i, j, nn
+
+ integer next
+ double precision dx(ldx), cutlo, cuthi, hitest, sum,
+ * xmax,zero,one
+
+ data zero, one /0.0d0, 1.0d0/
+c
+c euclidean norm of the n-vector stored in dx() with storage
+c increment incx .
+c if n .le. 0 return with result = 0.
+c if n .ge. 1 then incx must be .ge. 1
+c
+c c.l.lawson, 1978 jan 08
+c
+c four phase method using two built-in constants that are
+c hopefully applicable to all machines.
+c cutlo = maximum of sqrt(u/eps) over all known machines.
+c cuthi = minimum of sqrt(v) over all known machines.
+c where
+c eps = smallest no. such that eps + 1. .gt. 1.
+c u = smallest positive no. (underflow limit)
+c v = largest no. (overflow limit)
+c
+c brief outline of algorithm..
+c
+c phase 1 scans zero components.
+c move to phase 2 when a component is nonzero and .le. cutlo
+c move to phase 3 when a component is .gt. cutlo
+c move to phase 4 when a component is .ge. cuthi/m
+c where m = n for x() double precision and m = 2*n for complex.
+c
+c values for cutlo and cuthi..
+c from the environmental parameters listed in the imsl converter
+c document the limiting values are as follows..
+c cutlo, s.p. u/eps = 2**(-102) for honeywell. close seconds are
+c univac and dec at 2**(-103)
+c thus cutlo = 2**(-51) = 4.44089d-16
+c cuthi, s.p. v = 2**127 for univac, honeywell, and dec.
+c thus cuthi = 2**(63.5) = 1.30438d19
+c cutlo, d.p. u/eps = 2**(-67) for honeywell and dec.
+c thus cutlo = 2**(-33.5) = 8.23181d-11
+c cuthi, d.p. same as s.p. cuthi = 1.30438d19
+c data cutlo, cuthi / 8.232d-11, 1.304d19 /
+c data cutlo, cuthi / 4.441d-16, 1.304d19 /
+ data cutlo, cuthi / 8.232d-11, 1.304d19 /
+c
+ if(n .gt. 0) go to 10
+ dnrm28 = zero
+ go to 300
+c
+ 10 next = 30
+ sum = zero
+ nn = n * incx
+c begin main loop
+ i = 1
+c 20 go to next,(30, 50, 70, 110)
+ 20 if(next .eq. 30) go to 30
+ if(next .eq. 50) go to 50
+ if(next .eq. 70) go to 70
+ if(next .eq. 110) go to 110
+c An error!!!
+ dnrm28 = 0.0d0
+ return
+c
+ 30 if( dabs(dx(i)) .gt. cutlo) go to 85
+ next = 50
+ xmax = zero
+c
+c phase 1. sum is zero
+c
+ 50 if( dx(i) .eq. zero) go to 200
+ if( dabs(dx(i)) .gt. cutlo) go to 85
+c
+c prepare for phase 2.
+ next = 70
+ go to 105
+c
+c prepare for phase 4.
+c
+ 100 i = j
+ next = 110
+ sum = (sum / dx(i)) / dx(i)
+ 105 xmax = dabs(dx(i))
+ go to 115
+c
+c phase 2. sum is small.
+c scale to avoid destructive underflow.
+c
+ 70 if( dabs(dx(i)) .gt. cutlo ) go to 75
+c
+c common code for phases 2 and 4.
+c in phase 4 sum is large. scale to avoid overflow.
+c
+ 110 if( dabs(dx(i)) .le. xmax ) go to 115
+ sum = one + sum * (xmax / dx(i))**2
+ xmax = dabs(dx(i))
+ go to 200
+c
+ 115 sum = sum + (dx(i)/xmax)**2
+ go to 200
+c
+c
+c prepare for phase 3.
+c
+ 75 sum = (sum * xmax) * xmax
+c
+c
+c for real or d.p. set hitest = cuthi/n
+c for complex set hitest = cuthi/(2*n)
+c
+c "float" changed to "dfloat" by T.Yee
+ 85 hitest = cuthi/dfloat( n )
+c
+c phase 3. sum is mid-range. no scaling.
+c
+ do 95 j =i,nn,incx
+ if(dabs(dx(j)) .ge. hitest) go to 100
+ 95 sum = sum + dx(j)**2
+ dnrm28 = dsqrt( sum )
+ go to 300
+c
+ 200 continue
+ i = i + incx
+ if ( i .le. nn ) go to 20
+c
+c end of main loop.
+c
+c compute square root and adjust for scaling.
+c
+ dnrm28 = xmax * dsqrt(sum)
+ 300 continue
+ return
+ end
+
+
+ subroutine dscal8(n,da,dx,incx)
+ implicit logical (a-z)
+c
+c scales a vector by a constant.
+c uses unrolled loops for increment equal to one.
+c jack dongarra, linpack, 3/11/78.
+c
+ double precision da,dx(1)
+ integer i,incx,m,mp1,n,nincx
+c
+ if(n.le.0)return
+ if(incx.eq.1)go to 20
+c
+c code for increment not equal to 1
+c
+ nincx = n*incx
+ do 10 i = 1,nincx,incx
+ dx(i) = da*dx(i)
+ 10 continue
+ return
+c
+c code for increment equal to 1
+c
+c
+c clean-up loop
+c
+ 20 m = mod(n,5)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ dx(i) = da*dx(i)
+ 30 continue
+ if( n .lt. 5 ) return
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,5
+ dx(i) = da*dx(i)
+ dx(i + 1) = da*dx(i + 1)
+ dx(i + 2) = da*dx(i + 2)
+ dx(i + 3) = da*dx(i + 3)
+ dx(i + 4) = da*dx(i + 4)
+ 50 continue
+ return
+ end
+
+
+
+ subroutine dshift8(x,ldx,n,j,k)
+ implicit logical (a-z)
+ integer ldx,n,j,k
+ double precision x(ldx,k), tt
+ integer i,jj
+ if(k.le.j)return
+ do 100 i=1,n
+ tt=x(i,j)
+ do 50 jj=j+1,k
+ x(i,jj-1)=x(i,jj)
+ 50 continue
+ x(i,k)=tt
+100 continue
+ return
+ end
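+c
+c dshift8 rotates columns j..k of x one place to the left, wrapping
+c column j around to position k.  E.g. with columns (c1,c2,c3,c4)
+c and j = 2, k = 4:  before  c1 c2 c3 c4,  after  c1 c3 c4 c2.
+c dhkt9w in vlinpack1.f uses this to cycle a near-degenerate column
+c to the end of the matrix.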
+
+
+
+
+ subroutine vdqrsl(x,ldx,n,k,qraux,y,qy,qty,b,rsd,xb,job,info)
+ implicit logical (a-z)
+ integer ldx,n,k,job,info
+ double precision x(ldx,1),qraux(1),y(1),qy(1),qty(1),b(1),rsd(1),
+ * xb(1)
+c
+c dqrsl applies the output of dqrdc to compute coordinate
+c transformations, projections, and least squares solutions.
+c for k .le. min(n,p), let xk be the matrix
+c
+c xk = (x(jpvt(1)),x(jpvt(2)), ... ,x(jpvt(k)))
+c
+c formed from columns jpvt(1), ... ,jpvt(k) of the original
+c n x p matrix x that was input to dqrdc (if no pivoting was
+c done, xk consists of the first k columns of x in their
+c original order). dqrdc produces a factored orthogonal matrix q
+c and an upper triangular matrix r such that
+c
+c xk = q * (r)
+c (0)
+c
+c this information is contained in coded form in the arrays
+c x and qraux.
+c
+c on entry
+c
+c x double precision(ldx,p).
+c x contains the output of dqrdc.
+c
+c ldx integer.
+c ldx is the leading dimension of the array x.
+c
+c n integer.
+c n is the number of rows of the matrix xk. it must
+c have the same value as n in dqrdc.
+c
+c k integer.
+c k is the number of columns of the matrix xk. k
+c must not be greater than min(n,p), where p is the
+c same as in the calling sequence to dqrdc.
+c
+c qraux double precision(p).
+c qraux contains the auxiliary output from dqrdc.
+c
+c y double precision(n)
+c y contains an n-vector that is to be manipulated
+c by dqrsl.
+c
+c job integer.
+c job specifies what is to be computed. job has
+c the decimal expansion abcde, with the following
+c meaning.
+c
+c if a.ne.0, compute qy.
+c if b,c,d, or e .ne. 0, compute qty.
+c if c.ne.0, compute b.
+c if d.ne.0, compute rsd.
+c if e.ne.0, compute xb.
+c
+c note that a request to compute b, rsd, or xb
+c automatically triggers the computation of qty, for
+c which an array must be provided in the calling
+c sequence.
+c
+c on return
+c
+c qy double precision(n).
+c qy contains q*y, if its computation has been
+c requested.
+c
+c qty double precision(n).
+c qty contains trans(q)*y, if its computation has
+c been requested. here trans(q) is the
+c transpose of the matrix q.
+c
+c b double precision(k)
+c b contains the solution of the least squares problem
+c
+c minimize norm2(y - xk*b),
+c
+c if its computation has been requested. (note that
+c if pivoting was requested in dqrdc, the j-th
+c component of b will be associated with column jpvt(j)
+c of the original matrix x that was input into dqrdc.)
+c
+c rsd double precision(n).
+c rsd contains the least squares residual y - xk*b,
+c if its computation has been requested. rsd is
+c also the orthogonal projection of y onto the
+c orthogonal complement of the column space of xk.
+c
+c xb double precision(n).
+c xb contains the least squares approximation xk*b,
+c if its computation has been requested. xb is also
+c the orthogonal projection of y onto the column space
+c of x.
+c
+c info integer.
+c info is zero unless the computation of b has
+c been requested and r is exactly singular. in
+c this case, info is the index of the first zero
+c diagonal element of r and b is left unaltered.
+c
+c the parameters qy, qty, b, rsd, and xb are not referenced
+c if their computation is not requested and in this case
+c can be replaced by dummy variables in the calling program.
+c to save storage, the user may in some cases use the same
+c array for different parameters in the calling sequence. a
+c frequently occurring example is when one wishes to compute
+c any of b, rsd, or xb and does not need y or qty. in this
+c case one may identify y, qty, and one of b, rsd, or xb, while
+c providing separate arrays for anything else that is to be
+c computed. thus the calling sequence
+c
+c call dqrsl(x,ldx,n,k,qraux,y,dum,y,b,y,dum,110,info)
+c
+c will result in the computation of b and rsd, with rsd
+c overwriting y. more generally, each item in the following
+c list contains groups of permissible identifications for
+c a single calling sequence.
+c
+c 1. (y,qty,b) (rsd) (xb) (qy)
+c
+c 2. (y,qty,rsd) (b) (xb) (qy)
+c
+c 3. (y,qty,xb) (b) (rsd) (qy)
+c
+c 4. (y,qy) (qty,b) (rsd) (xb)
+c
+c 5. (y,qy) (qty,rsd) (b) (xb)
+c
+c 6. (y,qy) (qty,xb) (b) (rsd)
+c
+c in any group the value returned in the array allocated to
+c the group corresponds to the last member of the group.
+c
+c linpack. this version dated 08/14/78 .
+c g.w. stewart, university of maryland, argonne national lab.
+c
+c dqrsl uses the following functions and subprograms.
+c
+c blas daxpy8,dcopy8,ddot8
+c fortran dabs,min0,mod
+c
+c internal variables
+c
+ integer i,j,jj,ju,kp1
+ double precision ddot8,t,temp
+ logical cb,cqy,cqty,cr,cxb
+c
+c
+c set info flag.
+c
+ info = 0
+c
+c determine what is to be computed.
+c
+ cqy = job/10000 .ne. 0
+ cqty = mod(job,10000) .ne. 0
+ cb = mod(job,1000)/100 .ne. 0
+ cr = mod(job,100)/10 .ne. 0
+ cxb = mod(job,10) .ne. 0
+ ju = min0(k,n-1)
+c
+c special action when n=1.
+c
+ if (ju .ne. 0) go to 40
+ if (cqy) qy(1) = y(1)
+ if (cqty) qty(1) = y(1)
+ if (cxb) xb(1) = y(1)
+ if (.not.cb) go to 30
+ if (x(1,1) .ne. 0.0d0) go to 10
+ info = 1
+ go to 20
+ 10 continue
+ b(1) = y(1)/x(1,1)
+ 20 continue
+ 30 continue
+ if (cr) rsd(1) = 0.0d0
+ go to 250
+ 40 continue
+c
+c set up to compute qy or qty.
+c
+ if (cqy) call dcopy8(n,y,1,qy,1)
+ if (cqty) call dcopy8(n,y,1,qty,1)
+ if (.not.cqy) go to 70
+c
+c compute qy.
+c
+ do 60 jj = 1, ju
+ j = ju - jj + 1
+ if (qraux(j) .eq. 0.0d0) go to 50
+ temp = x(j,j)
+ x(j,j) = qraux(j)
+ t = -ddot8(n-j+1,x(j,j),1,qy(j),1)/x(j,j)
+ call daxpy8(n-j+1,t,x(j,j),1,qy(j),1)
+ x(j,j) = temp
+ 50 continue
+ 60 continue
+ 70 continue
+ if (.not.cqty) go to 100
+c
+c compute trans(q)*y.
+c
+ do 90 j = 1, ju
+ if (qraux(j) .eq. 0.0d0) go to 80
+ temp = x(j,j)
+ x(j,j) = qraux(j)
+ t = -ddot8(n-j+1,x(j,j),1,qty(j),1)/x(j,j)
+ call daxpy8(n-j+1,t,x(j,j),1,qty(j),1)
+ x(j,j) = temp
+ 80 continue
+ 90 continue
+ 100 continue
+c
+c set up to compute b, rsd, or xb.
+c
+ if (cb) call dcopy8(k,qty,1,b,1)
+ kp1 = k + 1
+ if (cxb) call dcopy8(k,qty,1,xb,1)
+ if(cr .and. k .lt. n) call dcopy8(n-k,qty(kp1),1,rsd(kp1),1)
+ if (.not.cxb .or. kp1 .gt. n) go to 120
+ do 110 i = kp1, n
+ xb(i) = 0.0d0
+ 110 continue
+ 120 continue
+ if (.not.cr) go to 140
+ do 130 i = 1, k
+ rsd(i) = 0.0d0
+ 130 continue
+ 140 continue
+ if (.not.cb) go to 190
+c
+c compute b.
+c
+ do 170 jj = 1, k
+ j = k - jj + 1
+ if (x(j,j) .ne. 0.0d0) go to 150
+ info = j
+c ......exit
+ go to 180
+ 150 continue
+ b(j) = b(j)/x(j,j)
+ if (j .eq. 1) go to 160
+ t = -b(j)
+ call daxpy8(j-1,t,x(1,j),1,b,1)
+ 160 continue
+ 170 continue
+ 180 continue
+ 190 continue
+ if (.not.cr .and. .not.cxb) go to 240
+c
+c compute rsd or xb as required.
+c
+ do 230 jj = 1, ju
+ j = ju - jj + 1
+ if (qraux(j) .eq. 0.0d0) go to 220
+ temp = x(j,j)
+ x(j,j) = qraux(j)
+ if (.not.cr) go to 200
+ t = -ddot8(n-j+1,x(j,j),1,rsd(j),1)/x(j,j)
+ call daxpy8(n-j+1,t,x(j,j),1,rsd(j),1)
+ 200 continue
+ if (.not.cxb) go to 210
+ t = -ddot8(n-j+1,x(j,j),1,xb(j),1)/x(j,j)
+ call daxpy8(n-j+1,t,x(j,j),1,xb(j),1)
+ 210 continue
+ x(j,j) = temp
+ 220 continue
+ 230 continue
+ 240 continue
+ 250 continue
+ return
+ end
+
+
diff --git a/src/vmux.f b/src/vmux.f
new file mode 100644
index 0000000..bb223e5
--- /dev/null
+++ b/src/vmux.f
@@ -0,0 +1,578 @@
+ subroutine qh4ulb(zqve1l, vvl1li, lku8xq)
+ implicit logical (a-z)
+ integer lku8xq, zqve1l(1), vvl1li(1)
+ integer xi1mqb, i1nkrb, w3gohz
+ w3gohz = 1
+ xi1mqb = lku8xq
+23000 if(.not.(xi1mqb.ge.1))goto 23002
+ do 23003 i1nkrb=1,xi1mqb
+ zqve1l(w3gohz) = i1nkrb
+ w3gohz = w3gohz+1
+23003 continue
+ xi1mqb=xi1mqb-1
+ goto 23000
+23002 continue
+ w3gohz = 1
+ do 23005 xi1mqb=1,lku8xq
+ do 23007 i1nkrb=xi1mqb,lku8xq
+ vvl1li(w3gohz) = i1nkrb
+ w3gohz = w3gohz+1
+23007 continue
+23005 continue
+ return
+ end
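+c
+c qh4ulb fills the (row,col) index pairs used throughout this file
+c for symmetric matrices stored in half (packed) form, diagonals
+c first.  Worked example for lku8xq = 3:
+c     zqve1l = 1 2 3 1 2 1
+c     vvl1li = 1 2 3 2 3 3
+c i.e. the pairs (1,1),(2,2),(3,3),(1,2),(2,3),(1,3): the diagonal,
+c then each superdiagonal in turn, lku8xq*(lku8xq+1)/2 pairs in all.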
+ integer function viamf(s17te9, xl6qgm, lku8xq, zqve1l, vvl1li)
+ integer s17te9, xl6qgm, lku8xq, zqve1l(1), vvl1li(1)
+ integer xi1mqb, j0qwtz
+ j0qwtz = lku8xq*(lku8xq+1)/2
+ do 23009 xi1mqb=1,j0qwtz
+ if(.not.((zqve1l(xi1mqb).eq.s17te9 .and. vvl1li(xi1mqb).eq.xl6qgm)
+ & .or.(zqve1l(xi1mqb).eq.xl6qgm .and. vvl1li(xi1mqb).eq.s17te9)))
+ &goto 23011
+ viamf = xi1mqb
+ return
+23011 continue
+23009 continue
+ viamf = 0
+ return
+ end
+ subroutine vm2af(mat, a, p1yjqz, zqve1l, vvl1li, nfiumb4, lku8xq,
+ &teola6)
+ implicit logical (a-z)
+ integer p1yjqz, zqve1l(p1yjqz), vvl1li(p1yjqz), nfiumb4, lku8xq,
+ &teola6
+ double precision mat(p1yjqz,nfiumb4), a(lku8xq,lku8xq,nfiumb4)
+ integer w3gohz, d9rjek, nd6mep, j0qwtz
+ j0qwtz = lku8xq * (lku8xq + 1) / 2
+ if(.not.(teola6 .eq. 1 .or. p1yjqz .ne. j0qwtz))goto 23013
+ w3gohz = 1
+23015 if(.not.(w3gohz.le.nfiumb4))goto 23017
+ d9rjek = 1
+23018 if(.not.(d9rjek.le.lku8xq))goto 23020
+ nd6mep = 1
+23021 if(.not.(nd6mep.le.lku8xq))goto 23023
+ a(nd6mep,d9rjek,w3gohz) = 0.0d0
+ nd6mep=nd6mep+1
+ goto 23021
+23023 continue
+ d9rjek=d9rjek+1
+ goto 23018
+23020 continue
+ w3gohz=w3gohz+1
+ goto 23015
+23017 continue
+23013 continue
+ do 23024 w3gohz=1,nfiumb4
+ do 23026 d9rjek=1,p1yjqz
+ a(zqve1l(d9rjek),vvl1li(d9rjek),w3gohz) = mat(d9rjek,w3gohz)
+ if(.not.(teola6 .eq. 0))goto 23028
+ a(vvl1li(d9rjek),zqve1l(d9rjek),w3gohz) = mat(d9rjek,w3gohz)
+23028 continue
+23026 continue
+23024 continue
+ return
+ end
+ subroutine mux22f(jrxg6l, jmwo0z, ghry8z, zkjqhi, zqve1l, vvl1li,
+ &nfiumb4, lku8xq, mbd8lk)
+ implicit logical (a-z)
+ integer zkjqhi, zqve1l(1), vvl1li(1), nfiumb4, lku8xq
+ double precision jrxg6l(zkjqhi,nfiumb4), jmwo0z(nfiumb4,lku8xq),
+ &ghry8z(lku8xq,nfiumb4), mbd8lk(lku8xq,lku8xq)
+ double precision qnk4zf
+ integer w3gohz, d9rjek, i1nkrb, one, teola6
+ one = 1
+ teola6 = 1
+ w3gohz = 1
+23030 if(.not.(w3gohz.le.nfiumb4))goto 23032
+ call vm2af(jrxg6l(1,w3gohz), mbd8lk, zkjqhi, zqve1l, vvl1li, one,
+ &lku8xq, teola6)
+ d9rjek = 1
+23033 if(.not.(d9rjek.le.lku8xq))goto 23035
+ qnk4zf = 0.0d0
+ i1nkrb = d9rjek
+23036 if(.not.(i1nkrb.le.lku8xq))goto 23038
+ qnk4zf = qnk4zf + mbd8lk(d9rjek,i1nkrb) * jmwo0z(w3gohz,i1nkrb)
+ i1nkrb=i1nkrb+1
+ goto 23036
+23038 continue
+ ghry8z(d9rjek,w3gohz) = qnk4zf
+ d9rjek=d9rjek+1
+ goto 23033
+23035 continue
+ w3gohz=w3gohz+1
+ goto 23030
+23032 continue
+ return
+ end
+ subroutine vbksf(jrxg6l, yg1jzv, lku8xq, nfiumb4, mbd8lk, zqve1l,
+ &vvl1li, zkjqhi)
+ implicit logical (a-z)
+ integer lku8xq, nfiumb4, zqve1l(1), vvl1li(1), zkjqhi
+ double precision jrxg6l(zkjqhi,nfiumb4), yg1jzv(lku8xq,nfiumb4),
+ &mbd8lk(lku8xq,lku8xq)
+ double precision qnk4zf
+ integer w3gohz, d9rjek, nd6mep, teola6, one
+ teola6 = 1
+ one = 1
+ w3gohz = 1
+23039 if(.not.(w3gohz.le.nfiumb4))goto 23041
+ call vm2af(jrxg6l(1,w3gohz), mbd8lk, zkjqhi, zqve1l, vvl1li, one,
+ &lku8xq, teola6)
+ d9rjek = lku8xq
+23042 if(.not.(d9rjek.ge.1))goto 23044
+ qnk4zf = yg1jzv(d9rjek,w3gohz)
+ nd6mep = d9rjek+1
+23045 if(.not.(nd6mep.le.lku8xq))goto 23047
+ qnk4zf = qnk4zf - mbd8lk(d9rjek,nd6mep) * yg1jzv(nd6mep,w3gohz)
+ nd6mep=nd6mep+1
+ goto 23045
+23047 continue
+ yg1jzv(d9rjek,w3gohz) = qnk4zf / mbd8lk(d9rjek,d9rjek)
+ d9rjek=d9rjek-1
+ goto 23042
+23044 continue
+ w3gohz=w3gohz+1
+ goto 23039
+23041 continue
+ return
+ end
+ subroutine vcholf(w8xfic, yg1jzv, lku8xq, c4uxow, ex7hfo)
+ implicit logical (a-z)
+ integer ex7hfo
+ integer lku8xq, c4uxow
+ double precision w8xfic(lku8xq,lku8xq), yg1jzv(lku8xq)
+ double precision qnk4zf, dsqrt
+ integer w3gohz, d9rjek, nd6mep
+ c4uxow=1
+ do 23048 w3gohz=1,lku8xq
+ qnk4zf = 0d0
+ do 23050 nd6mep=1,w3gohz-1
+ qnk4zf = qnk4zf + w8xfic(nd6mep,w3gohz) * w8xfic(nd6mep,w3gohz)
+23050 continue
+ w8xfic(w3gohz,w3gohz) = w8xfic(w3gohz,w3gohz) - qnk4zf
+ if(.not.(w8xfic(w3gohz,w3gohz) .le. 0d0))goto 23052
+ c4uxow = 0
+ return
+23052 continue
+ w8xfic(w3gohz,w3gohz) = dsqrt(w8xfic(w3gohz,w3gohz))
+ do 23054 d9rjek=w3gohz+1,lku8xq
+ qnk4zf = 0d0
+ do 23056 nd6mep=1,w3gohz-1
+ qnk4zf = qnk4zf + w8xfic(nd6mep,w3gohz) * w8xfic(nd6mep,d9rjek)
+23056 continue
+ w8xfic(w3gohz,d9rjek) = (w8xfic(w3gohz,d9rjek) - qnk4zf) / w8xfic(
+ &w3gohz,w3gohz)
+23054 continue
+23048 continue
+ if(.not.(ex7hfo .eq. 0))goto 23058
+ do 23060 w3gohz=2,lku8xq
+ do 23062 d9rjek=1,w3gohz-1
+ w8xfic(w3gohz,d9rjek) = 0.0d0
+23062 continue
+23060 continue
+ return
+23058 continue
+ do 23064 d9rjek=1,lku8xq
+ qnk4zf = yg1jzv(d9rjek)
+ do 23066 nd6mep=1,d9rjek-1
+ qnk4zf = qnk4zf - w8xfic(nd6mep,d9rjek) * yg1jzv(nd6mep)
+23066 continue
+ yg1jzv(d9rjek) = qnk4zf / w8xfic(d9rjek,d9rjek)
+23064 continue
+ d9rjek = lku8xq
+23068 if(.not.(d9rjek.ge.1))goto 23070
+ qnk4zf = yg1jzv(d9rjek)
+ nd6mep = d9rjek+1
+23071 if(.not.(nd6mep.le.lku8xq))goto 23073
+ qnk4zf = qnk4zf - w8xfic(d9rjek,nd6mep) * yg1jzv(nd6mep)
+ nd6mep=nd6mep+1
+ goto 23071
+23073 continue
+ yg1jzv(d9rjek) = qnk4zf / w8xfic(d9rjek,d9rjek)
+ d9rjek=d9rjek-1
+ goto 23068
+23070 continue
+ return
+ end
+ subroutine mux17f(jrxg6l, p3vlea, lku8xq, o9ljyn, nfiumb4, mbd8lk,
+ & cm6nof, zqve1l, vvl1li, zkjqhi, c4bdmu)
+ implicit logical (a-z)
+ integer zkjqhi, lku8xq, o9ljyn, nfiumb4, zqve1l(1), vvl1li(1),
+ &c4bdmu
+ double precision jrxg6l(zkjqhi,nfiumb4), p3vlea(c4bdmu,o9ljyn),
+ &mbd8lk(lku8xq,lku8xq), cm6nof(lku8xq,o9ljyn)
+ double precision qnk4zf
+ integer w3gohz, d9rjek, nd6mep, i1nkrb
+ do 23074 d9rjek=1,lku8xq
+ do 23076 w3gohz=1,lku8xq
+ mbd8lk(w3gohz,d9rjek) = 0.0d0
+23076 continue
+23074 continue
+ do 23078 w3gohz=1,nfiumb4
+ do 23080 i1nkrb=1,zkjqhi
+ mbd8lk(zqve1l(i1nkrb), vvl1li(i1nkrb)) = jrxg6l(i1nkrb,w3gohz)
+23080 continue
+ do 23082 nd6mep=1,o9ljyn
+ do 23084 d9rjek=1,lku8xq
+ cm6nof(d9rjek,nd6mep) = p3vlea((w3gohz-1)*lku8xq+d9rjek,nd6mep)
+23084 continue
+23082 continue
+ do 23086 nd6mep=1,o9ljyn
+ do 23088 d9rjek=1,lku8xq
+ qnk4zf = 0d0
+ do 23090 i1nkrb=d9rjek,lku8xq
+ qnk4zf = qnk4zf + mbd8lk(d9rjek,i1nkrb) * cm6nof(i1nkrb,nd6mep)
+23090 continue
+ p3vlea((w3gohz-1)*lku8xq+d9rjek,nd6mep) = qnk4zf
+23088 continue
+23086 continue
+23078 continue
+ return
+ end
+ subroutine vrinvf9(jrxg6l, ldr, lku8xq, c4uxow, ku0goz, bgu6fw)
+ implicit logical (a-z)
+ integer ldr, lku8xq, c4uxow
+ double precision jrxg6l(ldr,lku8xq), ku0goz(lku8xq,lku8xq),
+ &bgu6fw(lku8xq,lku8xq)
+ double precision qnk4zf
+ integer d9rjek, nd6mep, col, mavy5hmod
+ c4uxow = 1
+ d9rjek = 1
+23092 if(.not.(d9rjek.le.lku8xq))goto 23094
+ col = 1
+23095 if(.not.(col.le.lku8xq))goto 23097
+ bgu6fw(d9rjek,col) = 0.0d0
+ col=col+1
+ goto 23095
+23097 continue
+ d9rjek=d9rjek+1
+ goto 23092
+23094 continue
+ col = 1
+23098 if(.not.(col.le.lku8xq))goto 23100
+ d9rjek = col
+23101 if(.not.(d9rjek.ge.1))goto 23103
+ if(.not.(d9rjek .eq. col))goto 23104
+ qnk4zf = 1.0d0
+ goto 23105
+23104 continue
+ qnk4zf = 0.0d0
+23105 continue
+ nd6mep = d9rjek+1
+23106 if(.not.(nd6mep.le.col))goto 23108
+ qnk4zf = qnk4zf - jrxg6l(d9rjek,nd6mep) * bgu6fw(nd6mep,col)
+ nd6mep=nd6mep+1
+ goto 23106
+23108 continue
+ if(.not.(jrxg6l(d9rjek,d9rjek) .eq. 0.0d0))goto 23109
+ c4uxow = 0
+ goto 23110
+23109 continue
+ bgu6fw(d9rjek,col) = qnk4zf / jrxg6l(d9rjek,d9rjek)
+23110 continue
+ d9rjek=d9rjek-1
+ goto 23101
+23103 continue
+ col=col+1
+ goto 23098
+23100 continue
+ d9rjek = 1
+23111 if(.not.(d9rjek.le.lku8xq))goto 23113
+ col = d9rjek
+23114 if(.not.(col.le.lku8xq))goto 23116
+ if(.not.(d9rjek .lt. col))goto 23117
+ mavy5hmod = col
+ goto 23118
+23117 continue
+ mavy5hmod = d9rjek
+23118 continue
+ qnk4zf = 0.0d0
+ nd6mep = mavy5hmod
+23119 if(.not.(nd6mep.le.lku8xq))goto 23121
+ qnk4zf = qnk4zf + bgu6fw(d9rjek,nd6mep) * bgu6fw(col,nd6mep)
+ nd6mep=nd6mep+1
+ goto 23119
+23121 continue
+ ku0goz(d9rjek,col) = qnk4zf
+ ku0goz(col,d9rjek) = qnk4zf
+ col=col+1
+ goto 23114
+23116 continue
+ d9rjek=d9rjek+1
+ goto 23111
+23113 continue
+ return
+ end
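+c
+c Nb. vrinvf9 back-solves R * B = I column by column (B is held in
+c the workspace bgu6fw), then forms
+c     ku0goz = B * B' = inverse(R' R)
+c for the upper triangular R supplied in jrxg6l; this is the
+c unscaled covariance matrix of a least squares fit with QR factor
+c R.  A zero diagonal element of R sets c4uxow = 0.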
+ subroutine atez9d(xx, ghry8z)
+ implicit logical (a-z)
+ double precision xx, ghry8z
+ double precision x, y, j0izmn, qnk4zf, mu4ygk(6)
+ integer d9rjek
+ mu4ygk(1)= 76.18009172947146d0
+ mu4ygk(2)= -86.50532032941677d0
+ mu4ygk(3)= 24.01409824083091d0
+ mu4ygk(4)= -1.231739572450155d0
+ mu4ygk(5)= 0.1208650973866179d-2
+ mu4ygk(6)= -0.5395239384953d-5
+ x = xx
+ y = xx
+ j0izmn = x+5.50d0
+ j0izmn = j0izmn - (x+0.50d0) * dlog(j0izmn)
+ qnk4zf=1.000000000190015d0
+ d9rjek=1
+23122 if(.not.(d9rjek.le.6))goto 23124
+ y = y + 1.0d0
+ qnk4zf = qnk4zf + mu4ygk(d9rjek)/y
+ d9rjek=d9rjek+1
+ goto 23122
+23124 continue
+ ghry8z = -j0izmn + dlog(2.5066282746310005d0 * qnk4zf / x)
+ return
+ end
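+c
+c Nb. atez9d above is the standard 6-coefficient Lanczos series for
+c log-gamma (the familiar 76.18009... coefficients), so ghry8z
+c returns log(Gamma(xx)) for xx .gt. 0.  For example,
+c     call atez9d(6.0d0, g)
+c gives g = log(120), about 4.7875.  enbin8 below uses it to build
+c negative binomial log-probabilities via
+c     log Gamma(y+k) - log Gamma(k) - log(y!).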
+ subroutine enbin9(qe3jcd, gq815b, xkcm3b, ogq67o, n, c4uxow,
+ &lzgs0f, hmr3dx, jftq1, nh2qxl)
+ implicit logical (a-z)
+ integer n, c4uxow, lzgs0f, nh2qxl
+ double precision qe3jcd(n, lzgs0f), gq815b(n, lzgs0f), xkcm3b(n,
+ &lzgs0f), ogq67o, hmr3dx, jftq1
+ integer w3gohz, myx3od
+ double precision nh5zwa, pohw8d, ydb, dyb3po1, qbca1x, pvl5mc,
+ &pdjzm4, rxe0so, epx9jf, qnk4zf, scnrp6
+ real yogfz6
+ if(.not.(ogq67o .le. 0.80d0 .or. ogq67o .ge. 1.0d0))goto 23125
+ c4uxow = 0
+ return
+23125 continue
+ pohw8d = 100.0d0 * jftq1
+ nh5zwa = 0.001d0
+ c4uxow = 1
+ myx3od=1
+23127 if(.not.(myx3od.le.lzgs0f))goto 23129
+ w3gohz=1
+23130 if(.not.(w3gohz.le.n))goto 23132
+ dyb3po1 = xkcm3b(w3gohz,myx3od) / gq815b(w3gohz,myx3od)
+ if(.not.((dyb3po1 .lt. nh5zwa) .or.
+ &(xkcm3b(w3gohz,myx3od) .gt. 1.0d5)))goto 23133
+ qe3jcd(w3gohz,myx3od) = -xkcm3b(w3gohz,myx3od) * (1.0d0 + gq815b(
+ &w3gohz,myx3od)/(gq815b(w3gohz,myx3od) + xkcm3b(w3gohz,myx3od))) /
+ &gq815b(w3gohz,myx3od)**2
+ if(.not.(qe3jcd(w3gohz,myx3od) .gt. -pohw8d))goto 23135
+ qe3jcd(w3gohz,myx3od) = -pohw8d
+23135 continue
+ goto 20
+23133 continue
+ qnk4zf = 0.0d0
+ pvl5mc = gq815b(w3gohz,myx3od) / (gq815b(w3gohz,myx3od) + xkcm3b(
+ &w3gohz,myx3od))
+ pdjzm4 = 1.0d0 - pvl5mc
+ yogfz6 = gq815b(w3gohz,myx3od)
+ if(.not.(pvl5mc .lt. pohw8d))goto 23137
+ pvl5mc = pohw8d
+23137 continue
+ if(.not.(pdjzm4 .lt. pohw8d))goto 23139
+ pdjzm4 = pohw8d
+23139 continue
+ qbca1x = 100.0d0 + 15.0d0 * xkcm3b(w3gohz,myx3od)
+ if(.not.(qbca1x .lt. nh2qxl))goto 23141
+ qbca1x = nh2qxl
+23141 continue
+ rxe0so = pvl5mc ** yogfz6
+ hmr3dx = rxe0so
+ scnrp6 = (1.0d0 - hmr3dx) / gq815b(w3gohz,myx3od)**2
+ qnk4zf = qnk4zf + scnrp6
+ ydb = 1.0d0
+ rxe0so = gq815b(w3gohz,myx3od) * pdjzm4 * rxe0so
+ hmr3dx = hmr3dx + rxe0so
+ scnrp6 = (1.0d0 - hmr3dx) / (gq815b(w3gohz,myx3od) + ydb)**2
+ qnk4zf = qnk4zf + scnrp6
+ ydb = 2.0d0
+23143 if(.not.(((hmr3dx .le. ogq67o) .or. (scnrp6 .gt. 1.0d-4)) .and.(
+ &ydb .lt. qbca1x)))goto 23144
+ rxe0so = (gq815b(w3gohz,myx3od) - 1.0d0 + ydb) * pdjzm4 * rxe0so /
+ & ydb
+ hmr3dx = hmr3dx + rxe0so
+ scnrp6 = (1.0d0 - hmr3dx) / (gq815b(w3gohz,myx3od) + ydb)**2
+ qnk4zf = qnk4zf + scnrp6
+ ydb = ydb + 1.0d0
+ goto 23143
+23144 continue
+ qe3jcd(w3gohz,myx3od) = -qnk4zf
+20 epx9jf = 0.0d0
+ w3gohz=w3gohz+1
+ goto 23130
+23132 continue
+ myx3od=myx3od+1
+ goto 23127
+23129 continue
+ return
+ end
+ subroutine enbin8(qe3jcd, gq815b, ncrb2f, ogq67o, nfiumb4, c4uxow,
+ & lzgs0f, hmr3dx, jftq1)
+ implicit logical (a-z)
+ integer nfiumb4, c4uxow, lzgs0f
+ double precision qe3jcd(nfiumb4, lzgs0f), gq815b(nfiumb4, lzgs0f),
+ & ncrb2f(nfiumb4, lzgs0f), ogq67o, hmr3dx, jftq1
+ integer w3gohz, myx3od, qbca1x
+ double precision rxe0so, mw6reg, xkwp2m, xndw5e, qnk4zf, d1, d2,
+ &scnrp6, onemeps
+ logical pok1, pok2, pok12
+ double precision nh5zwa, hntu8v, xkcm3b, pohw8d, ydb, kbig
+ d1 = 0.0d0
+ d2 = 0.0d0
+ pohw8d = -100.0d0 * jftq1
+ qbca1x = 3000
+ if(.not.(ogq67o .le. 0.80d0 .or. ogq67o .ge. 1.0d0))goto 23145
+ c4uxow = 0
+ return
+23145 continue
+ kbig = 1.0d4
+ nh5zwa = 0.001d0
+ onemeps = 1.0d0 - jftq1
+ hntu8v = 1.0d0 / (1.0d0 + nh5zwa)
+ c4uxow = 1
+ myx3od=1
+23147 if(.not.(myx3od.le.lzgs0f))goto 23149
+ w3gohz=1
+23150 if(.not.(w3gohz.le.nfiumb4))goto 23152
+ if(.not.(gq815b(w3gohz,myx3od) .gt. kbig))goto 23153
+ gq815b(w3gohz,myx3od) = kbig
+23153 continue
+ if(.not.(ncrb2f(w3gohz,myx3od) .lt. nh5zwa))goto 23155
+ ncrb2f(w3gohz,myx3od) = nh5zwa
+23155 continue
+ if(.not.((ncrb2f(w3gohz,myx3od) .gt. hntu8v)))goto 23157
+ xkcm3b = gq815b(w3gohz,myx3od) * (1.0d0/ncrb2f(w3gohz,myx3od) - 1.
+ &0d0)
+ qe3jcd(w3gohz,myx3od) = -xkcm3b * (1.0d0 + gq815b(w3gohz,myx3od)/(
+ &gq815b(w3gohz,myx3od) + xkcm3b)) / gq815b(w3gohz,myx3od)**2
+ if(.not.(qe3jcd(w3gohz,myx3od) .gt. pohw8d))goto 23159
+ qe3jcd(w3gohz,myx3od) = pohw8d
+23159 continue
+ goto 20
+23157 continue
+ qnk4zf = 0.0d0
+ pok1 = .true.
+ pok2 = ncrb2f(w3gohz,myx3od) .lt. (1.0d0-jftq1)
+ pok12 = pok1 .and. pok2
+ if(.not.(pok12))goto 23161
+ d2 = gq815b(w3gohz,myx3od) * dlog(ncrb2f(w3gohz,myx3od))
+ hmr3dx = dexp(d2)
+ goto 23162
+23161 continue
+ hmr3dx = 0.0d0
+23162 continue
+ scnrp6 = (1.0d0 - hmr3dx) / gq815b(w3gohz,myx3od)**2
+ qnk4zf = qnk4zf + scnrp6
+ call atez9d(gq815b(w3gohz,myx3od), xkwp2m)
+ ydb = 1.0d0
+ call atez9d(ydb + gq815b(w3gohz,myx3od), mw6reg)
+ xndw5e = 0.0d0
+ if(.not.(pok12))goto 23163
+ d1 = dlog(1.0d0 - ncrb2f(w3gohz,myx3od))
+ rxe0so = dexp(ydb * d1 + d2 + mw6reg - xkwp2m - xndw5e)
+ goto 23164
+23163 continue
+ rxe0so = 0.0d0
+23164 continue
+ hmr3dx = hmr3dx + rxe0so
+ scnrp6 = (1.0d0 - hmr3dx) / (gq815b(w3gohz,myx3od) + ydb)**2
+ qnk4zf = qnk4zf + scnrp6
+ ydb = 2.0d0
+23165 if(.not.((hmr3dx .le. ogq67o) .or. (scnrp6 .gt. 1.0d-4)))
+ &goto 23166
+ mw6reg = mw6reg + dlog(ydb + gq815b(w3gohz,myx3od) - 1.0d0)
+ xndw5e = xndw5e + dlog(ydb)
+ if(.not.(pok12))goto 23167
+ rxe0so = dexp(ydb * d1 + d2 + mw6reg - xkwp2m - xndw5e)
+ goto 23168
+23167 continue
+ rxe0so = 0.0d0
+23168 continue
+ hmr3dx = hmr3dx + rxe0so
+ scnrp6 = (1.0d0 - hmr3dx) / (gq815b(w3gohz,myx3od) + ydb)**2
+ qnk4zf = qnk4zf + scnrp6
+ ydb = ydb + 1.0d0
+ if(.not.(ydb .gt. 1.0d3))goto 23169
+ goto 21
+23169 continue
+ goto 23165
+23166 continue
+21 qe3jcd(w3gohz,myx3od) = -qnk4zf
+20 mw6reg = 0.0d0
+ w3gohz=w3gohz+1
+ goto 23150
+23152 continue
+ myx3od=myx3od+1
+ goto 23147
+23149 continue
+ return
+ end
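+c
+c Nb. enbin9 and enbin8 above both accumulate, per observation, the
+c tail sum
+c     sum_{y .ge. 0} (1 - F(y)) / (k + y)**2
+c where F is the negative binomial cdf with size k = gq815b and
+c probability k/(k+mu) (enbin9, mu = xkcm3b) or ncrb2f (enbin8),
+c switching to a closed-form approximation in the extreme cases.
+c By the identity sum_y P(Y .gt. y) g(y) = E( sum_{i=0}^{Y-1} g(i) ),
+c this is the expectation needed for the expected information of k;
+c qe3jcd is set to minus the sum.  Summation stops once the cdf
+c exceeds ogq67o and the increment scnrp6 is 1.0d-4 or smaller.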
+ subroutine mbessi0(yg1jzv, nfiumb4, xt3fko, d0, d1, d2, gqxvz8,
+ &kqoy6w)
+ implicit logical (a-z)
+ integer nfiumb4, xt3fko, gqxvz8, pga6nus
+ double precision yg1jzv(nfiumb4), d0(nfiumb4), d1(nfiumb4), d2(
+ &nfiumb4), kqoy6w
+ integer w3gohz, nd6mep
+ double precision f0, t0, m0, f1, t1, m1, f2, t2, m2
+ double precision toobig
+ toobig = 20.0d0
+ if(.not.(.not.(xt3fko .eq. 0 .or. xt3fko .eq. 1 .or. xt3fko .eq.
+ &2)))goto 23171
+ gqxvz8 = 1
+ return
+23171 continue
+ gqxvz8 = 0
+23172 continue
+ do 23173 nd6mep=1,nfiumb4
+ if(.not.(dabs(yg1jzv(nd6mep)) .gt. toobig))goto 23175
+ gqxvz8 = 1
+ return
+23175 continue
+ t1 = yg1jzv(nd6mep) / 2.0d0
+ f1 = t1
+ t0 = t1 * t1
+ f0 = 1.0d0 + t0
+ t2 = 0.50d0
+ f2 = t2
+ pga6nus = 15
+ if(.not.(dabs(yg1jzv(nd6mep)) .gt. 10))goto 23177
+ pga6nus = 25
+23177 continue
+ if(.not.(dabs(yg1jzv(nd6mep)) .gt. 15))goto 23179
+ pga6nus = 35
+23179 continue
+ if(.not.(dabs(yg1jzv(nd6mep)) .gt. 20))goto 23181
+ pga6nus = 40
+23181 continue
+ if(.not.(dabs(yg1jzv(nd6mep)) .gt. 30))goto 23183
+ pga6nus = 55
+23183 continue
+ do 23185 w3gohz=1,pga6nus
+ m0 = (yg1jzv(nd6mep) / (2.0d0*(w3gohz+1.0d0))) ** 2
+ m1 = m0 * (1.0d0 + 1.0d0/w3gohz)
+ m2 = m1 * (2.0d0*w3gohz + 1.0d0) / (2.0d0*w3gohz - 1.0d0)
+ t0 = t0 * m0
+ t1 = t1 * m1
+ t2 = t2 * m2
+ f0 = f0 + t0
+ f1 = f1 + t1
+ f2 = f2 + t2
+ if(.not.((dabs(t0) .lt. kqoy6w) .and. (dabs(t1) .lt. kqoy6w)
+ &.and. (dabs(t2) .lt. kqoy6w)))goto 23187
+ goto 23186
+23187 continue
+23185 continue
+23186 continue
+ if(.not.(0 .le. xt3fko))goto 23189
+ d0(nd6mep) = f0
+23189 continue
+ if(.not.(1 .le. xt3fko))goto 23191
+ d1(nd6mep) = f1
+23191 continue
+ if(.not.(2 .le. xt3fko))goto 23193
+ d2(nd6mep) = f2
+23193 continue
+23173 continue
+ return
+ end
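+c
+c Nb. mbessi0 above evaluates the modified Bessel function
+c     I0(x) = sum_{k .ge. 0} ( (x/2)**k / k! )**2
+c by its power series, with f1 and f2 apparently its first and
+c second derivatives; xt3fko = 0, 1 or 2 selects how many of d0,
+c d1, d2 are filled.  The number of series terms (15, 25 or 35)
+c grows with |x|, and |x| .gt. 20 sets the error flag gqxvz8 = 1.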
diff --git a/src/zeta.f b/src/zeta.f
new file mode 100644
index 0000000..4ff9991
--- /dev/null
+++ b/src/zeta.f
@@ -0,0 +1,138 @@
+ subroutine vzetawr(x, ans, deriv, nn)
+ double precision x(nn), ans(nn), zeta8, dzeta8, ddzeta8
+ integer deriv, nn, i
+ double precision b2(12)
+ call vbecoef(b2)
+ if(.not.(deriv.eq.0))goto 23000
+ do 23002 i=1,nn
+ ans(i) = zeta8(x(i), b2)
+23002 continue
+23000 continue
+ if(.not.(deriv.eq.1))goto 23004
+ do 23006 i=1,nn
+ ans(i) = dzeta8(x(i), b2)
+23006 continue
+23004 continue
+ if(.not.(deriv.eq.2))goto 23008
+ do 23010 i=1,nn
+ ans(i) = ddzeta8(x(i), b2)
+23010 continue
+23008 continue
+ return
+ end
+ double precision function zeta8(s, b2)
+ double precision s
+ double precision b2(12)
+ integer a, k
+ integer m, n, m2
+ double precision sum, p, a2, fred
+ a = 12
+ k = 8
+ a2 = a * a
+ p = s / 2.0d0 / a2
+ sum = 1.0d0 / (s - 1.0d0) + 0.5d0 / a + b2(1) * p
+ do 23012 m = 2,k
+ m2 = m + m
+ p = p * (s + m2 - 3.0d0) * (s + m2 - 2.0d0) / (m2 - 1.0d0) / m2 /
+ &a2
+ sum = sum + p * b2(m)
+23012 continue
+ fred = dexp((s - 1.0d0) * dlog( dfloat(a) ))
+ sum = 1.0d0 + sum / fred
+ do 23014 n = 2,(a - 1)
+ sum = sum + dexp(-s * dlog( dfloat(n) ))
+23014 continue
+ zeta8 = sum
+ return
+ end
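+c
+c Nb. zeta8 is Euler-Maclaurin summation for the Riemann zeta
+c function: with a = 12 leading terms and k = 8 corrections,
+c     zeta(s) = sum_{n=1}^{a-1} n**(-s) + a**(1-s)/(s-1) + a**(-s)/2
+c               + Bernoulli-number correction terms built from b2,
+c where b2(m) = B(2m) is set up by vbecoef at the end of this file;
+c dzeta8 and ddzeta8 differentiate the same expansion term by term.
+c As a hypothetical check, after call vbecoef(b2) the value
+c zeta8(2.0d0, b2) should be close to pi**2/6 = 1.6449.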
+ double precision function dzeta8(s, b2)
+ double precision s
+ double precision b2(12)
+ integer a, k
+ integer m, n, m2
+ double precision sum, p, q, a2, loga, logn
+ double precision fred
+ a = 12
+ k = 8
+ loga = dlog( dfloat(a) )
+ a2 = a * a
+ p = s / 2.0d0 / a2
+ q = 1.0d0 / s - loga
+ sum = b2(1) * p * q
+ do 23016 m = 2,k
+ m2 = m + m
+ p = p * (s + m2 - 3.0d0) * (s + m2 - 2.0d0) / (m2 - 1.0d0) / m2 /
+ &a2
+ q = q + 1.0d0 / (s + m2 - 3.0d0) + 1.0d0 / (s + m2 - 2.0d0)
+ sum = sum + b2(m) * p * q
+23016 continue
+ fred = dexp((1.0d0 - s) * loga)
+ sum = (sum - 1.0d0/ (s - 1.0d0)**2 - loga * (1.0d0/(s - 1.0d0) +
+ &0.50d0/a)) * fred
+ do 23018 n = 2, (a - 1)
+ logn = dlog( dfloat(n) )
+ sum = sum - logn / dexp(logn * s)
+23018 continue
+ dzeta8 = sum
+ return
+ end
+ double precision function upsilon8(s, b2)
+ double precision s, dzeta8, zeta8, b2(12)
+ upsilon8 = -dzeta8(s, b2) / zeta8(s, b2)
+ return
+ end
+ double precision function ddzeta8(s, b2)
+ double precision s, b2(12)
+ integer a, k
+ integer m, n, m2
+ double precision sum, p, q, r, a2, loga, logn
+ double precision fred, fred2
+ a = 12
+ k = 8
+ loga = dlog( dfloat(a) )
+ a2 = a * a
+ p = s / 2.0d0 / a2
+ q = 1.0d0 / s - loga
+ r = 1.0d0 / s / s
+ sum = b2(1) * p * (q * q - r)
+ do 23020 m = 2,k
+ m2 = m + m
+ p = p * (s + m2 - 3.0d0) * (s + m2 - 2.0d0) / (m2 - 1.0d0) / m2 /
+ &a2
+ q = q + 1.0d0 / (s + m2 - 3.0d0) + 1.0d0 / (s + m2 - 2.0d0)
+ r = r + 1.0d0 / (s + m2 - 3.0d0)**2 + 1.0d0 / (s + m2 - 2.0d0)**2
+ sum = sum + b2(m) * p * (q * q - r)
+23020 continue
+ fred = dexp((1.0d0 - s) * loga)
+ fred2 = loga**2 * (1.0d0/(s - 1.0d0) + 0.5d0/a)
+ sum = (sum + 2.0d0 / (s - 1.0d0)**3 + 2.0d0 * loga / (s - 1.0d0)**
+ &2 + fred2) * fred
+ do 23022 n = 2,(a - 1)
+ logn = dlog( dfloat(n) )
+ sum = sum + (logn)**2 / dexp(logn * s)
+23022 continue
+ ddzeta8 = sum
+ return
+ end
+ double precision function duds(s, b2)
+ double precision s, b2(12), zs, zeta8, dzeta8, ddzeta8
+ zs = zeta8(s, b2)
+ duds = (dzeta8(s, b2) / zs)**2 - ddzeta8(s, b2) / zs
+ return
+ end
+ subroutine vbecoef(b2)
+ double precision b2(12)
+ b2(1) = 1.0d0 / 6.0d0
+ b2(2) = -1.0d0 / 30.0d0
+ b2(3) = 1.0d0 / 42.0d0
+ b2(4) = -1.0d0 / 30.0d0
+ b2(5) = 5.0d0 / 66.0d0
+ b2(6) = -691.0d0 / 2730.0d0
+ b2(7) = 7.0d0 / 6.0d0
+ b2(8) = -3617.0d0 / 510.0d0
+ b2(9) = 43867.0d0 / 798.0d0
+ b2(10) = -174611.0d0 / 330.0d0
+ b2(11) = 854513.0d0 / 138.0d0
+ b2(12) = -236364091.0d0 / 2730.0d0
+ return
+ end
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list